author     Muxi Yan <muxi@users.noreply.github.com>  2018-12-10 10:15:45 -0800
committer  GitHub <noreply@github.com>  2018-12-10 10:15:45 -0800
commit     037173217011b38ff4675d028eba27a068db5975 (patch)
tree       7bf135f4481365dad05cb4ce181cc8259647a3da /src
parent     3f00d61b04874cc5f0159c16f2c598a8f2fb93a7 (diff)
parent     60f2d379fec3364ff59f4f0d463b16275525863d (diff)
Merge branch 'master' into config-isolation
Diffstat (limited to 'src')
-rw-r--r--src/compiler/cpp_generator.cc325
-rw-r--r--src/compiler/csharp_generator.cc37
-rw-r--r--src/core/ext/filters/client_channel/client_channel.cc490
-rw-r--r--src/core/ext/filters/client_channel/health/health_check_client.cc18
-rw-r--r--src/core/ext/filters/client_channel/health/health_check_client.h7
-rw-r--r--src/core/ext/filters/client_channel/http_connect_handshaker.cc2
-rw-r--r--src/core/ext/filters/client_channel/http_proxy.cc2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.cc2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.h24
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc26
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc103
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc82
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/subchannel_list.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/xds/xds.cc458
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.h2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.cc5
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.h4
-rw-r--r--src/core/ext/filters/client_channel/method_params.cc178
-rw-r--r--src/core/ext/filters/client_channel/method_params.h78
-rw-r--r--src/core/ext/filters/client_channel/parse_address.h2
-rw-r--r--src/core/ext/filters/client_channel/resolver.cc2
-rw-r--r--src/core/ext/filters/client_channel/resolver.h2
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc44
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc36
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h1
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc12
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h4
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc3
-rw-r--r--src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc17
-rw-r--r--src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h6
-rw-r--r--src/core/ext/filters/client_channel/resolver_factory.h2
-rw-r--r--src/core/ext/filters/client_channel/resolver_result_parsing.cc369
-rw-r--r--src/core/ext/filters/client_channel/resolver_result_parsing.h146
-rw-r--r--src/core/ext/filters/client_channel/subchannel.cc32
-rw-r--r--src/core/ext/filters/client_channel/subchannel.h2
-rw-r--r--src/core/ext/filters/client_channel/subchannel_index.cc47
-rw-r--r--src/core/ext/filters/deadline/deadline_filter.cc42
-rw-r--r--src/core/ext/filters/deadline/deadline_filter.h22
-rw-r--r--src/core/ext/filters/http/client/http_client_filter.cc48
-rw-r--r--src/core/ext/filters/http/message_compress/message_compress_filter.cc44
-rw-r--r--src/core/ext/filters/http/server/http_server_filter.cc51
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_filter.cc2
-rw-r--r--src/core/ext/filters/message_size/message_size_filter.cc95
-rw-r--r--src/core/ext/transport/chttp2/client/chttp2_connector.cc5
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc2
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.cc39
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc2
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.cc400
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.h7
-rw-r--r--src/core/ext/transport/chttp2/transport/context_list.cc51
-rw-r--r--src/core/ext/transport/chttp2/transport/context_list.h72
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.cc14
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.h22
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.cc2
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.cc53
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.cc12
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.h21
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h252
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.cc5
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.cc153
-rw-r--r--src/core/ext/transport/inproc/inproc_transport.cc578
-rw-r--r--src/core/lib/channel/channel_stack.cc9
-rw-r--r--src/core/lib/channel/channel_stack.h8
-rw-r--r--src/core/lib/channel/channel_stack_builder.cc16
-rw-r--r--src/core/lib/channel/channel_stack_builder.h8
-rw-r--r--src/core/lib/channel/channelz.cc84
-rw-r--r--src/core/lib/channel/channelz.h16
-rw-r--r--src/core/lib/channel/channelz_registry.cc29
-rw-r--r--src/core/lib/channel/channelz_registry.h6
-rw-r--r--src/core/lib/channel/context.h8
-rw-r--r--src/core/lib/debug/trace.cc3
-rw-r--r--src/core/lib/debug/trace.h8
-rw-r--r--src/core/lib/gpr/arena.cc121
-rw-r--r--src/core/lib/gpr/arena.h2
-rw-r--r--src/core/lib/gprpp/inlined_vector.h44
-rw-r--r--src/core/lib/gprpp/orphanable.h100
-rw-r--r--src/core/lib/gprpp/ref_counted.h230
-rw-r--r--src/core/lib/gprpp/ref_counted_ptr.h42
-rw-r--r--src/core/lib/iomgr/buffer_list.cc21
-rw-r--r--src/core/lib/iomgr/buffer_list.h13
-rw-r--r--src/core/lib/iomgr/call_combiner.cc54
-rw-r--r--src/core/lib/iomgr/call_combiner.h35
-rw-r--r--src/core/lib/iomgr/closure.h1
-rw-r--r--src/core/lib/iomgr/dynamic_annotations.h67
-rw-r--r--src/core/lib/iomgr/endpoint.cc4
-rw-r--r--src/core/lib/iomgr/endpoint.h3
-rw-r--r--src/core/lib/iomgr/endpoint_cfstream.cc5
-rw-r--r--src/core/lib/iomgr/endpoint_pair_posix.cc4
-rw-r--r--src/core/lib/iomgr/ev_epoll1_linux.cc4
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.cc4
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.cc4
-rw-r--r--src/core/lib/iomgr/ev_posix.cc22
-rw-r--r--src/core/lib/iomgr/ev_posix.h10
-rw-r--r--src/core/lib/iomgr/fork_posix.cc2
-rw-r--r--src/core/lib/iomgr/internal_errqueue.cc39
-rw-r--r--src/core/lib/iomgr/internal_errqueue.h8
-rw-r--r--src/core/lib/iomgr/iomgr.cc7
-rw-r--r--src/core/lib/iomgr/iomgr.h4
-rw-r--r--src/core/lib/iomgr/iomgr_custom.cc4
-rw-r--r--src/core/lib/iomgr/iomgr_internal.cc4
-rw-r--r--src/core/lib/iomgr/iomgr_internal.h4
-rw-r--r--src/core/lib/iomgr/iomgr_posix.cc7
-rw-r--r--src/core/lib/iomgr/iomgr_posix_cfstream.cc7
-rw-r--r--src/core/lib/iomgr/iomgr_windows.cc5
-rw-r--r--src/core/lib/iomgr/polling_entity.h8
-rw-r--r--src/core/lib/iomgr/port.h3
-rw-r--r--src/core/lib/iomgr/resolve_address.h2
-rw-r--r--src/core/lib/iomgr/resource_quota.cc74
-rw-r--r--src/core/lib/iomgr/resource_quota.h27
-rw-r--r--src/core/lib/iomgr/socket_utils_common_posix.cc11
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.cc2
-rw-r--r--src/core/lib/iomgr/tcp_custom.cc5
-rw-r--r--src/core/lib/iomgr/tcp_posix.cc105
-rw-r--r--src/core/lib/iomgr/tcp_windows.cc99
-rw-r--r--src/core/lib/security/context/security_context.cc33
-rw-r--r--src/core/lib/security/context/security_context.h46
-rw-r--r--src/core/lib/security/credentials/credentials.h4
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.cc221
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.h5
-rw-r--r--src/core/lib/security/transport/client_auth_filter.cc44
-rw-r--r--src/core/lib/security/transport/secure_endpoint.cc100
-rw-r--r--src/core/lib/security/transport/security_handshaker.cc3
-rw-r--r--src/core/lib/security/transport/server_auth_filter.cc70
-rw-r--r--src/core/lib/surface/call.cc180
-rw-r--r--src/core/lib/surface/call.h4
-rw-r--r--src/core/lib/surface/channel.cc26
-rw-r--r--src/core/lib/surface/channel.h3
-rw-r--r--src/core/lib/surface/completion_queue.cc2
-rw-r--r--src/core/lib/surface/init.cc2
-rw-r--r--src/core/lib/surface/server.cc248
-rw-r--r--src/core/lib/surface/server.h13
-rw-r--r--src/core/lib/surface/version.cc2
-rw-r--r--src/core/lib/transport/metadata_batch.h6
-rw-r--r--src/core/lib/transport/static_metadata.cc449
-rw-r--r--src/core/lib/transport/static_metadata.h146
-rw-r--r--src/core/lib/transport/transport.cc3
-rw-r--r--src/core/lib/transport/transport.h74
-rw-r--r--src/core/lib/uri/uri_parser.cc (renamed from src/core/ext/filters/client_channel/uri_parser.cc)  2
-rw-r--r--src/core/lib/uri/uri_parser.h (renamed from src/core/ext/filters/client_channel/uri_parser.h)  6
-rw-r--r--src/core/plugin_registry/grpc_cronet_plugin_registry.cc4
-rw-r--r--src/core/plugin_registry/grpc_plugin_registry.cc4
-rw-r--r--src/core/tsi/alts/handshaker/alts_handshaker_client.cc440
-rw-r--r--src/core/tsi/alts/handshaker/alts_handshaker_client.h82
-rw-r--r--src/core/tsi/alts/handshaker/alts_shared_resource.cc83
-rw-r--r--src/core/tsi/alts/handshaker/alts_shared_resource.h73
-rw-r--r--src/core/tsi/alts/handshaker/alts_tsi_event.cc75
-rw-r--r--src/core/tsi/alts/handshaker/alts_tsi_event.h93
-rw-r--r--src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc294
-rw-r--r--src/core/tsi/alts/handshaker/alts_tsi_handshaker.h50
-rw-r--r--src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h48
-rw-r--r--src/core/tsi/alts_transport_security.cc65
-rw-r--r--src/core/tsi/alts_transport_security.h47
-rw-r--r--src/core/tsi/transport_security.cc2
-rw-r--r--src/cpp/client/channel_cc.cc45
-rw-r--r--src/cpp/client/client_context.cc21
-rw-r--r--src/cpp/client/client_interceptor.cc34
-rw-r--r--src/cpp/client/create_channel.cc36
-rw-r--r--src/cpp/client/create_channel_internal.cc4
-rw-r--r--src/cpp/client/create_channel_internal.h4
-rw-r--r--src/cpp/client/create_channel_posix.cc10
-rw-r--r--src/cpp/client/cronet_credentials.cc9
-rw-r--r--src/cpp/client/generic_stub.cc9
-rw-r--r--src/cpp/client/insecure_credentials.cc9
-rw-r--r--src/cpp/client/secure_credentials.cc16
-rw-r--r--src/cpp/client/secure_credentials.h4
-rw-r--r--src/cpp/common/completion_queue_cc.cc10
-rw-r--r--src/cpp/common/core_codegen.cc7
-rw-r--r--src/cpp/common/version_cc.cc2
-rw-r--r--src/cpp/ext/filters/census/context.cc12
-rw-r--r--src/cpp/server/channelz/channelz_service.cc17
-rw-r--r--src/cpp/server/channelz/channelz_service.h4
-rw-r--r--src/cpp/server/health/default_health_check_service.cc19
-rw-r--r--src/cpp/server/health/default_health_check_service.h3
-rw-r--r--src/cpp/server/secure_server_credentials.cc7
-rw-r--r--src/cpp/server/server_builder.cc38
-rw-r--r--src/cpp/server/server_cc.cc511
-rw-r--r--src/cpp/server/server_context.cc246
-rw-r--r--src/csharp/BUILD-INTEGRATION.md357
-rw-r--r--src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs2
-rwxr-xr-xsrc/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj2
-rw-r--r--src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs2
-rw-r--r--src/csharp/Grpc.Core.Tests/MarshallerTest.cs7
-rw-r--r--src/csharp/Grpc.Core.Tests/NUnitMain.cs6
-rw-r--r--src/csharp/Grpc.Core.Tests/SanityTest.cs4
-rw-r--r--src/csharp/Grpc.Core/DeserializationContext.cs7
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCall.cs2
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCallBase.cs27
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCallServer.cs2
-rw-r--r--src/csharp/Grpc.Core/Internal/DefaultDeserializationContext.cs66
-rw-r--r--src/csharp/Grpc.Core/Internal/DefaultSerializationContext.cs62
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeExtension.cs4
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerCallHandler.cs16
-rw-r--r--src/csharp/Grpc.Core/Marshaller.cs67
-rw-r--r--src/csharp/Grpc.Core/SerializationContext.cs7
-rw-r--r--src/csharp/Grpc.Core/ServerServiceDefinition.cs8
-rw-r--r--src/csharp/Grpc.Core/ServiceBinderBase.cs101
-rwxr-xr-xsrc/csharp/Grpc.Core/Version.csproj.include2
-rw-r--r--src/csharp/Grpc.Core/VersionInfo.cs4
-rwxr-xr-xsrc/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj2
-rw-r--r--src/csharp/Grpc.Examples.Tests/NUnitMain.cs6
-rwxr-xr-xsrc/csharp/Grpc.Examples/Grpc.Examples.csproj2
-rw-r--r--src/csharp/Grpc.Examples/MathGrpc.cs12
-rwxr-xr-xsrc/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj2
-rw-r--r--src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs6
-rw-r--r--src/csharp/Grpc.HealthCheck/Health.cs20
-rw-r--r--src/csharp/Grpc.HealthCheck/HealthGrpc.cs135
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj4
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj2
-rw-r--r--src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs13
-rw-r--r--src/csharp/Grpc.IntegrationTesting/Control.cs144
-rw-r--r--src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs8
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj2
-rw-r--r--src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs10
-rw-r--r--src/csharp/Grpc.IntegrationTesting/NUnitMain.cs6
-rw-r--r--src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs9
-rw-r--r--src/csharp/Grpc.IntegrationTesting/TestGrpc.cs35
-rw-r--r--src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs12
-rw-r--r--src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj2
-rw-r--r--src/csharp/Grpc.Reflection.Tests/NUnitMain.cs6
-rw-r--r--src/csharp/Grpc.Reflection/ReflectionGrpc.cs9
-rw-r--r--src/csharp/Grpc.Tools.Tests/Grpc.Tools.Tests.csproj2
-rw-r--r--src/csharp/Grpc.Tools.Tests/NUnitMain.cs4
-rw-r--r--src/csharp/README.md1
-rwxr-xr-xsrc/csharp/build_packages_dotnetcli.bat2
-rw-r--r--src/csharp/build_unitypackage.bat2
-rw-r--r--src/csharp/doc/integration.md-fig.1-classic.png  bin 0 -> 15266 bytes
-rw-r--r--src/csharp/doc/integration.md-fig.2-sdk.png  bin 0 -> 15863 bytes
-rw-r--r--src/objective-c/!ProtoCompiler-gRPCPlugin.podspec2
-rw-r--r--src/objective-c/GRPCClient/private/version.h2
-rw-r--r--src/objective-c/tests/version.h2
-rw-r--r--src/php/composer.json2
-rw-r--r--src/php/ext/grpc/channel.c2
-rw-r--r--src/php/ext/grpc/channel.h3
-rwxr-xr-xsrc/php/ext/grpc/config.m42
-rw-r--r--src/php/ext/grpc/php_grpc.c122
-rw-r--r--src/php/ext/grpc/version.h2
-rw-r--r--src/proto/grpc/channelz/BUILD7
-rw-r--r--src/proto/grpc/channelz/channelz.proto13
-rw-r--r--src/proto/grpc/health/v1/BUILD1
-rw-r--r--src/proto/grpc/reflection/v1alpha/BUILD8
-rw-r--r--src/proto/grpc/testing/BUILD43
-rw-r--r--src/proto/grpc/testing/compiler_test.proto3
-rw-r--r--src/proto/grpc/testing/control.proto1
-rw-r--r--src/proto/grpc/testing/echo.proto4
-rw-r--r--src/proto/grpc/testing/proto2/BUILD.bazel30
-rw-r--r--src/proto/grpc/testing/simple_messages.proto24
-rw-r--r--src/python/.gitignore1
-rw-r--r--src/python/grpcio/_parallel_compile_patch.py63
-rw-r--r--src/python/grpcio/commands.py69
-rw-r--r--src/python/grpcio/grpc/BUILD.bazel1
-rw-r--r--src/python/grpcio/grpc/__init__.py130
-rw-r--r--src/python/grpcio/grpc/_channel.py128
-rw-r--r--src/python/grpcio/grpc/_common.py14
-rw-r--r--src/python/grpcio/grpc/_cython/BUILD.bazel1
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/channelz.pyx.pxi69
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi12
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi16
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi21
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi2
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi1
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi4
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi10
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi36
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi36
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi8
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi1
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi4
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi4
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/time.pxd.pxi2
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/time.pyx.pxi2
-rw-r--r--src/python/grpcio/grpc/_cython/cygrpc.pyx2
-rw-r--r--src/python/grpcio/grpc/_grpcio_metadata.py2
-rw-r--r--src/python/grpcio/grpc/_interceptor.py139
-rw-r--r--src/python/grpcio/grpc/_plugin_wrapping.py1
-rw-r--r--src/python/grpcio/grpc/_server.py1
-rw-r--r--src/python/grpcio/grpc/beta/BUILD.bazel58
-rw-r--r--src/python/grpcio/grpc/framework/foundation/callable_util.py1
-rw-r--r--src/python/grpcio/grpc/framework/foundation/logging_pool.py1
-rw-r--r--src/python/grpcio/grpc/framework/foundation/stream_util.py1
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py8
-rw-r--r--src/python/grpcio/grpc_version.py2
-rw-r--r--src/python/grpcio_channelz/.gitignore6
-rw-r--r--src/python/grpcio_channelz/MANIFEST.in4
-rw-r--r--src/python/grpcio_channelz/README.rst9
-rw-r--r--src/python/grpcio_channelz/channelz_commands.py67
-rw-r--r--src/python/grpcio_channelz/grpc_channelz/__init__.py13
-rw-r--r--src/python/grpcio_channelz/grpc_channelz/v1/BUILD.bazel38
-rw-r--r--src/python/grpcio_channelz/grpc_channelz/v1/__init__.py13
-rw-r--r--src/python/grpcio_channelz/grpc_channelz/v1/channelz.py141
-rw-r--r--src/python/grpcio_channelz/grpc_version.py17
-rw-r--r--src/python/grpcio_channelz/setup.py96
-rw-r--r--src/python/grpcio_health_checking/MANIFEST.in1
-rw-r--r--src/python/grpcio_health_checking/grpc_version.py2
-rw-r--r--src/python/grpcio_health_checking/health_commands.py8
-rw-r--r--src/python/grpcio_health_checking/setup.py2
-rw-r--r--src/python/grpcio_reflection/MANIFEST.in1
-rw-r--r--src/python/grpcio_reflection/grpc_reflection/v1alpha/BUILD.bazel34
-rw-r--r--src/python/grpcio_reflection/grpc_version.py2
-rw-r--r--src/python/grpcio_reflection/reflection_commands.py8
-rw-r--r--src/python/grpcio_reflection/setup.py2
-rw-r--r--src/python/grpcio_testing/MANIFEST.in1
-rw-r--r--src/python/grpcio_testing/grpc_version.py2
-rw-r--r--src/python/grpcio_testing/setup.py33
-rw-r--r--src/python/grpcio_testing/testing_commands.py39
-rw-r--r--src/python/grpcio_tests/commands.py7
-rw-r--r--src/python/grpcio_tests/grpc_version.py2
-rw-r--r--src/python/grpcio_tests/setup.py1
-rw-r--r--src/python/grpcio_tests/tests/_sanity/_sanity_test.py4
-rw-r--r--src/python/grpcio_tests/tests/channelz/BUILD.bazel15
-rw-r--r--src/python/grpcio_tests/tests/channelz/__init__.py13
-rw-r--r--src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py470
-rw-r--r--src/python/grpcio_tests/tests/interop/BUILD.bazel101
-rw-r--r--src/python/grpcio_tests/tests/interop/credentials/BUILD.bazel9
-rw-r--r--src/python/grpcio_tests/tests/interop/methods.py30
-rw-r--r--src/python/grpcio_tests/tests/interop/resources.py11
-rw-r--r--src/python/grpcio_tests/tests/interop/server.py2
-rw-r--r--src/python/grpcio_tests/tests/qps/worker_server.py4
-rw-r--r--src/python/grpcio_tests/tests/reflection/BUILD.bazel21
-rw-r--r--src/python/grpcio_tests/tests/tests.json4
-rw-r--r--src/python/grpcio_tests/tests/unit/BUILD.bazel1
-rw-r--r--src/python/grpcio_tests/tests/unit/_api_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_auth_context_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_auth_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_args_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_close_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_compression_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_credentials_test.py13
-rw-r--r--src/python/grpcio_tests/tests/unit/_empty_message_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_error_message_encoding_test.py86
-rw-r--r--src/python/grpcio_tests/tests/unit/_exit_scenarios.py4
-rw-r--r--src/python/grpcio_tests/tests/unit/_exit_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_interceptor_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py5
-rw-r--r--src/python/grpcio_tests/tests/unit/_invocation_defects_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_logging_test.py80
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_flags_test.py251
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_reconnect_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_rpc_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py53
-rw-r--r--src/python/grpcio_tests/tests/unit/_server_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/_session_cache_test.py2
-rw-r--r--src/python/grpcio_tests/tests/unit/beta/BUILD.bazel75
-rw-r--r--src/python/grpcio_tests/tests/unit/resources.py35
-rw-r--r--src/python/grpcio_tests/tests/unit/test_common.py26
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.c2
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.h3
-rw-r--r--src/ruby/lib/grpc/generic/service.rb2
-rw-r--r--src/ruby/lib/grpc/version.rb2
-rw-r--r--src/ruby/pb/grpc/health/checker.rb5
-rw-r--r--src/ruby/spec/generic/rpc_server_spec.rb22
-rw-r--r--src/ruby/spec/support/services.rb1
-rw-r--r--src/ruby/tools/version.rb2
363 files changed, 9899 insertions, 4508 deletions
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index 56716493dc..b004687250 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -132,9 +132,11 @@ grpc::string GetHeaderIncludes(grpc_generator::File* file,
"grpcpp/impl/codegen/async_generic_service.h",
"grpcpp/impl/codegen/async_stream.h",
"grpcpp/impl/codegen/async_unary_call.h",
+ "grpcpp/impl/codegen/client_callback.h",
"grpcpp/impl/codegen/method_handler_impl.h",
"grpcpp/impl/codegen/proto_utils.h",
"grpcpp/impl/codegen/rpc_method.h",
+ "grpcpp/impl/codegen/server_callback.h",
"grpcpp/impl/codegen/service_type.h",
"grpcpp/impl/codegen/status.h",
"grpcpp/impl/codegen/stub_options.h",
@@ -579,11 +581,22 @@ void PrintHeaderClientMethodCallbackInterfaces(
"const $Request$* request, $Response$* response, "
"std::function<void(::grpc::Status)>) = 0;\n");
} else if (ClientOnlyStreaming(method)) {
- // TODO(vjpai): Add support for client-side streaming
+ printer->Print(*vars,
+ "virtual void $Method$(::grpc::ClientContext* context, "
+ "$Response$* response, "
+ "::grpc::experimental::ClientWriteReactor< $Request$>* "
+ "reactor) = 0;\n");
} else if (ServerOnlyStreaming(method)) {
- // TODO(vjpai): Add support for server-side streaming
+ printer->Print(*vars,
+ "virtual void $Method$(::grpc::ClientContext* context, "
+ "$Request$* request, "
+ "::grpc::experimental::ClientReadReactor< $Response$>* "
+ "reactor) = 0;\n");
} else if (method->BidiStreaming()) {
- // TODO(vjpai): Add support for bidi streaming
+ printer->Print(*vars,
+ "virtual void $Method$(::grpc::ClientContext* context, "
+ "::grpc::experimental::ClientBidiReactor< "
+ "$Request$,$Response$>* reactor) = 0;\n");
}
}
@@ -630,11 +643,23 @@ void PrintHeaderClientMethodCallback(grpc_generator::Printer* printer,
"const $Request$* request, $Response$* response, "
"std::function<void(::grpc::Status)>) override;\n");
} else if (ClientOnlyStreaming(method)) {
- // TODO(vjpai): Add support for client-side streaming
+ printer->Print(*vars,
+ "void $Method$(::grpc::ClientContext* context, "
+ "$Response$* response, "
+ "::grpc::experimental::ClientWriteReactor< $Request$>* "
+ "reactor) override;\n");
} else if (ServerOnlyStreaming(method)) {
- // TODO(vjpai): Add support for server-side streaming
+ printer->Print(*vars,
+ "void $Method$(::grpc::ClientContext* context, "
+ "$Request$* request, "
+ "::grpc::experimental::ClientReadReactor< $Response$>* "
+ "reactor) override;\n");
+
} else if (method->BidiStreaming()) {
- // TODO(vjpai): Add support for bidi streaming
+ printer->Print(*vars,
+ "void $Method$(::grpc::ClientContext* context, "
+ "::grpc::experimental::ClientBidiReactor< "
+ "$Request$,$Response$>* reactor) override;\n");
}
}
@@ -700,9 +725,9 @@ void PrintHeaderServerMethodSync(grpc_generator::Printer* printer,
printer->Print(method->GetTrailingComments("//").c_str());
}
-// Helper generator. Disabled the sync API for Request and Response, then adds
+// Helper generator. Disables the sync API for Request and Response, then adds
// in an async API for RealRequest and RealResponse types. This is to be used
-// to generate async and raw APIs.
+// to generate async and raw async APIs.
void PrintHeaderServerAsyncMethodsHelper(
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
@@ -829,6 +854,213 @@ void PrintHeaderServerMethodAsync(grpc_generator::Printer* printer,
printer->Print(*vars, "};\n");
}
+// Helper generator. Disables the sync API for Request and Response, then adds
+// in a callback API for RealRequest and RealResponse types. This is to be used
+// to generate callback and raw callback APIs.
+void PrintHeaderServerCallbackMethodsHelper(
+ grpc_generator::Printer* printer, const grpc_generator::Method* method,
+ std::map<grpc::string, grpc::string>* vars) {
+ if (method->NoStreaming()) {
+ printer->Print(
+ *vars,
+ "// disable synchronous version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, const $Request$* request, "
+ "$Response$* response) override {\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(
+ *vars,
+ "virtual void $Method$("
+ "::grpc::ServerContext* context, const $RealRequest$* request, "
+ "$RealResponse$* response, "
+ "::grpc::experimental::ServerCallbackRpcController* "
+ "controller) { controller->Finish(::grpc::Status("
+ "::grpc::StatusCode::UNIMPLEMENTED, \"\")); }\n");
+ } else if (ClientOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ "// disable synchronous version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, "
+ "::grpc::ServerReader< $Request$>* reader, "
+ "$Response$* response) override {\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(*vars,
+ "virtual ::grpc::experimental::ServerReadReactor< "
+ "$RealRequest$, $RealResponse$>* $Method$() {\n"
+ " return new ::grpc::internal::UnimplementedReadReactor<\n"
+ " $RealRequest$, $RealResponse$>;}\n");
+ } else if (ServerOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ "// disable synchronous version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, const $Request$* request, "
+ "::grpc::ServerWriter< $Response$>* writer) override "
+ "{\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(*vars,
+ "virtual ::grpc::experimental::ServerWriteReactor< "
+ "$RealRequest$, $RealResponse$>* $Method$() {\n"
+ " return new ::grpc::internal::UnimplementedWriteReactor<\n"
+ " $RealRequest$, $RealResponse$>;}\n");
+ } else if (method->BidiStreaming()) {
+ printer->Print(
+ *vars,
+ "// disable synchronous version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, "
+ "::grpc::ServerReaderWriter< $Response$, $Request$>* stream) "
+ " override {\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(*vars,
+ "virtual ::grpc::experimental::ServerBidiReactor< "
+ "$RealRequest$, $RealResponse$>* $Method$() {\n"
+ " return new ::grpc::internal::UnimplementedBidiReactor<\n"
+ " $RealRequest$, $RealResponse$>;}\n");
+ }
+}
+
+void PrintHeaderServerMethodCallback(
+ grpc_generator::Printer* printer, const grpc_generator::Method* method,
+ std::map<grpc::string, grpc::string>* vars) {
+ (*vars)["Method"] = method->name();
+ // These will be disabled
+ (*vars)["Request"] = method->input_type_name();
+ (*vars)["Response"] = method->output_type_name();
+ // These will be used for the callback API
+ (*vars)["RealRequest"] = method->input_type_name();
+ (*vars)["RealResponse"] = method->output_type_name();
+ printer->Print(*vars, "template <class BaseClass>\n");
+ printer->Print(
+ *vars,
+ "class ExperimentalWithCallbackMethod_$Method$ : public BaseClass {\n");
+ printer->Print(
+ " private:\n"
+ " void BaseClassMustBeDerivedFromService(const Service *service) {}\n");
+ printer->Print(" public:\n");
+ printer->Indent();
+ printer->Print(*vars, "ExperimentalWithCallbackMethod_$Method$() {\n");
+ if (method->NoStreaming()) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackUnaryHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this](::grpc::ServerContext* context,\n"
+ " const $RealRequest$* request,\n"
+ " $RealResponse$* response,\n"
+ " ::grpc::experimental::ServerCallbackRpcController* "
+ "controller) {\n"
+ " return this->$"
+ "Method$(context, request, response, controller);\n"
+ " }));\n");
+ } else if (ClientOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackClientStreamingHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ } else if (ServerOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackServerStreamingHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ } else if (method->BidiStreaming()) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackBidiHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ }
+ printer->Print(*vars, "}\n");
+ printer->Print(*vars,
+ "~ExperimentalWithCallbackMethod_$Method$() override {\n"
+ " BaseClassMustBeDerivedFromService(this);\n"
+ "}\n");
+ PrintHeaderServerCallbackMethodsHelper(printer, method, vars);
+ printer->Outdent();
+ printer->Print(*vars, "};\n");
+}
+
+void PrintHeaderServerMethodRawCallback(
+ grpc_generator::Printer* printer, const grpc_generator::Method* method,
+ std::map<grpc::string, grpc::string>* vars) {
+ (*vars)["Method"] = method->name();
+ // These will be disabled
+ (*vars)["Request"] = method->input_type_name();
+ (*vars)["Response"] = method->output_type_name();
+ // These will be used for raw API
+ (*vars)["RealRequest"] = "::grpc::ByteBuffer";
+ (*vars)["RealResponse"] = "::grpc::ByteBuffer";
+ printer->Print(*vars, "template <class BaseClass>\n");
+ printer->Print(*vars,
+ "class ExperimentalWithRawCallbackMethod_$Method$ : public "
+ "BaseClass {\n");
+ printer->Print(
+ " private:\n"
+ " void BaseClassMustBeDerivedFromService(const Service *service) {}\n");
+ printer->Print(" public:\n");
+ printer->Indent();
+ printer->Print(*vars, "ExperimentalWithRawCallbackMethod_$Method$() {\n");
+ if (method->NoStreaming()) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackUnaryHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this](::grpc::ServerContext* context,\n"
+ " const $RealRequest$* request,\n"
+ " $RealResponse$* response,\n"
+ " ::grpc::experimental::ServerCallbackRpcController* "
+ "controller) {\n"
+ " this->$"
+ "Method$(context, request, response, controller);\n"
+ " }));\n");
+ } else if (ClientOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackClientStreamingHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ } else if (ServerOnlyStreaming(method)) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackServerStreamingHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ } else if (method->BidiStreaming()) {
+ printer->Print(
+ *vars,
+ " ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
+ " new ::grpc::internal::CallbackBidiHandler< "
+ "$RealRequest$, $RealResponse$>(\n"
+ " [this] { return this->$Method$(); }));\n");
+ }
+ printer->Print(*vars, "}\n");
+ printer->Print(*vars,
+ "~ExperimentalWithRawCallbackMethod_$Method$() override {\n"
+ " BaseClassMustBeDerivedFromService(this);\n"
+ "}\n");
+ PrintHeaderServerCallbackMethodsHelper(printer, method, vars);
+ printer->Outdent();
+ printer->Print(*vars, "};\n");
+}
+
void PrintHeaderServerMethodStreamedUnary(
grpc_generator::Printer* printer, const grpc_generator::Method* method,
std::map<grpc::string, grpc::string>* vars) {
@@ -1137,7 +1369,7 @@ void PrintHeaderService(grpc_generator::Printer* printer,
printer->Print("typedef ");
for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["method_name"] = service->method(i).get()->name();
+ (*vars)["method_name"] = service->method(i)->name();
printer->Print(*vars, "WithAsyncMethod_$method_name$<");
}
printer->Print("Service");
@@ -1146,6 +1378,24 @@ void PrintHeaderService(grpc_generator::Printer* printer,
}
printer->Print(" AsyncService;\n");
+ // Server side - Callback
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["Idx"] = as_string(i);
+ PrintHeaderServerMethodCallback(printer, service->method(i).get(), vars);
+ }
+
+ printer->Print("typedef ");
+
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["method_name"] = service->method(i)->name();
+ printer->Print(*vars, "ExperimentalWithCallbackMethod_$method_name$<");
+ }
+ printer->Print("Service");
+ for (int i = 0; i < service->method_count(); ++i) {
+ printer->Print(" >");
+ }
+ printer->Print(" ExperimentalCallbackService;\n");
+
// Server side - Generic
for (int i = 0; i < service->method_count(); ++i) {
(*vars)["Idx"] = as_string(i);
@@ -1158,6 +1408,12 @@ void PrintHeaderService(grpc_generator::Printer* printer,
PrintHeaderServerMethodRaw(printer, service->method(i).get(), vars);
}
+ // Server side - Raw Callback
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["Idx"] = as_string(i);
+ PrintHeaderServerMethodRawCallback(printer, service->method(i).get(), vars);
+ }
+
// Server side - Streamed Unary
for (int i = 0; i < service->method_count(); ++i) {
(*vars)["Idx"] = as_string(i);
@@ -1167,7 +1423,7 @@ void PrintHeaderService(grpc_generator::Printer* printer,
printer->Print("typedef ");
for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["method_name"] = service->method(i).get()->name();
+ (*vars)["method_name"] = service->method(i)->name();
if (service->method(i)->NoStreaming()) {
printer->Print(*vars, "WithStreamedUnaryMethod_$method_name$<");
}
@@ -1189,7 +1445,7 @@ void PrintHeaderService(grpc_generator::Printer* printer,
printer->Print("typedef ");
for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["method_name"] = service->method(i).get()->name();
+ (*vars)["method_name"] = service->method(i)->name();
auto method = service->method(i);
if (ServerOnlyStreaming(method.get())) {
printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
@@ -1207,7 +1463,7 @@ void PrintHeaderService(grpc_generator::Printer* printer,
// Server side - typedef for controlled both unary and server-side streaming
printer->Print("typedef ");
for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["method_name"] = service->method(i).get()->name();
+ (*vars)["method_name"] = service->method(i)->name();
auto method = service->method(i);
if (ServerOnlyStreaming(method.get())) {
printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
@@ -1333,6 +1589,7 @@ grpc::string GetSourceIncludes(grpc_generator::File* file,
"grpcpp/impl/codegen/client_callback.h",
"grpcpp/impl/codegen/method_handler_impl.h",
"grpcpp/impl/codegen/rpc_service_method.h",
+ "grpcpp/impl/codegen/server_callback.h",
"grpcpp/impl/codegen/service_type.h",
"grpcpp/impl/codegen/sync_stream.h"};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
@@ -1417,7 +1674,19 @@ void PrintSourceClientMethod(grpc_generator::Printer* printer,
"context, response);\n"
"}\n\n");
- // TODO(vjpai): Add callback version
+ printer->Print(
+ *vars,
+ "void $ns$$Service$::"
+ "Stub::experimental_async::$Method$(::grpc::ClientContext* context, "
+ "$Response$* response, "
+ "::grpc::experimental::ClientWriteReactor< $Request$>* reactor) {\n");
+ printer->Print(*vars,
+ " ::grpc::internal::ClientCallbackWriterFactory< "
+ "$Request$>::Create("
+ "stub_->channel_.get(), "
+ "stub_->rpcmethod_$Method$_, "
+ "context, response, reactor);\n"
+ "}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
@@ -1451,7 +1720,19 @@ void PrintSourceClientMethod(grpc_generator::Printer* printer,
"context, request);\n"
"}\n\n");
- // TODO(vjpai): Add callback version
+ printer->Print(
+ *vars,
+ "void $ns$$Service$::Stub::experimental_async::$Method$(::grpc::"
+ "ClientContext* context, "
+ "$Request$* request, "
+ "::grpc::experimental::ClientReadReactor< $Response$>* reactor) {\n");
+ printer->Print(*vars,
+ " ::grpc::internal::ClientCallbackReaderFactory< "
+ "$Response$>::Create("
+ "stub_->channel_.get(), "
+ "stub_->rpcmethod_$Method$_, "
+ "context, request, reactor);\n"
+ "}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
@@ -1485,7 +1766,19 @@ void PrintSourceClientMethod(grpc_generator::Printer* printer,
"context);\n"
"}\n\n");
- // TODO(vjpai): Add callback version
+ printer->Print(
+ *vars,
+ "void $ns$$Service$::Stub::experimental_async::$Method$(::grpc::"
+ "ClientContext* context, "
+ "::grpc::experimental::ClientBidiReactor< $Request$,$Response$>* "
+ "reactor) {\n");
+ printer->Print(*vars,
+ " ::grpc::internal::ClientCallbackReaderWriterFactory< "
+ "$Request$,$Response$>::Create("
+ "stub_->channel_.get(), "
+ "stub_->rpcmethod_$Method$_, "
+ "context, reactor);\n"
+ "}\n\n");
for (auto async_prefix : async_prefixes) {
(*vars)["AsyncPrefix"] = async_prefix.prefix;
@@ -1577,7 +1870,7 @@ void PrintSourceService(grpc_generator::Printer* printer,
printer->Print(*vars,
"static const char* $prefix$$Service$_method_names[] = {\n");
for (int i = 0; i < service->method_count(); ++i) {
- (*vars)["Method"] = service->method(i).get()->name();
+ (*vars)["Method"] = service->method(i)->name();
printer->Print(*vars, " \"/$Package$$Service$/$Method$\",\n");
}
printer->Print(*vars, "};\n\n");
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index a923ce8e38..59ddbd82f6 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -609,6 +609,42 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor* service) {
out->Print("\n");
}
+void GenerateBindServiceWithBinderMethod(Printer* out,
+ const ServiceDescriptor* service) {
+ out->Print(
+ "/// <summary>Register service method implementations with a service "
+ "binder. Useful when customizing the service binding logic.\n"
+ "/// Note: this method is part of an experimental API that can change or "
+ "be "
+ "removed without any prior notice.</summary>\n");
+ out->Print(
+ "/// <param name=\"serviceBinder\">Service methods will be bound by "
+ "calling <c>AddMethod</c> on this object."
+ "</param>\n");
+ out->Print(
+ "/// <param name=\"serviceImpl\">An object implementing the server-side"
+ " handling logic.</param>\n");
+ out->Print(
+ "public static void BindService(grpc::ServiceBinderBase serviceBinder, "
+ "$implclass$ "
+ "serviceImpl)\n",
+ "implclass", GetServerClassName(service));
+ out->Print("{\n");
+ out->Indent();
+
+ for (int i = 0; i < service->method_count(); i++) {
+ const MethodDescriptor* method = service->method(i);
+ out->Print(
+ "serviceBinder.AddMethod($methodfield$, serviceImpl.$methodname$);\n",
+ "methodfield", GetMethodFieldName(method), "methodname",
+ method->name());
+ }
+
+ out->Outdent();
+ out->Print("}\n");
+ out->Print("\n");
+}
+
void GenerateService(Printer* out, const ServiceDescriptor* service,
bool generate_client, bool generate_server,
bool internal_access) {
@@ -637,6 +673,7 @@ void GenerateService(Printer* out, const ServiceDescriptor* service,
}
if (generate_server) {
GenerateBindServiceMethod(out, service);
+ GenerateBindServiceWithBinderMethod(out, service);
}
out->Outdent();
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index daf1b89b09..ebc412b468 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -34,9 +34,9 @@
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
-#include "src/core/ext/filters/client_channel/method_params.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
@@ -63,6 +63,8 @@
#include "src/core/lib/transport/status_metadata.h"
using grpc_core::internal::ClientChannelMethodParams;
+using grpc_core::internal::ClientChannelMethodParamsTable;
+using grpc_core::internal::ProcessedResolverResult;
using grpc_core::internal::ServerRetryThrottleData;
/* Client channel implementation */
@@ -83,10 +85,6 @@ grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
struct external_connectivity_watcher;
-typedef grpc_core::SliceHashTable<
- grpc_core::RefCountedPtr<ClientChannelMethodParams>>
- MethodParamsTable;
-
typedef struct client_channel_channel_data {
grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
bool started_resolving;
@@ -102,7 +100,7 @@ typedef struct client_channel_channel_data {
/** retry throttle data */
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
/** incoming resolver result - set by resolver.next() */
grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
@@ -251,66 +249,6 @@ static void start_resolving_locked(channel_data* chand) {
&chand->on_resolver_result_changed);
}
-typedef struct {
- char* server_name;
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
-} service_config_parsing_state;
-
-static void parse_retry_throttle_params(
- const grpc_json* field, service_config_parsing_state* parsing_state) {
- if (strcmp(field->key, "retryThrottling") == 0) {
- if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
- if (field->type != GRPC_JSON_OBJECT) return;
- int max_milli_tokens = 0;
- int milli_token_ratio = 0;
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
- sub_field = sub_field->next) {
- if (sub_field->key == nullptr) return;
- if (strcmp(sub_field->key, "maxTokens") == 0) {
- if (max_milli_tokens != 0) return; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return;
- max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
- if (max_milli_tokens == -1) return;
- max_milli_tokens *= 1000;
- } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
- if (milli_token_ratio != 0) return; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return;
- // We support up to 3 decimal digits.
- size_t whole_len = strlen(sub_field->value);
- uint32_t multiplier = 1;
- uint32_t decimal_value = 0;
- const char* decimal_point = strchr(sub_field->value, '.');
- if (decimal_point != nullptr) {
- whole_len = static_cast<size_t>(decimal_point - sub_field->value);
- multiplier = 1000;
- size_t decimal_len = strlen(decimal_point + 1);
- if (decimal_len > 3) decimal_len = 3;
- if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
- &decimal_value)) {
- return;
- }
- uint32_t decimal_multiplier = 1;
- for (size_t i = 0; i < (3 - decimal_len); ++i) {
- decimal_multiplier *= 10;
- }
- decimal_value *= decimal_multiplier;
- }
- uint32_t whole_value;
- if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
- &whole_value)) {
- return;
- }
- milli_token_ratio =
- static_cast<int>((whole_value * multiplier) + decimal_value);
- if (milli_token_ratio <= 0) return;
- }
- }
- parsing_state->retry_throttle_data =
- grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
- parsing_state->server_name, max_milli_tokens, milli_token_ratio);
- }
-}
-
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
static void on_resolver_shutdown_locked(channel_data* chand,
@@ -352,37 +290,6 @@ static void on_resolver_shutdown_locked(channel_data* chand,
GRPC_ERROR_UNREF(error);
}
-// Returns the LB policy name from the resolver result.
-static grpc_core::UniquePtr<char>
-get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
- // Find LB policy name in channel args.
- const grpc_arg* channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
- const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
- // Special case: If at least one balancer address is present, we use
- // the grpclb policy, regardless of what the resolver actually specified.
- channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses* addresses =
- static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
- if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
- if (lb_policy_name != nullptr &&
- gpr_stricmp(lb_policy_name, "grpclb") != 0) {
- gpr_log(GPR_INFO,
- "resolver requested LB policy %s but provided at least one "
- "balancer address -- forcing use of grpclb LB policy",
- lb_policy_name);
- }
- lb_policy_name = "grpclb";
- }
- }
- // Use pick_first if nothing was specified and we didn't select grpclb
- // above.
- if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
- return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
-}
-
static void request_reresolution_locked(void* arg, grpc_error* error) {
reresolution_request_args* args =
static_cast<reresolution_request_args*>(arg);
@@ -410,13 +317,14 @@ using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
static void create_new_lb_policy_locked(
- channel_data* chand, char* lb_policy_name,
+ channel_data* chand, char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error, TraceStringVector* trace_strings) {
grpc_core::LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = chand->combiner;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
lb_policy_args.args = chand->resolver_result;
+ lb_policy_args.lb_config = lb_config;
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
lb_policy_name, lb_policy_args);
@@ -473,44 +381,6 @@ static void create_new_lb_policy_locked(
}
}
-// Returns the service config (as a JSON string) from the resolver result.
-// Also updates state in chand.
-static grpc_core::UniquePtr<char>
-get_service_config_from_resolver_result_locked(channel_data* chand) {
- const grpc_arg* channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
- const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
- if (service_config_json != nullptr) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
- chand, service_config_json);
- }
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
- grpc_core::ServiceConfig::Create(service_config_json);
- if (service_config != nullptr) {
- if (chand->enable_retries) {
- channel_arg =
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
- GPR_ASSERT(server_uri != nullptr);
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
- GPR_ASSERT(uri->path[0] != '\0');
- service_config_parsing_state parsing_state;
- parsing_state.server_name =
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
- service_config->ParseGlobalParams(parse_retry_throttle_params,
- &parsing_state);
- grpc_uri_destroy(uri);
- chand->retry_throttle_data =
- std::move(parsing_state.retry_throttle_data);
- }
- chand->method_params_table = service_config->CreateMethodConfigTable(
- ClientChannelMethodParams::CreateFromJson);
- }
- }
- return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
-}
-
static void maybe_add_trace_message_for_address_changes_locked(
channel_data* chand, TraceStringVector* trace_strings) {
int resolution_contains_addresses = false;
@@ -597,36 +467,47 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
}
+ // Don't override connectivity state if we already have an LB policy.
+ if (chand->lb_policy != nullptr) set_connectivity_state = false;
} else {
+ // Parse the resolver result.
+ ProcessedResolverResult resolver_result(chand->resolver_result,
+ chand->enable_retries);
+ chand->retry_throttle_data = resolver_result.retry_throttle_data();
+ chand->method_params_table = resolver_result.method_params_table();
+ grpc_core::UniquePtr<char> service_config_json =
+ resolver_result.service_config_json();
+ if (service_config_json != nullptr && grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
+ chand, service_config_json.get());
+ }
grpc_core::UniquePtr<char> lb_policy_name =
- get_lb_policy_name_from_resolver_result_locked(chand);
+ resolver_result.lb_policy_name();
+ grpc_json* lb_policy_config = resolver_result.lb_policy_config();
// Check to see if we're already using the right LB policy.
// Note: It's safe to use chand->info_lb_policy_name here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
// once at any given time.
- bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
- gpr_stricmp(chand->info_lb_policy_name.get(),
- lb_policy_name.get()) != 0;
+ bool lb_policy_name_changed =
+ chand->info_lb_policy_name == nullptr ||
+ strcmp(chand->info_lb_policy_name.get(), lb_policy_name.get()) != 0;
if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
chand, lb_policy_name.get(), chand->lb_policy.get());
}
- chand->lb_policy->UpdateLocked(*chand->resolver_result);
+ chand->lb_policy->UpdateLocked(*chand->resolver_result, lb_policy_config);
// No need to set the channel's connectivity state; the existing
// watch on the LB policy will take care of that.
set_connectivity_state = false;
} else {
// Instantiate new LB policy.
- create_new_lb_policy_locked(chand, lb_policy_name.get(),
+ create_new_lb_policy_locked(chand, lb_policy_name.get(), lb_policy_config,
&connectivity_state, &connectivity_error,
&trace_strings);
}
- // Find service config.
- grpc_core::UniquePtr<char> service_config_json =
- get_service_config_from_resolver_result_locked(chand);
// Note: It's safe to use chand->info_service_config_json here without
// taking a lock on chand->info_mu, because this function is the
// only thing that modifies its value, and it can only be invoked
@@ -689,12 +570,6 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
} else {
grpc_error* error = GRPC_ERROR_NONE;
grpc_core::LoadBalancingPolicy::PickState pick_state;
- pick_state.initial_metadata = nullptr;
- pick_state.initial_metadata_flags = 0;
- pick_state.on_complete = nullptr;
- memset(&pick_state.subchannel_call_context, 0,
- sizeof(pick_state.subchannel_call_context));
- pick_state.user_data = nullptr;
// Pick must return synchronously, because pick_state.on_complete is null.
GPR_ASSERT(chand->lb_policy->PickLocked(&pick_state, &error));
if (pick_state.connected_subchannel != nullptr) {
@@ -938,12 +813,26 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
// (census filter is on top of this one)
// - add census stats for retries
+namespace {
+struct call_data;
+
// State used for starting a retryable batch on a subchannel call.
// This provides its own grpc_transport_stream_op_batch and other data
// structures needed to populate the ops in the batch.
// We allocate one struct on the arena for each attempt at starting a
// batch on a given subchannel call.
-typedef struct {
+struct subchannel_batch_data {
+ subchannel_batch_data(grpc_call_element* elem, call_data* calld, int refcount,
+ bool set_on_complete);
+ // All dtor code must be added in `destroy`. This is because we may
+ // call closures in `subchannel_batch_data` after they are unrefed by
+ // `batch_data_unref`, and msan would complain about accessing this class
+ // after calling dtor. As a result we cannot call the `dtor` in
+ // `batch_data_unref`.
+ // TODO(soheil): We should try to call the dtor in `batch_data_unref`.
+ ~subchannel_batch_data() { destroy(); }
+ void destroy();
+
gpr_refcount refs;
grpc_call_element* elem;
grpc_subchannel_call* subchannel_call; // Holds a ref.
@@ -952,11 +841,23 @@ typedef struct {
grpc_transport_stream_op_batch batch;
// For intercepting on_complete.
grpc_closure on_complete;
-} subchannel_batch_data;
+};
// Retry state associated with a subchannel call.
// Stored in the parent_data of the subchannel call object.
-typedef struct {
+struct subchannel_call_retry_state {
+ explicit subchannel_call_retry_state(grpc_call_context_element* context)
+ : batch_payload(context),
+ started_send_initial_metadata(false),
+ completed_send_initial_metadata(false),
+ started_send_trailing_metadata(false),
+ completed_send_trailing_metadata(false),
+ started_recv_initial_metadata(false),
+ completed_recv_initial_metadata(false),
+ started_recv_trailing_metadata(false),
+ completed_recv_trailing_metadata(false),
+ retry_dispatched(false) {}
+
// subchannel_batch_data.batch.payload points to this.
grpc_transport_stream_op_batch_payload batch_payload;
// For send_initial_metadata.
@@ -975,7 +876,7 @@ typedef struct {
// For intercepting recv_initial_metadata.
grpc_metadata_batch recv_initial_metadata;
grpc_closure recv_initial_metadata_ready;
- bool trailing_metadata_available;
+ bool trailing_metadata_available = false;
// For intercepting recv_message.
grpc_closure recv_message_ready;
grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
@@ -985,10 +886,10 @@ typedef struct {
grpc_closure recv_trailing_metadata_ready;
// These fields indicate which ops have been started and completed on
// this subchannel call.
- size_t started_send_message_count;
- size_t completed_send_message_count;
- size_t started_recv_message_count;
- size_t completed_recv_message_count;
+ size_t started_send_message_count = 0;
+ size_t completed_send_message_count = 0;
+ size_t started_recv_message_count = 0;
+ size_t completed_recv_message_count = 0;
bool started_send_initial_metadata : 1;
bool completed_send_initial_metadata : 1;
bool started_send_trailing_metadata : 1;
@@ -997,14 +898,18 @@ typedef struct {
bool completed_recv_initial_metadata : 1;
bool started_recv_trailing_metadata : 1;
bool completed_recv_trailing_metadata : 1;
+ subchannel_batch_data* recv_initial_metadata_ready_deferred_batch = nullptr;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+ subchannel_batch_data* recv_message_ready_deferred_batch = nullptr;
+ grpc_error* recv_message_error = GRPC_ERROR_NONE;
+ subchannel_batch_data* recv_trailing_metadata_internal_batch = nullptr;
// State for callback processing.
+  // NOTE: Do not move this next to the metadata bitfields above. That would
+  // save space but would also result in a data race, because the compiler
+  // would generate a 2-byte store that overwrites the metadata bitfields
+  // when setting this field.
bool retry_dispatched : 1;
- subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
- grpc_error* recv_initial_metadata_error;
- subchannel_batch_data* recv_message_ready_deferred_batch;
- grpc_error* recv_message_error;
- subchannel_batch_data* recv_trailing_metadata_internal_batch;
-} subchannel_call_retry_state;
+};
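
A sketch, in standard C++ only, of why retry_dispatched above must not be packed next to the other bitfields: adjacent non-zero-width bit-fields share a single memory location, so a store to one of them is a read-modify-write of the shared bytes and races with a concurrent store to a neighboring bit-field. A separating non-bit-field member gives the flag its own memory location.

struct Racy {
  bool started : 1;
  bool dispatched : 1;  // same memory location as `started`: writing it
                        // rewrites the byte(s) holding `started`
};

struct Separated {
  bool started : 1;
  void* other_state;  // ends the bit-field sequence
  bool dispatched;    // distinct memory location; safe to store concurrently
};
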
// Pending batches stored in call data.
typedef struct {
@@ -1019,7 +924,44 @@ typedef struct {
Handles queueing of stream ops until a call object is ready, waiting
for initial metadata before trying to create a call object,
and handling cancellation gracefully. */
-typedef struct client_channel_call_data {
+struct call_data {
+ call_data(grpc_call_element* elem, const channel_data& chand,
+ const grpc_call_element_args& args)
+ : deadline_state(elem, args.call_stack, args.call_combiner,
+ GPR_LIKELY(chand.deadline_checking_enabled)
+ ? args.deadline
+ : GRPC_MILLIS_INF_FUTURE),
+ path(grpc_slice_ref_internal(args.path)),
+ call_start_time(args.start_time),
+ deadline(args.deadline),
+ arena(args.arena),
+ owning_call(args.call_stack),
+ call_combiner(args.call_combiner),
+ pending_send_initial_metadata(false),
+ pending_send_message(false),
+ pending_send_trailing_metadata(false),
+ enable_retries(chand.enable_retries),
+ retry_committed(false),
+ last_attempt_got_server_pushback(false) {}
+
+ ~call_data() {
+ if (GPR_LIKELY(subchannel_call != nullptr)) {
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call,
+ "client_channel_destroy_call");
+ }
+ grpc_slice_unref_internal(path);
+ GRPC_ERROR_UNREF(cancel_error);
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
+ GPR_ASSERT(pending_batches[i].batch == nullptr);
+ }
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+ if (pick.subchannel_call_context[i].value != nullptr) {
+ pick.subchannel_call_context[i].destroy(
+ pick.subchannel_call_context[i].value);
+ }
+ }
+ }
+
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
@@ -1038,24 +980,24 @@ typedef struct client_channel_call_data {
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
- grpc_subchannel_call* subchannel_call;
+ grpc_subchannel_call* subchannel_call = nullptr;
// Set when we get a cancel_stream op.
- grpc_error* cancel_error;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_core::LoadBalancingPolicy::PickState pick;
grpc_closure pick_closure;
grpc_closure pick_cancel_closure;
- grpc_polling_entity* pollent;
- bool pollent_added_to_interested_parties;
+ grpc_polling_entity* pollent = nullptr;
+ bool pollent_added_to_interested_parties = false;
// Batches are added to this list when received from above.
// They are removed when we are done handling the batch (i.e., when
// either we have invoked all of the batch's callbacks or we have
// passed the batch down to the subchannel call and are not
// intercepting any of its callbacks).
- pending_batch pending_batches[MAX_PENDING_BATCHES];
+ pending_batch pending_batches[MAX_PENDING_BATCHES] = {};
bool pending_send_initial_metadata : 1;
bool pending_send_message : 1;
bool pending_send_trailing_metadata : 1;
@@ -1064,8 +1006,8 @@ typedef struct client_channel_call_data {
bool enable_retries : 1;
bool retry_committed : 1;
bool last_attempt_got_server_pushback : 1;
- int num_attempts_completed;
- size_t bytes_buffered_for_retry;
+ int num_attempts_completed = 0;
+ size_t bytes_buffered_for_retry = 0;
grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
grpc_timer retry_timer;
@@ -1076,12 +1018,12 @@ typedef struct client_channel_call_data {
// until all of these batches have completed.
// Note that we actually only need to track replay batches, but it's
// easier to track all batches with send ops.
- int num_pending_retriable_subchannel_send_batches;
+ int num_pending_retriable_subchannel_send_batches = 0;
// Cached data for retrying send ops.
// send_initial_metadata
- bool seen_send_initial_metadata;
- grpc_linked_mdelem* send_initial_metadata_storage;
+ bool seen_send_initial_metadata = false;
+ grpc_linked_mdelem* send_initial_metadata_storage = nullptr;
grpc_metadata_batch send_initial_metadata;
uint32_t send_initial_metadata_flags;
gpr_atm* peer_string;
@@ -1092,14 +1034,13 @@ typedef struct client_channel_call_data {
// Note: We inline the cache for the first 3 send_message ops and use
// dynamic allocation after that. This number was essentially picked
// at random; it could be changed in the future to tune performance.
- grpc_core::ManualConstructor<
- grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
- send_messages;
+ grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
// send_trailing_metadata
- bool seen_send_trailing_metadata;
- grpc_linked_mdelem* send_trailing_metadata_storage;
+ bool seen_send_trailing_metadata = false;
+ grpc_linked_mdelem* send_trailing_metadata_storage = nullptr;
grpc_metadata_batch send_trailing_metadata;
-} call_data;
+};
+} // namespace
// Forward declarations.
static void retry_commit(grpc_call_element* elem,
@@ -1143,7 +1084,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
new (cache) grpc_core::ByteStreamCache(
std::move(batch->payload->send_message.send_message));
- calld->send_messages->push_back(cache);
+ calld->send_messages.push_back(cache);
}
// Save metadata batch for send_trailing_metadata ops.
if (batch->send_trailing_metadata) {
@@ -1180,7 +1121,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
chand, calld, idx);
}
- (*calld->send_messages)[idx]->Destroy();
+ calld->send_messages[idx]->Destroy();
}
// Frees cached send_trailing_metadata.
@@ -1650,55 +1591,66 @@ static bool maybe_retry(grpc_call_element* elem,
// subchannel_batch_data
//
-// Creates a subchannel_batch_data object on the call's arena with the
-// specified refcount. If set_on_complete is true, the batch's
-// on_complete callback will be set to point to on_complete();
-// otherwise, the batch's on_complete callback will be null.
-static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
- int refcount,
- bool set_on_complete) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
+namespace {
+subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
+ call_data* calld, int refcount,
+ bool set_on_complete)
+ : elem(elem),
+ subchannel_call(GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call,
+ "batch_data_create")) {
subchannel_call_retry_state* retry_state =
static_cast<subchannel_call_retry_state*>(
grpc_connected_subchannel_call_get_parent_data(
calld->subchannel_call));
- subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
- gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
- batch_data->elem = elem;
- batch_data->subchannel_call =
- GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
- batch_data->batch.payload = &retry_state->batch_payload;
- gpr_ref_init(&batch_data->refs, refcount);
+ batch.payload = &retry_state->batch_payload;
+ gpr_ref_init(&refs, refcount);
if (set_on_complete) {
- GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
+ GRPC_CLOSURE_INIT(&on_complete, ::on_complete, this,
grpc_schedule_on_exec_ctx);
- batch_data->batch.on_complete = &batch_data->on_complete;
+ batch.on_complete = &on_complete;
}
GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
+}
+
+void subchannel_batch_data::destroy() {
+ subchannel_call_retry_state* retry_state =
+ static_cast<subchannel_call_retry_state*>(
+ grpc_connected_subchannel_call_get_parent_data(subchannel_call));
+ if (batch.send_initial_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
+ }
+ if (batch.send_trailing_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
+ }
+ if (batch.recv_initial_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
+ }
+ if (batch.recv_trailing_metadata) {
+ grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
+ }
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "batch_data_unref");
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+}
+} // namespace
+
+// Creates a subchannel_batch_data object on the call's arena with the
+// specified refcount. If set_on_complete is true, the batch's
+// on_complete callback will be set to point to on_complete();
+// otherwise, the batch's on_complete callback will be null.
+static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
+ int refcount,
+ bool set_on_complete) {
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ subchannel_batch_data* batch_data =
+ new (gpr_arena_alloc(calld->arena, sizeof(*batch_data)))
+ subchannel_batch_data(elem, calld, refcount, set_on_complete);
return batch_data;
}
static void batch_data_unref(subchannel_batch_data* batch_data) {
if (gpr_unref(&batch_data->refs)) {
- subchannel_call_retry_state* retry_state =
- static_cast<subchannel_call_retry_state*>(
- grpc_connected_subchannel_call_get_parent_data(
- batch_data->subchannel_call));
- if (batch_data->batch.send_initial_metadata) {
- grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
- }
- if (batch_data->batch.send_trailing_metadata) {
- grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
- }
- if (batch_data->batch.recv_initial_metadata) {
- grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
- }
- if (batch_data->batch.recv_trailing_metadata) {
- grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
- }
- GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
- call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
- GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+ batch_data->destroy();
}
}
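
A sketch of the drop-to-zero teardown in batch_data_unref() above, using std::atomic<int> as a stand-in for gpr_refcount (an assumption about its behavior, not the real API):

#include <atomic>

struct RefCountedBatch {
  explicit RefCountedBatch(int initial_refs) : refs(initial_refs) {}
  void destroy() { /* release metadata batches, unref the subchannel call */ }
  std::atomic<int> refs;
};

void batch_unref(RefCountedBatch* b) {
  // The last ref releases the resources; the storage itself stays on the
  // call arena and is reclaimed when the arena is destroyed.
  if (b->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) b->destroy();
}
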
@@ -1996,7 +1948,7 @@ static bool pending_batch_is_unstarted(
return true;
}
if (pending->batch->send_message &&
- retry_state->started_send_message_count < calld->send_messages->size()) {
+ retry_state->started_send_message_count < calld->send_messages.size()) {
return true;
}
if (pending->batch->send_trailing_metadata &&
@@ -2152,7 +2104,7 @@ static void add_closures_for_replay_or_pending_send_ops(
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
bool have_pending_send_message_ops =
- retry_state->started_send_message_count < calld->send_messages->size();
+ retry_state->started_send_message_count < calld->send_messages.size();
bool have_pending_send_trailing_metadata_op =
calld->seen_send_trailing_metadata &&
!retry_state->started_send_trailing_metadata;
@@ -2344,7 +2296,7 @@ static void add_retriable_send_message_op(
chand, calld, retry_state->started_send_message_count);
}
grpc_core::ByteStreamCache* cache =
- (*calld->send_messages)[retry_state->started_send_message_count];
+ calld->send_messages[retry_state->started_send_message_count];
++retry_state->started_send_message_count;
retry_state->send_message.Init(cache);
batch_data->batch.send_message = true;
@@ -2476,7 +2428,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
}
// send_message.
// Note that we can only have one send_message op in flight at a time.
- if (retry_state->started_send_message_count < calld->send_messages->size() &&
+ if (retry_state->started_send_message_count < calld->send_messages.size() &&
retry_state->started_send_message_count ==
retry_state->completed_send_message_count &&
!calld->pending_send_message) {
@@ -2497,7 +2449,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
// to start, since we can't send down any more send_message ops after
// send_trailing_metadata.
if (calld->seen_send_trailing_metadata &&
- retry_state->started_send_message_count == calld->send_messages->size() &&
+ retry_state->started_send_message_count == calld->send_messages.size() &&
!retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) {
@@ -2549,7 +2501,7 @@ static void add_subchannel_batches_for_pending_batches(
// send_message ops after send_trailing_metadata.
if (batch->send_trailing_metadata &&
(retry_state->started_send_message_count + batch->send_message <
- calld->send_messages->size() ||
+ calld->send_messages.size() ||
retry_state->started_send_trailing_metadata)) {
continue;
}
@@ -2715,17 +2667,10 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
new_error = grpc_error_add_child(new_error, error);
pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
} else {
- grpc_core::channelz::SubchannelNode* channelz_subchannel =
- calld->pick.connected_subchannel->channelz_subchannel();
- if (channelz_subchannel != nullptr) {
- channelz_subchannel->RecordCallStarted();
- }
if (parent_data_size > 0) {
- subchannel_call_retry_state* retry_state =
- static_cast<subchannel_call_retry_state*>(
- grpc_connected_subchannel_call_get_parent_data(
- calld->subchannel_call));
- retry_state->batch_payload.context = calld->pick.subchannel_call_context;
+ new (grpc_connected_subchannel_call_get_parent_data(
+ calld->subchannel_call))
+ subchannel_call_retry_state(calld->pick.subchannel_call_context);
}
pending_batches_resume(elem);
}
@@ -2951,6 +2896,27 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
}
}
+// If the channel is in TRANSIENT_FAILURE and the call does not have
+// wait_for_ready set, fails the call and returns true.
+static bool fail_call_if_in_transient_failure(grpc_call_element* elem) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch;
+ if (grpc_connectivity_state_check(&chand->state_tracker) ==
+ GRPC_CHANNEL_TRANSIENT_FAILURE &&
+ (batch->payload->send_initial_metadata.send_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
+ pending_batches_fail(
+ elem,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "channel is in state TRANSIENT_FAILURE"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ true /* yield_call_combiner */);
+ return true;
+ }
+ return false;
+}
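
A sketch of the fast-fail decision above with simplified inputs (the real code reads the channel's connectivity state tracker and the batch's send_initial_metadata flags): the call is failed immediately only when the channel is in TRANSIENT_FAILURE and wait_for_ready was not requested.

bool should_fail_fast(bool channel_in_transient_failure, bool wait_for_ready) {
  return channel_in_transient_failure && !wait_for_ready;
}
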
+
// Invoked once resolver results are available.
static void process_service_config_and_start_lb_pick_locked(
grpc_call_element* elem) {
@@ -2958,6 +2924,9 @@ static void process_service_config_and_start_lb_pick_locked(
// Only get service config data on the first attempt.
if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
apply_service_config_to_call_locked(elem);
+ // Check this after applying service config, since it may have
+ // affected the call's wait_for_ready value.
+ if (fail_call_if_in_transient_failure(elem)) return;
}
// Start LB pick.
grpc_core::LbPicker::StartLocked(elem);
@@ -3127,6 +3096,16 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
// We do not yet have an LB policy, so wait for a resolver result.
if (GPR_UNLIKELY(!chand->started_resolving)) {
start_resolving_locked(chand);
+ } else {
+ // Normally, we want to do this check in
+ // process_service_config_and_start_lb_pick_locked(), so that we
+ // can honor the wait_for_ready setting in the service config.
+ // However, if the channel is in TRANSIENT_FAILURE at this point, that
+ // means that the resolver has returned a failure, so we're not going
+ // to get a service config right away. In that case, we fail the
+ // call now based on the wait_for_ready value passed in from the
+ // application.
+ if (fail_call_if_in_transient_failure(elem)) return;
}
// Create a new waiter, which will delete itself when done.
grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
@@ -3231,21 +3210,8 @@ static void cc_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* cc_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- // Initialize data members.
- calld->path = grpc_slice_ref_internal(args->path);
- calld->call_start_time = args->start_time;
- calld->deadline = args->deadline;
- calld->arena = args->arena;
- calld->owning_call = args->call_stack;
- calld->call_combiner = args->call_combiner;
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
- grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
- calld->deadline);
- }
- calld->enable_retries = chand->enable_retries;
- calld->send_messages.Init();
+ new (elem->call_data) call_data(elem, *chand, *args);
return GRPC_ERROR_NONE;
}
@@ -3254,34 +3220,12 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_schedule_closure) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
- grpc_deadline_state_destroy(elem);
- }
- grpc_slice_unref_internal(calld->path);
- calld->retry_throttle_data.reset();
- calld->method_params.reset();
- GRPC_ERROR_UNREF(calld->cancel_error);
if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = nullptr;
- GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
- "client_channel_destroy_call");
- }
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
- GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
- }
- if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
- calld->pick.connected_subchannel.reset();
- }
- for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
- if (calld->pick.subchannel_call_context[i].value != nullptr) {
- calld->pick.subchannel_call_context[i].destroy(
- calld->pick.subchannel_call_context[i].value);
- }
}
- calld->send_messages.Destroy();
+ calld->~call_data();
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}
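
A sketch, with a hypothetical Arena type rather than gpr_arena, of the element lifecycle that cc_init_call_elem()/cc_destroy_call_elem() now follow above: per-call state is constructed with placement new into arena-owned storage and torn down with an explicit destructor call, because the arena releases raw memory without running destructors.

#include <cstddef>
#include <new>
#include <string>

struct Arena {  // simplified stand-in, not the real arena API
  alignas(std::max_align_t) unsigned char storage[256];
  void* Alloc(std::size_t) { return storage; }
};

struct CallState {
  explicit CallState(std::string p) : path(std::move(p)) {}
  std::string path;
};

void call_lifecycle(Arena* arena) {
  auto* calld = new (arena->Alloc(sizeof(CallState))) CallState("/svc/method");
  // ... calld is used for the lifetime of the call ...
  calld->~CallState();  // explicit dtor; the arena frees the raw memory later
}
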
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.cc b/src/core/ext/filters/client_channel/health/health_check_client.cc
index 400d99b07c..2232c57120 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.cc
+++ b/src/core/ext/filters/client_channel/health/health_check_client.cc
@@ -19,6 +19,7 @@
#include <grpc/support/port_platform.h>
#include <stdint.h>
+#include <stdio.h>
#include "src/core/ext/filters/client_channel/health/health_check_client.h"
@@ -50,8 +51,7 @@ HealthCheckClient::HealthCheckClient(
RefCountedPtr<ConnectedSubchannel> connected_subchannel,
grpc_pollset_set* interested_parties,
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node)
- : InternallyRefCountedWithTracing<HealthCheckClient>(
- &grpc_health_check_client_trace),
+ : InternallyRefCounted<HealthCheckClient>(&grpc_health_check_client_trace),
service_name_(service_name),
connected_subchannel_(std::move(connected_subchannel)),
interested_parties_(interested_parties),
@@ -280,15 +280,13 @@ bool DecodeResponse(grpc_slice_buffer* slice_buffer, grpc_error** error) {
HealthCheckClient::CallState::CallState(
RefCountedPtr<HealthCheckClient> health_check_client,
grpc_pollset_set* interested_parties)
- : InternallyRefCountedWithTracing<CallState>(
- &grpc_health_check_client_trace),
+ : InternallyRefCounted<CallState>(&grpc_health_check_client_trace),
health_check_client_(std::move(health_check_client)),
pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
arena_(gpr_arena_create(health_check_client_->connected_subchannel_
- ->GetInitialCallSizeEstimate(0))) {
- memset(&call_combiner_, 0, sizeof(call_combiner_));
+ ->GetInitialCallSizeEstimate(0))),
+ payload_(context_) {
grpc_call_combiner_init(&call_combiner_);
- memset(context_, 0, sizeof(context_));
gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0));
}
@@ -298,6 +296,11 @@ HealthCheckClient::CallState::~CallState() {
health_check_client_.get(), this);
}
if (call_ != nullptr) GRPC_SUBCHANNEL_CALL_UNREF(call_, "call_ended");
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
+ if (context_[i].destroy != nullptr) {
+ context_[i].destroy(context_[i].value);
+ }
+ }
// Unset the call combiner cancellation closure. This has the
// effect of scheduling the previously set cancellation closure, if
// any, so that it can release any internal references it may be
@@ -345,6 +348,7 @@ void HealthCheckClient::CallState::StartCall() {
}
// Initialize payload and batch.
memset(&batch_, 0, sizeof(batch_));
+ payload_.context = context_;
batch_.payload = &payload_;
// on_complete callback takes ref, handled manually.
Ref(DEBUG_LOCATION, "on_complete").release();
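
A sketch of the context cleanup added to ~CallState() above, with a stand-in for grpc_call_context_element: each slot optionally carries a destroy callback that owns the stored value.

#include <cstddef>

struct ContextElement {
  void* value = nullptr;
  void (*destroy)(void*) = nullptr;
};

void destroy_call_contexts(ContextElement* context, std::size_t count) {
  for (std::size_t i = 0; i < count; ++i) {
    if (context[i].destroy != nullptr) context[i].destroy(context[i].value);
  }
}
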
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.h b/src/core/ext/filters/client_channel/health/health_check_client.h
index 7f77348f18..2369b73fea 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.h
+++ b/src/core/ext/filters/client_channel/health/health_check_client.h
@@ -41,8 +41,7 @@
namespace grpc_core {
-class HealthCheckClient
- : public InternallyRefCountedWithTracing<HealthCheckClient> {
+class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
public:
HealthCheckClient(const char* service_name,
RefCountedPtr<ConnectedSubchannel> connected_subchannel,
@@ -61,7 +60,7 @@ class HealthCheckClient
private:
// Contains a call to the backend and all the data related to the call.
- class CallState : public InternallyRefCountedWithTracing<CallState> {
+ class CallState : public InternallyRefCounted<CallState> {
public:
CallState(RefCountedPtr<HealthCheckClient> health_check_client,
grpc_pollset_set* interested_parties_);
@@ -97,7 +96,7 @@ class HealthCheckClient
gpr_arena* arena_;
grpc_call_combiner call_combiner_;
- grpc_call_context_element context_[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
// The streaming call to the backend. Always non-NULL.
grpc_subchannel_call* call_;
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index bfabc68c66..0716e46818 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -29,7 +29,6 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/gpr/env.h"
@@ -37,6 +36,7 @@
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/uri/uri_parser.h"
typedef struct http_connect_handshaker {
// Base class. Must be first.
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index 26d3f479b7..8951a2920c 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -29,12 +29,12 @@
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/slice/b64.h"
+#include "src/core/lib/uri/uri_parser.h"
/**
* Parses the 'https_proxy' env var (fallback on 'http_proxy') and returns the
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index e065f45639..b4e803689e 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -27,7 +27,7 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
namespace grpc_core {
LoadBalancingPolicy::LoadBalancingPolicy(const Args& args)
- : InternallyRefCountedWithTracing(&grpc_trace_lb_policy_refcount),
+ : InternallyRefCounted(&grpc_trace_lb_policy_refcount),
combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
client_channel_factory_(args.client_channel_factory),
interested_parties_(grpc_pollset_set_create()),
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 21f80b7b94..7034da6249 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -42,8 +42,7 @@ namespace grpc_core {
///
/// Any I/O done by the LB policy should be done under the pollset_set
/// returned by \a interested_parties().
-class LoadBalancingPolicy
- : public InternallyRefCountedWithTracing<LoadBalancingPolicy> {
+class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
public:
struct Args {
/// The combiner under which all LB policy calls will be run.
@@ -58,44 +57,47 @@ class LoadBalancingPolicy
/// Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_LB_ADDRESSES channel arg.
grpc_channel_args* args = nullptr;
+ /// Load balancing config from the resolver.
+ grpc_json* lb_config = nullptr;
};
/// State used for an LB pick.
struct PickState {
/// Initial metadata associated with the picking call.
- grpc_metadata_batch* initial_metadata;
+ grpc_metadata_batch* initial_metadata = nullptr;
/// Bitmask used for selective cancelling. See
/// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
- uint32_t initial_metadata_flags;
+ uint32_t initial_metadata_flags = 0;
/// Storage for LB token in \a initial_metadata, or nullptr if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
/// If null, pick will fail if a result is not available synchronously.
- grpc_closure* on_complete;
+ grpc_closure* on_complete = nullptr;
/// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call.
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if
/// needed.
- grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
/// Upon success, \a *user_data will be set to whatever opaque information
/// may need to be propagated from the LB policy, or nullptr if not needed.
// TODO(roth): As part of revamping our metadata APIs, try to find a
// way to clean this up and C++-ify it.
- void** user_data;
+ void** user_data = nullptr;
/// Next pointer. For internal use by LB policy.
- PickState* next;
+ PickState* next = nullptr;
};
// Not copyable nor movable.
LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
- /// Updates the policy with a new set of \a args from the resolver.
- /// Note that the LB policy gets the set of addresses from the
+ /// Updates the policy with a new set of \a args and a new \a lb_config from
+ /// the resolver. Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_LB_ADDRESSES channel arg.
- virtual void UpdateLocked(const grpc_channel_args& args) GRPC_ABSTRACT;
+ virtual void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) GRPC_ABSTRACT;
/// Finds an appropriate subchannel for a call, based on data in \a pick.
/// \a pick must remain alive until the pick is complete.
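
A sketch, with stand-in types rather than the real grpc_core classes, of what the widened UpdateLocked() signature above means for implementations: the resolver now hands the policy its load-balancing config alongside the channel args, and policies that take no config simply ignore the extra parameter.

struct ChannelArgs {};
struct JsonConfig {};

class Policy {
 public:
  virtual ~Policy() = default;
  virtual void UpdateLocked(const ChannelArgs& args, JsonConfig* lb_config) = 0;
};

class AddressOnlyPolicy : public Policy {
 public:
  void UpdateLocked(const ChannelArgs& args, JsonConfig* /*lb_config*/) override {
    (void)args;  // pick_first/round_robin-style policies only consume addresses
  }
};
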
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index cc259bcdbf..399bb452f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -37,16 +37,27 @@ static void destroy_channel_elem(grpc_channel_element* elem) {}
namespace {
struct call_data {
+ call_data(const grpc_call_element_args& args) {
+ if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
+ // Get stats object from context and take a ref.
+ client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
+ args.context[GRPC_GRPCLB_CLIENT_STATS].value)
+ ->Ref();
+ // Record call started.
+ client_stats->AddCallStarted();
+ }
+ }
+
// Stats object to update.
grpc_core::RefCountedPtr<grpc_core::GrpcLbClientStats> client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
grpc_closure* original_on_complete_for_send;
- bool send_initial_metadata_succeeded;
+ bool send_initial_metadata_succeeded = false;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
grpc_closure* original_recv_initial_metadata_ready;
- bool recv_initial_metadata_succeeded;
+ bool recv_initial_metadata_succeeded = false;
};
} // namespace
@@ -70,16 +81,8 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- // Get stats object from context and take a ref.
GPR_ASSERT(args->context != nullptr);
- if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
- calld->client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
- args->context[GRPC_GRPCLB_CLIENT_STATS].value)
- ->Ref();
- // Record call started.
- calld->client_stats->AddCallStarted();
- }
+ new (elem->call_data) call_data(*args);
return GRPC_ERROR_NONE;
}
@@ -97,6 +100,7 @@ static void destroy_call_elem(grpc_call_element* elem,
// TODO(roth): Eliminate this once filter stack is converted to C++.
calld->client_stats.reset();
}
+ calld->~call_data();
}
static void start_transport_stream_op_batch(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 17e0d26875..a46579c7f7 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -123,7 +123,8 @@ class GrpcLb : public LoadBalancingPolicy {
public:
GrpcLb(const grpc_lb_addresses* addresses, const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -170,8 +171,7 @@ class GrpcLb : public LoadBalancingPolicy {
};
/// Contains a call to the LB server and all the data related to the call.
- class BalancerCallState
- : public InternallyRefCountedWithTracing<BalancerCallState> {
+ class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public:
explicit BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy);
@@ -498,7 +498,7 @@ grpc_lb_addresses* ProcessServerlist(const grpc_grpclb_serverlist* serverlist) {
GrpcLb::BalancerCallState::BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_grpclb_policy)
- : InternallyRefCountedWithTracing<BalancerCallState>(&grpc_lb_glb_trace),
+ : InternallyRefCounted<BalancerCallState>(&grpc_lb_glb_trace),
grpclb_policy_(std::move(parent_grpclb_policy)) {
GPR_ASSERT(grpclb_policy_ != nullptr);
GPR_ASSERT(!grpclb_policy()->shutting_down_);
@@ -748,7 +748,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
- // Empty payload means the LB call was cancelled.
+ // Null payload means the LB call was cancelled.
if (lb_calld != grpclb_policy->lb_calld_.get() ||
lb_calld->recv_message_payload_ == nullptr) {
lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
@@ -802,54 +802,45 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
gpr_free(ipport);
}
}
- /* update serverlist */
- if (serverlist->num_servers > 0) {
- // Start sending client load report only after we start using the
- // serverlist returned from the current LB call.
- if (lb_calld->client_stats_report_interval_ > 0 &&
- lb_calld->client_stats_ == nullptr) {
- lb_calld->client_stats_.reset(New<GrpcLbClientStats>());
- // TODO(roth): We currently track this ref manually. Once the
- // ClosureRef API is ready, we should pass the RefCountedPtr<> along
- // with the callback.
- auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report");
- self.release();
- lb_calld->ScheduleNextClientLoadReportLocked();
- }
- if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_,
- serverlist)) {
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[grpclb %p] Incoming server list identical to current, "
- "ignoring.",
- grpclb_policy);
- }
- grpc_grpclb_destroy_serverlist(serverlist);
- } else { /* new serverlist */
- if (grpclb_policy->serverlist_ != nullptr) {
- /* dispose of the old serverlist */
- grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_);
- } else {
- /* or dispose of the fallback */
- grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_);
- grpclb_policy->fallback_backend_addresses_ = nullptr;
- if (grpclb_policy->fallback_timer_callback_pending_) {
- grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
- }
- }
- // and update the copy in the GrpcLb instance. This
- // serverlist instance will be destroyed either upon the next
- // update or when the GrpcLb instance is destroyed.
- grpclb_policy->serverlist_ = serverlist;
- grpclb_policy->serverlist_index_ = 0;
- grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
- }
- } else {
+ // Start sending client load report only after we start using the
+ // serverlist returned from the current LB call.
+ if (lb_calld->client_stats_report_interval_ > 0 &&
+ lb_calld->client_stats_ == nullptr) {
+ lb_calld->client_stats_.reset(New<GrpcLbClientStats>());
+ // TODO(roth): We currently track this ref manually. Once the
+ // ClosureRef API is ready, we should pass the RefCountedPtr<> along
+ // with the callback.
+ auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report");
+ self.release();
+ lb_calld->ScheduleNextClientLoadReportLocked();
+ }
+ // Check if the serverlist differs from the previous one.
+ if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_, serverlist)) {
if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Received empty server list, ignoring.",
+ gpr_log(GPR_INFO,
+ "[grpclb %p] Incoming server list identical to current, "
+ "ignoring.",
grpclb_policy);
}
grpc_grpclb_destroy_serverlist(serverlist);
+ } else { // New serverlist.
+ if (grpclb_policy->serverlist_ != nullptr) {
+ // Dispose of the old serverlist.
+ grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_);
+ } else {
+ // Dispose of the fallback.
+ grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_);
+ grpclb_policy->fallback_backend_addresses_ = nullptr;
+ if (grpclb_policy->fallback_timer_callback_pending_) {
+ grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_);
+ }
+ }
+ // Update the serverlist in the GrpcLb instance. This serverlist
+ // instance will be destroyed either upon the next update or when the
+ // GrpcLb instance is destroyed.
+ grpclb_policy->serverlist_ = serverlist;
+ grpclb_policy->serverlist_index_ = 0;
+ grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
}
} else {
// No valid initial response or serverlist found.
@@ -1331,13 +1322,10 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
-void GrpcLb::UpdateLocked(const grpc_channel_args& args) {
+void GrpcLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
ProcessChannelArgsLocked(args);
- // If fallback is configured and the RR policy already exists, update
- // it with the new fallback addresses.
- if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) {
- CreateOrUpdateRoundRobinPolicyLocked();
- }
+ // Update the existing RR policy.
+ if (rr_policy_ != nullptr) CreateOrUpdateRoundRobinPolicyLocked();
// Start watching the LB channel connectivity for connection, if not
// already doing so.
if (!watching_lb_channel_) {
@@ -1585,7 +1573,7 @@ void GrpcLb::AddPendingPick(PendingPick* pp) {
bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
grpc_error** error) {
// Check for drops if we are not using fallback backend addresses.
- if (serverlist_ != nullptr) {
+ if (serverlist_ != nullptr && serverlist_->num_servers > 0) {
// Look at the index into the serverlist to see if we should drop this call.
grpc_grpclb_server* server = serverlist_->servers[serverlist_index_++];
if (serverlist_index_ == serverlist_->num_servers) {
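
A sketch, with stand-in types, of the guarded drop check above: drops are consulted only when a non-empty serverlist is in use (fallback addresses or an empty balancer response never drop), and the index wraps after the last entry.

#include <cstddef>
#include <vector>

struct ServerEntry { bool drop; };

bool should_drop_call(const std::vector<ServerEntry>& serverlist,
                      std::size_t* index) {
  if (serverlist.empty()) return false;
  const ServerEntry& server = serverlist[(*index)++];
  if (*index == serverlist.size()) *index = 0;
  return server.drop;
}
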
@@ -1683,7 +1671,6 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
grpc_lb_addresses* addresses;
bool is_backend_from_grpclb_load_balancer = false;
if (serverlist_ != nullptr) {
- GPR_ASSERT(serverlist_->num_servers > 0);
addresses = ProcessServerlist(serverlist_);
is_backend_from_grpclb_load_balancer = true;
} else {
@@ -1730,7 +1717,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
}
- rr_policy_->UpdateLocked(*args);
+ rr_policy_->UpdateLocked(*args, nullptr);
} else {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index eb494486b9..d1a05f1255 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -46,7 +46,8 @@ class PickFirst : public LoadBalancingPolicy {
public:
explicit PickFirst(const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -159,7 +160,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p created.", this);
}
- UpdateLocked(*args.args);
+ UpdateLocked(*args.args, args.lb_config);
grpc_subchannel_index_ref();
}
@@ -333,7 +334,8 @@ void PickFirst::UpdateChildRefsLocked() {
child_subchannels_ = std::move(cs);
}
-void PickFirst::UpdateLocked(const grpc_channel_args& args) {
+void PickFirst::UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) {
AutoChildRefsUpdater guard(this);
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
@@ -378,6 +380,31 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
selected_ = nullptr;
return;
}
+ // If one of the subchannels in the new list is already in state
+ // READY, then select it immediately. This can happen when the
+ // currently selected subchannel is also present in the update. It
+ // can also happen if one of the subchannels in the update is already
+ // in the subchannel index because it's in use by another channel.
+ for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
+ PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_connectivity_state state = sd->CheckConnectivityStateLocked(&error);
+ GRPC_ERROR_UNREF(error);
+ if (state == GRPC_CHANNEL_READY) {
+ subchannel_list_ = std::move(subchannel_list);
+ sd->ProcessUnselectedReadyLocked();
+ sd->StartConnectivityWatchLocked();
+ // If there was a previously pending update (which may or may
+ // not have contained the currently selected subchannel), drop
+ // it, so that it doesn't override what we've done here.
+ latest_pending_subchannel_list_.reset();
+ // Make sure that subsequent calls to ExitIdleLocked() don't cause
+ // us to start watching a subchannel other than the one we've
+ // selected.
+ started_picking_ = true;
+ return;
+ }
+ }
if (selected_ == nullptr) {
// We don't yet have a selected subchannel, so replace the current
// subchannel list immediately.
@@ -385,46 +412,14 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
- subchannel_list_->subchannel(0)
- ->CheckConnectivityStateAndStartWatchingLocked();
+ // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
+ // here, since we've already checked the initial connectivity
+ // state of all subchannels above.
+ subchannel_list_->subchannel(0)->StartConnectivityWatchLocked();
}
} else {
- // We do have a selected subchannel.
- // Check if it's present in the new list. If so, we're done.
- for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
- PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
- if (sd->subchannel() == selected_->subchannel()) {
- // The currently selected subchannel is in the update: we are done.
- if (grpc_lb_pick_first_trace.enabled()) {
- gpr_log(GPR_INFO,
- "Pick First %p found already selected subchannel %p "
- "at update index %" PRIuPTR " of %" PRIuPTR "; update done",
- this, selected_->subchannel(), i,
- subchannel_list->num_subchannels());
- }
- // Make sure it's in state READY. It might not be if we grabbed
- // the combiner while a connectivity state notification
- // informing us otherwise is pending.
- // Note that CheckConnectivityStateLocked() also takes a ref to
- // the connected subchannel.
- grpc_error* error = GRPC_ERROR_NONE;
- if (sd->CheckConnectivityStateLocked(&error) == GRPC_CHANNEL_READY) {
- selected_ = sd;
- subchannel_list_ = std::move(subchannel_list);
- sd->StartConnectivityWatchLocked();
- // If there was a previously pending update (which may or may
- // not have contained the currently selected subchannel), drop
- // it, so that it doesn't override what we've done here.
- latest_pending_subchannel_list_.reset();
- return;
- }
- GRPC_ERROR_UNREF(error);
- }
- }
- // Not keeping the previous selected subchannel, so set the latest
- // pending subchannel list to the new subchannel list. We will wait
- // for it to report READY before swapping it into the current
- // subchannel list.
+ // We do have a selected subchannel, so keep using it until one of
+ // the subchannels in the new list reports READY.
if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO,
@@ -438,8 +433,11 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// If we've started picking, start trying to connect to the first
// subchannel in the new list.
if (started_picking_) {
+ // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
+ // here, since we've already checked the initial connectivity
+ // state of all subchannels above.
latest_pending_subchannel_list_->subchannel(0)
- ->CheckConnectivityStateAndStartWatchingLocked();
+ ->StartConnectivityWatchLocked();
}
}
}
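
A sketch, with stand-in types, of the selection shortcut added to PickFirst::UpdateLocked() above: scan the new subchannel list and, if some entry already reports READY, adopt it immediately instead of waiting for a connectivity notification; otherwise fall through to the pending-list handling.

#include <cstddef>
#include <vector>

enum class ConnState { IDLE, CONNECTING, READY, TRANSIENT_FAILURE };

struct SubchannelInfo { ConnState state; };

// Returns the index of an already-READY subchannel, or -1 if there is none.
int find_ready_subchannel(const std::vector<SubchannelInfo>& new_list) {
  for (std::size_t i = 0; i < new_list.size(); ++i) {
    if (new_list[i].state == ConnState::READY) return static_cast<int>(i);
  }
  return -1;
}
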
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index e9ed85cf66..2a16975131 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -57,7 +57,8 @@ class RoundRobin : public LoadBalancingPolicy {
public:
explicit RoundRobin(const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -232,7 +233,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"round_robin");
- UpdateLocked(*args.args);
+ UpdateLocked(*args.args, args.lb_config);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels());
@@ -664,7 +665,8 @@ void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
notify);
}
-void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
+void RoundRobin::UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) {
const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES);
AutoChildRefsUpdater guard(this);
if (GPR_UNLIKELY(arg == nullptr || arg->type != GRPC_ARG_POINTER)) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 4ec9e935ed..f31401502c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -186,8 +186,7 @@ class SubchannelData {
// A list of subchannels.
template <typename SubchannelListType, typename SubchannelDataType>
-class SubchannelList
- : public InternallyRefCountedWithTracing<SubchannelListType> {
+class SubchannelList : public InternallyRefCounted<SubchannelListType> {
public:
typedef InlinedVector<SubchannelDataType, 10> SubchannelVector;
@@ -226,8 +225,7 @@ class SubchannelList
// Note: Caller must ensure that this is invoked inside of the combiner.
void Orphan() override {
ShutdownLocked();
- InternallyRefCountedWithTracing<SubchannelListType>::Unref(DEBUG_LOCATION,
- "shutdown");
+ InternallyRefCounted<SubchannelListType>::Unref(DEBUG_LOCATION, "shutdown");
}
GRPC_ABSTRACT_BASE_CLASS
@@ -493,7 +491,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
const grpc_lb_addresses* addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args)
- : InternallyRefCountedWithTracing<SubchannelListType>(tracer),
+ : InternallyRefCounted<SubchannelListType>(tracer),
policy_(policy),
tracer_(tracer),
combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
index 7fb4cbdcd2..faedc0a919 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
@@ -26,30 +26,26 @@
/// channel that uses pick_first to select from the list of balancer
/// addresses.
///
-/// The first time the policy gets a request for a pick, a ping, or to exit
-/// the idle state, \a StartPickingLocked() is called. This method is
-/// responsible for instantiating the internal *streaming* call to the LB
-/// server (whichever address pick_first chose). The call will be complete
-/// when either the balancer sends status or when we cancel the call (e.g.,
-/// because we are shutting down). In needed, we retry the call. If we
-/// received at least one valid message from the server, a new call attempt
-/// will be made immediately; otherwise, we apply back-off delays between
-/// attempts.
+/// The first time the xDS policy gets a request for a pick or to exit the idle
+/// state, \a StartPickingLocked() is called. This method is responsible for
+/// instantiating the internal *streaming* call to the LB server (whichever
+/// address pick_first chose). The call will be complete when either the
+/// balancer sends status or when we cancel the call (e.g., because we are
+/// shutting down). If needed, we retry the call. If we received at least one
+/// valid message from the server, a new call attempt will be made immediately;
+/// otherwise, we apply back-off delays between attempts.
///
-/// We maintain an internal round_robin policy instance for distributing
+/// We maintain an internal child policy (round_robin) instance for distributing
/// requests across backends. Whenever we receive a new serverlist from
-/// the balancer, we update the round_robin policy with the new list of
-/// addresses. If we cannot communicate with the balancer on startup,
-/// however, we may enter fallback mode, in which case we will populate
-/// the RR policy's addresses from the backend addresses returned by the
-/// resolver.
+/// the balancer, we update the child policy with the new list of
+/// addresses.
///
-/// Once an RR policy instance is in place (and getting updated as described),
-/// calls for a pick, a ping, or a cancellation will be serviced right
-/// away by forwarding them to the RR instance. Any time there's no RR
-/// policy available (i.e., right after the creation of the gRPCLB policy),
-/// pick and ping requests are added to a list of pending picks and pings
-/// to be flushed and serviced when the RR policy instance becomes available.
+/// Once a child policy instance is in place (and getting updated as
+/// described), calls for a pick or a cancellation will be serviced right away
+/// by forwarding them to the child policy instance. Any time there's no child
+/// policy available (i.e., right after the creation of the xDS policy), pick
+/// requests are added to a list of pending picks to be flushed and serviced
+/// when the child policy instance becomes available.
///
/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
/// high level design and details.
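
A sketch of the retry behavior described in the comment above, as a hypothetical helper that is not part of the patch: a new balancer call attempt starts immediately if the previous attempt received at least one valid message; otherwise the next attempt waits out the backoff delay.

#include <chrono>

std::chrono::milliseconds delay_before_next_balancer_call(
    bool previous_attempt_got_valid_message,
    std::chrono::milliseconds current_backoff) {
  return previous_attempt_got_valid_message ? std::chrono::milliseconds(0)
                                            : current_backoff;
}
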
@@ -122,7 +118,8 @@ class XdsLb : public LoadBalancingPolicy {
public:
XdsLb(const grpc_lb_addresses* addresses, const Args& args);
- void UpdateLocked(const grpc_channel_args& args) override;
+ void UpdateLocked(const grpc_channel_args& args,
+ grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -141,10 +138,10 @@ class XdsLb : public LoadBalancingPolicy {
private:
/// Linked list of pending pick requests. It stores all information needed to
- /// eventually call (Round Robin's) pick() on them. They mainly stay pending
- /// waiting for the RR policy to be created.
+ /// eventually call pick() on them. They mainly stay pending waiting for the
+ /// child policy to be created.
///
- /// Note that when a pick is sent to the RR policy, we inject our own
+ /// Note that when a pick is sent to the child policy, we inject our own
/// on_complete callback, so that we can intercept the result before
/// invoking the original on_complete callback. This allows us to set the
/// LB token metadata and add client_stats to the call context.
@@ -169,8 +166,7 @@ class XdsLb : public LoadBalancingPolicy {
};
/// Contains a call to the LB server and all the data related to the call.
- class BalancerCallState
- : public InternallyRefCountedWithTracing<BalancerCallState> {
+ class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public:
explicit BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_xdslb_policy);
@@ -202,7 +198,6 @@ class XdsLb : public LoadBalancingPolicy {
static bool LoadReportCountersAreZero(xds_grpclb_request* request);
static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error);
- static void ClientLoadReportDoneLocked(void* arg, grpc_error* error);
static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
@@ -266,18 +261,18 @@ class XdsLb : public LoadBalancingPolicy {
void AddPendingPick(PendingPick* pp);
static void OnPendingPickComplete(void* arg, grpc_error* error);
- // Methods for dealing with the RR policy.
- void CreateOrUpdateRoundRobinPolicyLocked();
- grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
- void CreateRoundRobinPolicyLocked(const Args& args);
- bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
- grpc_error** error);
- void UpdateConnectivityStateFromRoundRobinPolicyLocked(
- grpc_error* rr_state_error);
- static void OnRoundRobinConnectivityChangedLocked(void* arg,
- grpc_error* error);
- static void OnRoundRobinRequestReresolutionLocked(void* arg,
- grpc_error* error);
+ // Methods for dealing with the child policy.
+ void CreateOrUpdateChildPolicyLocked();
+ grpc_channel_args* CreateChildPolicyArgsLocked();
+ void CreateChildPolicyLocked(const Args& args);
+ bool PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
+ grpc_error** error);
+ void UpdateConnectivityStateFromChildPolicyLocked(
+ grpc_error* child_state_error);
+ static void OnChildPolicyConnectivityChangedLocked(void* arg,
+ grpc_error* error);
+ static void OnChildPolicyRequestReresolutionLocked(void* arg,
+ grpc_error* error);
// Who the client is trying to communicate with.
const char* server_name_ = nullptr;
@@ -319,10 +314,6 @@ class XdsLb : public LoadBalancingPolicy {
// The deserialized response from the balancer. May be nullptr until one
// such response has arrived.
xds_grpclb_serverlist* serverlist_ = nullptr;
- // Index into serverlist for next pick.
- // If the server at this index is a drop, we return a drop.
- // Otherwise, we delegate to the RR policy.
- size_t serverlist_index_ = 0;
  // Timeout in milliseconds before using fallback backend addresses.
// 0 means not using fallback.
@@ -334,14 +325,14 @@ class XdsLb : public LoadBalancingPolicy {
grpc_timer lb_fallback_timer_;
grpc_closure lb_on_fallback_;
- // Pending picks that are waiting on the RR policy's connectivity.
+ // Pending picks that are waiting on the xDS policy's connectivity.
PendingPick* pending_picks_ = nullptr;
- // The RR policy to use for the backends.
- OrphanablePtr<LoadBalancingPolicy> rr_policy_;
- grpc_connectivity_state rr_connectivity_state_;
- grpc_closure on_rr_connectivity_changed_;
- grpc_closure on_rr_request_reresolution_;
+ // The policy to use for the backends.
+ OrphanablePtr<LoadBalancingPolicy> child_policy_;
+ grpc_connectivity_state child_connectivity_state_;
+ grpc_closure on_child_connectivity_changed_;
+ grpc_closure on_child_request_reresolution_;
};
//
@@ -448,7 +439,7 @@ grpc_lb_addresses* ProcessServerlist(const xds_grpclb_serverlist* serverlist) {
grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
- * to the outside world) to be read by the RR policy during its creation.
+ * to the outside world) to be read by the child policy during its creation.
* Given that the validity tests are very cheap, they are performed again
* instead of marking the valid ones during the first pass, as this would
 * incur an allocation due to the arbitrary number of servers. */
@@ -496,7 +487,7 @@ grpc_lb_addresses* ProcessServerlist(const xds_grpclb_serverlist* serverlist) {
XdsLb::BalancerCallState::BalancerCallState(
RefCountedPtr<LoadBalancingPolicy> parent_xdslb_policy)
- : InternallyRefCountedWithTracing<BalancerCallState>(&grpc_lb_xds_trace),
+ : InternallyRefCounted<BalancerCallState>(&grpc_lb_xds_trace),
xdslb_policy_(std::move(parent_xdslb_policy)) {
GPR_ASSERT(xdslb_policy_ != nullptr);
GPR_ASSERT(!xdslb_policy()->shutting_down_);
@@ -675,6 +666,7 @@ bool XdsLb::BalancerCallState::LoadReportCountersAreZero(
(drop_entries == nullptr || drop_entries->empty());
}
+// TODO(vpowar): Use LRS to send the client Load Report.
void XdsLb::BalancerCallState::SendClientLoadReportLocked() {
// Construct message payload.
GPR_ASSERT(send_message_payload_ == nullptr);
@@ -692,38 +684,8 @@ void XdsLb::BalancerCallState::SendClientLoadReportLocked() {
} else {
last_client_load_report_counters_were_zero_ = false;
}
- grpc_slice request_payload_slice = xds_grpclb_request_encode(request);
- send_message_payload_ =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
+ // TODO(vpowar): Send the report on LRS stream.
xds_grpclb_request_destroy(request);
- // Send the report.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = send_message_payload_;
- GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDoneLocked,
- this, grpc_combiner_scheduler(xdslb_policy()->combiner()));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lb_call_, &op, 1, &client_load_report_closure_);
- if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
- gpr_log(GPR_ERROR, "[xdslb %p] call_error=%d", xdslb_policy_.get(),
- call_error);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- }
-}
-
-void XdsLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
- grpc_error* error) {
- BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
- XdsLb* xdslb_policy = lb_calld->xdslb_policy();
- grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
- lb_calld->send_message_payload_ = nullptr;
- if (error != GRPC_ERROR_NONE || lb_calld != xdslb_policy->lb_calld_.get()) {
- lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
- return;
- }
- lb_calld->ScheduleNextClientLoadReportLocked();
}
void XdsLb::BalancerCallState::OnInitialRequestSentLocked(void* arg,
@@ -837,8 +799,7 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// serverlist instance will be destroyed either upon the next
// update or when the XdsLb instance is destroyed.
xdslb_policy->serverlist_ = serverlist;
- xdslb_policy->serverlist_index_ = 0;
- xdslb_policy->CreateOrUpdateRoundRobinPolicyLocked();
+ xdslb_policy->CreateOrUpdateChildPolicyLocked();
}
} else {
if (grpc_lb_xds_trace.enabled()) {
@@ -871,7 +832,7 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
&lb_calld->lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else {
- lb_calld->Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown");
+ lb_calld->Unref(DEBUG_LOCATION, "on_message_received+xds_shutdown");
}
}
@@ -949,7 +910,7 @@ grpc_lb_addresses* ExtractBalancerAddresses(
* - \a addresses: corresponding to the balancers.
* - \a response_generator: in order to propagate updates from the resolver
* above the grpclb policy.
- * - \a args: other args inherited from the grpclb policy. */
+ * - \a args: other args inherited from the xds policy. */
grpc_channel_args* BuildBalancerChannelArgs(
const grpc_lb_addresses* addresses,
FakeResolverResponseGenerator* response_generator,
@@ -971,10 +932,10 @@ grpc_channel_args* BuildBalancerChannelArgs(
// resolver will have is_balancer=false, whereas our own addresses have
// is_balancer=true. We need the LB channel to return addresses with
// is_balancer=false so that it does not wind up recursively using the
- // grpclb LB policy, as per the special case logic in client_channel.c.
+ // xds LB policy, as per the special case logic in client_channel.c.
GRPC_ARG_LB_ADDRESSES,
// The fake resolver response generator, because we are replacing it
- // with the one from the grpclb policy, used to propagate updates to
+ // with the one from the xds policy, used to propagate updates to
// the LB channel.
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
// The LB channel should use the authority indicated by the target
@@ -996,7 +957,7 @@ grpc_channel_args* BuildBalancerChannelArgs(
// address updates into the LB channel.
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
response_generator),
- // A channel arg indicating the target is a grpclb load balancer.
+      // A channel arg indicating the target is an xds load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER), 1),
      // A channel arg indicating this is an internal channel, aka it is
@@ -1019,6 +980,7 @@ grpc_channel_args* BuildBalancerChannelArgs(
// ctor and dtor
//
+// TODO(vishalpowar): Use lb_config in args to configure LB policy.
XdsLb::XdsLb(const grpc_lb_addresses* addresses,
const LoadBalancingPolicy::Args& args)
: LoadBalancingPolicy(args),
@@ -1036,11 +998,11 @@ XdsLb::XdsLb(const grpc_lb_addresses* addresses,
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&XdsLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
- GRPC_CLOSURE_INIT(&on_rr_connectivity_changed_,
- &XdsLb::OnRoundRobinConnectivityChangedLocked, this,
+ GRPC_CLOSURE_INIT(&on_child_connectivity_changed_,
+ &XdsLb::OnChildPolicyConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
- GRPC_CLOSURE_INIT(&on_rr_request_reresolution_,
- &XdsLb::OnRoundRobinRequestReresolutionLocked, this,
+ GRPC_CLOSURE_INIT(&on_child_request_reresolution_,
+ &XdsLb::OnChildPolicyRequestReresolutionLocked, this,
grpc_combiner_scheduler(args.combiner));
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, "xds");
// Record server name.
@@ -1092,7 +1054,7 @@ void XdsLb::ShutdownLocked() {
if (fallback_timer_callback_pending_) {
grpc_timer_cancel(&lb_fallback_timer_);
}
- rr_policy_.reset();
+ child_policy_.reset();
TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_CANCELLED);
// We destroy the LB channel here instead of in our destructor because
// destroying the channel triggers a last callback to
@@ -1105,7 +1067,7 @@ void XdsLb::ShutdownLocked() {
gpr_mu_unlock(&lb_channel_mu_);
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_REF(error), "grpclb_shutdown");
+ GRPC_ERROR_REF(error), "xds_shutdown");
// Clear pending picks.
PendingPick* pp;
while ((pp = pending_picks_) != nullptr) {
@@ -1138,13 +1100,13 @@ void XdsLb::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
// Cancel a specific pending pick.
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (rr_policy_) available, it'll be
-// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
-// that point onwards, it'll be RR's responsibility. For cancellations, that
-// implies the pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the pending_picks_ list. To cancel these,
+// A pick progresses as follows:
+// - If there's a child policy available, it'll be handed over to the child
+//   policy (in CreateChildPolicyLocked()). From that point onwards, it'll be
+//   the child policy's responsibility. For cancellations, that implies the
+//   pick also needs to be cancelled by the child policy instance.
+// - Otherwise, without a child policy instance, picks stay pending at this
+// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelPickLocked(PickState* pick, grpc_error* error) {
@@ -1164,21 +1126,21 @@ void XdsLb::CancelPickLocked(PickState* pick, grpc_error* error) {
}
pp = next;
}
- if (rr_policy_ != nullptr) {
- rr_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
+ if (child_policy_ != nullptr) {
+ child_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
// Cancel all pending picks.
//
-// A grpclb pick progresses as follows:
-// - If there's a Round Robin policy (rr_policy_) available, it'll be
-// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From
-// that point onwards, it'll be RR's responsibility. For cancellations, that
-// implies the pick needs also be cancelled by the RR instance.
-// - Otherwise, without an RR instance, picks stay pending at this policy's
-// level (grpclb), inside the pending_picks_ list. To cancel these,
+// A pick progresses as follows:
+// - If there's a child policy available, it'll be handed over to the child
+//   policy (in CreateChildPolicyLocked()). From that point onwards, it'll be
+//   the child policy's responsibility. For cancellations, that implies the
+//   pick also needs to be cancelled by the child policy instance.
+// - Otherwise, without a child policy instance, picks stay pending at this
+// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
@@ -1200,10 +1162,10 @@ void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
}
pp = next;
}
- if (rr_policy_ != nullptr) {
- rr_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
- initial_metadata_flags_eq,
- GRPC_ERROR_REF(error));
+ if (child_policy_ != nullptr) {
+ child_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
+ initial_metadata_flags_eq,
+ GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
@@ -1218,22 +1180,21 @@ void XdsLb::ResetBackoffLocked() {
if (lb_channel_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_channel_);
}
- if (rr_policy_ != nullptr) {
- rr_policy_->ResetBackoffLocked();
+ if (child_policy_ != nullptr) {
+ child_policy_->ResetBackoffLocked();
}
}
bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
PendingPick* pp = PendingPickCreate(pick);
bool pick_done = false;
- if (rr_policy_ != nullptr) {
+ if (child_policy_ != nullptr) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] about to PICK from RR %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] about to PICK from policy %p", this,
+ child_policy_.get());
}
- pick_done =
- PickFromRoundRobinPolicyLocked(false /* force_async */, pp, error);
- } else { // rr_policy_ == NULL
+ pick_done = PickFromChildPolicyLocked(false /* force_async */, pp, error);
+ } else { // child_policy_ == NULL
if (pick->on_complete == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No pick result available but synchronous result required.");
@@ -1241,7 +1202,7 @@ bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
} else {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
- "[xdslb %p] No RR policy. Adding to grpclb's pending picks",
+ "[xdslb %p] No child policy. Adding to xds's pending picks",
this);
}
AddPendingPick(pp);
@@ -1256,8 +1217,8 @@ bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
- // delegate to the RoundRobin to fill the children subchannels.
- rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
+  // delegate to the child_policy_ to fill the child subchannels.
+ child_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
MutexLock lock(&lb_channel_mu_);
if (lb_channel_ != nullptr) {
grpc_core::channelz::ChannelNode* channel_node =
@@ -1324,13 +1285,15 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
-void XdsLb::UpdateLocked(const grpc_channel_args& args) {
+// TODO(vishalpowar): Use lb_config to configure LB policy.
+void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
ProcessChannelArgsLocked(args);
- // If fallback is configured and the RR policy already exists, update
- // it with the new fallback addresses.
- if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) {
- CreateOrUpdateRoundRobinPolicyLocked();
- }
+ // Update the existing child policy.
+  // Note: Fallback mode is currently disabled in this policy, so the child
+  // policy must have been created from a serverlist.
+ // TODO(vpowar): Handle the fallback_address changes when we add support for
+ // fallback in xDS.
+ if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
// Start watching the LB channel connectivity for connection, if not
// already doing so.
if (!watching_lb_channel_) {
@@ -1398,11 +1361,10 @@ void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
if (xdslb_policy->serverlist_ == nullptr && !xdslb_policy->shutting_down_ &&
error == GRPC_ERROR_NONE) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Falling back to use backends from resolver",
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Fallback timer fired. Not using fallback backends",
xdslb_policy);
}
- GPR_ASSERT(xdslb_policy->fallback_backend_addresses_ != nullptr);
- xdslb_policy->CreateOrUpdateRoundRobinPolicyLocked();
}
xdslb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
@@ -1452,8 +1414,8 @@ void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
- // embedded RR policy. Note that the current RR policy, if any, will stay in
- // effect until an update from the new lb_call is received.
+ // child policy. Note that the current child policy, if any, will
+ // stay in effect until an update from the new lb_call is received.
switch (xdslb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
@@ -1512,8 +1474,8 @@ void DestroyClientStats(void* arg) {
}
void XdsLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
- /* if connected_subchannel is nullptr, no pick has been made by the RR
- * policy (e.g., all addresses failed to connect). There won't be any
+ /* if connected_subchannel is nullptr, no pick has been made by the
+ * child policy (e.g., all addresses failed to connect). There won't be any
* user_data/token available */
if (pp->pick->connected_subchannel != nullptr) {
if (GPR_LIKELY(!GRPC_MDISNULL(pp->lb_token))) {
@@ -1539,8 +1501,8 @@ void XdsLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
}
/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
+ * reference to its associated child policy instance. We wrap this closure in
+ * order to unref the child policy instance upon its invocation */
void XdsLb::OnPendingPickComplete(void* arg, grpc_error* error) {
PendingPick* pp = static_cast<PendingPick*>(arg);
PendingPickSetMetadataAndContext(pp);
@@ -1565,50 +1527,24 @@ void XdsLb::AddPendingPick(PendingPick* pp) {
}
//
-// code for interacting with the RR policy
+// code for interacting with the child policy
//
-// Performs a pick over \a rr_policy_. Given that a pick can return
+// Performs a pick over \a child_policy_. Given that a pick can return
// immediately (ignoring its completion callback), we need to perform the
// cleanups this callback would otherwise be responsible for.
// If \a force_async is true, then we will manually schedule the
// completion callback even if the pick is available immediately.
-bool XdsLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
- grpc_error** error) {
- // Check for drops if we are not using fallback backend addresses.
- if (serverlist_ != nullptr) {
- // Look at the index into the serverlist to see if we should drop this call.
- xds_grpclb_server* server = serverlist_->servers[serverlist_index_++];
- if (serverlist_index_ == serverlist_->num_servers) {
- serverlist_index_ = 0; // Wrap-around.
- }
- if (server->drop) {
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
- lb_calld_->client_stats()->AddCallDroppedLocked(
- server->load_balance_token);
- }
- if (force_async) {
- GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
- Delete(pp);
- return false;
- }
- Delete(pp);
- return true;
- }
- }
+bool XdsLb::PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
+ grpc_error** error) {
// Set client_stats and user_data.
if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
pp->client_stats = lb_calld_->client_stats()->Ref();
}
GPR_ASSERT(pp->pick->user_data == nullptr);
pp->pick->user_data = (void**)&pp->lb_token;
- // Pick via the RR policy.
- bool pick_done = rr_policy_->PickLocked(pp->pick, error);
+ // Pick via the child policy.
+ bool pick_done = child_policy_->PickLocked(pp->pick, error);
if (pick_done) {
PendingPickSetMetadataAndContext(pp);
if (force_async) {
@@ -1619,72 +1555,67 @@ bool XdsLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
Delete(pp);
}
// else, the pending pick will be registered and taken care of by the
- // pending pick list inside the RR policy. Eventually,
+ // pending pick list inside the child policy. Eventually,
// OnPendingPickComplete() will be called, which will (among other
// things) add the LB token to the call's initial metadata.
return pick_done;
}
-void XdsLb::CreateRoundRobinPolicyLocked(const Args& args) {
- GPR_ASSERT(rr_policy_ == nullptr);
- rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+void XdsLb::CreateChildPolicyLocked(const Args& args) {
+ GPR_ASSERT(child_policy_ == nullptr);
+ child_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
"round_robin", args);
- if (GPR_UNLIKELY(rr_policy_ == nullptr)) {
- gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a RoundRobin policy", this);
+ if (GPR_UNLIKELY(child_policy_ == nullptr)) {
+ gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a child policy", this);
return;
}
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
- auto self = Ref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ auto self = Ref(DEBUG_LOCATION, "on_child_reresolution_requested");
self.release();
- rr_policy_->SetReresolutionClosureLocked(&on_rr_request_reresolution_);
- grpc_error* rr_state_error = nullptr;
- rr_connectivity_state_ = rr_policy_->CheckConnectivityLocked(&rr_state_error);
- // Connectivity state is a function of the RR policy updated/created.
- UpdateConnectivityStateFromRoundRobinPolicyLocked(rr_state_error);
- // Add the gRPC LB's interested_parties pollset_set to that of the newly
- // created RR policy. This will make the RR policy progress upon activity on
- // gRPC LB, which in turn is tied to the application's call.
- grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(),
+ child_policy_->SetReresolutionClosureLocked(&on_child_request_reresolution_);
+ grpc_error* child_state_error = nullptr;
+ child_connectivity_state_ =
+ child_policy_->CheckConnectivityLocked(&child_state_error);
+ // Connectivity state is a function of the child policy updated/created.
+ UpdateConnectivityStateFromChildPolicyLocked(child_state_error);
+  // Add the xds policy's interested_parties pollset_set to that of the newly
+  // created child policy. This will make the child policy progress upon
+  // activity on the xds policy, which in turn is tied to the application's
+  // call.
+ grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties());
- // Subscribe to changes to the connectivity of the new RR.
+ // Subscribe to changes to the connectivity of the new child policy.
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
- self = Ref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ self = Ref(DEBUG_LOCATION, "on_child_connectivity_changed");
self.release();
- rr_policy_->NotifyOnStateChangeLocked(&rr_connectivity_state_,
- &on_rr_connectivity_changed_);
- rr_policy_->ExitIdleLocked();
- // Send pending picks to RR policy.
+ child_policy_->NotifyOnStateChangeLocked(&child_connectivity_state_,
+ &on_child_connectivity_changed_);
+ child_policy_->ExitIdleLocked();
+ // Send pending picks to child policy.
PendingPick* pp;
while ((pp = pending_picks_)) {
pending_picks_ = pp->next;
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Pending pick about to (async) PICK from RR %p", this,
- rr_policy_.get());
+ gpr_log(
+ GPR_INFO,
+ "[xdslb %p] Pending pick about to (async) PICK from child policy %p",
+ this, child_policy_.get());
}
grpc_error* error = GRPC_ERROR_NONE;
- PickFromRoundRobinPolicyLocked(true /* force_async */, pp, &error);
+ PickFromChildPolicyLocked(true /* force_async */, pp, &error);
}
}
-grpc_channel_args* XdsLb::CreateRoundRobinPolicyArgsLocked() {
+grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() {
grpc_lb_addresses* addresses;
bool is_backend_from_grpclb_load_balancer = false;
- if (serverlist_ != nullptr) {
- GPR_ASSERT(serverlist_->num_servers > 0);
- addresses = ProcessServerlist(serverlist_);
- is_backend_from_grpclb_load_balancer = true;
- } else {
- // If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
- // received any serverlist from the balancer, we use the fallback backends
- // returned by the resolver. Note that the fallback backend list may be
- // empty, in which case the new round_robin policy will keep the requested
- // picks pending.
- GPR_ASSERT(fallback_backend_addresses_ != nullptr);
- addresses = grpc_lb_addresses_copy(fallback_backend_addresses_);
- }
+  // This should never be invoked when serverlist_ is null, since fallback
+  // mode is disabled for the xDS plugin.
+ GPR_ASSERT(serverlist_ != nullptr);
+ GPR_ASSERT(serverlist_->num_servers > 0);
+ addresses = ProcessServerlist(serverlist_);
+ is_backend_from_grpclb_load_balancer = true;
GPR_ASSERT(addresses != nullptr);
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
@@ -1704,66 +1635,68 @@ grpc_channel_args* XdsLb::CreateRoundRobinPolicyArgsLocked() {
return args;
}
-void XdsLb::CreateOrUpdateRoundRobinPolicyLocked() {
+void XdsLb::CreateOrUpdateChildPolicyLocked() {
if (shutting_down_) return;
- grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked();
+ grpc_channel_args* args = CreateChildPolicyArgsLocked();
GPR_ASSERT(args != nullptr);
- if (rr_policy_ != nullptr) {
+ if (child_policy_ != nullptr) {
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Updating RR policy %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] Updating the child policy %p", this,
+ child_policy_.get());
}
- rr_policy_->UpdateLocked(*args);
+ // TODO(vishalpowar): Pass the correct LB config.
+ child_policy_->UpdateLocked(*args, nullptr);
} else {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.client_channel_factory = client_channel_factory();
lb_policy_args.args = args;
- CreateRoundRobinPolicyLocked(lb_policy_args);
+ CreateChildPolicyLocked(lb_policy_args);
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(GPR_INFO, "[xdslb %p] Created new RR policy %p", this,
- rr_policy_.get());
+ gpr_log(GPR_INFO, "[xdslb %p] Created a new child policy %p", this,
+ child_policy_.get());
}
}
grpc_channel_args_destroy(args);
}
-void XdsLb::OnRoundRobinRequestReresolutionLocked(void* arg,
- grpc_error* error) {
+void XdsLb::OnChildPolicyRequestReresolutionLocked(void* arg,
+ grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
- xdslb_policy->Unref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+ xdslb_policy->Unref(DEBUG_LOCATION, "on_child_reresolution_requested");
return;
}
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(
- GPR_INFO,
- "[xdslb %p] Re-resolution requested from the internal RR policy (%p).",
- xdslb_policy, xdslb_policy->rr_policy_.get());
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Re-resolution requested from child policy "
+ "(%p).",
+ xdslb_policy, xdslb_policy->child_policy_.get());
}
  // If we are talking to a balancer, we expect to get updated addresses from
- // the balancer, so we can ignore the re-resolution request from the RR
- // policy. Otherwise, handle the re-resolution request using the
- // grpclb policy's original re-resolution closure.
+ // the balancer, so we can ignore the re-resolution request from the child
+ // policy.
+ // Otherwise, handle the re-resolution request using the xds policy's
+ // original re-resolution closure.
if (xdslb_policy->lb_calld_ == nullptr ||
!xdslb_policy->lb_calld_->seen_initial_response()) {
xdslb_policy->TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_NONE);
}
- // Give back the wrapper closure to the RR policy.
- xdslb_policy->rr_policy_->SetReresolutionClosureLocked(
- &xdslb_policy->on_rr_request_reresolution_);
+ // Give back the wrapper closure to the child policy.
+ xdslb_policy->child_policy_->SetReresolutionClosureLocked(
+ &xdslb_policy->on_child_request_reresolution_);
}
-void XdsLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
- grpc_error* rr_state_error) {
+void XdsLb::UpdateConnectivityStateFromChildPolicyLocked(
+ grpc_error* child_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&state_tracker_);
/* The new connectivity status is a function of the previous one and the new
- * input coming from the status of the RR policy.
+ * input coming from the status of the child policy.
*
- * current state (grpclb's)
+ * current state (xds's)
* |
- * v || I | C | R | TF | SD | <- new state (RR's)
+ * v || I | C | R | TF | SD | <- new state (child policy's)
* ===++====+=====+=====+======+======+
* I || I | C | R | [I] | [I] |
* ---++----+-----+-----+------+------+
@@ -1776,52 +1709,51 @@ void XdsLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
* SD || NA | NA | NA | NA | NA | (*)
* ---++----+-----+-----+------+------+
*
- * A [STATE] indicates that the old RR policy is kept. In those cases, STATE
- * is the current state of grpclb, which is left untouched.
+ * A [STATE] indicates that the old child policy is kept. In those cases,
+ * STATE is the current state of xds, which is left untouched.
*
* In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
- * the previous RR instance.
+ * the previous child policy instance.
*
* Note that the status is never updated to SHUTDOWN as a result of calling
* this function. Only glb_shutdown() has the power to set that state.
*
* (*) This function mustn't be called during shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
- switch (rr_connectivity_state_) {
+ switch (child_connectivity_state_) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
- GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
+ GPR_ASSERT(child_state_error != GRPC_ERROR_NONE);
break;
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
- GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
+ GPR_ASSERT(child_state_error == GRPC_ERROR_NONE);
}
if (grpc_lb_xds_trace.enabled()) {
- gpr_log(
- GPR_INFO,
- "[xdslb %p] Setting grpclb's state to %s from new RR policy %p state.",
- this, grpc_connectivity_state_name(rr_connectivity_state_),
- rr_policy_.get());
+ gpr_log(GPR_INFO,
+ "[xdslb %p] Setting xds's state to %s from child policy %p state.",
+ this, grpc_connectivity_state_name(child_connectivity_state_),
+ child_policy_.get());
}
- grpc_connectivity_state_set(&state_tracker_, rr_connectivity_state_,
- rr_state_error,
+ grpc_connectivity_state_set(&state_tracker_, child_connectivity_state_,
+ child_state_error,
"update_lb_connectivity_status_locked");
}
-void XdsLb::OnRoundRobinConnectivityChangedLocked(void* arg,
- grpc_error* error) {
+void XdsLb::OnChildPolicyConnectivityChangedLocked(void* arg,
+ grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) {
- xdslb_policy->Unref(DEBUG_LOCATION, "on_rr_connectivity_changed");
+ xdslb_policy->Unref(DEBUG_LOCATION, "on_child_connectivity_changed");
return;
}
- xdslb_policy->UpdateConnectivityStateFromRoundRobinPolicyLocked(
+ xdslb_policy->UpdateConnectivityStateFromChildPolicyLocked(
GRPC_ERROR_REF(error));
- // Resubscribe. Reuse the "on_rr_connectivity_changed" ref.
- xdslb_policy->rr_policy_->NotifyOnStateChangeLocked(
- &xdslb_policy->rr_connectivity_state_,
- &xdslb_policy->on_rr_connectivity_changed_);
+ // Resubscribe. Reuse the "on_child_connectivity_changed" ref.
+ xdslb_policy->child_policy_->NotifyOnStateChangeLocked(
+ &xdslb_policy->child_connectivity_state_,
+ &xdslb_policy->on_child_connectivity_changed_);
}
//
@@ -1848,7 +1780,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(addresses, args));
}
- const char* name() const override { return "xds"; }
+ const char* name() const override { return "xds_experimental"; }
};
} // namespace
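
The rename above is mechanical, but the underlying pattern (a parent policy that queues picks until a child policy exists, then delegates everything to it) is easier to see in isolation. The following is a minimal, self-contained C++ sketch of that pattern only; the Pick, ChildPolicy, and ParentPolicy types are invented for illustration and do not correspond to the real LoadBalancingPolicy API, its locking, or its refcounting.

#include <iostream>
#include <memory>
#include <queue>

struct Pick { int id; };

// Hypothetical stand-in for a child LB policy (e.g. round_robin).
class ChildPolicy {
 public:
  // Returns true if the pick completed synchronously.
  bool PickLocked(const Pick& p) {
    std::cout << "child handled pick " << p.id << "\n";
    return true;
  }
};

// Hypothetical stand-in for the parent (xds-like) policy.
class ParentPolicy {
 public:
  bool PickLocked(const Pick& p) {
    if (child_ != nullptr) {
      // Delegate to whichever child policy is currently installed.
      return child_->PickLocked(p);
    }
    // No child yet: keep the pick pending until a serverlist arrives.
    pending_.push(p);
    return false;
  }

  void OnServerlistUpdate() {
    if (child_ == nullptr) child_ = std::make_unique<ChildPolicy>();
    // Flush picks that were queued while no child policy existed.
    while (!pending_.empty()) {
      child_->PickLocked(pending_.front());
      pending_.pop();
    }
  }

 private:
  std::unique_ptr<ChildPolicy> child_;
  std::queue<Pick> pending_;
};

int main() {
  ParentPolicy parent;
  parent.PickLocked({1});       // no child policy yet, pick stays pending
  parent.OnServerlistUpdate();  // child created, pending pick is flushed
  parent.PickLocked({2});       // delegated immediately
}
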
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 62bdbf2689..a59deadb26 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -25,7 +25,7 @@
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/uri/uri_parser.h"
//
// representation of an LB address
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index d651b1120d..ad459c9c8c 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -94,4 +94,9 @@ LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
return factory->CreateLoadBalancingPolicy(args);
}
+bool LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(const char* name) {
+ GPR_ASSERT(g_state != nullptr);
+ return g_state->GetLoadBalancingPolicyFactory(name) != nullptr;
+}
+
} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 2e9bb061ed..338f7c9f69 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -47,6 +47,10 @@ class LoadBalancingPolicyRegistry {
/// Creates an LB policy of the type specified by \a name.
static OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
const char* name, const LoadBalancingPolicy::Args& args);
+
+ /// Returns true if the LB policy factory specified by \a name exists in this
+ /// registry.
+ static bool LoadBalancingPolicyExists(const char* name);
};
} // namespace grpc_core
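
For context, here is a hypothetical caller-side fragment (not part of this patch) showing how LoadBalancingPolicyExists() can be paired with CreateLoadBalancingPolicy(); the fall-back to "pick_first" and the fully populated args parameter are assumptions made for illustration only.

// Hypothetical helper: validate a requested policy name against the registry
// and fall back to "pick_first" if it is not registered. `args` is assumed to
// be a fully populated LoadBalancingPolicy::Args built by the caller.
grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> CreatePolicyOrFallback(
    const char* requested, const grpc_core::LoadBalancingPolicy::Args& args) {
  const char* name =
      grpc_core::LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
          requested)
          ? requested
          : "pick_first";
  return grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
      name, args);
}
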
diff --git a/src/core/ext/filters/client_channel/method_params.cc b/src/core/ext/filters/client_channel/method_params.cc
deleted file mode 100644
index 1f116bb67d..0000000000
--- a/src/core/ext/filters/client_channel/method_params.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/ext/filters/client_channel/method_params.h"
-#include "src/core/lib/channel/status_util.h"
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/gprpp/memory.h"
-
-// As per the retry design, we do not allow more than 5 retry attempts.
-#define MAX_MAX_RETRY_ATTEMPTS 5
-
-namespace grpc_core {
-namespace internal {
-
-namespace {
-
-bool ParseWaitForReady(
- grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
- return false;
- }
- *wait_for_ready = field->type == GRPC_JSON_TRUE
- ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
- : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
- return true;
-}
-
-// Parses a JSON field of the form generated for a google.proto.Duration
-// proto message, as per:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-bool ParseDuration(grpc_json* field, grpc_millis* duration) {
- if (field->type != GRPC_JSON_STRING) return false;
- size_t len = strlen(field->value);
- if (field->value[len - 1] != 's') return false;
- UniquePtr<char> buf(gpr_strdup(field->value));
- *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
- char* decimal_point = strchr(buf.get(), '.');
- int nanos = 0;
- if (decimal_point != nullptr) {
- *decimal_point = '\0';
- nanos = gpr_parse_nonnegative_int(decimal_point + 1);
- if (nanos == -1) {
- return false;
- }
- int num_digits = static_cast<int>(strlen(decimal_point + 1));
- if (num_digits > 9) { // We don't accept greater precision than nanos.
- return false;
- }
- for (int i = 0; i < (9 - num_digits); ++i) {
- nanos *= 10;
- }
- }
- int seconds =
- decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
- if (seconds == -1) return false;
- *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
- return true;
-}
-
-UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
- grpc_json* field) {
- auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
- if (field->type != GRPC_JSON_OBJECT) return nullptr;
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
- sub_field = sub_field->next) {
- if (sub_field->key == nullptr) return nullptr;
- if (strcmp(sub_field->key, "maxAttempts") == 0) {
- if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
- retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
- if (retry_policy->max_attempts <= 1) return nullptr;
- if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
- gpr_log(GPR_ERROR,
- "service config: clamped retryPolicy.maxAttempts at %d",
- MAX_MAX_RETRY_ATTEMPTS);
- retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
- }
- } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
- if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
- if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
- return nullptr;
- }
- if (retry_policy->initial_backoff == 0) return nullptr;
- } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
- if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
- if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
- return nullptr;
- }
- if (retry_policy->max_backoff == 0) return nullptr;
- } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
- if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
- if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
- if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
- 1) {
- return nullptr;
- }
- if (retry_policy->backoff_multiplier <= 0) return nullptr;
- } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
- if (!retry_policy->retryable_status_codes.Empty()) {
- return nullptr; // Duplicate.
- }
- if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
- for (grpc_json* element = sub_field->child; element != nullptr;
- element = element->next) {
- if (element->type != GRPC_JSON_STRING) return nullptr;
- grpc_status_code status;
- if (!grpc_status_code_from_string(element->value, &status)) {
- return nullptr;
- }
- retry_policy->retryable_status_codes.Add(status);
- }
- if (retry_policy->retryable_status_codes.Empty()) return nullptr;
- }
- }
- // Make sure required fields are set.
- if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
- retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
- retry_policy->retryable_status_codes.Empty()) {
- return nullptr;
- }
- return retry_policy;
-}
-
-} // namespace
-
-RefCountedPtr<ClientChannelMethodParams>
-ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
- RefCountedPtr<ClientChannelMethodParams> method_params =
- MakeRefCounted<ClientChannelMethodParams>();
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
- if (field->key == nullptr) continue;
- if (strcmp(field->key, "waitForReady") == 0) {
- if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
- return nullptr; // Duplicate.
- }
- if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
- return nullptr;
- }
- } else if (strcmp(field->key, "timeout") == 0) {
- if (method_params->timeout_ > 0) return nullptr; // Duplicate.
- if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
- } else if (strcmp(field->key, "retryPolicy") == 0) {
- if (method_params->retry_policy_ != nullptr) {
- return nullptr; // Duplicate.
- }
- method_params->retry_policy_ = ParseRetryPolicy(field);
- if (method_params->retry_policy_ == nullptr) return nullptr;
- }
- }
- return method_params;
-}
-
-} // namespace internal
-} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/method_params.h b/src/core/ext/filters/client_channel/method_params.h
deleted file mode 100644
index a31d360f17..0000000000
--- a/src/core/ext/filters/client_channel/method_params.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/channel/status_util.h"
-#include "src/core/lib/gprpp/ref_counted.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
-#include "src/core/lib/json/json.h"
-
-namespace grpc_core {
-namespace internal {
-
-class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
- public:
- enum WaitForReady {
- WAIT_FOR_READY_UNSET = 0,
- WAIT_FOR_READY_FALSE,
- WAIT_FOR_READY_TRUE
- };
-
- struct RetryPolicy {
- int max_attempts = 0;
- grpc_millis initial_backoff = 0;
- grpc_millis max_backoff = 0;
- float backoff_multiplier = 0;
- StatusCodeSet retryable_status_codes;
- };
-
- /// Creates a method_parameters object from \a json.
- /// Intended for use with ServiceConfig::CreateMethodConfigTable().
- static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
- const grpc_json* json);
-
- grpc_millis timeout() const { return timeout_; }
- WaitForReady wait_for_ready() const { return wait_for_ready_; }
- const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
-
- private:
- // So New() can call our private ctor.
- template <typename T, typename... Args>
- friend T* grpc_core::New(Args&&... args);
-
- // So Delete() can call our private dtor.
- template <typename T>
- friend void grpc_core::Delete(T*);
-
- ClientChannelMethodParams() {}
- virtual ~ClientChannelMethodParams() {}
-
- grpc_millis timeout_ = 0;
- WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
- UniquePtr<RetryPolicy> retry_policy_;
-};
-
-} // namespace internal
-} // namespace grpc_core
-
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H */
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index c2af0e6c49..5c050a2333 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -23,8 +23,8 @@
#include <stddef.h>
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/uri/uri_parser.h"
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index cd11eeb9e4..601b08be24 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -27,7 +27,7 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false,
namespace grpc_core {
Resolver::Resolver(grpc_combiner* combiner)
- : InternallyRefCountedWithTracing(&grpc_trace_resolver_refcount),
+ : InternallyRefCounted(&grpc_trace_resolver_refcount),
combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {}
Resolver::~Resolver() { GRPC_COMBINER_UNREF(combiner_, "resolver"); }
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index e9acbb7c41..9da849a101 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -44,7 +44,7 @@ namespace grpc_core {
///
/// Note: All methods with a "Locked" suffix must be called from the
/// combiner passed to the constructor.
-class Resolver : public InternallyRefCountedWithTracing<Resolver> {
+class Resolver : public InternallyRefCounted<Resolver> {
public:
// Not copyable nor movable.
Resolver(const Resolver&) = delete;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 01796ca08f..4ebc2c8161 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -120,6 +120,10 @@ class AresDnsResolver : public Resolver {
grpc_lb_addresses* lb_addresses_ = nullptr;
/// currently resolving service config
char* service_config_json_ = nullptr;
+  /// whether shutdown has been initiated
+  bool shutdown_initiated_ = false;
+  /// timeout in milliseconds for active DNS queries
+  int query_timeout_ms_;
};
AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
@@ -157,6 +161,11 @@ AresDnsResolver::AresDnsResolver(const ResolverArgs& args)
grpc_combiner_scheduler(combiner()));
GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this,
grpc_combiner_scheduler(combiner()));
+ const grpc_arg* query_timeout_ms_arg =
+ grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS);
+ query_timeout_ms_ = grpc_channel_arg_get_integer(
+ query_timeout_ms_arg,
+ {GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS, 0, INT_MAX});
}
AresDnsResolver::~AresDnsResolver() {
@@ -197,6 +206,7 @@ void AresDnsResolver::ResetBackoffLocked() {
}
void AresDnsResolver::ShutdownLocked() {
+ shutdown_initiated_ = true;
if (have_next_resolution_timer_) {
grpc_timer_cancel(&next_resolution_timer_);
}
@@ -213,9 +223,13 @@ void AresDnsResolver::ShutdownLocked() {
void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) {
AresDnsResolver* r = static_cast<AresDnsResolver*>(arg);
+ GRPC_CARES_TRACE_LOG(
+ "%p re-resolution timer fired. error: %s. shutdown_initiated_: %d", r,
+ grpc_error_string(error), r->shutdown_initiated_);
r->have_next_resolution_timer_ = false;
- if (error == GRPC_ERROR_NONE) {
+ if (error == GRPC_ERROR_NONE && !r->shutdown_initiated_) {
if (!r->resolving_) {
+ GRPC_CARES_TRACE_LOG("%p start resolving due to re-resolution timer", r);
r->StartResolvingLocked();
}
}
@@ -301,13 +315,12 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
gpr_free(r->pending_request_);
r->pending_request_ = nullptr;
if (r->lb_addresses_ != nullptr) {
- static const char* args_to_remove[2];
+ static const char* args_to_remove[1];
size_t num_args_to_remove = 0;
- grpc_arg new_args[3];
+ grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
- new_args[num_args_to_add++] =
+ args_to_add[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses_);
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config;
char* service_config_string = nullptr;
if (r->service_config_json_ != nullptr) {
service_config_string = ChooseServiceConfig(r->service_config_json_);
@@ -316,31 +329,19 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
gpr_log(GPR_INFO, "selected service config choice: %s",
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
- new_args[num_args_to_add++] = grpc_channel_arg_string_create(
+ args_to_add[num_args_to_add++] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
- service_config =
- grpc_core::ServiceConfig::Create(service_config_string);
- if (service_config != nullptr) {
- const char* lb_policy_name =
- service_config->GetLoadBalancingPolicyName();
- if (lb_policy_name != nullptr) {
- args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
- new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME,
- const_cast<char*>(lb_policy_name));
- }
- }
}
}
result = grpc_channel_args_copy_and_add_and_remove(
- r->channel_args_, args_to_remove, num_args_to_remove, new_args,
+ r->channel_args_, args_to_remove, num_args_to_remove, args_to_add,
num_args_to_add);
gpr_free(service_config_string);
grpc_lb_addresses_destroy(r->lb_addresses_);
// Reset backoff state so that we start from the beginning when the
// next request gets triggered.
r->backoff_.Reset();
- } else {
+ } else if (!r->shutdown_initiated_) {
const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try = r->backoff_.NextAttemptTime();
@@ -416,7 +417,8 @@ void AresDnsResolver::StartResolvingLocked() {
pending_request_ = grpc_dns_lookup_ares_locked(
dns_server_, name_to_resolve_, kDefaultPort, interested_parties_,
&on_resolved_, &lb_addresses_, true /* check_grpclb */,
- request_service_config_ ? &service_config_json_ : nullptr, combiner());
+ request_service_config_ ? &service_config_json_ : nullptr,
+ query_timeout_ms_, combiner());
last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now();
}
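
Seen from the application side, the new query timeout is just a channel argument. Below is a hypothetical fragment that caps each c-ares DNS query at 3 seconds when creating a channel; it assumes the GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS macro is visible to the caller, and the target string and insecure channel creation are placeholders.

// Hypothetical client setup: pass the query timeout read by the ares resolver.
grpc_arg timeout_arg = grpc_channel_arg_integer_create(
    const_cast<char*>(GRPC_ARG_DNS_ARES_QUERY_TIMEOUT_MS), 3000 /* ms */);
grpc_channel_args channel_args = {1, &timeout_arg};
grpc_channel* channel = grpc_insecure_channel_create("dns:///example.com:443",
                                                      &channel_args, nullptr);
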
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
index fdbd07ebf5..f42b1e309d 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc
@@ -33,6 +33,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
typedef struct fd_node {
/** the owner of this fd node */
@@ -76,6 +77,12 @@ struct grpc_ares_ev_driver {
grpc_ares_request* request;
/** Owned by the ev_driver. Creates new GrpcPolledFd's */
grpc_core::UniquePtr<grpc_core::GrpcPolledFdFactory> polled_fd_factory;
+ /** query timeout in milliseconds */
+ int query_timeout_ms;
+ /** alarm to cancel active queries */
+ grpc_timer query_timeout;
+ /** cancels queries on a timeout */
+ grpc_closure on_timeout_locked;
};
static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver);
@@ -116,8 +123,11 @@ static void fd_node_shutdown_locked(fd_node* fdn, const char* reason) {
}
}
+static void on_timeout_locked(void* arg, grpc_error* error);
+
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
+ int query_timeout_ms,
grpc_combiner* combiner,
grpc_ares_request* request) {
*ev_driver = grpc_core::New<grpc_ares_ev_driver>();
@@ -146,6 +156,9 @@ grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_core::NewGrpcPolledFdFactory((*ev_driver)->combiner);
(*ev_driver)
->polled_fd_factory->ConfigureAresChannelLocked((*ev_driver)->channel);
+ GRPC_CLOSURE_INIT(&(*ev_driver)->on_timeout_locked, on_timeout_locked,
+ *ev_driver, grpc_combiner_scheduler(combiner));
+ (*ev_driver)->query_timeout_ms = query_timeout_ms;
return GRPC_ERROR_NONE;
}
@@ -155,6 +168,7 @@ void grpc_ares_ev_driver_on_queries_complete_locked(
// is working, grpc_ares_notify_on_event_locked will shut down the
// fds; if it's not working, there are no fds to shut down.
ev_driver->shutting_down = true;
+ grpc_timer_cancel(&ev_driver->query_timeout);
grpc_ares_ev_driver_unref(ev_driver);
}
@@ -185,6 +199,17 @@ static fd_node* pop_fd_node_locked(fd_node** head, ares_socket_t as) {
return nullptr;
}
+static void on_timeout_locked(void* arg, grpc_error* error) {
+ grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
+ GRPC_CARES_TRACE_LOG(
+ "ev_driver=%p on_timeout_locked. driver->shutting_down=%d. err=%s",
+ driver, driver->shutting_down, grpc_error_string(error));
+ if (!driver->shutting_down && error == GRPC_ERROR_NONE) {
+ grpc_ares_ev_driver_shutdown_locked(driver);
+ }
+ grpc_ares_ev_driver_unref(driver);
+}
+
static void on_readable_locked(void* arg, grpc_error* error) {
fd_node* fdn = static_cast<fd_node*>(arg);
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
@@ -314,6 +339,17 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver) {
if (!ev_driver->working) {
ev_driver->working = true;
grpc_ares_notify_on_event_locked(ev_driver);
+ grpc_millis timeout =
+ ev_driver->query_timeout_ms == 0
+ ? GRPC_MILLIS_INF_FUTURE
+ : ev_driver->query_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+ GRPC_CARES_TRACE_LOG(
+ "ev_driver=%p grpc_ares_ev_driver_start_locked. timeout in %" PRId64
+ " ms",
+ ev_driver, timeout);
+ grpc_ares_ev_driver_ref(ev_driver);
+ grpc_timer_init(&ev_driver->query_timeout, timeout,
+ &ev_driver->on_timeout_locked);
}
}
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 671c537fe7..b8cefd9470 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -43,6 +43,7 @@ ares_channel* grpc_ares_ev_driver_get_channel_locked(
created successfully. */
grpc_error* grpc_ares_ev_driver_create_locked(grpc_ares_ev_driver** ev_driver,
grpc_pollset_set* pollset_set,
+ int query_timeout_ms,
grpc_combiner* combiner,
grpc_ares_request* request);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 582e2203fc..55715869b6 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -359,7 +359,7 @@ done:
void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
grpc_ares_request* r, const char* dns_server, const char* name,
const char* default_port, grpc_pollset_set* interested_parties,
- bool check_grpclb, grpc_combiner* combiner) {
+ bool check_grpclb, int query_timeout_ms, grpc_combiner* combiner) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_ares_hostbyname_request* hr = nullptr;
ares_channel* channel = nullptr;
@@ -388,7 +388,7 @@ void grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
port = gpr_strdup(default_port);
}
error = grpc_ares_ev_driver_create_locked(&r->ev_driver, interested_parties,
- combiner, r);
+ query_timeout_ms, combiner, r);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
channel = grpc_ares_ev_driver_get_channel_locked(r->ev_driver);
// If dns_server is specified, use it.
@@ -522,7 +522,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
- grpc_combiner* combiner) {
+ int query_timeout_ms, grpc_combiner* combiner) {
grpc_ares_request* r =
static_cast<grpc_ares_request*>(gpr_zalloc(sizeof(grpc_ares_request)));
r->ev_driver = nullptr;
@@ -546,7 +546,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
// Look up name using c-ares lib.
grpc_dns_lookup_ares_continue_after_check_localhost_and_ip_literals_locked(
r, dns_server, name, default_port, interested_parties, check_grpclb,
- combiner);
+ query_timeout_ms, combiner);
return r;
}
@@ -554,6 +554,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ int query_timeout_ms,
grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {
@@ -648,7 +649,8 @@ static void grpc_resolve_address_invoke_dns_lookup_ares_locked(
r->ares_request = grpc_dns_lookup_ares_locked(
nullptr /* dns_server */, r->name, r->default_port, r->interested_parties,
&r->on_dns_lookup_done_locked, &r->lb_addrs, false /* check_grpclb */,
- nullptr /* service_config_json */, r->combiner);
+ nullptr /* service_config_json */, GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS,
+ r->combiner);
}
static void grpc_resolve_address_ares_impl(const char* name,
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index a1231cc4e0..9acef1d0ca 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -26,6 +26,8 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#define GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS 10000
+
extern grpc_core::TraceFlag grpc_trace_cares_address_sorting;
extern grpc_core::TraceFlag grpc_trace_cares_resolver;
@@ -60,7 +62,7 @@ extern grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addresses, bool check_grpclb,
- char** service_config_json, grpc_combiner* combiner);
+ char** service_config_json, int query_timeout_ms, grpc_combiner* combiner);
/* Cancel the pending grpc_ares_request \a request */
extern void (*grpc_cancel_ares_request_locked)(grpc_ares_request* request);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index 9f293c1ac0..fc78b18304 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -30,7 +30,7 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
- grpc_combiner* combiner) {
+ int query_timeout_ms, grpc_combiner* combiner) {
return NULL;
}
@@ -38,6 +38,7 @@ grpc_ares_request* (*grpc_dns_lookup_ares_locked)(
const char* dns_server, const char* name, const char* default_port,
grpc_pollset_set* interested_parties, grpc_closure* on_done,
grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json,
+ int query_timeout_ms,
grpc_combiner* combiner) = grpc_dns_lookup_ares_locked_impl;
static void grpc_cancel_ares_request_locked_impl(grpc_ares_request* r) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 144ac24a56..3aa690bea4 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -103,7 +103,7 @@ void FakeResolver::NextLocked(grpc_channel_args** target_result,
}
void FakeResolver::RequestReresolutionLocked() {
- if (reresolution_results_ != nullptr) {
+ if (reresolution_results_ != nullptr || return_failure_) {
grpc_channel_args_destroy(next_results_);
next_results_ = grpc_channel_args_copy(reresolution_results_);
MaybeFinishNextLocked();
@@ -141,6 +141,7 @@ struct SetResponseClosureArg {
grpc_closure set_response_closure;
FakeResolverResponseGenerator* generator;
grpc_channel_args* response;
+ bool immediate = true;
};
void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
@@ -194,7 +195,7 @@ void FakeResolverResponseGenerator::SetFailureLocked(void* arg,
SetResponseClosureArg* closure_arg = static_cast<SetResponseClosureArg*>(arg);
FakeResolver* resolver = closure_arg->generator->resolver_;
resolver->return_failure_ = true;
- resolver->MaybeFinishNextLocked();
+ if (closure_arg->immediate) resolver->MaybeFinishNextLocked();
Delete(closure_arg);
}
@@ -209,6 +210,18 @@ void FakeResolverResponseGenerator::SetFailure() {
GRPC_ERROR_NONE);
}
+void FakeResolverResponseGenerator::SetFailureOnReresolution() {
+ GPR_ASSERT(resolver_ != nullptr);
+ SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
+ closure_arg->generator = this;
+ closure_arg->immediate = false;
+ GRPC_CLOSURE_SCHED(
+ GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked,
+ closure_arg,
+ grpc_combiner_scheduler(resolver_->combiner())),
+ GRPC_ERROR_NONE);
+}
+
namespace {
static void* response_generator_arg_copy(void* p) {
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index 708eaf1147..7f69059351 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -20,9 +20,9 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/uri/uri_parser.h"
#define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
"grpc.fake_resolver.response_generator"
@@ -61,6 +61,10 @@ class FakeResolverResponseGenerator
// returning a null result with no error).
void SetFailure();
+ // Same as SetFailure(), but instead of returning the error
+ // immediately, waits for the next call to RequestReresolutionLocked().
+ void SetFailureOnReresolution();
+
// Returns a channel arg containing \a generator.
static grpc_arg MakeChannelArg(FakeResolverResponseGenerator* generator);
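A hypothetical test sketch for the new SetFailureOnReresolution() hook, assuming response_args is a grpc_channel_args* describing the initial addresses and that the generator is attached to the channel under test via MakeChannelArg(); the construction call is illustrative of how tests typically obtain a generator:

    auto generator =
        grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
    grpc_arg arg = grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
        generator.get());
    // ... build the channel under test with `arg` in its channel args ...
    generator->SetResponse(response_args);    // first resolution succeeds
    generator->SetFailureOnReresolution();    // only the *next* re-resolution fails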
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index ee3cfeeb9b..d891ef62e1 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -24,11 +24,11 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/resolver.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc_core {
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.cc b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
new file mode 100644
index 0000000000..4f7fd6b424
--- /dev/null
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -0,0 +1,369 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
+
+// As per the retry design, we do not allow more than 5 retry attempts.
+#define MAX_MAX_RETRY_ATTEMPTS 5
+
+namespace grpc_core {
+namespace internal {
+
+ProcessedResolverResult::ProcessedResolverResult(
+ const grpc_channel_args* resolver_result, bool parse_retry) {
+ ProcessServiceConfig(resolver_result, parse_retry);
+ // If no LB config was found above, just find the LB policy name then.
+ if (lb_policy_name_ == nullptr) ProcessLbPolicyName(resolver_result);
+}
+
+void ProcessedResolverResult::ProcessServiceConfig(
+ const grpc_channel_args* resolver_result, bool parse_retry) {
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_SERVICE_CONFIG);
+ const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
+ if (service_config_json != nullptr) {
+ service_config_json_.reset(gpr_strdup(service_config_json));
+ service_config_ = grpc_core::ServiceConfig::Create(service_config_json);
+ if (service_config_ != nullptr) {
+ if (parse_retry) {
+ channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_SERVER_URI);
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
+ GPR_ASSERT(server_uri != nullptr);
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ server_name_ = uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ service_config_->ParseGlobalParams(ParseServiceConfig, this);
+ grpc_uri_destroy(uri);
+ } else {
+ service_config_->ParseGlobalParams(ParseServiceConfig, this);
+ }
+ method_params_table_ = service_config_->CreateMethodConfigTable(
+ ClientChannelMethodParams::CreateFromJson);
+ }
+ }
+}
+
+void ProcessedResolverResult::ProcessLbPolicyName(
+ const grpc_channel_args* resolver_result) {
+ // Prefer the LB policy name found in the service config. Note that this is
+ // checking the deprecated loadBalancingPolicy field, rather than the new
+ // loadBalancingConfig field.
+ if (service_config_ != nullptr) {
+ lb_policy_name_.reset(
+ gpr_strdup(service_config_->GetLoadBalancingPolicyName()));
+ // Convert to lower-case.
+ if (lb_policy_name_ != nullptr) {
+ char* lb_policy_name = lb_policy_name_.get();
+ for (size_t i = 0; i < strlen(lb_policy_name); ++i) {
+ lb_policy_name[i] = tolower(lb_policy_name[i]);
+ }
+ }
+ }
+ // Otherwise, find the LB policy name set by the client API.
+ if (lb_policy_name_ == nullptr) {
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_LB_POLICY_NAME);
+ lb_policy_name_.reset(gpr_strdup(grpc_channel_arg_get_string(channel_arg)));
+ }
+ // Special case: If at least one balancer address is present, we use
+ // the grpclb policy, regardless of what the resolver has returned.
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(resolver_result, GRPC_ARG_LB_ADDRESSES);
+ if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+ grpc_lb_addresses* addresses =
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
+ if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
+ if (lb_policy_name_ != nullptr &&
+ strcmp(lb_policy_name_.get(), "grpclb") != 0) {
+ gpr_log(GPR_INFO,
+ "resolver requested LB policy %s but provided at least one "
+ "balancer address -- forcing use of grpclb LB policy",
+ lb_policy_name_.get());
+ }
+ lb_policy_name_.reset(gpr_strdup("grpclb"));
+ }
+ }
+ // Use pick_first if nothing was specified and we didn't select grpclb
+ // above.
+ if (lb_policy_name_ == nullptr) {
+ lb_policy_name_.reset(gpr_strdup("pick_first"));
+ }
+}
+
+void ProcessedResolverResult::ParseServiceConfig(
+ const grpc_json* field, ProcessedResolverResult* parsing_state) {
+ parsing_state->ParseLbConfigFromServiceConfig(field);
+ if (parsing_state->server_name_ != nullptr) {
+ parsing_state->ParseRetryThrottleParamsFromServiceConfig(field);
+ }
+}
+
+void ProcessedResolverResult::ParseLbConfigFromServiceConfig(
+ const grpc_json* field) {
+ if (lb_policy_config_ != nullptr) return; // Already found.
+ // Find the LB config global parameter.
+ if (field->key == nullptr || strcmp(field->key, "loadBalancingConfig") != 0 ||
+ field->type != GRPC_JSON_ARRAY) {
+ return; // Not valid lb config array.
+ }
+ // Find the first LB policy that this client supports.
+ for (grpc_json* lb_config = field->child; lb_config != nullptr;
+ lb_config = lb_config->next) {
+ if (lb_config->type != GRPC_JSON_OBJECT) return;
+ // Find the policy object.
+ grpc_json* policy = nullptr;
+ for (grpc_json* field = lb_config->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr || strcmp(field->key, "policy") != 0 ||
+ field->type != GRPC_JSON_OBJECT) {
+ return;
+ }
+ if (policy != nullptr) return; // Duplicate.
+ policy = field;
+ }
+ // Find the specific policy content since the policy object is of type
+ // "oneof".
+ grpc_json* policy_content = nullptr;
+ for (grpc_json* field = policy->child; field != nullptr;
+ field = field->next) {
+ if (field->key == nullptr || field->type != GRPC_JSON_OBJECT) return;
+ if (policy_content != nullptr) return; // Violate "oneof" type.
+ policy_content = field;
+ }
+ // If we support this policy, then select it.
+ if (grpc_core::LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
+ policy_content->key)) {
+ lb_policy_name_.reset(gpr_strdup(policy_content->key));
+ lb_policy_config_ = policy_content->child;
+ return;
+ }
+ }
+}
+
+void ProcessedResolverResult::ParseRetryThrottleParamsFromServiceConfig(
+ const grpc_json* field) {
+ if (strcmp(field->key, "retryThrottling") == 0) {
+ if (retry_throttle_data_ != nullptr) return; // Duplicate.
+ if (field->type != GRPC_JSON_OBJECT) return;
+ int max_milli_tokens = 0;
+ int milli_token_ratio = 0;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return;
+ if (strcmp(sub_field->key, "maxTokens") == 0) {
+ if (max_milli_tokens != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
+ if (max_milli_tokens == -1) return;
+ max_milli_tokens *= 1000;
+ } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
+ if (milli_token_ratio != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ // We support up to 3 decimal digits.
+ size_t whole_len = strlen(sub_field->value);
+ uint32_t multiplier = 1;
+ uint32_t decimal_value = 0;
+ const char* decimal_point = strchr(sub_field->value, '.');
+ if (decimal_point != nullptr) {
+ whole_len = static_cast<size_t>(decimal_point - sub_field->value);
+ multiplier = 1000;
+ size_t decimal_len = strlen(decimal_point + 1);
+ if (decimal_len > 3) decimal_len = 3;
+ if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
+ &decimal_value)) {
+ return;
+ }
+ uint32_t decimal_multiplier = 1;
+ for (size_t i = 0; i < (3 - decimal_len); ++i) {
+ decimal_multiplier *= 10;
+ }
+ decimal_value *= decimal_multiplier;
+ }
+ uint32_t whole_value;
+ if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
+ &whole_value)) {
+ return;
+ }
+ milli_token_ratio =
+ static_cast<int>((whole_value * multiplier) + decimal_value);
+ if (milli_token_ratio <= 0) return;
+ }
+ }
+ retry_throttle_data_ =
+ grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
+ server_name_, max_milli_tokens, milli_token_ratio);
+ }
+}
+
+namespace {
+
+bool ParseWaitForReady(
+ grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) {
+ if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
+ return false;
+ }
+ *wait_for_ready = field->type == GRPC_JSON_TRUE
+ ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE
+ : ClientChannelMethodParams::WAIT_FOR_READY_FALSE;
+ return true;
+}
+
+// Parses a JSON field of the form generated for a google.proto.Duration
+// proto message, as per:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+bool ParseDuration(grpc_json* field, grpc_millis* duration) {
+ if (field->type != GRPC_JSON_STRING) return false;
+ size_t len = strlen(field->value);
+ if (field->value[len - 1] != 's') return false;
+ UniquePtr<char> buf(gpr_strdup(field->value));
+ *(buf.get() + len - 1) = '\0'; // Remove trailing 's'.
+ char* decimal_point = strchr(buf.get(), '.');
+ int nanos = 0;
+ if (decimal_point != nullptr) {
+ *decimal_point = '\0';
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
+ return false;
+ }
+ int num_digits = static_cast<int>(strlen(decimal_point + 1));
+ if (num_digits > 9) { // We don't accept greater precision than nanos.
+ return false;
+ }
+ for (int i = 0; i < (9 - num_digits); ++i) {
+ nanos *= 10;
+ }
+ }
+ int seconds =
+ decimal_point == buf.get() ? 0 : gpr_parse_nonnegative_int(buf.get());
+ if (seconds == -1) return false;
+ *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
+ return true;
+}
+
+UniquePtr<ClientChannelMethodParams::RetryPolicy> ParseRetryPolicy(
+ grpc_json* field) {
+ auto retry_policy = MakeUnique<ClientChannelMethodParams::RetryPolicy>();
+ if (field->type != GRPC_JSON_OBJECT) return nullptr;
+ for (grpc_json* sub_field = field->child; sub_field != nullptr;
+ sub_field = sub_field->next) {
+ if (sub_field->key == nullptr) return nullptr;
+ if (strcmp(sub_field->key, "maxAttempts") == 0) {
+ if (retry_policy->max_attempts != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value);
+ if (retry_policy->max_attempts <= 1) return nullptr;
+ if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) {
+ gpr_log(GPR_ERROR,
+ "service config: clamped retryPolicy.maxAttempts at %d",
+ MAX_MAX_RETRY_ATTEMPTS);
+ retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS;
+ }
+ } else if (strcmp(sub_field->key, "initialBackoff") == 0) {
+ if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->initial_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "maxBackoff") == 0) {
+ if (retry_policy->max_backoff > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(sub_field, &retry_policy->max_backoff)) {
+ return nullptr;
+ }
+ if (retry_policy->max_backoff == 0) return nullptr;
+ } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) {
+ if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return nullptr;
+ if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) !=
+ 1) {
+ return nullptr;
+ }
+ if (retry_policy->backoff_multiplier <= 0) return nullptr;
+ } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) {
+ if (!retry_policy->retryable_status_codes.Empty()) {
+ return nullptr; // Duplicate.
+ }
+ if (sub_field->type != GRPC_JSON_ARRAY) return nullptr;
+ for (grpc_json* element = sub_field->child; element != nullptr;
+ element = element->next) {
+ if (element->type != GRPC_JSON_STRING) return nullptr;
+ grpc_status_code status;
+ if (!grpc_status_code_from_string(element->value, &status)) {
+ return nullptr;
+ }
+ retry_policy->retryable_status_codes.Add(status);
+ }
+ if (retry_policy->retryable_status_codes.Empty()) return nullptr;
+ }
+ }
+ // Make sure required fields are set.
+ if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 ||
+ retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 ||
+ retry_policy->retryable_status_codes.Empty()) {
+ return nullptr;
+ }
+ return retry_policy;
+}
+
+} // namespace
+
+RefCountedPtr<ClientChannelMethodParams>
+ClientChannelMethodParams::CreateFromJson(const grpc_json* json) {
+ RefCountedPtr<ClientChannelMethodParams> method_params =
+ MakeRefCounted<ClientChannelMethodParams>();
+ for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+ if (field->key == nullptr) continue;
+ if (strcmp(field->key, "waitForReady") == 0) {
+ if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) {
+ return nullptr; // Duplicate.
+ }
+ if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) {
+ return nullptr;
+ }
+ } else if (strcmp(field->key, "timeout") == 0) {
+ if (method_params->timeout_ > 0) return nullptr; // Duplicate.
+ if (!ParseDuration(field, &method_params->timeout_)) return nullptr;
+ } else if (strcmp(field->key, "retryPolicy") == 0) {
+ if (method_params->retry_policy_ != nullptr) {
+ return nullptr; // Duplicate.
+ }
+ method_params->retry_policy_ = ParseRetryPolicy(field);
+ if (method_params->retry_policy_ == nullptr) return nullptr;
+ }
+ }
+ return method_params;
+}
+
+} // namespace internal
+} // namespace grpc_core
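For reference, a service config that the parser above would accept end to end, written as the kind of JSON string handed to ServiceConfig::Create(); the service and method names are placeholders, and the envelope keys ("methodConfig", "name") follow the standard gRPC service-config format consumed by ServiceConfig:

    const char* kServiceConfig = R"json({
      "loadBalancingPolicy": "round_robin",
      "retryThrottling": { "maxTokens": 10, "tokenRatio": 0.5 },
      "methodConfig": [{
        "name": [{ "service": "example.Echo", "method": "Ping" }],
        "waitForReady": true,
        "timeout": "1.5s",
        "retryPolicy": {
          "maxAttempts": 3,
          "initialBackoff": "0.1s",
          "maxBackoff": "1s",
          "backoffMultiplier": 2.0,
          "retryableStatusCodes": ["UNAVAILABLE"]
        }
      }]
    })json";
    // Per the code above: "maxTokens": 10 becomes 10000 milli-tokens,
    // "tokenRatio": 0.5 becomes 500 milli-units, and ParseDuration()
    // turns "1.5s" into seconds = 1, nanos = 500000000, i.e. 1500 ms.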
diff --git a/src/core/ext/filters/client_channel/resolver_result_parsing.h b/src/core/ext/filters/client_channel/resolver_result_parsing.h
new file mode 100644
index 0000000000..f1fb7406bc
--- /dev/null
+++ b/src/core/ext/filters/client_channel/resolver_result_parsing.h
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/retry_throttle.h"
+#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/slice/slice_hash_table.h"
+#include "src/core/lib/transport/service_config.h"
+
+namespace grpc_core {
+namespace internal {
+
+class ClientChannelMethodParams;
+
+// A table mapping from a method name to its method parameters.
+typedef grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+ ClientChannelMethodParamsTable;
+
+// A container of processed fields from the resolver result. Simplifies the
+// usage of resolver result.
+class ProcessedResolverResult {
+ public:
+ // Processes the resolver result and populates the relative members
+ // for later consumption. Tries to parse retry parameters only if parse_retry
+ // is true.
+ ProcessedResolverResult(const grpc_channel_args* resolver_result,
+ bool parse_retry);
+
+ // Getters. Any managed object's ownership is transferred.
+ grpc_core::UniquePtr<char> service_config_json() {
+ return std::move(service_config_json_);
+ }
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
+ return std::move(retry_throttle_data_);
+ }
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable>
+ method_params_table() {
+ return std::move(method_params_table_);
+ }
+ grpc_core::UniquePtr<char> lb_policy_name() {
+ return std::move(lb_policy_name_);
+ }
+ grpc_json* lb_policy_config() { return lb_policy_config_; }
+
+ private:
+ // Finds the service config; extracts LB config and (maybe) retry throttle
+ // params from it.
+ void ProcessServiceConfig(const grpc_channel_args* resolver_result,
+ bool parse_retry);
+
+ // Finds the LB policy name (when no LB config was found).
+ void ProcessLbPolicyName(const grpc_channel_args* resolver_result);
+
+ // Parses the service config. Intended to be used by
+ // ServiceConfig::ParseGlobalParams.
+ static void ParseServiceConfig(const grpc_json* field,
+ ProcessedResolverResult* parsing_state);
+ // Parses the LB config from service config.
+ void ParseLbConfigFromServiceConfig(const grpc_json* field);
+ // Parses the retry throttle parameters from service config.
+ void ParseRetryThrottleParamsFromServiceConfig(const grpc_json* field);
+
+ // Service config.
+ grpc_core::UniquePtr<char> service_config_json_;
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config_;
+ // LB policy.
+ grpc_json* lb_policy_config_ = nullptr;
+ grpc_core::UniquePtr<char> lb_policy_name_;
+ // Retry throttle data.
+ char* server_name_ = nullptr;
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
+ // Method params table.
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
+};
+
+// The parameters of a method.
+class ClientChannelMethodParams : public RefCounted<ClientChannelMethodParams> {
+ public:
+ enum WaitForReady {
+ WAIT_FOR_READY_UNSET = 0,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+ };
+
+ struct RetryPolicy {
+ int max_attempts = 0;
+ grpc_millis initial_backoff = 0;
+ grpc_millis max_backoff = 0;
+ float backoff_multiplier = 0;
+ StatusCodeSet retryable_status_codes;
+ };
+
+ /// Creates a method_parameters object from \a json.
+ /// Intended for use with ServiceConfig::CreateMethodConfigTable().
+ static RefCountedPtr<ClientChannelMethodParams> CreateFromJson(
+ const grpc_json* json);
+
+ grpc_millis timeout() const { return timeout_; }
+ WaitForReady wait_for_ready() const { return wait_for_ready_; }
+ const RetryPolicy* retry_policy() const { return retry_policy_.get(); }
+
+ private:
+ // So New() can call our private ctor.
+ template <typename T, typename... Args>
+ friend T* grpc_core::New(Args&&... args);
+
+ // So Delete() can call our private dtor.
+ template <typename T>
+ friend void grpc_core::Delete(T*);
+
+ ClientChannelMethodParams() {}
+ virtual ~ClientChannelMethodParams() {}
+
+ grpc_millis timeout_ = 0;
+ WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET;
+ UniquePtr<RetryPolicy> retry_policy_;
+};
+
+} // namespace internal
+} // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_RESULT_PARSING_H */
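A sketch of the intended consumption pattern for this new class, assuming resolver_result is the grpc_channel_args* produced by the resolver; each getter transfers ownership, so it is called at most once:

    grpc_core::internal::ProcessedResolverResult result(resolver_result,
                                                        /*parse_retry=*/true);
    grpc_core::UniquePtr<char> lb_policy_name = result.lb_policy_name();
    grpc_json* lb_policy_config = result.lb_policy_config();  // points into the
                                                              // parsed config
    auto retry_throttle_data = result.retry_throttle_data();
    auto method_params_table = result.method_params_table();
    auto service_config_json = result.service_config_json();
    // The client channel would then (re)create its LB policy from
    // lb_policy_name / lb_policy_config and keep the tables around for
    // per-call parameter and throttle lookups.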
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index e4c6efe862..af55f7710e 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -34,7 +34,6 @@
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -54,6 +53,7 @@
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/status_metadata.h"
+#include "src/core/lib/uri/uri_parser.h"
#define INTERNAL_REF_BITS 16
#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
@@ -153,7 +153,7 @@ struct grpc_subchannel {
/** have we started the backoff loop */
bool backoff_begun;
// reset_backoff() was called while alarm was pending
- bool deferred_reset_backoff;
+ bool retry_immediately;
/** our alarm */
grpc_timer alarm;
@@ -162,12 +162,16 @@ struct grpc_subchannel {
};
struct grpc_subchannel_call {
+ grpc_subchannel_call(grpc_core::ConnectedSubchannel* connection,
+ const grpc_core::ConnectedSubchannel::CallArgs& args)
+ : connection(connection), deadline(args.deadline) {}
+
grpc_core::ConnectedSubchannel* connection;
- grpc_closure* schedule_closure_after_destroy;
+ grpc_closure* schedule_closure_after_destroy = nullptr;
// state needed to support channelz interception of recv trailing metadata.
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata;
- grpc_metadata_batch* recv_trailing_metadata;
+ grpc_metadata_batch* recv_trailing_metadata = nullptr;
grpc_millis deadline;
};
@@ -705,8 +709,8 @@ static void on_alarm(void* arg, grpc_error* error) {
if (c->disconnected) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
&error, 1);
- } else if (c->deferred_reset_backoff) {
- c->deferred_reset_backoff = false;
+ } else if (c->retry_immediately) {
+ c->retry_immediately = false;
error = GRPC_ERROR_NONE;
} else {
GRPC_ERROR_REF(error);
@@ -883,12 +887,12 @@ static void on_subchannel_connected(void* arg, grpc_error* error) {
void grpc_subchannel_reset_backoff(grpc_subchannel* subchannel) {
gpr_mu_lock(&subchannel->mu);
+ subchannel->backoff->Reset();
if (subchannel->have_alarm) {
- subchannel->deferred_reset_backoff = true;
+ subchannel->retry_immediately = true;
grpc_timer_cancel(&subchannel->alarm);
} else {
subchannel->backoff_begun = false;
- subchannel->backoff->Reset();
maybe_start_connecting_locked(subchannel);
}
gpr_mu_unlock(&subchannel->mu);
@@ -905,6 +909,7 @@ static void subchannel_call_destroy(void* call, grpc_error* error) {
grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
c->schedule_closure_after_destroy);
connection->Unref(DEBUG_LOCATION, "subchannel_call");
+ c->~grpc_subchannel_call();
}
void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
@@ -1067,7 +1072,7 @@ ConnectedSubchannel::ConnectedSubchannel(
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
channelz_subchannel,
intptr_t socket_uuid)
- : RefCountedWithTracing<ConnectedSubchannel>(&grpc_trace_stream_refcount),
+ : RefCounted<ConnectedSubchannel>(&grpc_trace_stream_refcount),
channel_stack_(channel_stack),
channelz_subchannel_(std::move(channelz_subchannel)),
socket_uuid_(socket_uuid) {}
@@ -1102,14 +1107,12 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
grpc_subchannel_call** call) {
const size_t allocation_size =
GetInitialCallSizeEstimate(args.parent_data_size);
- *call = static_cast<grpc_subchannel_call*>(
- gpr_arena_alloc(args.arena, allocation_size));
+ *call = new (gpr_arena_alloc(args.arena, allocation_size))
+ grpc_subchannel_call(this, args);
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
RefCountedPtr<ConnectedSubchannel> connection =
Ref(DEBUG_LOCATION, "subchannel_call");
connection.release(); // Ref is passed to the grpc_subchannel_call object.
- (*call)->connection = this;
- (*call)->deadline = args.deadline;
const grpc_call_element_args call_args = {
callstk, /* call_stack */
nullptr, /* server_transport_data */
@@ -1128,6 +1131,9 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
return error;
}
grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
+ if (channelz_subchannel_ != nullptr) {
+ channelz_subchannel_->RecordCallStarted();
+ }
return GRPC_ERROR_NONE;
}
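The subchannel change above moves grpc_subchannel_call from zero-initialized arena memory to a real constructor. In isolation the pattern is placement new into arena storage plus an explicit destructor call (names follow the diff; the arena, args, and ConnectedSubchannel pointer are assumed to exist):

    // Allocation: construct the object in arena-provided storage.
    void* mem = gpr_arena_alloc(args.arena, allocation_size);
    grpc_subchannel_call* call =
        new (mem) grpc_subchannel_call(connected_subchannel, args);
    // Destruction: arena memory is released wholesale, so the destructor
    // must be run explicitly rather than via delete or gpr_free.
    call->~grpc_subchannel_call();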
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index ec3b4d86e4..69c2456ec2 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -72,7 +72,7 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
namespace grpc_core {
-class ConnectedSubchannel : public RefCountedWithTracing<ConnectedSubchannel> {
+class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
public:
struct CallArgs {
grpc_polling_entity* pollent;
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 1c23a6c4be..aa8441f17b 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -91,7 +91,7 @@ void grpc_subchannel_key_destroy(grpc_subchannel_key* k) {
gpr_free(k);
}
-static void sck_avl_destroy(void* p, void* user_data) {
+static void sck_avl_destroy(void* p, void* unused) {
grpc_subchannel_key_destroy(static_cast<grpc_subchannel_key*>(p));
}
@@ -104,7 +104,7 @@ static long sck_avl_compare(void* a, void* b, void* unused) {
static_cast<grpc_subchannel_key*>(b));
}
-static void scv_avl_destroy(void* p, void* user_data) {
+static void scv_avl_destroy(void* p, void* unused) {
GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "subchannel_index");
}
@@ -137,7 +137,7 @@ void grpc_subchannel_index_shutdown(void) {
void grpc_subchannel_index_unref(void) {
if (gpr_unref(&g_refcount)) {
gpr_mu_destroy(&g_mu);
- grpc_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(g_subchannel_index, nullptr);
}
}
@@ -147,13 +147,12 @@ grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) {
// Lock, and take a reference to the subchannel index.
// We don't need to do the search under a lock as avl's are immutable.
gpr_mu_lock(&g_mu);
- grpc_avl index = grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get()),
- "index_find");
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ (grpc_subchannel*)grpc_avl_get(index, key, nullptr), "index_find");
+ grpc_avl_unref(index, nullptr);
return c;
}
@@ -169,13 +168,11 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- grpc_avl index =
- grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = static_cast<grpc_subchannel*>(
- grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
+ c = static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr));
if (c != nullptr) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -184,11 +181,9 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
need_to_unref_constructed = true;
} else {
// no -> update the avl and compare/swap
- grpc_avl updated =
- grpc_avl_add(grpc_avl_ref(index, grpc_core::ExecCtx::Get()),
- subchannel_key_copy(key),
- GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"),
- grpc_core::ExecCtx::Get());
+ grpc_avl updated = grpc_avl_add(
+ grpc_avl_ref(index, nullptr), subchannel_key_copy(key),
+ GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), nullptr);
// it may happen (but it's expected to be unlikely)
// that some other thread has changed the index:
@@ -200,9 +195,9 @@ grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
}
gpr_mu_unlock(&g_mu);
- grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, nullptr);
}
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, nullptr);
}
if (need_to_unref_constructed) {
@@ -219,24 +214,22 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
// Compare and swap loop:
// - take a reference to the current index
gpr_mu_lock(&g_mu);
- grpc_avl index =
- grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
+ grpc_avl index = grpc_avl_ref(g_subchannel_index, nullptr);
gpr_mu_unlock(&g_mu);
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel* c = static_cast<grpc_subchannel*>(
- grpc_avl_get(index, key, grpc_core::ExecCtx::Get()));
+ grpc_subchannel* c =
+ static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr));
if (c != constructed) {
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(index, nullptr);
break;
}
// compare and swap the update (some other thread may have
// mutated the index behind us)
grpc_avl updated =
- grpc_avl_remove(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), key,
- grpc_core::ExecCtx::Get());
+ grpc_avl_remove(grpc_avl_ref(index, nullptr), key, nullptr);
gpr_mu_lock(&g_mu);
if (index.root == g_subchannel_index.root) {
@@ -245,8 +238,8 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
}
gpr_mu_unlock(&g_mu);
- grpc_avl_unref(updated, grpc_core::ExecCtx::Get());
- grpc_avl_unref(index, grpc_core::ExecCtx::Get());
+ grpc_avl_unref(updated, nullptr);
+ grpc_avl_unref(index, nullptr);
}
}
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index d23ad67ad5..b4cb07f0f9 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -27,6 +27,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel_init.h"
@@ -152,7 +153,11 @@ static void inject_recv_trailing_metadata_ready(
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
- bool in_call_combiner;
+ start_timer_after_init_state(grpc_call_element* elem, grpc_millis deadline)
+ : elem(elem), deadline(deadline) {}
+ ~start_timer_after_init_state() { start_timer_if_needed(elem, deadline); }
+
+ bool in_call_combiner = false;
grpc_call_element* elem;
grpc_millis deadline;
grpc_closure closure;
@@ -171,20 +176,16 @@ static void start_timer_after_init(void* arg, grpc_error* error) {
"scheduling deadline timer");
return;
}
- start_timer_if_needed(state->elem, state->deadline);
- gpr_free(state);
+ grpc_core::Delete(state);
GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
"done scheduling deadline timer");
}
-void grpc_deadline_state_init(grpc_call_element* elem,
- grpc_call_stack* call_stack,
- grpc_call_combiner* call_combiner,
- grpc_millis deadline) {
- grpc_deadline_state* deadline_state =
- static_cast<grpc_deadline_state*>(elem->call_data);
- deadline_state->call_stack = call_stack;
- deadline_state->call_combiner = call_combiner;
+grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
+ grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner,
+ grpc_millis deadline)
+ : call_stack(call_stack), call_combiner(call_combiner) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
if (deadline != GRPC_MILLIS_INF_FUTURE) {
@@ -196,21 +197,14 @@ void grpc_deadline_state_init(grpc_call_element* elem,
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
struct start_timer_after_init_state* state =
- static_cast<struct start_timer_after_init_state*>(
- gpr_zalloc(sizeof(*state)));
- state->elem = elem;
- state->deadline = deadline;
+ grpc_core::New<start_timer_after_init_state>(elem, deadline);
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
}
}
-void grpc_deadline_state_destroy(grpc_call_element* elem) {
- grpc_deadline_state* deadline_state =
- static_cast<grpc_deadline_state*>(elem->call_data);
- cancel_timer_if_needed(deadline_state);
-}
+grpc_deadline_state::~grpc_deadline_state() { cancel_timer_if_needed(this); }
void grpc_deadline_state_reset(grpc_call_element* elem,
grpc_millis new_deadline) {
@@ -269,8 +263,8 @@ typedef struct server_call_data {
// Constructor for call_data. Used for both client and server filters.
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
- args->deadline);
+ new (elem->call_data) grpc_deadline_state(
+ elem, args->call_stack, args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -278,7 +272,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- grpc_deadline_state_destroy(elem);
+ grpc_deadline_state* deadline_state =
+ static_cast<grpc_deadline_state*>(elem->call_data);
+ deadline_state->~grpc_deadline_state();
}
// Method for starting a call op for client filter.
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 1d797f445a..e37032999c 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -22,19 +22,23 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
-typedef enum grpc_deadline_timer_state {
+enum grpc_deadline_timer_state {
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING,
GRPC_DEADLINE_STATE_FINISHED
-} grpc_deadline_timer_state;
+};
// State used for filters that enforce call deadlines.
// Must be the first field in the filter's call_data.
-typedef struct grpc_deadline_state {
+struct grpc_deadline_state {
+ grpc_deadline_state(grpc_call_element* elem, grpc_call_stack* call_stack,
+ grpc_call_combiner* call_combiner, grpc_millis deadline);
+ ~grpc_deadline_state();
+
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
grpc_call_combiner* call_combiner;
- grpc_deadline_timer_state timer_state;
+ grpc_deadline_timer_state timer_state = GRPC_DEADLINE_STATE_INITIAL;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when we receive trailing metadata.
@@ -43,21 +47,13 @@ typedef struct grpc_deadline_state {
// The original recv_trailing_metadata_ready closure, which we chain to
// after our own closure is invoked.
grpc_closure* original_recv_trailing_metadata_ready;
-} grpc_deadline_state;
+};
//
// NOTE: All of these functions require that the first field in
// elem->call_data is a grpc_deadline_state.
//
-// assumes elem->call_data is zero'd
-void grpc_deadline_state_init(grpc_call_element* elem,
- grpc_call_stack* call_stack,
- grpc_call_combiner* call_combiner,
- grpc_millis deadline);
-
-void grpc_deadline_state_destroy(grpc_call_element* elem);
-
// Cancels the existing timer and starts a new one with new_deadline.
//
// Note: It is generally safe to call this with an earlier deadline
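With grpc_deadline_state_init/destroy removed, a filter that embeds the deadline state now constructs and destroys it directly. A hypothetical call_data illustrating the "must be the first field" requirement noted above:

    struct my_call_data {
      my_call_data(grpc_call_element* elem, const grpc_call_element_args& args)
          : deadline_state(elem, args.call_stack, args.call_combiner,
                           args.deadline) {}
      grpc_deadline_state deadline_state;  // must stay the first field
      // ... filter-specific members ...
    };

    // In init_call_elem / destroy_call_elem, respectively:
    //   new (elem->call_data) my_call_data(elem, *args);
    //   static_cast<my_call_data*>(elem->call_data)->~my_call_data();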
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index cd459e47cd..bf9a01f659 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -37,10 +37,31 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
/* default maximum size of payload eligable for GET request */
-static const size_t kMaxPayloadSizeForGet = 2048;
+static constexpr size_t kMaxPayloadSizeForGet = 2048;
+
+static void recv_initial_metadata_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+static void send_message_on_complete(void* arg, grpc_error* error);
namespace {
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ ::recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+ elem, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+ elem, grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
@@ -51,18 +72,18 @@ struct call_data {
grpc_linked_mdelem user_agent;
// State for handling recv_initial_metadata ops.
grpc_metadata_batch* recv_initial_metadata;
- grpc_error* recv_initial_metadata_error;
- grpc_closure* original_recv_initial_metadata_ready;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+ grpc_closure* original_recv_initial_metadata_ready = nullptr;
grpc_closure recv_initial_metadata_ready;
// State for handling recv_trailing_metadata ops.
grpc_metadata_batch* recv_trailing_metadata;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_closure recv_trailing_metadata_ready;
- grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
+ bool seen_recv_trailing_metadata_ready = false;
// State for handling send_message ops.
grpc_transport_stream_op_batch* send_message_batch;
- size_t send_message_bytes_read;
+ size_t send_message_bytes_read = 0;
grpc_core::ManualConstructor<grpc_core::ByteStreamCache> send_message_cache;
grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
send_message_caching_stream;
@@ -442,18 +463,7 @@ done:
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
- elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
- on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -462,7 +472,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+ calld->~call_data();
}
static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 933fe3c77b..9c8c8d9e18 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -39,6 +39,10 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
+static void start_send_message_batch(void* arg, grpc_error* unused);
+static void send_message_on_complete(void* arg, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+
namespace {
enum initial_metadata_state {
// Initial metadata not yet seen.
@@ -50,6 +54,23 @@ enum initial_metadata_state {
};
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_slice_buffer_init(&slices);
+ GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+ elem, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+ elem, grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() {
+ grpc_slice_buffer_destroy_internal(&slices);
+ GRPC_ERROR_UNREF(cancel_error);
+ }
+
grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -57,11 +78,12 @@ struct call_data {
grpc_linked_mdelem accept_stream_encoding_storage;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
- grpc_message_compression_algorithm message_compression_algorithm;
- initial_metadata_state send_initial_metadata_state;
- grpc_error* cancel_error;
+ grpc_message_compression_algorithm message_compression_algorithm =
+ GRPC_MESSAGE_COMPRESS_NONE;
+ initial_metadata_state send_initial_metadata_state = INITIAL_METADATA_UNSEEN;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_closure start_send_message_batch_in_call_combiner;
- grpc_transport_stream_op_batch* send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch = nullptr;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream>
replacement_stream;
@@ -424,16 +446,7 @@ static void compress_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- calld->cancel_error = GRPC_ERROR_NONE;
- grpc_slice_buffer_init(&calld->slices);
- GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
- start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
- on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
- elem, grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -442,8 +455,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- grpc_slice_buffer_destroy_internal(&calld->slices);
- GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->~call_data();
}
/* Constructor for channel_data */
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 436ea09d94..ce1be8370c 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -35,9 +35,32 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
+static void hs_recv_initial_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_trailing_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_message_ready(void* user_data, grpc_error* err);
+
namespace {
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ hs_recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_message_ready, hs_recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ hs_recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ }
+
+ ~call_data() {
+ GRPC_ERROR_UNREF(recv_initial_metadata_ready_error);
+ if (have_read_stream) {
+ read_stream->Orphan();
+ }
+ }
+
grpc_call_combiner* call_combiner;
// Outgoing headers to add to send_initial_metadata.
@@ -47,27 +70,27 @@ struct call_data {
// If we see the recv_message contents in the GET query string, we
// store it here.
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> read_stream;
- bool have_read_stream;
+ bool have_read_stream = false;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
- grpc_error* recv_initial_metadata_ready_error;
+ grpc_error* recv_initial_metadata_ready_error = GRPC_ERROR_NONE;
grpc_closure* original_recv_initial_metadata_ready;
- grpc_metadata_batch* recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
uint32_t* recv_initial_metadata_flags;
- bool seen_recv_initial_metadata_ready;
+ bool seen_recv_initial_metadata_ready = false;
// State for intercepting recv_message.
grpc_closure* original_recv_message_ready;
grpc_closure recv_message_ready;
grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
- bool seen_recv_message_ready;
+ bool seen_recv_message_ready = false;
// State for intercepting recv_trailing_metadata
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_error* recv_trailing_metadata_ready_error;
- bool seen_recv_trailing_metadata_ready;
+ bool seen_recv_trailing_metadata_ready = false;
};
struct channel_data {
@@ -431,16 +454,7 @@ static void hs_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* hs_init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- hs_recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_message_ready, hs_recv_message_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- hs_recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -449,10 +463,7 @@ static void hs_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_ready_error);
- if (calld->have_read_stream) {
- calld->read_stream->Orphan();
- }
+ calld->~call_data();
}
/* Constructor for channel_data */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index 8ac34c629f..6a7231ff7d 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -25,7 +25,6 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/parse_address.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/ext/filters/load_reporting/registered_opencensus_objects.h"
#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
#include "src/core/lib/channel/channel_args.h"
@@ -36,6 +35,7 @@
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc {
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 2d3b16d992..94d6942aa4 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -90,9 +90,53 @@ RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
} // namespace
} // namespace grpc_core
+static void recv_message_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+
namespace {
+struct channel_data {
+ message_size_limits limits;
+ // Maps path names to refcounted_message_size_limits structs.
+ grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
+ method_limit_table;
+};
+
struct call_data {
+ call_data(grpc_call_element* elem, const channel_data& chand,
+ const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner), limits(chand.limits) {
+ GRPC_CLOSURE_INIT(&recv_message_ready, ::recv_message_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ // Get max sizes from channel data, then merge in per-method config values.
+ // Note: Per-method config is only available on the client, so we
+ // apply the max request size to the send limit and the max response
+ // size to the receive limit.
+ if (chand.method_limit_table != nullptr) {
+ grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
+ grpc_core::ServiceConfig::MethodConfigTableLookup(
+ *chand.method_limit_table, args.path);
+ if (limits != nullptr) {
+ if (limits->limits().max_send_size >= 0 &&
+ (limits->limits().max_send_size < this->limits.max_send_size ||
+ this->limits.max_send_size < 0)) {
+ this->limits.max_send_size = limits->limits().max_send_size;
+ }
+ if (limits->limits().max_recv_size >= 0 &&
+ (limits->limits().max_recv_size < this->limits.max_recv_size ||
+ this->limits.max_recv_size < 0)) {
+ this->limits.max_recv_size = limits->limits().max_recv_size;
+ }
+ }
+ }
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(error); }
+
grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
@@ -101,25 +145,17 @@ struct call_data {
grpc_closure recv_message_ready;
grpc_closure recv_trailing_metadata_ready;
// The error caused by a message that is too large, or GRPC_ERROR_NONE
- grpc_error* error;
+ grpc_error* error = GRPC_ERROR_NONE;
// Used by recv_message_ready.
- grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
// Original recv_message_ready callback, invoked after our own.
- grpc_closure* next_recv_message_ready;
+ grpc_closure* next_recv_message_ready = nullptr;
// Original recv_trailing_metadata callback, invoked after our own.
grpc_closure* original_recv_trailing_metadata_ready;
- bool seen_recv_trailing_metadata;
+ bool seen_recv_trailing_metadata = false;
grpc_error* recv_trailing_metadata_error;
};
-struct channel_data {
- message_size_limits limits;
- // Maps path names to refcounted_message_size_limits structs.
- grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
- grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
- method_limit_table;
-};
-
} // namespace
// Callback invoked when we receive a message. Here we check the max
@@ -228,38 +264,7 @@ static void start_transport_stream_op_batch(
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->call_combiner = args->call_combiner;
- calld->next_recv_message_ready = nullptr;
- calld->original_recv_trailing_metadata_ready = nullptr;
- calld->error = GRPC_ERROR_NONE;
- GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- // Get max sizes from channel data, then merge in per-method config values.
- // Note: Per-method config is only available on the client, so we
- // apply the max request size to the send limit and the max response
- // size to the receive limit.
- calld->limits = chand->limits;
- if (chand->method_limit_table != nullptr) {
- grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
- grpc_core::ServiceConfig::MethodConfigTableLookup(
- *chand->method_limit_table, args->path);
- if (limits != nullptr) {
- if (limits->limits().max_send_size >= 0 &&
- (limits->limits().max_send_size < calld->limits.max_send_size ||
- calld->limits.max_send_size < 0)) {
- calld->limits.max_send_size = limits->limits().max_send_size;
- }
- if (limits->limits().max_recv_size >= 0 &&
- (limits->limits().max_recv_size < calld->limits.max_recv_size ||
- calld->limits.max_recv_size < 0)) {
- calld->limits.max_recv_size = limits->limits().max_recv_size;
- }
- }
- }
+ new (elem->call_data) call_data(elem, *chand, *args);
return GRPC_ERROR_NONE;
}
@@ -268,7 +273,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = (call_data*)elem->call_data;
- GRPC_ERROR_UNREF(calld->error);
+ calld->~call_data();
}
static int default_size(const grpc_channel_args* args,
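// [Editor's note — illustrative sketch, not part of the diff] The new call_data
// constructor above merges channel-level and per-method message size limits by
// taking the tighter of the two, with a negative value meaning "unlimited".
// The standalone snippet below restates that rule with hypothetical names; it
// is a sketch under those assumptions, not gRPC code.
#include <cassert>

namespace {
struct limits_pair {
  int max_send_size;  // -1 == unlimited
  int max_recv_size;  // -1 == unlimited
};

// Returns the effective limit: the smaller of the two, treating -1 as +infinity.
int merge_limit(int channel_limit, int method_limit) {
  if (method_limit >= 0 &&
      (method_limit < channel_limit || channel_limit < 0)) {
    return method_limit;
  }
  return channel_limit;
}
}  // namespace

int main() {
  limits_pair channel = {4 * 1024 * 1024, -1};   // 4 MiB send cap, recv unlimited
  limits_pair method = {1 * 1024 * 1024, 2048};  // per-method config values
  assert(merge_limit(channel.max_send_size, method.max_send_size) ==
         1 * 1024 * 1024);  // method config tightens the channel default
  assert(merge_limit(channel.max_recv_size, method.max_recv_size) == 2048);
  assert(merge_limit(channel.max_send_size, -1) == 4 * 1024 * 1024);
  return 0;
}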
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.cc b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
index 60a32022f5..42a2e2e896 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.cc
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
@@ -117,8 +117,9 @@ static void on_handshake_done(void* arg, grpc_error* error) {
c->args.interested_parties);
c->result->transport =
grpc_create_chttp2_transport(args->args, args->endpoint, true);
- c->result->socket_uuid =
- grpc_chttp2_transport_get_socket_uuid(c->result->transport);
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node =
+ grpc_chttp2_transport_get_socket_node(c->result->transport);
+ c->result->socket_uuid = socket_node == nullptr ? 0 : socket_node->uuid();
GPR_ASSERT(c->result->transport);
// TODO(roth): We ideally want to wait until we receive HTTP/2
// settings from the server before we consider the connection
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
index 5ce73a95d7..e73eee4353 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
@@ -27,7 +27,6 @@
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
-#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/ext/transport/chttp2/client/chttp2_connector.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/memory.h"
@@ -39,6 +38,7 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/uri/uri_parser.h"
static void client_channel_factory_ref(
grpc_client_channel_factory* cc_factory) {}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.cc b/src/core/ext/transport/chttp2/server/chttp2_server.cc
index 07b304b320..3d09187b9b 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.cc
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.cc
@@ -37,8 +37,10 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/channel/handshaker_registry.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
@@ -114,9 +116,16 @@ static void on_handshake_done(void* arg, grpc_error* error) {
server_connection_state* connection_state =
static_cast<server_connection_state*>(args->user_data);
gpr_mu_lock(&connection_state->svr_state->mu);
+ grpc_resource_user* resource_user = grpc_server_get_default_resource_user(
+ connection_state->svr_state->server);
if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) {
const char* error_str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);
+ if (resource_user != nullptr) {
+ grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
if (error == GRPC_ERROR_NONE && args->endpoint != nullptr) {
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
@@ -135,12 +144,12 @@ static void on_handshake_done(void* arg, grpc_error* error) {
// handshaker may have handed off the connection to some external
// code, so we can just clean up here without creating a transport.
if (args->endpoint != nullptr) {
- grpc_transport* transport =
- grpc_create_chttp2_transport(args->args, args->endpoint, false);
+ grpc_transport* transport = grpc_create_chttp2_transport(
+ args->args, args->endpoint, false, resource_user);
grpc_server_setup_transport(
connection_state->svr_state->server, transport,
connection_state->accepting_pollset, args->args,
- grpc_chttp2_transport_get_socket_uuid(transport));
+ grpc_chttp2_transport_get_socket_node(transport), resource_user);
// Use notify_on_receive_settings callback to enforce the
// handshake deadline.
connection_state->transport =
@@ -159,6 +168,11 @@ static void on_handshake_done(void* arg, grpc_error* error) {
connection_state, grpc_schedule_on_exec_ctx);
grpc_timer_init(&connection_state->timer, connection_state->deadline,
&connection_state->on_timeout);
+ } else {
+ if (resource_user != nullptr) {
+ grpc_resource_user_free(resource_user,
+ GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
}
}
grpc_handshake_manager_pending_list_remove(
@@ -183,6 +197,20 @@ static void on_accept(void* arg, grpc_endpoint* tcp,
gpr_free(acceptor);
return;
}
+ grpc_resource_user* resource_user =
+ grpc_server_get_default_resource_user(state->server);
+ if (resource_user != nullptr &&
+ !grpc_resource_user_safe_alloc(resource_user,
+ GRPC_RESOURCE_QUOTA_CHANNEL_SIZE)) {
+ gpr_log(
+ GPR_ERROR,
+ "Memory quota exhausted, rejecting the connection, no handshaking.");
+ gpr_mu_unlock(&state->mu);
+ grpc_endpoint_shutdown(tcp, GRPC_ERROR_NONE);
+ grpc_endpoint_destroy(tcp);
+ gpr_free(acceptor);
+ return;
+ }
grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create();
grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs,
handshake_mgr);
@@ -338,9 +366,10 @@ grpc_error* grpc_chttp2_server_add_port(grpc_server* server, const char* addr,
grpc_resolved_addresses_destroy(resolved);
arg = grpc_channel_args_find(args, GRPC_ARG_ENABLE_CHANNELZ);
- if (grpc_channel_arg_get_bool(arg, false)) {
+ if (grpc_channel_arg_get_bool(arg, GRPC_ENABLE_CHANNELZ_DEFAULT)) {
state->channelz_listen_socket =
- grpc_core::MakeRefCounted<grpc_core::channelz::ListenSocketNode>();
+ grpc_core::MakeRefCounted<grpc_core::channelz::ListenSocketNode>(
+ grpc_core::UniquePtr<char>(gpr_strdup(addr)));
socket_uuid = state->channelz_listen_socket->uuid();
}
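// [Editor's note — illustrative sketch, not part of the diff] The server
// changes above reserve a fixed per-connection amount from a quota before
// handshaking and release it on every failure path. The snippet below models
// that alloc/free pairing with a hypothetical Quota type; it is not the real
// grpc_resource_user API.
#include <atomic>
#include <cstdint>
#include <cstdio>

class Quota {
 public:
  explicit Quota(int64_t cap) : free_(cap) {}
  // Mirrors the "safe alloc" idea: fail instead of over-committing.
  bool TryReserve(int64_t n) {
    int64_t cur = free_.load();
    while (cur >= n) {
      if (free_.compare_exchange_weak(cur, cur - n)) return true;
    }
    return false;
  }
  void Release(int64_t n) { free_.fetch_add(n); }

 private:
  std::atomic<int64_t> free_;
};

constexpr int64_t kChannelSize = 32 * 1024;  // hypothetical per-connection cost

bool AcceptConnection(Quota* quota, bool handshake_ok) {
  if (!quota->TryReserve(kChannelSize)) {
    std::puts("quota exhausted: reject before handshaking");
    return false;
  }
  if (!handshake_ok) {
    quota->Release(kChannelSize);  // failure path must give the budget back
    return false;
  }
  return true;  // the transport now owns the reservation
}

int main() {
  Quota quota(64 * 1024);
  AcceptConnection(&quota, true);   // accepted, reservation kept
  AcceptConnection(&quota, false);  // handshake fails, reservation released
  AcceptConnection(&quota, true);   // still fits thanks to the release above
  return 0;
}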
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
index b9024a87e2..c29c1e58cd 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
@@ -61,7 +61,7 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
grpc_endpoint_add_to_pollset(server_endpoint, pollsets[i]);
}
- grpc_server_setup_transport(server, transport, nullptr, server_args, 0);
+ grpc_server_setup_transport(server, transport, nullptr, server_args, nullptr);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 8a481bb7d5..9b6574b612 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015 gRPC authors.
+ * Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,6 +31,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include "src/core/ext/transport/chttp2/transport/context_list.h"
#include "src/core/ext/transport/chttp2/transport/frame_data.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
@@ -53,6 +54,7 @@
#include "src/core/lib/transport/timeout_encoding.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
+#include "src/core/lib/uri/uri_parser.h"
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
@@ -154,84 +156,57 @@ bool g_flow_control_enabled = true;
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
-static void destruct_transport(grpc_chttp2_transport* t) {
+grpc_chttp2_transport::~grpc_chttp2_transport() {
size_t i;
- if (t->channelz_socket != nullptr) {
- t->channelz_socket.reset();
+ if (channelz_socket != nullptr) {
+ channelz_socket.reset();
}
- grpc_endpoint_destroy(t->ep);
+ grpc_endpoint_destroy(ep);
+
+ grpc_slice_buffer_destroy_internal(&qbuf);
- grpc_slice_buffer_destroy_internal(&t->qbuf);
+ grpc_slice_buffer_destroy_internal(&outbuf);
+ grpc_chttp2_hpack_compressor_destroy(&hpack_compressor);
- grpc_slice_buffer_destroy_internal(&t->outbuf);
- grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
+ grpc_core::ContextList::Execute(cl, nullptr, GRPC_ERROR_NONE);
+ cl = nullptr;
- grpc_slice_buffer_destroy_internal(&t->read_buffer);
- grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
- grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
+ grpc_slice_buffer_destroy_internal(&read_buffer);
+ grpc_chttp2_hpack_parser_destroy(&hpack_parser);
+ grpc_chttp2_goaway_parser_destroy(&goaway_parser);
for (i = 0; i < STREAM_LIST_COUNT; i++) {
- GPR_ASSERT(t->lists[i].head == nullptr);
- GPR_ASSERT(t->lists[i].tail == nullptr);
+ GPR_ASSERT(lists[i].head == nullptr);
+ GPR_ASSERT(lists[i].tail == nullptr);
}
- GRPC_ERROR_UNREF(t->goaway_error);
+ GRPC_ERROR_UNREF(goaway_error);
- GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
+ GPR_ASSERT(grpc_chttp2_stream_map_size(&stream_map) == 0);
- grpc_chttp2_stream_map_destroy(&t->stream_map);
- grpc_connectivity_state_destroy(&t->channel_callback.state_tracker);
+ grpc_chttp2_stream_map_destroy(&stream_map);
+ grpc_connectivity_state_destroy(&channel_callback.state_tracker);
- GRPC_COMBINER_UNREF(t->combiner, "chttp2_transport");
+ GRPC_COMBINER_UNREF(combiner, "chttp2_transport");
- cancel_pings(t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
+ cancel_pings(this,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
- while (t->write_cb_pool) {
- grpc_chttp2_write_cb* next = t->write_cb_pool->next;
- gpr_free(t->write_cb_pool);
- t->write_cb_pool = next;
+ while (write_cb_pool) {
+ grpc_chttp2_write_cb* next = write_cb_pool->next;
+ gpr_free(write_cb_pool);
+ write_cb_pool = next;
}
- t->flow_control.Destroy();
+ flow_control.Destroy();
- GRPC_ERROR_UNREF(t->closed_with_error);
- gpr_free(t->ping_acks);
- gpr_free(t->peer_string);
- gpr_free(t);
-}
-
-#ifndef NDEBUG
-void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason,
- const char* file, int line) {
- if (grpc_trace_chttp2_refcount.enabled()) {
- gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
- gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
- t, val, val - 1, reason, file, line);
- }
- if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
+ GRPC_ERROR_UNREF(closed_with_error);
+ gpr_free(ping_acks);
+ gpr_free(peer_string);
}
-void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
- const char* file, int line) {
- if (grpc_trace_chttp2_refcount.enabled()) {
- gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
- gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
- t, val, val + 1, reason, file, line);
- }
- gpr_ref(&t->refs);
-}
-#else
-void grpc_chttp2_unref_transport(grpc_chttp2_transport* t) {
- if (!gpr_unref(&t->refs)) return;
- destruct_transport(t);
-}
-
-void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
-#endif
-
static const grpc_transport_vtable* get_vtable(void);
/* Returns whether bdp is enabled */
@@ -395,8 +370,12 @@ static bool read_channel_args(grpc_chttp2_transport* t,
}
}
if (channelz_enabled) {
+ // TODO(ncteisen): add an API to endpoint to query for local addr, and pass
+ // it in here, so SocketNode knows its own address.
t->channelz_socket =
- grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>();
+ grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>(
+ grpc_core::UniquePtr<char>(),
+ grpc_core::UniquePtr<char>(gpr_strdup(t->peer_string)));
}
return enable_bdp;
}
@@ -476,113 +455,96 @@ static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
}
}
-static void init_transport(grpc_chttp2_transport* t,
- const grpc_channel_args* channel_args,
- grpc_endpoint* ep, bool is_client) {
+grpc_chttp2_transport::grpc_chttp2_transport(
+ const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
+ grpc_resource_user* resource_user)
+ : refs(1, &grpc_trace_chttp2_refcount),
+ ep(ep),
+ peer_string(grpc_endpoint_get_peer(ep)),
+ resource_user(resource_user),
+ combiner(grpc_combiner_create()),
+ is_client(is_client),
+ next_stream_id(is_client ? 1 : 2),
+ deframe_state(is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0) {
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
-
- t->base.vtable = get_vtable();
- t->ep = ep;
- /* one ref is for destroy */
- gpr_ref_init(&t->refs, 1);
- t->combiner = grpc_combiner_create();
- t->peer_string = grpc_endpoint_get_peer(ep);
- t->endpoint_reading = 1;
- t->next_stream_id = is_client ? 1 : 2;
- t->is_client = is_client;
- t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
- t->is_first_frame = true;
- grpc_connectivity_state_init(
- &t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
- is_client ? "client_transport" : "server_transport");
-
- grpc_slice_buffer_init(&t->qbuf);
- grpc_slice_buffer_init(&t->outbuf);
- grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
-
- init_transport_closures(t);
-
- t->goaway_error = GRPC_ERROR_NONE;
- grpc_chttp2_goaway_parser_init(&t->goaway_parser);
- grpc_chttp2_hpack_parser_init(&t->hpack_parser);
-
- grpc_slice_buffer_init(&t->read_buffer);
-
+ base.vtable = get_vtable();
/* 8 is a random stab in the dark as to a good initial size: it's small enough
that it shouldn't waste memory for infrequently used connections, yet
large enough that the exponential growth should happen nicely when it's
needed.
TODO(ctiller): tune this */
- grpc_chttp2_stream_map_init(&t->stream_map, 8);
+ grpc_chttp2_stream_map_init(&stream_map, 8);
+ grpc_slice_buffer_init(&read_buffer);
+ grpc_connectivity_state_init(
+ &channel_callback.state_tracker, GRPC_CHANNEL_READY,
+ is_client ? "client_transport" : "server_transport");
+ grpc_slice_buffer_init(&outbuf);
+ if (is_client) {
+ grpc_slice_buffer_add(&outbuf, grpc_slice_from_copied_string(
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+ }
+ grpc_chttp2_hpack_compressor_init(&hpack_compressor);
+ grpc_slice_buffer_init(&qbuf);
/* copy in initial settings to all setting sets */
size_t i;
int j;
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
- t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
+ settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
- t->dirtied_local_settings = 1;
- /* Hack: it's common for implementations to assume 65536 bytes initial send
- window -- this should by rights be 0 */
- t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- t->sent_local_settings = 0;
- t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
+ grpc_chttp2_hpack_parser_init(&hpack_parser);
+ grpc_chttp2_goaway_parser_init(&goaway_parser);
- if (is_client) {
- grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
- GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- }
+ init_transport_closures(this);
/* configure http2 the way we like it */
if (is_client) {
- queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
- queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+ queue_setting_update(this, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+ queue_setting_update(this, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ queue_setting_update(this, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
- queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
- 1);
+ queue_setting_update(this,
+ GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
- configure_transport_ping_policy(t);
- init_transport_keepalive_settings(t);
-
- t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
+ configure_transport_ping_policy(this);
+ init_transport_keepalive_settings(this);
bool enable_bdp = true;
if (channel_args) {
- enable_bdp = read_channel_args(t, channel_args, is_client);
+ enable_bdp = read_channel_args(this, channel_args, is_client);
}
if (g_flow_control_enabled) {
- t->flow_control.Init<grpc_core::chttp2::TransportFlowControl>(t,
- enable_bdp);
+ flow_control.Init<grpc_core::chttp2::TransportFlowControl>(this,
+ enable_bdp);
} else {
- t->flow_control.Init<grpc_core::chttp2::TransportFlowControlDisabled>(t);
+ flow_control.Init<grpc_core::chttp2::TransportFlowControlDisabled>(this);
enable_bdp = false;
}
/* No pings allowed before receiving a header or data frame. */
- t->ping_state.pings_before_data_required = 0;
- t->ping_state.is_delayed_ping_timer_set = false;
- t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
+ ping_state.pings_before_data_required = 0;
+ ping_state.is_delayed_ping_timer_set = false;
+ ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
- t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
- t->ping_recv_state.ping_strikes = 0;
+ ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+ ping_recv_state.ping_strikes = 0;
- init_keepalive_pings_if_enabled(t);
+ init_keepalive_pings_if_enabled(this);
if (enable_bdp) {
- GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
- schedule_bdp_ping_locked(t);
- grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
+ GRPC_CHTTP2_REF_TRANSPORT(this, "bdp_ping");
+ schedule_bdp_ping_locked(this);
+ grpc_chttp2_act_on_flowctl_action(flow_control->PeriodicUpdate(), this,
nullptr);
}
- grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
- post_benign_reclaimer(t);
+ grpc_chttp2_initiate_write(this, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
+ post_benign_reclaimer(this);
}
static void destroy_transport_locked(void* tp, grpc_error* error) {
@@ -592,6 +554,7 @@ static void destroy_transport_locked(void* tp, grpc_error* error) {
t, grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+ // Must be the last line.
GRPC_CHTTP2_UNREF_TRANSPORT(t, "destroy");
}
@@ -676,111 +639,108 @@ void grpc_chttp2_stream_unref(grpc_chttp2_stream* s) {
}
#endif
-static int init_stream(grpc_transport* gt, grpc_stream* gs,
- grpc_stream_refcount* refcount, const void* server_data,
- gpr_arena* arena) {
- GPR_TIMER_SCOPE("init_stream", 0);
- grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
- grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
-
- s->t = t;
- s->refcount = refcount;
+grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
+ grpc_stream_refcount* refcount,
+ const void* server_data,
+ gpr_arena* arena)
+ : t(t), refcount(refcount), metadata_buffer{{arena}, {arena}} {
/* We reserve one 'active stream' that's dropped when the stream is
read-closed. The others are for Chttp2IncomingByteStreams that are
actively reading */
- GRPC_CHTTP2_STREAM_REF(s, "chttp2");
-
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena);
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
- grpc_chttp2_data_parser_init(&s->data_parser);
- grpc_slice_buffer_init(&s->flow_controlled_buffer);
- s->deadline = GRPC_MILLIS_INF_FUTURE;
- GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
- grpc_schedule_on_exec_ctx);
- grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
- s->unprocessed_incoming_frames_buffer_cached_length = 0;
- grpc_slice_buffer_init(&s->frame_storage);
- grpc_slice_buffer_init(&s->compressed_data_buffer);
- grpc_slice_buffer_init(&s->decompressed_data_buffer);
- s->pending_byte_stream = false;
- s->decompressed_header_bytes = 0;
- GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
- grpc_combiner_scheduler(t->combiner));
-
+ GRPC_CHTTP2_STREAM_REF(this, "chttp2");
GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
if (server_data) {
- s->id = static_cast<uint32_t>((uintptr_t)server_data);
- *t->accepting_stream = s;
- grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ id = static_cast<uint32_t>((uintptr_t)server_data);
+ *t->accepting_stream = this;
+ grpc_chttp2_stream_map_add(&t->stream_map, id, this);
post_destructive_reclaimer(t);
}
-
if (t->flow_control->flow_control_enabled()) {
- s->flow_control.Init<grpc_core::chttp2::StreamFlowControl>(
+ flow_control.Init<grpc_core::chttp2::StreamFlowControl>(
static_cast<grpc_core::chttp2::TransportFlowControl*>(
t->flow_control.get()),
- s);
+ this);
} else {
- s->flow_control.Init<grpc_core::chttp2::StreamFlowControlDisabled>();
+ flow_control.Init<grpc_core::chttp2::StreamFlowControlDisabled>();
}
- return 0;
-}
+ grpc_slice_buffer_init(&frame_storage);
+ grpc_slice_buffer_init(&unprocessed_incoming_frames_buffer);
+ grpc_slice_buffer_init(&flow_controlled_buffer);
+ grpc_slice_buffer_init(&compressed_data_buffer);
+ grpc_slice_buffer_init(&decompressed_data_buffer);
-static void destroy_stream_locked(void* sp, grpc_error* error) {
- GPR_TIMER_SCOPE("destroy_stream", 0);
- grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
- grpc_chttp2_transport* t = s->t;
+ GRPC_CLOSURE_INIT(&complete_fetch_locked, ::complete_fetch_locked, this,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&reset_byte_stream, ::reset_byte_stream, this,
+ grpc_combiner_scheduler(t->combiner));
+}
+grpc_chttp2_stream::~grpc_chttp2_stream() {
if (t->channelz_socket != nullptr) {
- if ((t->is_client && s->eos_received) || (!t->is_client && s->eos_sent)) {
+ if ((t->is_client && eos_received) || (!t->is_client && eos_sent)) {
t->channelz_socket->RecordStreamSucceeded();
} else {
t->channelz_socket->RecordStreamFailed();
}
}
- GPR_ASSERT((s->write_closed && s->read_closed) || s->id == 0);
- if (s->id != 0) {
- GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == nullptr);
+ GPR_ASSERT((write_closed && read_closed) || id == 0);
+ if (id != 0) {
+ GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, id) == nullptr);
}
- grpc_slice_buffer_destroy_internal(&s->unprocessed_incoming_frames_buffer);
- grpc_slice_buffer_destroy_internal(&s->frame_storage);
- grpc_slice_buffer_destroy_internal(&s->compressed_data_buffer);
- grpc_slice_buffer_destroy_internal(&s->decompressed_data_buffer);
+ grpc_slice_buffer_destroy_internal(&unprocessed_incoming_frames_buffer);
+ grpc_slice_buffer_destroy_internal(&frame_storage);
+ grpc_slice_buffer_destroy_internal(&compressed_data_buffer);
+ grpc_slice_buffer_destroy_internal(&decompressed_data_buffer);
- grpc_chttp2_list_remove_stalled_by_transport(t, s);
- grpc_chttp2_list_remove_stalled_by_stream(t, s);
+ grpc_chttp2_list_remove_stalled_by_transport(t, this);
+ grpc_chttp2_list_remove_stalled_by_stream(t, this);
for (int i = 0; i < STREAM_LIST_COUNT; i++) {
- if (GPR_UNLIKELY(s->included[i])) {
+ if (GPR_UNLIKELY(included[i])) {
gpr_log(GPR_ERROR, "%s stream %d still included in list %d",
- t->is_client ? "client" : "server", s->id, i);
+ t->is_client ? "client" : "server", id, i);
abort();
}
}
- GPR_ASSERT(s->send_initial_metadata_finished == nullptr);
- GPR_ASSERT(s->fetching_send_message == nullptr);
- GPR_ASSERT(s->send_trailing_metadata_finished == nullptr);
- GPR_ASSERT(s->recv_initial_metadata_ready == nullptr);
- GPR_ASSERT(s->recv_message_ready == nullptr);
- GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);
- grpc_chttp2_data_parser_destroy(&s->data_parser);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
- grpc_slice_buffer_destroy_internal(&s->flow_controlled_buffer);
- GRPC_ERROR_UNREF(s->read_closed_error);
- GRPC_ERROR_UNREF(s->write_closed_error);
- GRPC_ERROR_UNREF(s->byte_stream_error);
+ GPR_ASSERT(send_initial_metadata_finished == nullptr);
+ GPR_ASSERT(fetching_send_message == nullptr);
+ GPR_ASSERT(send_trailing_metadata_finished == nullptr);
+ GPR_ASSERT(recv_initial_metadata_ready == nullptr);
+ GPR_ASSERT(recv_message_ready == nullptr);
+ GPR_ASSERT(recv_trailing_metadata_finished == nullptr);
+ grpc_slice_buffer_destroy_internal(&flow_controlled_buffer);
+ GRPC_ERROR_UNREF(read_closed_error);
+ GRPC_ERROR_UNREF(write_closed_error);
+ GRPC_ERROR_UNREF(byte_stream_error);
- s->flow_control.Destroy();
+ flow_control.Destroy();
+
+ if (t->resource_user != nullptr) {
+ grpc_resource_user_free(t->resource_user, GRPC_RESOURCE_QUOTA_CALL_SIZE);
+ }
GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream");
+ GRPC_CLOSURE_SCHED(destroy_stream_arg, GRPC_ERROR_NONE);
+}
+
+static int init_stream(grpc_transport* gt, grpc_stream* gs,
+ grpc_stream_refcount* refcount, const void* server_data,
+ gpr_arena* arena) {
+ GPR_TIMER_SCOPE("init_stream", 0);
+ grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
+ new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);
+ return 0;
+}
- GRPC_CLOSURE_SCHED(s->destroy_stream_arg, GRPC_ERROR_NONE);
+static void destroy_stream_locked(void* sp, grpc_error* error) {
+ GPR_TIMER_SCOPE("destroy_stream", 0);
+ grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
+ s->~grpc_chttp2_stream();
}
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
@@ -816,7 +776,21 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
if (t->channel_callback.accept_stream == nullptr) {
return nullptr;
}
- grpc_chttp2_stream* accepting;
+  // Don't accept the stream if the memory quota doesn't allow it. Note that
+  // we simply refuse the stream here instead of canceling it after it has
+  // been accepted, since the latter would create the call, which costs
+  // significant memory.
+ if (t->resource_user != nullptr &&
+ !grpc_resource_user_safe_alloc(t->resource_user,
+ GRPC_RESOURCE_QUOTA_CALL_SIZE)) {
+ gpr_log(GPR_ERROR, "Memory exhausted, rejecting the stream.");
+ grpc_slice_buffer_add(
+ &t->qbuf,
+ grpc_chttp2_rst_stream_create(
+ id, static_cast<uint32_t>(GRPC_HTTP2_REFUSED_STREAM), nullptr));
+ grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
+ return nullptr;
+ }
+ grpc_chttp2_stream* accepting = nullptr;
GPR_ASSERT(t->accepting_stream == nullptr);
t->accepting_stream = &accepting;
t->channel_callback.accept_stream(t->channel_callback.accept_stream_user_data,
@@ -1063,11 +1037,13 @@ static void write_action_begin_locked(void* gt, grpc_error* error_ignored) {
static void write_action(void* gt, grpc_error* error) {
GPR_TIMER_SCOPE("write_action", 0);
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(gt);
+ void* cl = t->cl;
+ t->cl = nullptr;
grpc_endpoint_write(
t->ep, &t->outbuf,
GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked, t,
grpc_combiner_scheduler(t->combiner)),
- nullptr);
+ cl);
}
/* Callback from the grpc_endpoint after bytes have been written by calling
@@ -1391,6 +1367,8 @@ static void perform_stream_op_locked(void* stream_op,
GRPC_STATS_INC_HTTP2_OP_BATCHES();
+ s->context = op->payload->context;
+ s->traced = op->is_traced;
if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str,
@@ -1701,8 +1679,8 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
gpr_free(str);
}
- op->handler_private.extra_arg = gs;
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
+ op->handler_private.extra_arg = gs;
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
op, grpc_combiner_scheduler(t->combiner)),
@@ -2708,6 +2686,7 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
+ grpc_timer_init_unset(&t->keepalive_watchdog_timer);
send_keepalive_ping_locked(t);
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
} else {
@@ -2834,8 +2813,8 @@ Chttp2IncomingByteStream::Chttp2IncomingByteStream(
: ByteStream(frame_size, flags),
transport_(transport),
stream_(stream),
+ refs_(2),
remaining_bytes_(frame_size) {
- gpr_ref_init(&refs_, 2);
GRPC_ERROR_UNREF(stream->byte_stream_error);
stream->byte_stream_error = GRPC_ERROR_NONE;
}
@@ -2860,14 +2839,6 @@ void Chttp2IncomingByteStream::Orphan() {
GRPC_ERROR_NONE);
}
-void Chttp2IncomingByteStream::Unref() {
- if (gpr_unref(&refs_)) {
- Delete(this);
- }
-}
-
-void Chttp2IncomingByteStream::Ref() { gpr_ref(&refs_); }
-
void Chttp2IncomingByteStream::NextLocked(void* arg,
grpc_error* error_ignored) {
Chttp2IncomingByteStream* bs = static_cast<Chttp2IncomingByteStream*>(arg);
@@ -3174,21 +3145,18 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
static const grpc_transport_vtable* get_vtable(void) { return &vtable; }
-intptr_t grpc_chttp2_transport_get_socket_uuid(grpc_transport* transport) {
+grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
+grpc_chttp2_transport_get_socket_node(grpc_transport* transport) {
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(transport);
- if (t->channelz_socket != nullptr) {
- return t->channelz_socket->uuid();
- } else {
- return 0;
- }
+ return t->channelz_socket;
}
grpc_transport* grpc_create_chttp2_transport(
- const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client) {
- grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(
- gpr_zalloc(sizeof(grpc_chttp2_transport)));
- init_transport(t, channel_args, ep, is_client);
+ const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
+ grpc_resource_user* resource_user) {
+ auto t = grpc_core::New<grpc_chttp2_transport>(channel_args, ep, is_client,
+ resource_user);
return &t->base;
}
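// [Editor's note — illustrative sketch, not part of the diff] init_stream and
// destroy_stream_locked above now construct the stream with placement new into
// caller-provided storage and tear it down with an explicit destructor call,
// replacing the old init_*/destroy_* helpers. The types below are hypothetical
// stand-ins showing only that pattern.
#include <cstdio>
#include <new>

struct Stream {
  explicit Stream(int id) : id(id) { std::printf("ctor %d\n", id); }
  ~Stream() { std::printf("dtor %d\n", id); }
  int id;
};

int main() {
  // Storage owned by the caller, much like the sizeof(grpc_chttp2_stream)
  // sized slot the transport vtable hands to init_stream.
  alignas(Stream) unsigned char storage[sizeof(Stream)];
  Stream* s = new (storage) Stream(7);  // init_stream equivalent
  s->~Stream();                         // destroy_stream equivalent
  return 0;
}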
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index e5872fee43..c22cfb0ad7 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
+#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/transport/transport.h"
@@ -32,9 +33,11 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount;
extern bool g_flow_control_enabled;
grpc_transport* grpc_create_chttp2_transport(
- const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client);
+ const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
+ grpc_resource_user* resource_user = nullptr);
-intptr_t grpc_chttp2_transport_get_socket_uuid(grpc_transport* transport);
+grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
+grpc_chttp2_transport_get_socket_node(grpc_transport* transport);
/// Takes ownership of \a read_buffer, which (if non-NULL) contains
/// leftover bytes previously read from the endpoint (e.g., by handshakers).
diff --git a/src/core/ext/transport/chttp2/transport/context_list.cc b/src/core/ext/transport/chttp2/transport/context_list.cc
new file mode 100644
index 0000000000..f30d41c332
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/context_list.cc
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/transport/chttp2/transport/context_list.h"
+
+namespace {
+void (*write_timestamps_callback_g)(void*, grpc_core::Timestamps*) = nullptr;
+}
+
+namespace grpc_core {
+void ContextList::Execute(void* arg, grpc_core::Timestamps* ts,
+ grpc_error* error) {
+ ContextList* head = static_cast<ContextList*>(arg);
+ ContextList* to_be_freed;
+ while (head != nullptr) {
+ if (error == GRPC_ERROR_NONE && ts != nullptr) {
+ if (write_timestamps_callback_g) {
+ ts->byte_offset = static_cast<uint32_t>(head->byte_offset_);
+ write_timestamps_callback_g(head->s_->context, ts);
+ }
+ }
+ GRPC_CHTTP2_STREAM_UNREF(static_cast<grpc_chttp2_stream*>(head->s_),
+ "timestamp");
+ to_be_freed = head;
+ head = head->next_;
+ grpc_core::Delete(to_be_freed);
+ }
+}
+
+void grpc_http2_set_write_timestamps_callback(
+ void (*fn)(void*, grpc_core::Timestamps*)) {
+ write_timestamps_callback_g = fn;
+}
+} /* namespace grpc_core */
diff --git a/src/core/ext/transport/chttp2/transport/context_list.h b/src/core/ext/transport/chttp2/transport/context_list.h
new file mode 100644
index 0000000000..d870107749
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/context_list.h
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CONTEXT_LIST_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CONTEXT_LIST_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/buffer_list.h"
+
+#include "src/core/ext/transport/chttp2/transport/internal.h"
+
+namespace grpc_core {
+/** A list of RPC Contexts */
+class ContextList {
+ public:
+  /* Creates a new element holding \a s and adds it to the front of the
+   * list. */
+ static void Append(ContextList** head, grpc_chttp2_stream* s) {
+    /* Make sure the stream is not already present in the list */
+ GRPC_CHTTP2_STREAM_REF(s, "timestamp");
+
+#ifndef NDEBUG
+ ContextList* ptr = *head;
+ while (ptr != nullptr) {
+ if (ptr->s_ == s) {
+ GPR_ASSERT(
+ false &&
+ "Trying to append a stream that is already present in the list");
+ }
+ ptr = ptr->next_;
+ }
+#endif
+
+ /* Create a new element in the list and add it at the front */
+ ContextList* elem = grpc_core::New<ContextList>();
+ elem->s_ = s;
+ elem->byte_offset_ = s->byte_counter;
+ elem->next_ = *head;
+ *head = elem;
+ }
+
+  /* Invokes the registered write-timestamps callback with each context in
+   * the list and \a ts, then frees the entire list. */
+ static void Execute(void* arg, grpc_core::Timestamps* ts, grpc_error* error);
+
+ private:
+ grpc_chttp2_stream* s_ = nullptr;
+ ContextList* next_ = nullptr;
+ size_t byte_offset_ = 0;
+};
+
+void grpc_http2_set_write_timestamps_callback(
+ void (*fn)(void*, grpc_core::Timestamps*));
+} /* namespace grpc_core */
+
+#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CONTEXT_LIST_H */
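// [Editor's note — illustrative sketch, not part of the diff] ContextList
// introduced above collects one entry per traced stream while frames are
// queued for a write, and later runs a single registered callback over every
// entry (with the write timestamps) before freeing the list. The snippet
// below mirrors that shape with simplified stand-in types; it is not the
// grpc_core::ContextList API itself.
#include <cstddef>
#include <cstdio>

struct Timestamps {
  size_t byte_offset = 0;
};

namespace {
void (*g_write_timestamps_cb)(const char* ctx, Timestamps* ts) = nullptr;
}

struct TraceList {
  const char* ctx;
  size_t byte_offset;
  TraceList* next;

  static void Append(TraceList** head, const char* ctx, size_t byte_offset) {
    TraceList* elem = new TraceList;
    elem->ctx = ctx;
    elem->byte_offset = byte_offset;
    elem->next = *head;  // pushed at the front, like ContextList::Append
    *head = elem;
  }

  // Runs the registered callback for every entry, then frees the list.
  static void Execute(TraceList* head, Timestamps* ts) {
    while (head != nullptr) {
      if (g_write_timestamps_cb != nullptr && ts != nullptr) {
        ts->byte_offset = head->byte_offset;
        g_write_timestamps_cb(head->ctx, ts);
      }
      TraceList* done = head;
      head = head->next;
      delete done;
    }
  }
};

int main() {
  g_write_timestamps_cb = [](const char* ctx, Timestamps* ts) {
    std::printf("call %s traced at offset %zu\n", ctx, ts->byte_offset);
  };
  TraceList* list = nullptr;
  TraceList::Append(&list, "rpc-a", 0);
  TraceList::Append(&list, "rpc-b", 128);
  Timestamps ts;
  TraceList::Execute(list, &ts);  // after the endpoint write completes
  return 0;
}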
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.cc b/src/core/ext/transport/chttp2/transport/frame_data.cc
index 933b32c03c..1de00735cf 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_data.cc
@@ -32,18 +32,12 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/transport.h"
-grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser) {
- parser->state = GRPC_CHTTP2_DATA_FH_0;
- parser->parsing_frame = nullptr;
- return GRPC_ERROR_NONE;
-}
-
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser) {
- if (parser->parsing_frame != nullptr) {
- GRPC_ERROR_UNREF(parser->parsing_frame->Finished(
+grpc_chttp2_data_parser::~grpc_chttp2_data_parser() {
+ if (parsing_frame != nullptr) {
+ GRPC_ERROR_UNREF(parsing_frame->Finished(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"), false));
}
- GRPC_ERROR_UNREF(parser->error);
+ GRPC_ERROR_UNREF(error);
}
grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index e5d01f764e..2c5da99fa6 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -43,20 +43,18 @@ namespace grpc_core {
class Chttp2IncomingByteStream;
} // namespace grpc_core
-typedef struct {
- grpc_chttp2_stream_state state;
- uint8_t frame_type;
- uint32_t frame_size;
- grpc_error* error;
+struct grpc_chttp2_data_parser {
+ grpc_chttp2_data_parser() = default;
+ ~grpc_chttp2_data_parser();
- bool is_frame_compressed;
- grpc_core::Chttp2IncomingByteStream* parsing_frame;
-} grpc_chttp2_data_parser;
+ grpc_chttp2_stream_state state = GRPC_CHTTP2_DATA_FH_0;
+ uint8_t frame_type = 0;
+ uint32_t frame_size = 0;
+ grpc_error* error = GRPC_ERROR_NONE;
-/* initialize per-stream state for data frame parsing */
-grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser);
-
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser);
+ bool is_frame_compressed = false;
+ grpc_core::Chttp2IncomingByteStream* parsing_frame = nullptr;
+};
/* start processing a new data frame */
grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
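// [Editor's note — illustrative sketch, not part of the diff] The data parser
// above is converted from a C typedef plus init/destroy helpers into a struct
// with default member initializers and a destructor that releases owned state,
// so a zero-argument construction is always valid. The types below are
// hypothetical and only demonstrate the pattern.
#include <cstdio>

struct Frame {
  ~Frame() { std::puts("frame finished"); }
};

struct Parser {
  Parser() = default;
  ~Parser() {
    delete parsing_frame;  // destructor owns the cleanup formerly in *_destroy()
  }

  int state = 0;  // formerly set by *_init()
  unsigned frame_size = 0;
  bool is_frame_compressed = false;
  Frame* parsing_frame = nullptr;
};

int main() {
  Parser p;                       // no init call needed
  p.parsing_frame = new Frame();  // acquire some parser-owned state
  return 0;                       // ~Parser() cleans up automatically
}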
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
index 4bdd4309a4..a0a7534594 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@@ -32,7 +32,7 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats* stats) {
static const size_t frame_size = 13;
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
- stats->framing_bytes += frame_size;
+ if (stats != nullptr) stats->framing_bytes += frame_size;
uint8_t* p = GRPC_SLICE_START_PTR(slice);
// Frame size.
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index 920d52770f..dbe9df6ae3 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -212,10 +212,6 @@ static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c,
return new_index;
}
-/* dummy function */
-static void add_nothing(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
- size_t elem_size) {}
-
// Add a key to the dynamic table. Both key and value will be added to table at
// the decoder.
static void add_key_with_index(grpc_chttp2_hpack_compressor* c,
@@ -524,17 +520,22 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
uint32_t indices_key;
/* should this elem be in the table? */
- size_t decoder_space_usage =
+ const size_t decoder_space_usage =
grpc_chttp2_get_size_in_hpack_table(elem, st->use_true_binary_metadata);
- bool should_add_elem = elem_interned &&
- decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
- c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
- c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
- void (*maybe_add)(grpc_chttp2_hpack_compressor*, grpc_mdelem, size_t) =
- should_add_elem ? add_elem : add_nothing;
- void (*emit)(grpc_chttp2_hpack_compressor*, uint32_t, grpc_mdelem,
- framer_state*) =
- should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;
+ const bool should_add_elem = elem_interned &&
+ decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
+ c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
+ c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
+
+ auto emit_maybe_add = [&should_add_elem, &elem, &st, &c, &indices_key,
+ &decoder_space_usage] {
+ if (should_add_elem) {
+ emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
+ add_elem(c, elem, decoder_space_usage);
+ } else {
+ emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
+ }
+ };
/* no hits for the elem... maybe there's a key? */
indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
@@ -542,8 +543,7 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
- emit(c, dynidx(c, indices_key), elem, st);
- maybe_add(c, elem, decoder_space_usage);
+ emit_maybe_add();
return;
}
@@ -552,20 +552,23 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
      /* HIT: key (second cuckoo hash) */
- emit(c, dynidx(c, indices_key), elem, st);
- maybe_add(c, elem, decoder_space_usage);
+ emit_maybe_add();
return;
}
/* no elem, key in the table... fall back to literal emission */
- bool should_add_key =
+ const bool should_add_key =
!elem_interned && decoder_space_usage < MAX_DECODER_SPACE_USAGE;
- emit = (should_add_elem || should_add_key) ? emit_lithdr_incidx_v
- : emit_lithdr_noidx_v;
- maybe_add =
- should_add_elem ? add_elem : (should_add_key ? add_key : add_nothing);
- emit(c, 0, elem, st);
- maybe_add(c, elem, decoder_space_usage);
+ if (should_add_elem || should_add_key) {
+ emit_lithdr_incidx_v(c, 0, elem, st);
+ } else {
+ emit_lithdr_noidx_v(c, 0, elem, st);
+ }
+ if (should_add_elem) {
+ add_elem(c, elem, decoder_space_usage);
+ } else if (should_add_key) {
+ add_key(c, elem, decoder_space_usage);
+ }
}
#define STRLEN_LIT(x) (sizeof(x) - 1)
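// [Editor's note — illustrative sketch, not part of the diff] The hpack_enc
// change above drops the (emit, maybe_add) function-pointer pair — and the
// dummy add_nothing — in favor of a single local lambda that captures the
// decision once and is invoked at each hit site. The snippet below restates
// that refactor with hypothetical helper names.
#include <cstdio>

namespace {
void emit_with_index(int idx) { std::printf("emit lithdr incidx %d\n", idx); }
void emit_no_index(int idx) { std::printf("emit lithdr noidx %d\n", idx); }
void add_to_table(int size) { std::printf("add elem, size %d\n", size); }
}  // namespace

void encode(bool should_add_elem, int index, int elem_size) {
  // One capturing lambda replaces two function pointers; plain function
  // pointers cannot see the surrounding locals.
  auto emit_maybe_add = [&] {
    if (should_add_elem) {
      emit_with_index(index);
      add_to_table(elem_size);
    } else {
      emit_no_index(index);
    }
  };
  emit_maybe_add();  // first hit site
  emit_maybe_add();  // second hit site: same decision, no duplicated branching
}

int main() {
  encode(true, 62, 48);
  encode(false, 7, 0);
  return 0;
}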
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index 4d7dfd900f..dca15e7680 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -27,18 +27,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena) {
- buffer->arena = arena;
- grpc_metadata_batch_init(&buffer->batch);
- buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
-}
-
-void grpc_chttp2_incoming_metadata_buffer_destroy(
- grpc_chttp2_incoming_metadata_buffer* buffer) {
- grpc_metadata_batch_destroy(&buffer->batch);
-}
-
grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index d029cf00d4..c551b3cc8b 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -23,17 +23,20 @@
#include "src/core/lib/transport/transport.h"
-typedef struct {
+struct grpc_chttp2_incoming_metadata_buffer {
+ grpc_chttp2_incoming_metadata_buffer(gpr_arena* arena) : arena(arena) {
+ grpc_metadata_batch_init(&batch);
+ batch.deadline = GRPC_MILLIS_INF_FUTURE;
+ }
+ ~grpc_chttp2_incoming_metadata_buffer() {
+ grpc_metadata_batch_destroy(&batch);
+ }
+
gpr_arena* arena;
grpc_metadata_batch batch;
- size_t size; // total size of metadata
-} grpc_chttp2_incoming_metadata_buffer;
-
-/** assumes everything initially zeroed */
-void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena);
-void grpc_chttp2_incoming_metadata_buffer_destroy(
- grpc_chttp2_incoming_metadata_buffer* buffer);
+ size_t size = 0; // total size of metadata
+};
+
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index ff26dd9255..341f5b3977 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -45,6 +45,10 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
+namespace grpc_core {
+class ContextList;
+}
+
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
@@ -103,8 +107,8 @@ const char* grpc_chttp2_initiate_write_reason_string(
grpc_chttp2_initiate_write_reason reason);
typedef struct {
- grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT];
- uint64_t inflight_id;
+ grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT] = {};
+ uint64_t inflight_id = 0;
} grpc_chttp2_ping_queue;
typedef struct {
@@ -232,8 +236,12 @@ class Chttp2IncomingByteStream : public ByteStream {
// alone for now. We can revisit this once we're able to link against
// libc++, at which point we can eliminate New<> and Delete<> and
// switch to std::shared_ptr<>.
- void Ref();
- void Unref();
+ void Ref() { refs_.Ref(); }
+ void Unref() {
+ if (refs_.Unref()) {
+ grpc_core::Delete(this);
+ }
+ }
void PublishError(grpc_error* error);
@@ -252,7 +260,7 @@ class Chttp2IncomingByteStream : public ByteStream {
grpc_chttp2_transport* transport_; // Immutable.
grpc_chttp2_stream* stream_; // Immutable.
- gpr_refcount refs_;
+ grpc_core::RefCount refs_;
/* Accessed only by transport thread when stream->pending_byte_stream == false
* Accessed only by application thread when stream->pending_byte_stream ==
@@ -280,34 +288,41 @@ typedef enum {
} grpc_chttp2_keepalive_state;
struct grpc_chttp2_transport {
+ grpc_chttp2_transport(const grpc_channel_args* channel_args,
+ grpc_endpoint* ep, bool is_client,
+ grpc_resource_user* resource_user);
+ ~grpc_chttp2_transport();
+
grpc_transport base; /* must be first */
- gpr_refcount refs;
+ grpc_core::RefCount refs;
grpc_endpoint* ep;
char* peer_string;
+ grpc_resource_user* resource_user;
+
grpc_combiner* combiner;
- grpc_closure* notify_on_receive_settings;
+ grpc_closure* notify_on_receive_settings = nullptr;
/** write execution state of the transport */
- grpc_chttp2_write_state write_state;
+ grpc_chttp2_write_state write_state = GRPC_CHTTP2_WRITE_STATE_IDLE;
/** is this the first write in a series of writes?
set when we initiate writing from idle, cleared when we
initiate writing from writing+more */
- bool is_first_write_in_batch;
+ bool is_first_write_in_batch = false;
/** is the transport destroying itself? */
- uint8_t destroying;
+ uint8_t destroying = false;
/** has the upper layer closed the transport? */
- grpc_error* closed_with_error;
+ grpc_error* closed_with_error = GRPC_ERROR_NONE;
/** is there a read request to the endpoint outstanding? */
- uint8_t endpoint_reading;
+ uint8_t endpoint_reading = 1;
- grpc_chttp2_optimization_target opt_target;
+ grpc_chttp2_optimization_target opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
/** various lists of streams */
- grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
+ grpc_chttp2_stream_list lists[STREAM_LIST_COUNT] = {};
/** maps stream id to grpc_chttp2_stream objects */
grpc_chttp2_stream_map stream_map;
@@ -324,7 +339,7 @@ struct grpc_chttp2_transport {
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
- grpc_chttp2_stream** accepting_stream;
+ grpc_chttp2_stream** accepting_stream = nullptr;
struct {
/* accept stream callback */
@@ -348,41 +363,43 @@ struct grpc_chttp2_transport {
/** how much data are we willing to buffer when the WRITE_BUFFER_HINT is set?
*/
- uint32_t write_buffer_size;
+ uint32_t write_buffer_size = grpc_core::chttp2::kDefaultWindow;
/** Set to a grpc_error object if a goaway frame is received. By default, set
* to GRPC_ERROR_NONE */
- grpc_error* goaway_error;
+ grpc_error* goaway_error = GRPC_ERROR_NONE;
- grpc_chttp2_sent_goaway_state sent_goaway_state;
+ grpc_chttp2_sent_goaway_state sent_goaway_state = GRPC_CHTTP2_NO_GOAWAY_SEND;
/** are the local settings dirty and need to be sent? */
- bool dirtied_local_settings;
+ bool dirtied_local_settings = true;
/** have local settings been sent? */
- bool sent_local_settings;
- /** bitmask of setting indexes to send out */
- uint32_t force_send_settings;
+ bool sent_local_settings = false;
+ /** bitmask of setting indexes to send out
+ Hack: it's common for implementations to assume 65536 bytes initial send
+ window -- this should by rights be 0 */
+ uint32_t force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
/** settings values */
uint32_t settings[GRPC_NUM_SETTING_SETS][GRPC_CHTTP2_NUM_SETTINGS];
/** what is the next stream id to be allocated by this peer?
copied to next_stream_id in parsing when parsing commences */
- uint32_t next_stream_id;
+ uint32_t next_stream_id = 0;
/** last new stream id */
- uint32_t last_new_stream_id;
+ uint32_t last_new_stream_id = 0;
/** ping queues for various ping insertion points */
- grpc_chttp2_ping_queue ping_queue;
+ grpc_chttp2_ping_queue ping_queue = grpc_chttp2_ping_queue();
grpc_chttp2_repeated_ping_policy ping_policy;
grpc_chttp2_repeated_ping_state ping_state;
- uint64_t ping_ctr; /* unique id for pings */
+ uint64_t ping_ctr = 0; /* unique id for pings */
grpc_closure retry_initiate_ping_locked;
/** ping acks */
- size_t ping_ack_count;
- size_t ping_ack_capacity;
- uint64_t* ping_acks;
+ size_t ping_ack_count = 0;
+ size_t ping_ack_capacity = 0;
+ uint64_t* ping_acks = nullptr;
grpc_chttp2_server_ping_recv_state ping_recv_state;
/** parser for headers */
@@ -408,22 +425,22 @@ struct grpc_chttp2_transport {
int64_t initial_window_update = 0;
/* deframing */
- grpc_chttp2_deframe_transport_state deframe_state;
- uint8_t incoming_frame_type;
- uint8_t incoming_frame_flags;
- uint8_t header_eof;
- bool is_first_frame;
- uint32_t expect_continuation_stream_id;
- uint32_t incoming_frame_size;
- uint32_t incoming_stream_id;
+ grpc_chttp2_deframe_transport_state deframe_state = GRPC_DTS_CLIENT_PREFIX_0;
+ uint8_t incoming_frame_type = 0;
+ uint8_t incoming_frame_flags = 0;
+ uint8_t header_eof = 0;
+ bool is_first_frame = true;
+ uint32_t expect_continuation_stream_id = 0;
+ uint32_t incoming_frame_size = 0;
+ uint32_t incoming_stream_id = 0;
/* active parser */
- void* parser_data;
- grpc_chttp2_stream* incoming_stream;
+ void* parser_data = nullptr;
+ grpc_chttp2_stream* incoming_stream = nullptr;
grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t,
grpc_chttp2_stream* s, grpc_slice slice, int is_last);
- grpc_chttp2_write_cb* write_cb_pool;
+ grpc_chttp2_write_cb* write_cb_pool = nullptr;
/* bdp estimator */
grpc_closure next_bdp_ping_timer_expired_locked;
@@ -432,23 +449,23 @@ struct grpc_chttp2_transport {
/* if non-NULL, close the transport with this error when writes are finished
*/
- grpc_error* close_transport_on_writes_finished;
+ grpc_error* close_transport_on_writes_finished = GRPC_ERROR_NONE;
/* a list of closures to run after writes are finished */
- grpc_closure_list run_after_write;
+ grpc_closure_list run_after_write = GRPC_CLOSURE_LIST_INIT;
/* buffer pool state */
/** have we scheduled a benign cleanup? */
- bool benign_reclaimer_registered;
+ bool benign_reclaimer_registered = false;
/** have we scheduled a destructive cleanup? */
- bool destructive_reclaimer_registered;
+ bool destructive_reclaimer_registered = false;
/** benign cleanup closure */
grpc_closure benign_reclaimer_locked;
/** destructive cleanup closure */
grpc_closure destructive_reclaimer_locked;
/* next bdp ping timer */
- bool have_next_bdp_ping_timer;
+ bool have_next_bdp_ping_timer = false;
grpc_timer next_bdp_ping_timer;
/* keep-alive ping support */
@@ -469,12 +486,12 @@ struct grpc_chttp2_transport {
/** grace period for a ping to complete before watchdog kicks in */
grpc_millis keepalive_timeout;
/** if keepalive pings are allowed when there's no outstanding streams */
- bool keepalive_permit_without_calls;
+ bool keepalive_permit_without_calls = false;
/** keep-alive state machine state */
grpc_chttp2_keepalive_state keepalive_state;
-
+ grpc_core::ContextList* cl = nullptr;
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> channelz_socket;
- uint32_t num_messages_in_next_write;
+ uint32_t num_messages_in_next_write = 0;
};
typedef enum {
@@ -485,6 +502,11 @@ typedef enum {
} grpc_published_metadata_method;
struct grpc_chttp2_stream {
+ grpc_chttp2_stream(grpc_chttp2_transport* t, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena);
+ ~grpc_chttp2_stream();
+
+ void* context;
grpc_chttp2_transport* t;
grpc_stream_refcount* refcount;
@@ -492,63 +514,63 @@ struct grpc_chttp2_stream {
grpc_closure* destroy_stream_arg;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
- uint8_t included[STREAM_LIST_COUNT];
+ uint8_t included[STREAM_LIST_COUNT] = {};
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
- uint32_t id;
+ uint32_t id = 0;
/** things the upper layers would like to send */
- grpc_metadata_batch* send_initial_metadata;
- grpc_closure* send_initial_metadata_finished;
- grpc_metadata_batch* send_trailing_metadata;
- grpc_closure* send_trailing_metadata_finished;
+ grpc_metadata_batch* send_initial_metadata = nullptr;
+ grpc_closure* send_initial_metadata_finished = nullptr;
+ grpc_metadata_batch* send_trailing_metadata = nullptr;
+ grpc_closure* send_trailing_metadata_finished = nullptr;
grpc_core::OrphanablePtr<grpc_core::ByteStream> fetching_send_message;
- uint32_t fetched_send_message_length;
- grpc_slice fetching_slice;
+ uint32_t fetched_send_message_length = 0;
+ grpc_slice fetching_slice = grpc_empty_slice();
int64_t next_message_end_offset;
- int64_t flow_controlled_bytes_written;
- int64_t flow_controlled_bytes_flowed;
+ int64_t flow_controlled_bytes_written = 0;
+ int64_t flow_controlled_bytes_flowed = 0;
grpc_closure complete_fetch_locked;
- grpc_closure* fetching_send_message_finished;
+ grpc_closure* fetching_send_message_finished = nullptr;
grpc_metadata_batch* recv_initial_metadata;
- grpc_closure* recv_initial_metadata_ready;
- bool* trailing_metadata_available;
+ grpc_closure* recv_initial_metadata_ready = nullptr;
+ bool* trailing_metadata_available = nullptr;
grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
- grpc_closure* recv_message_ready;
+ grpc_closure* recv_message_ready = nullptr;
grpc_metadata_batch* recv_trailing_metadata;
- grpc_closure* recv_trailing_metadata_finished;
+ grpc_closure* recv_trailing_metadata_finished = nullptr;
- grpc_transport_stream_stats* collecting_stats;
- grpc_transport_stream_stats stats;
+ grpc_transport_stream_stats* collecting_stats = nullptr;
+ grpc_transport_stream_stats stats = grpc_transport_stream_stats();
/** Is this stream closed for writing. */
- bool write_closed;
+ bool write_closed = false;
/** Is this stream reading half-closed. */
- bool read_closed;
+ bool read_closed = false;
/** Are all published incoming byte streams closed. */
- bool all_incoming_byte_streams_finished;
+ bool all_incoming_byte_streams_finished = false;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
- bool seen_error;
+ bool seen_error = false;
/** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */
- bool write_buffering;
+ bool write_buffering = false;
/** Has trailing metadata been received. */
- bool received_trailing_metadata;
+ bool received_trailing_metadata = false;
/* have we sent or received the EOS bit? */
- bool eos_received;
- bool eos_sent;
+ bool eos_received = false;
+ bool eos_sent = false;
/** the error that resulted in this stream being read-closed */
- grpc_error* read_closed_error;
+ grpc_error* read_closed_error = GRPC_ERROR_NONE;
/** the error that resulted in this stream being write-closed */
- grpc_error* write_closed_error;
+ grpc_error* write_closed_error = GRPC_ERROR_NONE;
- grpc_published_metadata_method published_metadata[2];
- bool final_metadata_requested;
+ grpc_published_metadata_method published_metadata[2] = {};
+ bool final_metadata_requested = false;
grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
@@ -558,33 +580,33 @@ struct grpc_chttp2_stream {
* Accessed only by application thread when stream->pending_byte_stream ==
* true */
grpc_slice_buffer unprocessed_incoming_frames_buffer;
- grpc_closure* on_next; /* protected by t combiner */
- bool pending_byte_stream; /* protected by t combiner */
+ grpc_closure* on_next = nullptr; /* protected by t combiner */
+ bool pending_byte_stream = false; /* protected by t combiner */
// cached length of buffer to be used by the transport thread in cases where
// stream->pending_byte_stream == true. The value is saved before
// application threads are allowed to modify
// unprocessed_incoming_frames_buffer
- size_t unprocessed_incoming_frames_buffer_cached_length;
+ size_t unprocessed_incoming_frames_buffer_cached_length = 0;
grpc_closure reset_byte_stream;
- grpc_error* byte_stream_error; /* protected by t combiner */
- bool received_last_frame; /* protected by t combiner */
+ grpc_error* byte_stream_error = GRPC_ERROR_NONE; /* protected by t combiner */
+ bool received_last_frame = false; /* protected by t combiner */
- grpc_millis deadline;
+ grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
/** saw some stream level error */
- grpc_error* forced_close_error;
+ grpc_error* forced_close_error = GRPC_ERROR_NONE;
/** how many header frames have we received? */
- uint8_t header_frames_received;
+ uint8_t header_frames_received = 0;
/** parsing state for data frames */
/* Accessed only by transport thread when stream->pending_byte_stream == false
* Accessed only by application thread when stream->pending_byte_stream ==
* true */
grpc_chttp2_data_parser data_parser;
/** number of bytes received - reset at end of parse thread execution */
- int64_t received_bytes;
+ int64_t received_bytes = 0;
- bool sent_initial_metadata;
- bool sent_trailing_metadata;
+ bool sent_initial_metadata = false;
+ bool sent_trailing_metadata = false;
grpc_core::PolymorphicManualConstructor<
grpc_core::chttp2::StreamFlowControlBase,
@@ -594,32 +616,38 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
- grpc_chttp2_write_cb* on_flow_controlled_cbs;
- grpc_chttp2_write_cb* on_write_finished_cbs;
- grpc_chttp2_write_cb* finish_after_write;
- size_t sending_bytes;
+ grpc_chttp2_write_cb* on_flow_controlled_cbs = nullptr;
+ grpc_chttp2_write_cb* on_write_finished_cbs = nullptr;
+ grpc_chttp2_write_cb* finish_after_write = nullptr;
+ size_t sending_bytes = 0;
/* Stream compression method to be used. */
- grpc_stream_compression_method stream_compression_method;
+ grpc_stream_compression_method stream_compression_method =
+ GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
/* Stream decompression method to be used. */
- grpc_stream_compression_method stream_decompression_method;
+ grpc_stream_compression_method stream_decompression_method =
+ GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
/** Stream compression decompress context */
- grpc_stream_compression_context* stream_decompression_ctx;
+ grpc_stream_compression_context* stream_decompression_ctx = nullptr;
/** Stream compression compress context */
- grpc_stream_compression_context* stream_compression_ctx;
+ grpc_stream_compression_context* stream_compression_ctx = nullptr;
/** Buffer storing data that is compressed but not sent */
grpc_slice_buffer compressed_data_buffer;
/** Amount of uncompressed bytes sent out when compressed_data_buffer is
* emptied */
- size_t uncompressed_data_size;
+ size_t uncompressed_data_size = 0;
/** Temporary buffer storing decompressed data */
grpc_slice_buffer decompressed_data_buffer;
   /** Whether bytes stored in unprocessed_incoming_byte_stream are decompressed
*/
- bool unprocessed_incoming_frames_decompressed;
+ bool unprocessed_incoming_frames_decompressed = false;
+  /** Whether the bytes need to be traced using Fathom */
+ bool traced = false;
/** gRPC header bytes that are already decompressed */
- size_t decompressed_header_bytes;
+ size_t decompressed_header_bytes = 0;
+ /** Byte counter for number of bytes written */
+ size_t byte_counter = 0;
};
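
The grpc_chttp2_stream fields above now carry default member initializers, so the constructor declared at the top of the struct can rely on every field starting in a known state instead of on a memset. A minimal standalone sketch of that C++ pattern, using stand-in field names rather than the real struct:

#include <cstdio>

// Illustrative only: default member initializers give every field a known
// starting value as soon as the object is constructed, replacing the manual
// zero-initialization this patch removes elsewhere.
struct StreamState {
  unsigned id = 0;
  bool write_closed = false;
  bool read_closed = false;
  long received_bytes = 0;
  const char* close_reason = nullptr;
};

int main() {
  StreamState s;  // no constructor body needed; the defaults already apply
  std::printf("id=%u closed=%d received=%ld\n", s.id, s.write_closed,
              s.received_bytes);
}
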
/** Transport writing call flow:
@@ -764,15 +792,29 @@ void grpc_chttp2_stream_unref(grpc_chttp2_stream* s);
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) \
grpc_chttp2_unref_transport(t, r, __FILE__, __LINE__)
-void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason,
- const char* file, int line);
-void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
- const char* file, int line);
+inline void grpc_chttp2_unref_transport(grpc_chttp2_transport* t,
+ const char* reason, const char* file,
+ int line) {
+ if (t->refs.Unref(grpc_core::DebugLocation(file, line), reason)) {
+ grpc_core::Delete(t);
+ }
+}
+inline void grpc_chttp2_ref_transport(grpc_chttp2_transport* t,
+ const char* reason, const char* file,
+ int line) {
+ t->refs.Ref(grpc_core::DebugLocation(file, line), reason);
+}
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) grpc_chttp2_unref_transport(t)
-void grpc_chttp2_unref_transport(grpc_chttp2_transport* t);
-void grpc_chttp2_ref_transport(grpc_chttp2_transport* t);
+inline void grpc_chttp2_unref_transport(grpc_chttp2_transport* t) {
+ if (t->refs.Unref()) {
+ grpc_core::Delete(t);
+ }
+}
+inline void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) {
+ t->refs.Ref();
+}
#endif
void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id);
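
The hunk above turns grpc_chttp2_ref_transport/grpc_chttp2_unref_transport into inline functions that delete the transport once the last reference is released. A rough standalone sketch of the same intrusive ref-count shape, using std::atomic instead of gRPC's RefCount type (Transport, transport_ref, and transport_unref below are illustrative names, not gRPC APIs):

#include <atomic>
#include <cstdio>

// Illustrative only: an object that carries its own reference count, with an
// inline unref that destroys it when the last reference is dropped.
struct Transport {
  std::atomic<int> refs{1};  // the creator holds the first reference
  ~Transport() { std::printf("transport destroyed\n"); }
};

inline void transport_ref(Transport* t) {
  t->refs.fetch_add(1, std::memory_order_relaxed);
}

inline void transport_unref(Transport* t) {
  // fetch_sub returns the previous value, so 1 means this was the last ref.
  if (t->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    delete t;
  }
}

int main() {
  Transport* t = new Transport();
  transport_ref(t);    // a second owner appears
  transport_unref(t);  // still alive
  transport_unref(t);  // last reference: deleted here
}
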
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index d533989444..265d3365d3 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -18,6 +18,7 @@
#include <grpc/support/port_platform.h>
+#include "src/core/ext/transport/chttp2/transport/context_list.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include <limits.h>
@@ -362,6 +363,7 @@ class DataSendContext {
grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes,
is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
s_->flow_control->SentData(send_bytes);
+ s_->byte_counter += send_bytes;
if (s_->compressed_data_buffer.length == 0) {
s_->sending_bytes += s_->uncompressed_data_size;
}
@@ -496,6 +498,9 @@ class StreamWriteContext {
data_send_context.CompressMoreBytes();
}
}
+ if (s_->traced && grpc_endpoint_can_track_err(t_->ep)) {
+ grpc_core::ContextList::Append(&t_->cl, s_);
+ }
write_context_->ResetPingClock();
if (data_send_context.is_last_frame()) {
SentLastFrame();
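
The writing path above now records a traced stream on the transport's context list (and bumps byte_counter) at the moment its frames are queued for writing. A minimal sketch of an append-to-head context list in that spirit; this is not gRPC's ContextList implementation, and TracedStream/ContextListEntry are made-up names for illustration:

#include <cstddef>
#include <cstdio>

// Illustrative only: record a traced stream's context and byte count at write
// time by pushing an entry onto a singly linked list hanging off the
// transport.
struct TracedStream {
  void* context = nullptr;
  size_t byte_counter = 0;
};

struct ContextListEntry {
  void* trace_context;
  size_t byte_offset;  // bytes the stream had written when recorded
  ContextListEntry* next;
};

void ContextListAppend(ContextListEntry** head, const TracedStream* s) {
  *head = new ContextListEntry{s->context, s->byte_counter, *head};
}

int main() {
  ContextListEntry* list = nullptr;
  TracedStream a{nullptr, 128}, b{nullptr, 512};
  ContextListAppend(&list, &a);
  ContextListAppend(&list, &b);
  for (ContextListEntry* e = list; e != nullptr;) {
    std::printf("entry recorded at %zu bytes\n", e->byte_offset);
    ContextListEntry* next = e->next;
    delete e;
    e = next;
  }
}
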
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index 81e2634e3a..349d8681d5 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -111,16 +111,21 @@ typedef struct grpc_cronet_transport grpc_cronet_transport;
/* TODO (makdharma): reorder structure for memory efficiency per
http://www.catb.org/esr/structure-packing/#_structure_reordering: */
struct read_state {
+ read_state(gpr_arena* arena)
+ : trailing_metadata(arena), initial_metadata(arena) {
+ grpc_slice_buffer_init(&read_slice_buffer);
+ }
+
/* vars to store data coming from server */
- char* read_buffer;
- bool length_field_received;
- int received_bytes;
- int remaining_bytes;
- int length_field;
- bool compressed;
- char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES];
- char* payload_field;
- bool read_stream_closed;
+ char* read_buffer = nullptr;
+ bool length_field_received = false;
+ int received_bytes = 0;
+ int remaining_bytes = 0;
+ int length_field = 0;
+  bool compressed = false;
+ char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES] = {};
+ char* payload_field = nullptr;
+  bool read_stream_closed = false;
/* vars for holding data destined for the application */
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sbs;
@@ -128,59 +133,71 @@ struct read_state {
/* vars for trailing metadata */
grpc_chttp2_incoming_metadata_buffer trailing_metadata;
- bool trailing_metadata_valid;
+ bool trailing_metadata_valid = false;
/* vars for initial metadata */
grpc_chttp2_incoming_metadata_buffer initial_metadata;
};
struct write_state {
- char* write_buffer;
+ char* write_buffer = nullptr;
};
/* track state of one stream op */
struct op_state {
- bool state_op_done[OP_NUM_OPS];
- bool state_callback_received[OP_NUM_OPS];
+ op_state(gpr_arena* arena) : rs(arena) {}
+
+ bool state_op_done[OP_NUM_OPS] = {};
+ bool state_callback_received[OP_NUM_OPS] = {};
/* A non-zero gRPC status code has been seen */
- bool fail_state;
+ bool fail_state = false;
/* Transport is discarding all buffered messages */
- bool flush_read;
- bool flush_cronet_when_ready;
- bool pending_write_for_trailer;
- bool pending_send_message;
+ bool flush_read = false;
+ bool flush_cronet_when_ready = false;
+ bool pending_write_for_trailer = false;
+ bool pending_send_message = false;
/* User requested RECV_TRAILING_METADATA */
- bool pending_recv_trailing_metadata;
+ bool pending_recv_trailing_metadata = false;
/* Cronet has not issued a callback of a bidirectional read */
- bool pending_read_from_cronet;
- grpc_error* cancel_error;
+ bool pending_read_from_cronet = false;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
/* data structure for storing data coming from server */
struct read_state rs;
/* data structure for storing data going to the server */
struct write_state ws;
};
+struct stream_obj;
+
struct op_and_state {
+ op_and_state(stream_obj* s, const grpc_transport_stream_op_batch& op);
+
grpc_transport_stream_op_batch op;
struct op_state state;
- bool done;
- struct stream_obj* s; /* Pointer back to the stream object */
- struct op_and_state* next; /* next op_and_state in the linked list */
+ bool done = false;
+ struct stream_obj* s; /* Pointer back to the stream object */
+ /* next op_and_state in the linked list */
+ struct op_and_state* next;
};
struct op_storage {
- int num_pending_ops;
- struct op_and_state* head;
+ int num_pending_ops = 0;
+ struct op_and_state* head = nullptr;
};
struct stream_obj {
+ stream_obj(grpc_transport* gt, grpc_stream* gs,
+ grpc_stream_refcount* refcount, gpr_arena* arena);
+ ~stream_obj();
+
gpr_arena* arena;
- struct op_and_state* oas;
- grpc_transport_stream_op_batch* curr_op;
+ struct op_and_state* oas = nullptr;
+ grpc_transport_stream_op_batch* curr_op = nullptr;
grpc_cronet_transport* curr_ct;
grpc_stream* curr_gs;
- bidirectional_stream* cbs;
- bidirectional_stream_header_array header_array;
+ bidirectional_stream* cbs = nullptr;
+ bidirectional_stream_header_array header_array =
+ bidirectional_stream_header_array(); // Zero-initialize the structure.
/* Stream level state. Some state will be tracked both at stream and stream_op
* level */
@@ -195,7 +212,6 @@ struct stream_obj {
/* Refcount object of the stream */
grpc_stream_refcount* refcount;
};
-typedef struct stream_obj stream_obj;
#ifndef NDEBUG
#define GRPC_CRONET_STREAM_REF(stream, reason) \
@@ -306,6 +322,10 @@ static grpc_error* make_error_with_desc(int error_code, const char* desc) {
return error;
}
+inline op_and_state::op_and_state(stream_obj* s,
+ const grpc_transport_stream_op_batch& op)
+ : op(op), state(s->arena), s(s), next(s->storage.head) {}
+
/*
Add a new stream op to op storage.
*/
@@ -314,14 +334,8 @@ static void add_to_storage(struct stream_obj* s,
struct op_storage* storage = &s->storage;
/* add new op at the beginning of the linked list. The memory is freed
in remove_from_storage */
- struct op_and_state* new_op = static_cast<struct op_and_state*>(
- gpr_malloc(sizeof(struct op_and_state)));
- memcpy(&new_op->op, op, sizeof(grpc_transport_stream_op_batch));
- memset(&new_op->state, 0, sizeof(new_op->state));
- new_op->s = s;
- new_op->done = false;
+ op_and_state* new_op = grpc_core::New<op_and_state>(s, *op);
gpr_mu_lock(&s->mu);
- new_op->next = storage->head;
storage->head = new_op;
storage->num_pending_ops++;
if (op->send_message) {
@@ -347,7 +361,7 @@ static void remove_from_storage(struct stream_obj* s,
}
if (s->storage.head == oas) {
s->storage.head = oas->next;
- gpr_free(oas);
+ grpc_core::Delete(oas);
s->storage.num_pending_ops--;
CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
@@ -358,7 +372,7 @@ static void remove_from_storage(struct stream_obj* s,
s->storage.num_pending_ops--;
CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
- gpr_free(oas);
+ grpc_core::Delete(oas);
break;
} else if (GPR_UNLIKELY(curr->next == nullptr)) {
CRONET_LOG(GPR_ERROR, "Reached end of LL and did not find op to free");
@@ -540,10 +554,6 @@ static void on_response_headers_received(
}
gpr_mu_lock(&s->mu);
- memset(&s->state.rs.initial_metadata, 0,
- sizeof(s->state.rs.initial_metadata));
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
- s->arena);
convert_cronet_array_to_metadata(headers, &s->state.rs.initial_metadata);
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -634,11 +644,7 @@ static void on_response_trailers_received(
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
grpc_cronet_transport* t = s->curr_ct;
gpr_mu_lock(&s->mu);
- memset(&s->state.rs.trailing_metadata, 0,
- sizeof(s->state.rs.trailing_metadata));
s->state.rs.trailing_metadata_valid = false;
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata,
- s->arena);
convert_cronet_array_to_metadata(trailers, &s->state.rs.trailing_metadata);
if (trailers->count > 0) {
s->state.rs.trailing_metadata_valid = true;
@@ -1354,36 +1360,28 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
Functions used by upper layers to access transport functionality.
*/
+inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
+ grpc_stream_refcount* refcount, gpr_arena* arena)
+ : arena(arena),
+ curr_ct(reinterpret_cast<grpc_cronet_transport*>(gt)),
+ curr_gs(gs),
+ state(arena),
+ refcount(refcount) {
+ GRPC_CRONET_STREAM_REF(this, "cronet transport");
+ gpr_mu_init(&mu);
+}
+
+inline stream_obj::~stream_obj() {
+ null_and_maybe_free_read_buffer(this);
+ /* Clean up read_slice_buffer in case there is unread data. */
+ grpc_slice_buffer_destroy_internal(&state.rs.read_slice_buffer);
+ GRPC_ERROR_UNREF(state.cancel_error);
+}
+
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
- stream_obj* s = reinterpret_cast<stream_obj*>(gs);
-
- s->refcount = refcount;
- GRPC_CRONET_STREAM_REF(s, "cronet transport");
- memset(&s->storage, 0, sizeof(s->storage));
- s->storage.head = nullptr;
- memset(&s->state, 0, sizeof(s->state));
- s->curr_op = nullptr;
- s->cbs = nullptr;
- memset(&s->header_array, 0, sizeof(s->header_array));
- memset(&s->state.rs, 0, sizeof(s->state.rs));
- memset(&s->state.ws, 0, sizeof(s->state.ws));
- memset(s->state.state_op_done, 0, sizeof(s->state.state_op_done));
- memset(s->state.state_callback_received, 0,
- sizeof(s->state.state_callback_received));
- s->state.fail_state = s->state.flush_read = false;
- s->state.cancel_error = nullptr;
- s->state.flush_cronet_when_ready = s->state.pending_write_for_trailer = false;
- s->state.pending_send_message = false;
- s->state.pending_recv_trailing_metadata = false;
- s->state.pending_read_from_cronet = false;
-
- s->curr_gs = gs;
- s->curr_ct = reinterpret_cast<grpc_cronet_transport*>(gt);
- s->arena = arena;
-
- gpr_mu_init(&s->mu);
+ new (gs) stream_obj(gt, gs, refcount, arena);
return 0;
}
@@ -1426,10 +1424,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
stream_obj* s = reinterpret_cast<stream_obj*>(gs);
- null_and_maybe_free_read_buffer(s);
- /* Clean up read_slice_buffer in case there is unread data. */
- grpc_slice_buffer_destroy_internal(&s->state.rs.read_slice_buffer);
- GRPC_ERROR_UNREF(s->state.cancel_error);
+ s->~stream_obj();
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}
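
init_stream and destroy_stream above now construct the stream with placement new into the caller-provided grpc_stream storage and tear it down with an explicit destructor call, instead of memset-based initialization. A standalone sketch of that lifetime pattern, with Stream as a stand-in type:

#include <cstdio>
#include <new>

// Illustrative only: construct an object in caller-provided storage with
// placement new, then tear it down with an explicit destructor call, which is
// the shape init_stream/destroy_stream now follow.
struct Stream {
  int id;
  bool closed = false;  // default member initializers replace the old memsets
  explicit Stream(int id) : id(id) { std::printf("init stream %d\n", id); }
  ~Stream() { std::printf("destroy stream %d\n", id); }
};

int main() {
  alignas(Stream) unsigned char storage[sizeof(Stream)];  // e.g. arena memory
  Stream* s = new (storage) Stream(42);  // init: placement new, no allocation
  s->~Stream();  // destroy: explicit destructor; the owner frees the storage
}
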
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 9dbd095843..0b9bf5dd11 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -40,18 +40,68 @@
if (grpc_inproc_trace.enabled()) gpr_log(__VA_ARGS__); \
} while (0)
-static grpc_slice g_empty_slice;
-static grpc_slice g_fake_path_key;
-static grpc_slice g_fake_path_value;
-static grpc_slice g_fake_auth_key;
-static grpc_slice g_fake_auth_value;
+namespace {
+grpc_slice g_empty_slice;
+grpc_slice g_fake_path_key;
+grpc_slice g_fake_path_value;
+grpc_slice g_fake_auth_key;
+grpc_slice g_fake_auth_value;
+
+struct inproc_stream;
+bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
+void op_state_machine(void* arg, grpc_error* error);
+void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
+ bool is_initial);
+grpc_error* fill_in_metadata(inproc_stream* s,
+ const grpc_metadata_batch* metadata,
+ uint32_t flags, grpc_metadata_batch* out_md,
+ uint32_t* outflags, bool* markfilled);
+
+struct shared_mu {
+ shared_mu() {
+ // Share one lock between both sides since both sides get affected
+ gpr_mu_init(&mu);
+ gpr_ref_init(&refs, 2);
+ }
-typedef struct {
gpr_mu mu;
gpr_refcount refs;
-} shared_mu;
+};
+
+struct inproc_transport {
+ inproc_transport(const grpc_transport_vtable* vtable, shared_mu* mu,
+ bool is_client)
+ : mu(mu), is_client(is_client) {
+ base.vtable = vtable;
+ // Start each side of transport with 2 refs since they each have a ref
+ // to the other
+ gpr_ref_init(&refs, 2);
+ grpc_connectivity_state_init(&connectivity, GRPC_CHANNEL_READY,
+ is_client ? "inproc_client" : "inproc_server");
+ }
+
+ ~inproc_transport() {
+ grpc_connectivity_state_destroy(&connectivity);
+ if (gpr_unref(&mu->refs)) {
+ gpr_free(mu);
+ }
+ }
+
+ void ref() {
+ INPROC_LOG(GPR_INFO, "ref_transport %p", this);
+ gpr_ref(&refs);
+ }
+
+ void unref() {
+ INPROC_LOG(GPR_INFO, "unref_transport %p", this);
+ if (!gpr_unref(&refs)) {
+ return;
+ }
+ INPROC_LOG(GPR_INFO, "really_destroy_transport %p", this);
+ this->~inproc_transport();
+ gpr_free(this);
+ }
-typedef struct inproc_transport {
grpc_transport base;
shared_mu* mu;
gpr_refcount refs;
@@ -60,128 +110,174 @@ typedef struct inproc_transport {
void (*accept_stream_cb)(void* user_data, grpc_transport* transport,
const void* server_data);
void* accept_stream_data;
- bool is_closed;
+ bool is_closed = false;
struct inproc_transport* other_side;
- struct inproc_stream* stream_list;
-} inproc_transport;
+ struct inproc_stream* stream_list = nullptr;
+};
+
+struct inproc_stream {
+ inproc_stream(inproc_transport* t, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena)
+ : t(t), refs(refcount), arena(arena) {
+ // Ref this stream right now for ctor and list.
+ ref("inproc_init_stream:init");
+ ref("inproc_init_stream:list");
+
+ grpc_metadata_batch_init(&to_read_initial_md);
+ grpc_metadata_batch_init(&to_read_trailing_md);
+ GRPC_CLOSURE_INIT(&op_closure, op_state_machine, this,
+ grpc_schedule_on_exec_ctx);
+ grpc_metadata_batch_init(&write_buffer_initial_md);
+ grpc_metadata_batch_init(&write_buffer_trailing_md);
+
+ stream_list_prev = nullptr;
+ gpr_mu_lock(&t->mu->mu);
+ stream_list_next = t->stream_list;
+ if (t->stream_list) {
+ t->stream_list->stream_list_prev = this;
+ }
+ t->stream_list = this;
+ gpr_mu_unlock(&t->mu->mu);
+
+ if (!server_data) {
+ t->ref();
+ inproc_transport* st = t->other_side;
+ st->ref();
+ other_side = nullptr; // will get filled in soon
+ // Pass the client-side stream address to the server-side for a ref
+ ref("inproc_init_stream:clt"); // ref it now on behalf of server
+ // side to avoid destruction
+ INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p",
+ st->accept_stream_cb, st->accept_stream_data);
+ (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)this);
+ } else {
+ // This is the server-side and is being called through accept_stream_cb
+ inproc_stream* cs = (inproc_stream*)server_data;
+ other_side = cs;
+ // Ref the server-side stream on behalf of the client now
+ ref("inproc_init_stream:srv");
+
+ // Now we are about to affect the other side, so lock the transport
+ // to make sure that it doesn't get destroyed
+ gpr_mu_lock(&t->mu->mu);
+ cs->other_side = this;
+ // Now transfer from the other side's write_buffer if any to the to_read
+ // buffer
+ if (cs->write_buffer_initial_md_filled) {
+ fill_in_metadata(this, &cs->write_buffer_initial_md,
+ cs->write_buffer_initial_md_flags, &to_read_initial_md,
+ &to_read_initial_md_flags, &to_read_initial_md_filled);
+ deadline = GPR_MIN(deadline, cs->write_buffer_deadline);
+ grpc_metadata_batch_clear(&cs->write_buffer_initial_md);
+ cs->write_buffer_initial_md_filled = false;
+ }
+ if (cs->write_buffer_trailing_md_filled) {
+ fill_in_metadata(this, &cs->write_buffer_trailing_md, 0,
+ &to_read_trailing_md, nullptr,
+ &to_read_trailing_md_filled);
+ grpc_metadata_batch_clear(&cs->write_buffer_trailing_md);
+ cs->write_buffer_trailing_md_filled = false;
+ }
+ if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
+ cancel_other_error = cs->write_buffer_cancel_error;
+ cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
+ }
+
+ gpr_mu_unlock(&t->mu->mu);
+ }
+ }
+
+ ~inproc_stream() {
+ GRPC_ERROR_UNREF(write_buffer_cancel_error);
+ GRPC_ERROR_UNREF(cancel_self_error);
+ GRPC_ERROR_UNREF(cancel_other_error);
+
+ if (recv_inited) {
+ grpc_slice_buffer_destroy_internal(&recv_message);
+ }
+
+ t->unref();
+
+ if (closure_at_destroy) {
+ GRPC_CLOSURE_SCHED(closure_at_destroy, GRPC_ERROR_NONE);
+ }
+ }
+
+#ifndef NDEBUG
+#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason)
+#else
+#define STREAM_REF(refs, reason) grpc_stream_ref(refs)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
+#endif
+ void ref(const char* reason) {
+ INPROC_LOG(GPR_INFO, "ref_stream %p %s", this, reason);
+ STREAM_REF(refs, reason);
+ }
+
+ void unref(const char* reason) {
+ INPROC_LOG(GPR_INFO, "unref_stream %p %s", this, reason);
+ STREAM_UNREF(refs, reason);
+ }
+#undef STREAM_REF
+#undef STREAM_UNREF
-typedef struct inproc_stream {
inproc_transport* t;
grpc_metadata_batch to_read_initial_md;
- uint32_t to_read_initial_md_flags;
- bool to_read_initial_md_filled;
+ uint32_t to_read_initial_md_flags = 0;
+ bool to_read_initial_md_filled = false;
grpc_metadata_batch to_read_trailing_md;
- bool to_read_trailing_md_filled;
- bool ops_needed;
- bool op_closure_scheduled;
+ bool to_read_trailing_md_filled = false;
+ bool ops_needed = false;
+ bool op_closure_scheduled = false;
grpc_closure op_closure;
// Write buffer used only during gap at init time when client-side
// stream is set up but server side stream is not yet set up
grpc_metadata_batch write_buffer_initial_md;
- bool write_buffer_initial_md_filled;
- uint32_t write_buffer_initial_md_flags;
- grpc_millis write_buffer_deadline;
+ bool write_buffer_initial_md_filled = false;
+ uint32_t write_buffer_initial_md_flags = 0;
+ grpc_millis write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
grpc_metadata_batch write_buffer_trailing_md;
- bool write_buffer_trailing_md_filled;
- grpc_error* write_buffer_cancel_error;
+ bool write_buffer_trailing_md_filled = false;
+ grpc_error* write_buffer_cancel_error = GRPC_ERROR_NONE;
struct inproc_stream* other_side;
- bool other_side_closed; // won't talk anymore
- bool write_buffer_other_side_closed; // on hold
+ bool other_side_closed = false; // won't talk anymore
+ bool write_buffer_other_side_closed = false; // on hold
grpc_stream_refcount* refs;
- grpc_closure* closure_at_destroy;
+ grpc_closure* closure_at_destroy = nullptr;
gpr_arena* arena;
- grpc_transport_stream_op_batch* send_message_op;
- grpc_transport_stream_op_batch* send_trailing_md_op;
- grpc_transport_stream_op_batch* recv_initial_md_op;
- grpc_transport_stream_op_batch* recv_message_op;
- grpc_transport_stream_op_batch* recv_trailing_md_op;
+ grpc_transport_stream_op_batch* send_message_op = nullptr;
+ grpc_transport_stream_op_batch* send_trailing_md_op = nullptr;
+ grpc_transport_stream_op_batch* recv_initial_md_op = nullptr;
+ grpc_transport_stream_op_batch* recv_message_op = nullptr;
+ grpc_transport_stream_op_batch* recv_trailing_md_op = nullptr;
grpc_slice_buffer recv_message;
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> recv_stream;
- bool recv_inited;
+ bool recv_inited = false;
- bool initial_md_sent;
- bool trailing_md_sent;
- bool initial_md_recvd;
- bool trailing_md_recvd;
+ bool initial_md_sent = false;
+ bool trailing_md_sent = false;
+ bool initial_md_recvd = false;
+ bool trailing_md_recvd = false;
- bool closed;
+ bool closed = false;
- grpc_error* cancel_self_error;
- grpc_error* cancel_other_error;
+ grpc_error* cancel_self_error = GRPC_ERROR_NONE;
+ grpc_error* cancel_other_error = GRPC_ERROR_NONE;
- grpc_millis deadline;
+ grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
- bool listed;
+ bool listed = true;
struct inproc_stream* stream_list_prev;
struct inproc_stream* stream_list_next;
-} inproc_stream;
-
-static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
-static void op_state_machine(void* arg, grpc_error* error);
-
-static void ref_transport(inproc_transport* t) {
- INPROC_LOG(GPR_INFO, "ref_transport %p", t);
- gpr_ref(&t->refs);
-}
-
-static void really_destroy_transport(inproc_transport* t) {
- INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
- grpc_connectivity_state_destroy(&t->connectivity);
- if (gpr_unref(&t->mu->refs)) {
- gpr_free(t->mu);
- }
- gpr_free(t);
-}
-
-static void unref_transport(inproc_transport* t) {
- INPROC_LOG(GPR_INFO, "unref_transport %p", t);
- if (gpr_unref(&t->refs)) {
- really_destroy_transport(t);
- }
-}
-
-#ifndef NDEBUG
-#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
-#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason)
-#else
-#define STREAM_REF(refs, reason) grpc_stream_ref(refs)
-#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
-#endif
-
-static void ref_stream(inproc_stream* s, const char* reason) {
- INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
- STREAM_REF(s->refs, reason);
-}
-
-static void unref_stream(inproc_stream* s, const char* reason) {
- INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
- STREAM_UNREF(s->refs, reason);
-}
-
-static void really_destroy_stream(inproc_stream* s) {
- INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
+};
- GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
- GRPC_ERROR_UNREF(s->cancel_self_error);
- GRPC_ERROR_UNREF(s->cancel_other_error);
-
- if (s->recv_inited) {
- grpc_slice_buffer_destroy_internal(&s->recv_message);
- }
-
- unref_transport(s->t);
-
- if (s->closure_at_destroy) {
- GRPC_CLOSURE_SCHED(s->closure_at_destroy, GRPC_ERROR_NONE);
- }
-}
-
-static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
- bool is_initial) {
+void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
+ bool is_initial) {
for (grpc_linked_mdelem* md = md_batch->list.head; md != nullptr;
md = md->next) {
char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
@@ -193,10 +289,10 @@ static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
}
}
-static grpc_error* fill_in_metadata(inproc_stream* s,
- const grpc_metadata_batch* metadata,
- uint32_t flags, grpc_metadata_batch* out_md,
- uint32_t* outflags, bool* markfilled) {
+grpc_error* fill_in_metadata(inproc_stream* s,
+ const grpc_metadata_batch* metadata,
+ uint32_t flags, grpc_metadata_batch* out_md,
+ uint32_t* outflags, bool* markfilled) {
if (grpc_inproc_trace.enabled()) {
log_metadata(metadata, s->t->is_client, outflags != nullptr);
}
@@ -221,109 +317,16 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
return error;
}
-static int init_stream(grpc_transport* gt, grpc_stream* gs,
- grpc_stream_refcount* refcount, const void* server_data,
- gpr_arena* arena) {
+int init_stream(grpc_transport* gt, grpc_stream* gs,
+ grpc_stream_refcount* refcount, const void* server_data,
+ gpr_arena* arena) {
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
- inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
- s->arena = arena;
-
- s->refs = refcount;
- // Ref this stream right now
- ref_stream(s, "inproc_init_stream:init");
-
- grpc_metadata_batch_init(&s->to_read_initial_md);
- s->to_read_initial_md_flags = 0;
- s->to_read_initial_md_filled = false;
- grpc_metadata_batch_init(&s->to_read_trailing_md);
- s->to_read_trailing_md_filled = false;
- grpc_metadata_batch_init(&s->write_buffer_initial_md);
- s->write_buffer_initial_md_flags = 0;
- s->write_buffer_initial_md_filled = false;
- grpc_metadata_batch_init(&s->write_buffer_trailing_md);
- s->write_buffer_trailing_md_filled = false;
- s->ops_needed = false;
- s->op_closure_scheduled = false;
- GRPC_CLOSURE_INIT(&s->op_closure, op_state_machine, s,
- grpc_schedule_on_exec_ctx);
- s->t = t;
- s->closure_at_destroy = nullptr;
- s->other_side_closed = false;
-
- s->initial_md_sent = s->trailing_md_sent = s->initial_md_recvd =
- s->trailing_md_recvd = false;
-
- s->closed = false;
-
- s->cancel_self_error = GRPC_ERROR_NONE;
- s->cancel_other_error = GRPC_ERROR_NONE;
- s->write_buffer_cancel_error = GRPC_ERROR_NONE;
- s->deadline = GRPC_MILLIS_INF_FUTURE;
- s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
-
- s->stream_list_prev = nullptr;
- gpr_mu_lock(&t->mu->mu);
- s->listed = true;
- ref_stream(s, "inproc_init_stream:list");
- s->stream_list_next = t->stream_list;
- if (t->stream_list) {
- t->stream_list->stream_list_prev = s;
- }
- t->stream_list = s;
- gpr_mu_unlock(&t->mu->mu);
-
- if (!server_data) {
- ref_transport(t);
- inproc_transport* st = t->other_side;
- ref_transport(st);
- s->other_side = nullptr; // will get filled in soon
- // Pass the client-side stream address to the server-side for a ref
- ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server
- // side to avoid destruction
- INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
- st->accept_stream_data);
- (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
- } else {
- // This is the server-side and is being called through accept_stream_cb
- inproc_stream* cs = (inproc_stream*)server_data;
- s->other_side = cs;
- // Ref the server-side stream on behalf of the client now
- ref_stream(s, "inproc_init_stream:srv");
-
- // Now we are about to affect the other side, so lock the transport
- // to make sure that it doesn't get destroyed
- gpr_mu_lock(&s->t->mu->mu);
- cs->other_side = s;
- // Now transfer from the other side's write_buffer if any to the to_read
- // buffer
- if (cs->write_buffer_initial_md_filled) {
- fill_in_metadata(s, &cs->write_buffer_initial_md,
- cs->write_buffer_initial_md_flags,
- &s->to_read_initial_md, &s->to_read_initial_md_flags,
- &s->to_read_initial_md_filled);
- s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
- grpc_metadata_batch_clear(&cs->write_buffer_initial_md);
- cs->write_buffer_initial_md_filled = false;
- }
- if (cs->write_buffer_trailing_md_filled) {
- fill_in_metadata(s, &cs->write_buffer_trailing_md, 0,
- &s->to_read_trailing_md, nullptr,
- &s->to_read_trailing_md_filled);
- grpc_metadata_batch_clear(&cs->write_buffer_trailing_md);
- cs->write_buffer_trailing_md_filled = false;
- }
- if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
- s->cancel_other_error = cs->write_buffer_cancel_error;
- cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
- }
-
- gpr_mu_unlock(&s->t->mu->mu);
- }
+ new (gs) inproc_stream(t, refcount, server_data, arena);
return 0; // return value is not important
}
-static void close_stream_locked(inproc_stream* s) {
+void close_stream_locked(inproc_stream* s) {
if (!s->closed) {
// Release the metadata that we would have written out
grpc_metadata_batch_destroy(&s->write_buffer_initial_md);
@@ -341,21 +344,21 @@ static void close_stream_locked(inproc_stream* s) {
n->stream_list_prev = p;
}
s->listed = false;
- unref_stream(s, "close_stream:list");
+ s->unref("close_stream:list");
}
s->closed = true;
- unref_stream(s, "close_stream:closing");
+ s->unref("close_stream:closing");
}
}
// This function means that we are done talking/listening to the other side
-static void close_other_side_locked(inproc_stream* s, const char* reason) {
+void close_other_side_locked(inproc_stream* s, const char* reason) {
if (s->other_side != nullptr) {
// First release the metadata that came from the other side's arena
grpc_metadata_batch_destroy(&s->to_read_initial_md);
grpc_metadata_batch_destroy(&s->to_read_trailing_md);
- unref_stream(s->other_side, reason);
+ s->other_side->unref(reason);
s->other_side_closed = true;
s->other_side = nullptr;
} else if (!s->other_side_closed) {
@@ -367,9 +370,9 @@ static void close_other_side_locked(inproc_stream* s, const char* reason) {
// this stream_op_batch is only one of the pending operations for this
// stream. This is called when one of the pending operations for the stream
// is done and about to be NULLed out
-static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
- grpc_transport_stream_op_batch* op,
- const char* msg) {
+void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
+ grpc_transport_stream_op_batch* op,
+ const char* msg) {
int is_sm = static_cast<int>(op == s->send_message_op);
int is_stm = static_cast<int>(op == s->send_trailing_md_op);
// TODO(vjpai): We should not consider the recv ops here, since they
@@ -386,8 +389,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
}
}
-static void maybe_schedule_op_closure_locked(inproc_stream* s,
- grpc_error* error) {
+void maybe_schedule_op_closure_locked(inproc_stream* s, grpc_error* error) {
if (s && s->ops_needed && !s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_REF(error));
s->op_closure_scheduled = true;
@@ -395,7 +397,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
}
}
-static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
+void fail_helper_locked(inproc_stream* s, grpc_error* error) {
INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
@@ -525,8 +527,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
// that the incoming byte stream's next() call will always return
// synchronously. That assumption is true today but may not always be
// true in the future.
-static void message_transfer_locked(inproc_stream* sender,
- inproc_stream* receiver) {
+void message_transfer_locked(inproc_stream* sender, inproc_stream* receiver) {
size_t remaining =
sender->send_message_op->payload->send_message.send_message->length();
if (receiver->recv_inited) {
@@ -572,7 +573,7 @@ static void message_transfer_locked(inproc_stream* sender,
sender->send_message_op = nullptr;
}
-static void op_state_machine(void* arg, grpc_error* error) {
+void op_state_machine(void* arg, grpc_error* error) {
// This function gets called when we have contents in the unprocessed reads
// Get what we want based on our ops wanted
// Schedule our appropriate closures
@@ -607,10 +608,8 @@ static void op_state_machine(void* arg, grpc_error* error) {
if (other->recv_message_op) {
message_transfer_locked(s, other);
maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
- } else if (!s->t->is_client &&
- (s->trailing_md_sent || other->recv_trailing_md_op)) {
- // A server send will never be matched if the client is waiting
- // for trailing metadata already
+ } else if (!s->t->is_client && s->trailing_md_sent) {
+ // A server send will never be matched if the server already sent status
s->send_message_op->payload->send_message.send_message.reset();
complete_if_batch_end_locked(
s, GRPC_ERROR_NONE, s->send_message_op,
@@ -621,11 +620,15 @@ static void op_state_machine(void* arg, grpc_error* error) {
// Pause a send trailing metadata if there is still an outstanding
// send message unless we know that the send message will never get
// matched to a receive. This happens on the client if the server has
- // already sent status.
+ // already sent status or on the server if the client has requested
+ // status
if (s->send_trailing_md_op &&
(!s->send_message_op ||
(s->t->is_client &&
- (s->trailing_md_recvd || s->to_read_trailing_md_filled)))) {
+ (s->trailing_md_recvd || s->to_read_trailing_md_filled)) ||
+ (!s->t->is_client && other &&
+ (other->trailing_md_recvd || other->to_read_trailing_md_filled ||
+ other->recv_trailing_md_op)))) {
grpc_metadata_batch* dest = (other == nullptr)
? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
@@ -723,16 +726,6 @@ static void op_state_machine(void* arg, grpc_error* error) {
maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
}
}
- if (s->recv_trailing_md_op && s->t->is_client && other &&
- other->send_message_op) {
- INPROC_LOG(GPR_INFO,
- "op_state_machine %p scheduling trailing-metadata-ready %p", s,
- GRPC_ERROR_NONE);
- GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->payload->recv_trailing_metadata
- .recv_trailing_metadata_ready,
- GRPC_ERROR_NONE);
- maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
- }
if (s->to_read_trailing_md_filled) {
if (s->trailing_md_recvd) {
new_err =
@@ -748,6 +741,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
if (s->recv_message_op != nullptr) {
// This message needs to be wrapped up because it will never be
// satisfied
+ *s->recv_message_op->payload->recv_message.recv_message = nullptr;
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
@@ -810,6 +804,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// No further message will come on this stream, so finish off the
// recv_message_op
INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
+ *s->recv_message_op->payload->recv_message.recv_message = nullptr;
GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@@ -847,7 +842,7 @@ done:
GRPC_ERROR_UNREF(new_err);
}
-static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
+bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
bool ret = false; // was the cancel accepted
INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
if (s->cancel_self_error == GRPC_ERROR_NONE) {
@@ -900,10 +895,10 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
return ret;
}
-static void do_nothing(void* arg, grpc_error* error) {}
+void do_nothing(void* arg, grpc_error* error) {}
-static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
- grpc_transport_stream_op_batch* op) {
+void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
+ grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
@@ -1012,18 +1007,18 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
// We want to initiate the closure if:
- // 1. We want to send a message and the other side wants to receive or end
+ // 1. We want to send a message and the other side wants to receive
// 2. We want to send trailing metadata and there isn't an unmatched send
+ // or the other side wants trailing metadata
// 3. We want initial metadata and the other side has sent it
// 4. We want to receive a message and there is a message ready
// 5. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the receive message as well
- if ((op->send_message && other &&
- ((other->recv_message_op != nullptr) ||
- (other->recv_trailing_md_op != nullptr))) ||
- (op->send_trailing_metadata && !op->send_message) ||
+ if ((op->send_message && other && other->recv_message_op != nullptr) ||
+ (op->send_trailing_metadata &&
+ (!s->send_message_op || (other && other->recv_trailing_md_op))) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||
- (op->recv_message && other && (other->send_message_op != nullptr)) ||
+ (op->recv_message && other && other->send_message_op != nullptr) ||
(s->to_read_trailing_md_filled || s->trailing_md_recvd)) {
if (!s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_NONE);
@@ -1083,7 +1078,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
GRPC_ERROR_UNREF(error);
}
-static void close_transport_locked(inproc_transport* t) {
+void close_transport_locked(inproc_transport* t) {
INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
grpc_connectivity_state_set(
&t->connectivity, GRPC_CHANNEL_SHUTDOWN,
@@ -1103,7 +1098,7 @@ static void close_transport_locked(inproc_transport* t) {
}
}
-static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
+void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu);
@@ -1136,39 +1131,64 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
gpr_mu_unlock(&t->mu->mu);
}
-static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
- grpc_closure* then_schedule_closure) {
+void destroy_stream(grpc_transport* gt, grpc_stream* gs,
+ grpc_closure* then_schedule_closure) {
INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->closure_at_destroy = then_schedule_closure;
- really_destroy_stream(s);
+ s->~inproc_stream();
}
-static void destroy_transport(grpc_transport* gt) {
+void destroy_transport(grpc_transport* gt) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu);
close_transport_locked(t);
gpr_mu_unlock(&t->mu->mu);
- unref_transport(t->other_side);
- unref_transport(t);
+ t->other_side->unref();
+ t->unref();
}
/*******************************************************************************
* INTEGRATION GLUE
*/
-static void set_pollset(grpc_transport* gt, grpc_stream* gs,
- grpc_pollset* pollset) {
+void set_pollset(grpc_transport* gt, grpc_stream* gs, grpc_pollset* pollset) {
// Nothing to do here
}
-static void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
- grpc_pollset_set* pollset_set) {
+void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
+ grpc_pollset_set* pollset_set) {
// Nothing to do here
}
-static grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
+grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
+
+const grpc_transport_vtable inproc_vtable = {
+ sizeof(inproc_stream), "inproc", init_stream,
+ set_pollset, set_pollset_set, perform_stream_op,
+ perform_transport_op, destroy_stream, destroy_transport,
+ get_endpoint};
+
+/*******************************************************************************
+ * Main inproc transport functions
+ */
+void inproc_transports_create(grpc_transport** server_transport,
+ const grpc_channel_args* server_args,
+ grpc_transport** client_transport,
+ const grpc_channel_args* client_args) {
+ INPROC_LOG(GPR_INFO, "inproc_transports_create");
+ shared_mu* mu = new (gpr_malloc(sizeof(*mu))) shared_mu();
+ inproc_transport* st = new (gpr_malloc(sizeof(*st)))
+ inproc_transport(&inproc_vtable, mu, /*is_client=*/false);
+ inproc_transport* ct = new (gpr_malloc(sizeof(*ct)))
+ inproc_transport(&inproc_vtable, mu, /*is_client=*/true);
+ st->other_side = ct;
+ ct->other_side = st;
+ *server_transport = reinterpret_cast<grpc_transport*>(st);
+ *client_transport = reinterpret_cast<grpc_transport*>(ct);
+}
+} // namespace
/*******************************************************************************
* GLOBAL INIT AND DESTROY
@@ -1190,48 +1210,6 @@ void grpc_inproc_transport_init(void) {
g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
}
-static const grpc_transport_vtable inproc_vtable = {
- sizeof(inproc_stream), "inproc", init_stream,
- set_pollset, set_pollset_set, perform_stream_op,
- perform_transport_op, destroy_stream, destroy_transport,
- get_endpoint};
-
-/*******************************************************************************
- * Main inproc transport functions
- */
-static void inproc_transports_create(grpc_transport** server_transport,
- const grpc_channel_args* server_args,
- grpc_transport** client_transport,
- const grpc_channel_args* client_args) {
- INPROC_LOG(GPR_INFO, "inproc_transports_create");
- inproc_transport* st =
- static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
- inproc_transport* ct =
- static_cast<inproc_transport*>(gpr_zalloc(sizeof(*ct)));
- // Share one lock between both sides since both sides get affected
- st->mu = ct->mu = static_cast<shared_mu*>(gpr_malloc(sizeof(*st->mu)));
- gpr_mu_init(&st->mu->mu);
- gpr_ref_init(&st->mu->refs, 2);
- st->base.vtable = &inproc_vtable;
- ct->base.vtable = &inproc_vtable;
- // Start each side of transport with 2 refs since they each have a ref
- // to the other
- gpr_ref_init(&st->refs, 2);
- gpr_ref_init(&ct->refs, 2);
- st->is_client = false;
- ct->is_client = true;
- grpc_connectivity_state_init(&st->connectivity, GRPC_CHANNEL_READY,
- "inproc_server");
- grpc_connectivity_state_init(&ct->connectivity, GRPC_CHANNEL_READY,
- "inproc_client");
- st->other_side = ct;
- ct->other_side = st;
- st->stream_list = nullptr;
- ct->stream_list = nullptr;
- *server_transport = reinterpret_cast<grpc_transport*>(st);
- *client_transport = reinterpret_cast<grpc_transport*>(ct);
-}
-
grpc_channel* grpc_inproc_channel_create(grpc_server* server,
grpc_channel_args* args,
void* reserved) {
@@ -1258,7 +1236,7 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
// TODO(ncteisen): design and support channelz GetSocket for inproc.
grpc_server_setup_transport(server, server_transport, nullptr, server_args,
- 0);
+ nullptr);
grpc_channel* channel = grpc_channel_create(
"inproc", client_args, GRPC_CLIENT_DIRECT_CHANNEL, client_transport);
diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc
index 056fcd93de..df956c7176 100644
--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -157,7 +157,6 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
size_t count = channel_stack->count;
grpc_call_element* call_elems;
char* user_data;
- size_t i;
elem_args->call_stack->count = count;
GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
@@ -168,10 +167,14 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;
- for (i = 0; i < count; i++) {
+ for (size_t i = 0; i < count; i++) {
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
+ user_data +=
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
+ }
+ for (size_t i = 0; i < count; i++) {
grpc_error* error =
call_elems[i].filter->init_call_elem(&call_elems[i], elem_args);
if (error != GRPC_ERROR_NONE) {
@@ -181,8 +184,6 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
GRPC_ERROR_UNREF(error);
}
}
- user_data +=
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
return first_error;
}
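
The rewritten grpc_call_stack_init above splits element setup into two passes: the first walks user_data handing out aligned call_data slices, and only then does the second pass run each filter's init_call_elem, so every element sees the completed layout. A minimal sketch of the same two-pass layout idea over a single buffer (RoundUp/Element are stand-ins, not gRPC helpers):

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative only: hand out aligned slices of one contiguous buffer first,
// then initialize elements in a second pass once the full layout is known.
constexpr size_t RoundUp(size_t n, size_t align = alignof(std::max_align_t)) {
  return (n + align - 1) & ~(align - 1);
}

struct Element {
  size_t data_size;
  void* data = nullptr;
  void Init() { std::printf("init %zu bytes at %p\n", data_size, data); }
};

int main() {
  std::vector<Element> elems = {{24}, {40}, {8}};
  size_t total = 0;
  for (const Element& e : elems) total += RoundUp(e.data_size);
  std::vector<unsigned char> buffer(total);

  // Pass 1: assign aligned storage to every element.
  unsigned char* p = buffer.data();
  for (Element& e : elems) {
    e.data = p;
    p += RoundUp(e.data_size);
  }
  // Pass 2: initialize only after every element knows its storage.
  for (Element& e : elems) e.Init();
}
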
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 35c3fb01ea..0de8c67079 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -79,11 +79,11 @@ typedef struct {
} grpc_call_stats;
/** Information about the call upon completion. */
-typedef struct {
+struct grpc_call_final_info {
grpc_call_stats stats;
- grpc_status_code final_status;
- const char* error_string;
-} grpc_call_final_info;
+ grpc_status_code final_status = GRPC_STATUS_OK;
+ const char* error_string = nullptr;
+};
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
diff --git a/src/core/lib/channel/channel_stack_builder.cc b/src/core/lib/channel/channel_stack_builder.cc
index df5a783631..8b3008f221 100644
--- a/src/core/lib/channel/channel_stack_builder.cc
+++ b/src/core/lib/channel/channel_stack_builder.cc
@@ -40,6 +40,7 @@ struct grpc_channel_stack_builder {
// various set/get-able parameters
grpc_channel_args* args;
grpc_transport* transport;
+ grpc_resource_user* resource_user;
char* target;
const char* name;
};
@@ -157,6 +158,11 @@ void grpc_channel_stack_builder_set_channel_arguments(
builder->args = grpc_channel_args_copy(args);
}
+const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder* builder) {
+ return builder->args;
+}
+
void grpc_channel_stack_builder_set_transport(
grpc_channel_stack_builder* builder, grpc_transport* transport) {
GPR_ASSERT(builder->transport == nullptr);
@@ -168,9 +174,15 @@ grpc_transport* grpc_channel_stack_builder_get_transport(
return builder->transport;
}
-const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+void grpc_channel_stack_builder_set_resource_user(
+ grpc_channel_stack_builder* builder, grpc_resource_user* resource_user) {
+ GPR_ASSERT(builder->resource_user == nullptr);
+ builder->resource_user = resource_user;
+}
+
+grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
grpc_channel_stack_builder* builder) {
- return builder->args;
+ return builder->resource_user;
}
bool grpc_channel_stack_builder_append_filter(
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index 9196de9378..89c30e0c5e 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -54,6 +54,14 @@ void grpc_channel_stack_builder_set_transport(
grpc_transport* grpc_channel_stack_builder_get_transport(
grpc_channel_stack_builder* builder);
+/// Attach \a resource_user to the builder (does not take ownership)
+void grpc_channel_stack_builder_set_resource_user(
+ grpc_channel_stack_builder* builder, grpc_resource_user* resource_user);
+
+/// Fetch attached resource user
+grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
+ grpc_channel_stack_builder* builder);
+
/// Set channel arguments: copies args
void grpc_channel_stack_builder_set_channel_arguments(
grpc_channel_stack_builder* builder, const grpc_channel_args* args);
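
The builder now carries an optional resource user that is attached at most once and is not owned by the builder. A tiny standalone sketch of that set-once, non-owning attachment pattern (Builder/Resource are stand-in types, not the gRPC builder):

#include <cassert>
#include <cstdio>

// Illustrative only: a borrowed pointer that may be attached exactly once;
// the caller retains ownership, mirroring the setter/getter added above.
struct Resource {
  const char* name;
};

class Builder {
 public:
  void set_resource(Resource* r) {
    assert(resource_ == nullptr && "resource may only be attached once");
    resource_ = r;  // borrowed; the caller keeps ownership
  }
  Resource* resource() const { return resource_; }

 private:
  Resource* resource_ = nullptr;
};

int main() {
  Resource quota{"server_memory_quota"};
  Builder builder;
  builder.set_resource(&quota);
  std::printf("attached: %s\n", builder.resource()->name);
}
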
diff --git a/src/core/lib/channel/channelz.cc b/src/core/lib/channel/channelz.cc
index 33577d890a..0cb2890518 100644
--- a/src/core/lib/channel/channelz.cc
+++ b/src/core/lib/channel/channelz.cc
@@ -30,15 +30,18 @@
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/error_utils.h"
+#include "src/core/lib/uri/uri_parser.h"
namespace grpc_core {
namespace channelz {
@@ -204,22 +207,20 @@ char* ServerNode::RenderServerSockets(intptr_t start_socket_id) {
grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
grpc_json* json = top_level_json;
grpc_json* json_iterator = nullptr;
- ChildRefsList socket_refs;
- // uuids index into entities one-off (idx 0 is really uuid 1, since 0 is
- // reserved). However, we want to support requests coming in with
- // start_server_id=0, which signifies "give me everything."
- size_t start_idx = start_socket_id == 0 ? 0 : start_socket_id - 1;
- grpc_server_populate_server_sockets(server_, &socket_refs, start_idx);
+ ChildSocketsList socket_refs;
+ grpc_server_populate_server_sockets(server_, &socket_refs, start_socket_id);
if (!socket_refs.empty()) {
// create list of socket refs
grpc_json* array_parent = grpc_json_create_child(
nullptr, json, "socketRef", nullptr, GRPC_JSON_ARRAY, false);
for (size_t i = 0; i < socket_refs.size(); ++i) {
- json_iterator =
+ grpc_json* socket_ref_json =
grpc_json_create_child(json_iterator, array_parent, nullptr, nullptr,
GRPC_JSON_OBJECT, false);
- grpc_json_add_number_string_child(json_iterator, nullptr, "socketId",
- socket_refs[i]);
+ json_iterator = grpc_json_add_number_string_child(
+ socket_ref_json, nullptr, "socketId", socket_refs[i]->uuid());
+ grpc_json_create_child(json_iterator, socket_ref_json, "name",
+ socket_refs[i]->remote(), GRPC_JSON_STRING, false);
}
}
// For now we do not have any pagination rules. In the future we could
@@ -277,7 +278,61 @@ grpc_json* ServerNode::RenderJson() {
return top_level_json;
}
-SocketNode::SocketNode() : BaseNode(EntityType::kSocket) {}
+static void PopulateSocketAddressJson(grpc_json* json, const char* name,
+ const char* addr_str) {
+ if (addr_str == nullptr) return;
+ grpc_json* json_iterator = nullptr;
+ json_iterator = grpc_json_create_child(json_iterator, json, name, nullptr,
+ GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ grpc_uri* uri = grpc_uri_parse(addr_str, true);
+ if ((uri != nullptr) && ((strcmp(uri->scheme, "ipv4") == 0) ||
+ (strcmp(uri->scheme, "ipv6") == 0))) {
+ const char* host_port = uri->path;
+ if (*host_port == '/') ++host_port;
+ char* host = nullptr;
+ char* port = nullptr;
+ GPR_ASSERT(gpr_split_host_port(host_port, &host, &port));
+ int port_num = -1;
+ if (port != nullptr) {
+ port_num = atoi(port);
+ }
+ char* b64_host = grpc_base64_encode(host, strlen(host), false, false);
+ json_iterator = grpc_json_create_child(json_iterator, json, "tcpip_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator = grpc_json_add_number_string_child(json, json_iterator,
+ "port", port_num);
+ json_iterator = grpc_json_create_child(json_iterator, json, "ip_address",
+ b64_host, GRPC_JSON_STRING, true);
+ gpr_free(host);
+ gpr_free(port);
+
+ } else if (uri != nullptr && strcmp(uri->scheme, "unix") == 0) {
+ json_iterator = grpc_json_create_child(json_iterator, json, "uds_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator =
+ grpc_json_create_child(json_iterator, json, "filename",
+ gpr_strdup(uri->path), GRPC_JSON_STRING, true);
+ } else {
+ json_iterator = grpc_json_create_child(json_iterator, json, "other_address",
+ nullptr, GRPC_JSON_OBJECT, false);
+ json = json_iterator;
+ json_iterator = nullptr;
+ json_iterator = grpc_json_create_child(json_iterator, json, "name",
+ addr_str, GRPC_JSON_STRING, false);
+ }
+ grpc_uri_destroy(uri);
+}
+
+SocketNode::SocketNode(UniquePtr<char> local, UniquePtr<char> remote)
+ : BaseNode(EntityType::kSocket),
+ local_(std::move(local)),
+ remote_(std::move(remote)) {}
void SocketNode::RecordStreamStartedFromLocal() {
gpr_atm_no_barrier_fetch_add(&streams_started_, static_cast<gpr_atm>(1));
@@ -315,6 +370,9 @@ grpc_json* SocketNode::RenderJson() {
json_iterator = nullptr;
json_iterator = grpc_json_add_number_string_child(json, json_iterator,
"socketId", uuid());
+ json = top_level_json;
+ PopulateSocketAddressJson(json, "remote", remote_.get());
+ PopulateSocketAddressJson(json, "local", local_.get());
// reset json iterators to top level object
json = top_level_json;
json_iterator = nullptr;
@@ -374,7 +432,8 @@ grpc_json* SocketNode::RenderJson() {
return top_level_json;
}
-ListenSocketNode::ListenSocketNode() : BaseNode(EntityType::kSocket) {}
+ListenSocketNode::ListenSocketNode(UniquePtr<char> local_addr)
+ : BaseNode(EntityType::kSocket), local_addr_(std::move(local_addr)) {}
grpc_json* ListenSocketNode::RenderJson() {
// We need to track these three json objects to build our object
@@ -388,6 +447,9 @@ grpc_json* ListenSocketNode::RenderJson() {
json_iterator = nullptr;
json_iterator = grpc_json_add_number_string_child(json, json_iterator,
"socketId", uuid());
+ json = top_level_json;
+ PopulateSocketAddressJson(json, "local", local_addr_.get());
+
return top_level_json;
}
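Editor's note: the new PopulateSocketAddressJson helper above classifies a channelz address by URI scheme (ipv4/ipv6 becomes a tcpip_address with a base64 IP and a numeric port, unix becomes a uds_address, anything else an other_address). The following is a minimal standalone sketch of that classification and host/port split in plain C++, assuming a simple "scheme://host:port" textual form for illustration; it does not use gRPC's grpc_uri_parse or gpr_split_host_port, and the names AddressKind/ParsedAddress are invented for the sketch.

#include <iostream>
#include <string>

enum class AddressKind { kTcpIp, kUds, kOther };

struct ParsedAddress {
  AddressKind kind;
  std::string host;  // IP literal for tcpip, filename for uds, raw text otherwise
  int port = -1;
};

ParsedAddress ClassifyAddress(const std::string& addr) {
  ParsedAddress out{AddressKind::kOther, addr, -1};
  auto scheme_end = addr.find("://");  // assumption: "scheme://rest" form
  if (scheme_end == std::string::npos) return out;
  std::string scheme = addr.substr(0, scheme_end);
  std::string rest = addr.substr(scheme_end + 3);
  if (scheme == "ipv4" || scheme == "ipv6") {
    // Split "host:port"; for ipv6 the host may be bracketed, e.g. [::1]:80.
    auto colon = rest.rfind(':');
    out.kind = AddressKind::kTcpIp;
    out.host = rest.substr(0, colon);
    if (colon != std::string::npos) out.port = std::stoi(rest.substr(colon + 1));
  } else if (scheme == "unix") {
    out.kind = AddressKind::kUds;
    out.host = rest;  // treated as the socket filename
  }
  return out;
}

int main() {
  ParsedAddress a = ClassifyAddress("ipv4://127.0.0.1:50051");
  std::cout << a.host << " " << a.port << "\n";  // prints: 127.0.0.1 50051
}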
diff --git a/src/core/lib/channel/channelz.h b/src/core/lib/channel/channelz.h
index 88551befc8..96a4333083 100644
--- a/src/core/lib/channel/channelz.h
+++ b/src/core/lib/channel/channelz.h
@@ -59,6 +59,9 @@ namespace channelz {
// add human readable names as in the channelz.proto
typedef InlinedVector<intptr_t, 10> ChildRefsList;
+class SocketNode;
+typedef InlinedVector<SocketNode*, 10> ChildSocketsList;
+
namespace testing {
class CallCountingHelperPeer;
class ChannelNodePeer;
@@ -232,7 +235,7 @@ class ServerNode : public BaseNode {
// Handles channelz bookkeeping for sockets
class SocketNode : public BaseNode {
public:
- SocketNode();
+ SocketNode(UniquePtr<char> local, UniquePtr<char> remote);
~SocketNode() override {}
grpc_json* RenderJson() override;
@@ -251,6 +254,8 @@ class SocketNode : public BaseNode {
gpr_atm_no_barrier_fetch_add(&keepalives_sent_, static_cast<gpr_atm>(1));
}
+ const char* remote() { return remote_.get(); }
+
private:
gpr_atm streams_started_ = 0;
gpr_atm streams_succeeded_ = 0;
@@ -262,16 +267,21 @@ class SocketNode : public BaseNode {
gpr_atm last_remote_stream_created_millis_ = 0;
gpr_atm last_message_sent_millis_ = 0;
gpr_atm last_message_received_millis_ = 0;
- UniquePtr<char> peer_string_;
+ UniquePtr<char> local_;
+ UniquePtr<char> remote_;
};
// Handles channelz bookkeeping for listen sockets
class ListenSocketNode : public BaseNode {
public:
- ListenSocketNode();
+  // ListenSocketNode takes ownership of local_addr.
+ explicit ListenSocketNode(UniquePtr<char> local_addr);
~ListenSocketNode() override {}
grpc_json* RenderJson() override;
+
+ private:
+ UniquePtr<char> local_addr_;
};
// Creation functions
diff --git a/src/core/lib/channel/channelz_registry.cc b/src/core/lib/channel/channelz_registry.cc
index 1fe2fad3e1..bc23b90a66 100644
--- a/src/core/lib/channel/channelz_registry.cc
+++ b/src/core/lib/channel/channelz_registry.cc
@@ -210,6 +210,17 @@ char* ChannelzRegistry::InternalGetServers(intptr_t start_server_id) {
return json_str;
}
+void ChannelzRegistry::InternalLogAllEntities() {
+ MutexLock lock(&mu_);
+ for (size_t i = 0; i < entities_.size(); ++i) {
+ if (entities_[i] != nullptr) {
+ char* json = entities_[i]->RenderJsonString();
+ gpr_log(GPR_INFO, "%s", json);
+ gpr_free(json);
+ }
+ }
+}
+
} // namespace channelz
} // namespace grpc_core
@@ -222,6 +233,24 @@ char* grpc_channelz_get_servers(intptr_t start_server_id) {
return grpc_core::channelz::ChannelzRegistry::GetServers(start_server_id);
}
+char* grpc_channelz_get_server(intptr_t server_id) {
+ grpc_core::channelz::BaseNode* server_node =
+ grpc_core::channelz::ChannelzRegistry::Get(server_id);
+ if (server_node == nullptr ||
+ server_node->type() !=
+ grpc_core::channelz::BaseNode::EntityType::kServer) {
+ return nullptr;
+ }
+ grpc_json* top_level_json = grpc_json_create(GRPC_JSON_OBJECT);
+ grpc_json* json = top_level_json;
+ grpc_json* channel_json = server_node->RenderJson();
+ channel_json->key = "server";
+ grpc_json_link_child(json, channel_json, nullptr);
+ char* json_str = grpc_json_dump_to_string(top_level_json, 0);
+ grpc_json_destroy(top_level_json);
+ return json_str;
+}
+
char* grpc_channelz_get_server_sockets(intptr_t server_id,
intptr_t start_socket_id) {
grpc_core::channelz::BaseNode* base_node =
diff --git a/src/core/lib/channel/channelz_registry.h b/src/core/lib/channel/channelz_registry.h
index 326f0201c7..73b330785d 100644
--- a/src/core/lib/channel/channelz_registry.h
+++ b/src/core/lib/channel/channelz_registry.h
@@ -62,6 +62,10 @@ class ChannelzRegistry {
return Default()->InternalGetServers(start_server_id);
}
+ // Test-only helper function to dump the JSON representation to stdout.
+ // This can aid in debugging channelz code.
+ static void LogAllEntities() { Default()->InternalLogAllEntities(); }
+
private:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
@@ -96,6 +100,8 @@ class ChannelzRegistry {
// Else, will return idx of the first uuid higher than the target.
int FindByUuidLocked(intptr_t uuid, bool direct_hit_needed);
+ void InternalLogAllEntities();
+
// protects members
gpr_mu mu_;
InlinedVector<BaseNode*, 20> entities_;
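Editor's note: InternalLogAllEntities() above simply walks the registry under its mutex and logs each live node's rendered JSON. A small sketch of that shape using only standard-library types follows; Node and Registry are illustrative stand-ins, not gRPC's BaseNode or ChannelzRegistry.

#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

class Node {
 public:
  explicit Node(std::string json) : json_(std::move(json)) {}
  std::string RenderJsonString() const { return json_; }
 private:
  std::string json_;
};

class Registry {
 public:
  void Register(std::shared_ptr<Node> n) {
    std::lock_guard<std::mutex> lock(mu_);
    entities_.push_back(std::move(n));
  }
  // Dumps every live entity's JSON while holding the registry lock.
  void LogAllEntities() const {
    std::lock_guard<std::mutex> lock(mu_);
    for (const auto& e : entities_) {
      if (e != nullptr) std::cout << e->RenderJsonString() << "\n";
    }
  }
 private:
  mutable std::mutex mu_;
  std::vector<std::shared_ptr<Node>> entities_;
};

int main() {
  Registry r;
  r.Register(std::make_shared<Node>(R"({"channelId":"1"})"));
  r.LogAllEntities();
}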
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 5daf48a9a9..763e4ffc9f 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -41,9 +41,9 @@ typedef enum {
GRPC_CONTEXT_COUNT
} grpc_context_index;
-typedef struct {
- void* value;
- void (*destroy)(void*);
-} grpc_call_context_element;
+struct grpc_call_context_element {
+ void* value = nullptr;
+ void (*destroy)(void*) = nullptr;
+};
#endif /* GRPC_CORE_LIB_CHANNEL_CONTEXT_H */
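Editor's note: the context.h hunk replaces a bare typedef with a struct that has default member initializers, so a default-constructed grpc_call_context_element starts out null without depending on zeroed allocation at the call site. A minimal sketch of that pattern; ContextElement is an illustrative stand-in for the real type.

#include <cassert>

struct ContextElement {
  void* value = nullptr;            // default member initializers replace
  void (*destroy)(void*) = nullptr; // the old "assume zeroed memory" contract
};

int main() {
  ContextElement elems[4];  // each element is default-constructed to nullptrs
  assert(elems[0].value == nullptr && elems[3].destroy == nullptr);
}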
diff --git a/src/core/lib/debug/trace.cc b/src/core/lib/debug/trace.cc
index 01c1e867d9..cafdb15c69 100644
--- a/src/core/lib/debug/trace.cc
+++ b/src/core/lib/debug/trace.cc
@@ -21,6 +21,7 @@
#include "src/core/lib/debug/trace.h"
#include <string.h>
+#include <type_traits>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
@@ -79,6 +80,8 @@ void TraceFlagList::LogAllTracers() {
// Flags register themselves on the list during construction
TraceFlag::TraceFlag(bool default_enabled, const char* name) : name_(name) {
+ static_assert(std::is_trivially_destructible<TraceFlag>::value,
+ "TraceFlag needs to be trivially destructible.");
set_enabled(default_enabled);
TraceFlagList::Add(this);
}
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index fe6301a3fc..4623494520 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -53,7 +53,8 @@ void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag);
class TraceFlag {
public:
TraceFlag(bool default_enabled, const char* name);
- ~TraceFlag() {}
+ // This needs to be trivially destructible as it is used as a global variable.
+ ~TraceFlag() = default;
const char* name() const { return name_; }
@@ -102,8 +103,9 @@ typedef TraceFlag DebugOnlyTraceFlag;
#else
class DebugOnlyTraceFlag {
public:
- DebugOnlyTraceFlag(bool default_enabled, const char* name) {}
- bool enabled() { return false; }
+ constexpr DebugOnlyTraceFlag(bool default_enabled, const char* name) {}
+ constexpr bool enabled() const { return false; }
+ constexpr const char* name() const { return "DebugOnlyTraceFlag"; }
private:
void set_enabled(bool enabled) {}
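Editor's note: the trace.cc/trace.h hunks enforce that TraceFlag stays trivially destructible (and make DebugOnlyTraceFlag constexpr), because these objects live as globals and must not run destructors during shutdown. A minimal sketch of the same invariant, with GlobalFlag as an invented example type.

#include <type_traits>

class GlobalFlag {
 public:
  constexpr explicit GlobalFlag(bool enabled) : enabled_(enabled) {}
  ~GlobalFlag() = default;  // defaulted and trivial: nothing to run at exit
  bool enabled() const { return enabled_; }
 private:
  bool enabled_;
};

static_assert(std::is_trivially_destructible<GlobalFlag>::value,
              "GlobalFlag must be trivially destructible");

GlobalFlag g_my_flag(true);  // safe as a global: no destructor-ordering hazards

int main() { return g_my_flag.enabled() ? 0 : 1; }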
diff --git a/src/core/lib/gpr/arena.cc b/src/core/lib/gpr/arena.cc
index 77f9357146..836a7ca793 100644
--- a/src/core/lib/gpr/arena.cc
+++ b/src/core/lib/gpr/arena.cc
@@ -21,6 +21,7 @@
#include "src/core/lib/gpr/arena.h"
#include <string.h>
+#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
@@ -28,34 +29,79 @@
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gprpp/memory.h"
+
+namespace {
+enum init_strategy {
+ NO_INIT, // Do not initialize the arena blocks.
+ ZERO_INIT, // Initialize arena blocks with 0.
+ NON_ZERO_INIT, // Initialize arena blocks with a non-zero value.
+};
+
+gpr_once g_init_strategy_once = GPR_ONCE_INIT;
+init_strategy g_init_strategy = NO_INIT;
+} // namespace
+
+static void set_strategy_from_env() {
+ char* str = gpr_getenv("GRPC_ARENA_INIT_STRATEGY");
+ if (str == nullptr) {
+ g_init_strategy = NO_INIT;
+ } else if (strcmp(str, "zero_init") == 0) {
+ g_init_strategy = ZERO_INIT;
+ } else if (strcmp(str, "non_zero_init") == 0) {
+ g_init_strategy = NON_ZERO_INIT;
+ } else {
+ g_init_strategy = NO_INIT;
+ }
+ gpr_free(str);
+}
+
+static void* gpr_arena_alloc_maybe_init(size_t size) {
+ void* mem = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
+ gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+ if (GPR_UNLIKELY(g_init_strategy != NO_INIT)) {
+ if (g_init_strategy == ZERO_INIT) {
+ memset(mem, 0, size);
+ } else { // NON_ZERO_INIT.
+ memset(mem, 0xFE, size);
+ }
+ }
+ return mem;
+}
+
+void gpr_arena_init() {
+ gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+}
// Uncomment this to use a simple arena that simply allocates the
// requested amount of memory for each call to gpr_arena_alloc(). This
// effectively eliminates the efficiency gain of using an arena, but it
// may be useful for debugging purposes.
//#define SIMPLE_ARENA_FOR_DEBUGGING
-
#ifdef SIMPLE_ARENA_FOR_DEBUGGING
struct gpr_arena {
+ gpr_arena() { gpr_mu_init(&mu); }
+ ~gpr_arena() {
+ gpr_mu_destroy(&mu);
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ gpr_free_aligned(ptrs[i]);
+ }
+ gpr_free(ptrs);
+ }
+
gpr_mu mu;
- void** ptrs;
- size_t num_ptrs;
+ void** ptrs = nullptr;
+ size_t num_ptrs = 0;
};
gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
- gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena));
- gpr_mu_init(&arena->mu);
- return arena;
+ return grpc_core::New<gpr_arena>();
}
size_t gpr_arena_destroy(gpr_arena* arena) {
- gpr_mu_destroy(&arena->mu);
- for (size_t i = 0; i < arena->num_ptrs; ++i) {
- gpr_free(arena->ptrs[i]);
- }
- gpr_free(arena->ptrs);
- gpr_free(arena);
+ grpc_core::Delete(arena);
return 1; // Value doesn't matter, since it won't be used.
}
@@ -63,7 +109,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
gpr_mu_lock(&arena->mu);
arena->ptrs =
(void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
- void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);
+ void* retval = arena->ptrs[arena->num_ptrs++] =
+ gpr_arena_alloc_maybe_init(size);
gpr_mu_unlock(&arena->mu);
return retval;
}
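Editor's note: the hunk above reads GRPC_ARENA_INIT_STRATEGY once and then optionally zero-fills or poison-fills every arena block. A standalone sketch of that idea follows; the environment-variable name and the "zero_init"/"non_zero_init" values mirror the diff, while the allocator and the once-initialization mechanism (a C++ magic static instead of gpr_once) are simplifications.

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <new>

enum class InitStrategy { kNoInit, kZeroInit, kNonZeroInit };

static InitStrategy GetInitStrategy() {
  // Initialized exactly once, thread-safely, like the gpr_once in the diff.
  static InitStrategy strategy = [] {
    const char* s = std::getenv("GRPC_ARENA_INIT_STRATEGY");
    if (s == nullptr) return InitStrategy::kNoInit;
    if (std::strcmp(s, "zero_init") == 0) return InitStrategy::kZeroInit;
    if (std::strcmp(s, "non_zero_init") == 0) return InitStrategy::kNonZeroInit;
    return InitStrategy::kNoInit;
  }();
  return strategy;
}

static void* AllocMaybeInit(std::size_t size) {
  void* mem = ::operator new(size);
  switch (GetInitStrategy()) {
    case InitStrategy::kZeroInit:    std::memset(mem, 0, size); break;
    case InitStrategy::kNonZeroInit: std::memset(mem, 0xFE, size); break;  // poison
    case InitStrategy::kNoInit:      break;  // leave the block uninitialized
  }
  return mem;
}

int main() {
  void* p = AllocMaybeInit(64);
  ::operator delete(p);
}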
@@ -77,45 +124,45 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
// would allow us to use the alignment actually needed by the caller.
typedef struct zone {
- zone* next;
+ zone* next = nullptr;
} zone;
struct gpr_arena {
+ gpr_arena(size_t initial_size)
+ : initial_zone_size(initial_size), last_zone(&initial_zone) {
+ gpr_mu_init(&arena_growth_mutex);
+ }
+ ~gpr_arena() {
+ gpr_mu_destroy(&arena_growth_mutex);
+ zone* z = initial_zone.next;
+ while (z) {
+ zone* next_z = z->next;
+ z->~zone();
+ gpr_free_aligned(z);
+ z = next_z;
+ }
+ }
+
// Keep track of the total used size. We use this in our call sizing
// hysteresis.
- gpr_atm total_used;
+ gpr_atm total_used = 0;
size_t initial_zone_size;
zone initial_zone;
zone* last_zone;
gpr_mu arena_growth_mutex;
};
-static void* zalloc_aligned(size_t size) {
- void* ptr = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
- memset(ptr, 0, size);
- return ptr;
-}
-
gpr_arena* gpr_arena_create(size_t initial_size) {
initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
- gpr_arena* a = static_cast<gpr_arena*>(zalloc_aligned(
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size));
- a->initial_zone_size = initial_size;
- a->last_zone = &a->initial_zone;
- gpr_mu_init(&a->arena_growth_mutex);
- return a;
+ return new (gpr_arena_alloc_maybe_init(
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size))
+ gpr_arena(initial_size);
}
size_t gpr_arena_destroy(gpr_arena* arena) {
- gpr_mu_destroy(&arena->arena_growth_mutex);
- gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
- zone* z = arena->initial_zone.next;
+ const gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
+ arena->~gpr_arena();
gpr_free_aligned(arena);
- while (z) {
- zone* next_z = z->next;
- gpr_free_aligned(z);
- z = next_z;
- }
return static_cast<size_t>(size);
}
@@ -132,8 +179,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
// sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
gpr_mu_lock(&arena->arena_growth_mutex);
- zone* z = static_cast<zone*>(
- zalloc_aligned(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size));
+ zone* z = new (gpr_arena_alloc_maybe_init(
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
arena->last_zone->next = z;
arena->last_zone = z;
gpr_mu_unlock(&arena->arena_growth_mutex);
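Editor's note: the arena now constructs its header and zone structs with placement new into a raw block and later runs their destructors explicitly, instead of relying on zero-filled allocations. The sketch below shows just that core pattern under the same assumptions (one raw allocation holding a header plus payload); the real code additionally rounds sizes up to GPR_MAX_ALIGNMENT, which is omitted here.

#include <cstddef>
#include <cstdlib>
#include <new>

struct Zone {
  Zone* next = nullptr;  // default member initializer replaces memset(0)
};

int main() {
  const std::size_t payload = 256;
  void* raw = std::malloc(sizeof(Zone) + payload);
  Zone* z = new (raw) Zone();  // construct the header in place
  void* user_memory = static_cast<char*>(raw) + sizeof(Zone);
  (void)user_memory;           // this region would be handed out to callers
  z->~Zone();                  // run the destructor explicitly...
  std::free(raw);              // ...then release the raw block
}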
diff --git a/src/core/lib/gpr/arena.h b/src/core/lib/gpr/arena.h
index 6d2a073dd5..069892b228 100644
--- a/src/core/lib/gpr/arena.h
+++ b/src/core/lib/gpr/arena.h
@@ -37,5 +37,7 @@ gpr_arena* gpr_arena_create(size_t initial_size);
void* gpr_arena_alloc(gpr_arena* arena, size_t size);
// Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena* arena);
+// Initializes the Arena component.
+void gpr_arena_init();
#endif /* GRPC_CORE_LIB_GPR_ARENA_H */
diff --git a/src/core/lib/gprpp/inlined_vector.h b/src/core/lib/gprpp/inlined_vector.h
index 65c2b9634f..66dc751a56 100644
--- a/src/core/lib/gprpp/inlined_vector.h
+++ b/src/core/lib/gprpp/inlined_vector.h
@@ -100,10 +100,7 @@ class InlinedVector {
void reserve(size_t capacity) {
if (capacity > capacity_) {
T* new_dynamic = static_cast<T*>(gpr_malloc(sizeof(T) * capacity));
- for (size_t i = 0; i < size_; ++i) {
- new (&new_dynamic[i]) T(std::move(data()[i]));
- data()[i].~T();
- }
+ move_elements(data(), new_dynamic, size_);
gpr_free(dynamic_);
dynamic_ = new_dynamic;
capacity_ = capacity;
@@ -131,13 +128,25 @@ class InlinedVector {
size_--;
}
+ size_t size() const { return size_; }
+ bool empty() const { return size_ == 0; }
+
+ size_t capacity() const { return capacity_; }
+
+ void clear() {
+ destroy_elements();
+ init_data();
+ }
+
+ private:
void copy_from(const InlinedVector& v) {
- // if v is allocated, copy over the buffer.
+ // if v is allocated, make sure we have enough capacity.
if (v.dynamic_ != nullptr) {
reserve(v.capacity_);
- memcpy(dynamic_, v.dynamic_, v.size_ * sizeof(T));
- } else {
- memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ }
+ // copy over elements
+ for (size_t i = 0; i < v.size_; ++i) {
+ new (&(data()[i])) T(v[i]);
}
// copy over metadata
size_ = v.size_;
@@ -145,11 +154,12 @@ class InlinedVector {
}
void move_from(InlinedVector& v) {
- // if v is allocated, then we steal its buffer, else we copy it.
+ // if v is allocated, then we steal its dynamic array; otherwise, we
+ // move the elements individually.
if (v.dynamic_ != nullptr) {
dynamic_ = v.dynamic_;
} else {
- memcpy(inline_, v.inline_, v.size_ * sizeof(T));
+ move_elements(v.data(), data(), v.size_);
}
// copy over metadata
size_ = v.size_;
@@ -158,17 +168,13 @@ class InlinedVector {
v.init_data();
}
- size_t size() const { return size_; }
- bool empty() const { return size_ == 0; }
-
- size_t capacity() const { return capacity_; }
-
- void clear() {
- destroy_elements();
- init_data();
+ static void move_elements(T* src, T* dst, size_t num_elements) {
+ for (size_t i = 0; i < num_elements; ++i) {
+ new (&dst[i]) T(std::move(src[i]));
+ src[i].~T();
+ }
}
- private:
void init_data() {
dynamic_ = nullptr;
size_ = 0;
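Editor's note: the InlinedVector changes replace memcpy of possibly non-trivial elements with an element-wise relocation: move-construct each object into the destination storage, then destroy the source. A self-contained sketch of that helper; MoveElements/DestroyElements are illustrative names.

#include <cassert>
#include <cstddef>
#include <new>
#include <string>
#include <utility>

template <typename T>
void MoveElements(T* src, T* dst, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    new (&dst[i]) T(std::move(src[i]));  // move-construct into the raw slot
    src[i].~T();                         // destroy the moved-from source
  }
}

template <typename T>
void DestroyElements(T* p, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) p[i].~T();
}

int main() {
  alignas(std::string) unsigned char src_buf[2 * sizeof(std::string)];
  alignas(std::string) unsigned char dst_buf[2 * sizeof(std::string)];
  auto* src = reinterpret_cast<std::string*>(src_buf);
  auto* dst = reinterpret_cast<std::string*>(dst_buf);
  new (&src[0]) std::string("hello");
  new (&src[1]) std::string("world");
  MoveElements(src, dst, 2);  // safe even for non-trivially-copyable types
  assert(dst[0] == "hello" && dst[1] == "world");
  DestroyElements(dst, 2);
}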
diff --git a/src/core/lib/gprpp/orphanable.h b/src/core/lib/gprpp/orphanable.h
index 3123e3f5a3..9053c60111 100644
--- a/src/core/lib/gprpp/orphanable.h
+++ b/src/core/lib/gprpp/orphanable.h
@@ -31,6 +31,7 @@
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
namespace grpc_core {
@@ -89,107 +90,42 @@ class InternallyRefCounted : public Orphanable {
template <typename T>
friend class RefCountedPtr;
- InternallyRefCounted() { gpr_ref_init(&refs_, 1); }
- virtual ~InternallyRefCounted() {}
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ explicit InternallyRefCounted(TraceFlagT* trace_flag = nullptr)
+ : refs_(1, trace_flag) {}
+ virtual ~InternallyRefCounted() = default;
RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
IncrementRefCount();
return RefCountedPtr<Child>(static_cast<Child*>(this));
}
-
- void Unref() {
- if (gpr_unref(&refs_)) {
- Delete(static_cast<Child*>(this));
- }
- }
-
- private:
- void IncrementRefCount() { gpr_ref(&refs_); }
-
- gpr_refcount refs_;
-};
-
-// An alternative version of the InternallyRefCounted base class that
-// supports tracing. This is intended to be used in cases where the
-// object will be handled both by idiomatic C++ code using smart
-// pointers and legacy code that is manually calling Ref() and Unref().
-// Once all of our code is converted to idiomatic C++, we may be able to
-// eliminate this class.
-template <typename Child>
-class InternallyRefCountedWithTracing : public Orphanable {
- public:
- // Not copyable nor movable.
- InternallyRefCountedWithTracing(const InternallyRefCountedWithTracing&) =
- delete;
- InternallyRefCountedWithTracing& operator=(
- const InternallyRefCountedWithTracing&) = delete;
-
- GRPC_ABSTRACT_BASE_CLASS
-
- protected:
- GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
-
- // Allow RefCountedPtr<> to access Unref() and IncrementRefCount().
- template <typename T>
- friend class RefCountedPtr;
-
- InternallyRefCountedWithTracing()
- : InternallyRefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
-
- explicit InternallyRefCountedWithTracing(TraceFlag* trace_flag)
- : trace_flag_(trace_flag) {
- gpr_ref_init(&refs_, 1);
- }
-
-#ifdef NDEBUG
- explicit InternallyRefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
- : InternallyRefCountedWithTracing() {}
-#endif
-
- virtual ~InternallyRefCountedWithTracing() {}
-
- RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
- IncrementRefCount();
- return RefCountedPtr<Child>(static_cast<Child*>(this));
- }
-
RefCountedPtr<Child> Ref(const DebugLocation& location,
const char* reason) GRPC_MUST_USE_RESULT {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs + 1, reason);
- }
- return Ref();
+ IncrementRefCount(location, reason);
+ return RefCountedPtr<Child>(static_cast<Child*>(this));
}
- // TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods
- // private, since they will only be used by RefCountedPtr<>, which is a
- // friend of this class.
-
void Unref() {
- if (gpr_unref(&refs_)) {
+ if (refs_.Unref()) {
Delete(static_cast<Child*>(this));
}
}
-
void Unref(const DebugLocation& location, const char* reason) {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs - 1, reason);
+ if (refs_.Unref(location, reason)) {
+ Delete(static_cast<Child*>(this));
}
- Unref();
}
private:
- void IncrementRefCount() { gpr_ref(&refs_); }
+ void IncrementRefCount() { refs_.Ref(); }
+ void IncrementRefCount(const DebugLocation& location, const char* reason) {
+ refs_.Ref(location, reason);
+ }
- TraceFlag* trace_flag_ = nullptr;
- gpr_refcount refs_;
+ grpc_core::RefCount refs_;
};
} // namespace grpc_core
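Editor's note: InternallyRefCounted now delegates to the new grpc_core::RefCount, whose contract is an atomic counter where Unref() reports when the last reference is released so the owner can delete itself. A minimal sketch of that contract, without the tracing paths; RefCountSketch is an invented name, not the real class.

#include <atomic>
#include <cassert>
#include <cstdint>

class RefCountSketch {
 public:
  explicit RefCountSketch(intptr_t init = 1) : value_(init) {}
  void Ref(intptr_t n = 1) { value_.fetch_add(n, std::memory_order_relaxed); }
  bool Unref() {
    // acq_rel so the thread that deletes sees all writes by other owners.
    intptr_t prior = value_.fetch_sub(1, std::memory_order_acq_rel);
    assert(prior > 0);
    return prior == 1;  // true iff this was the last reference
  }
 private:
  std::atomic<intptr_t> value_;
};

int main() {
  RefCountSketch refs;    // starts at 1, like the classes above
  refs.Ref();
  assert(!refs.Unref());  // one owner still remains
  assert(refs.Unref());   // last reference released; owner would Delete(this)
}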
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index 03c293f6ed..fa97ffcfed 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -21,9 +21,12 @@
#include <grpc/support/port_platform.h>
+#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include <atomic>
+#include <cassert>
#include <cinttypes>
#include "src/core/lib/debug/trace.h"
@@ -34,61 +37,150 @@
namespace grpc_core {
-// A base class for reference-counted objects.
-// New objects should be created via New() and start with a refcount of 1.
-// When the refcount reaches 0, the object will be deleted via Delete().
-//
-// This will commonly be used by CRTP (curiously-recurring template pattern)
-// e.g., class MyClass : public RefCounted<MyClass>
-template <typename Child>
-class RefCounted {
+// PolymorphicRefCount enforces polymorphic destruction of RefCounted.
+class PolymorphicRefCount {
public:
- RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
- IncrementRefCount();
- return RefCountedPtr<Child>(static_cast<Child*>(this));
- }
+ GRPC_ABSTRACT_BASE_CLASS
- // TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make this method
- // private, since it will only be used by RefCountedPtr<>, which is a
- // friend of this class.
- void Unref() {
- if (gpr_unref(&refs_)) {
- Delete(static_cast<Child*>(this));
- }
- }
+ protected:
+ GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- // Not copyable nor movable.
- RefCounted(const RefCounted&) = delete;
- RefCounted& operator=(const RefCounted&) = delete;
+ virtual ~PolymorphicRefCount() = default;
+};
+// NonPolymorphicRefCount does not enforce polymorphic destruction of
+// RefCounted. Please refer to grpc_core::RefCounted for more details, and
+// when in doubt use PolymorphicRefCount.
+class NonPolymorphicRefCount {
+ public:
GRPC_ABSTRACT_BASE_CLASS
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- RefCounted() { gpr_ref_init(&refs_, 1); }
+ ~NonPolymorphicRefCount() = default;
+};
- virtual ~RefCounted() {}
+// RefCount is a simple atomic ref-count.
+//
+// This is a C++ implementation of gpr_refcount, with inline functions. Due to
+// inline functions, this class is significantly more efficient than
+// gpr_refcount and should be preferred over gpr_refcount whenever possible.
+//
+// TODO(soheil): Remove gpr_refcount after submitting the GRFC and the paragraph
+// above.
+class RefCount {
+ public:
+ using Value = intptr_t;
+
+ // `init` is the initial refcount stored in this object.
+ //
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ constexpr explicit RefCount(Value init = 1, TraceFlagT* trace_flag = nullptr)
+ :
+#ifndef NDEBUG
+ trace_flag_(trace_flag),
+#endif
+ value_(init) {
+ }
- private:
- // Allow RefCountedPtr<> to access IncrementRefCount().
- template <typename T>
- friend class RefCountedPtr;
+ // Increases the ref-count by `n`.
+ void Ref(Value n = 1) {
+ GPR_ATM_INC_ADD_THEN(value_.fetch_add(n, std::memory_order_relaxed));
+ }
+ void Ref(const DebugLocation& location, const char* reason, Value n = 1) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs + n, reason);
+ }
+#endif
+ Ref(n);
+ }
+
+ // Similar to Ref() with an assert on the ref-count being non-zero.
+ void RefNonZero() {
+#ifndef NDEBUG
+ const Value prior =
+ GPR_ATM_INC_ADD_THEN(value_.fetch_add(1, std::memory_order_relaxed));
+ assert(prior > 0);
+#else
+ Ref();
+#endif
+ }
+ void RefNonZero(const DebugLocation& location, const char* reason) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs + 1, reason);
+ }
+#endif
+ RefNonZero();
+ }
- void IncrementRefCount() { gpr_ref(&refs_); }
+ // Decrements the ref-count and returns true if the ref-count reaches 0.
+ bool Unref() {
+ const Value prior =
+ GPR_ATM_INC_ADD_THEN(value_.fetch_sub(1, std::memory_order_acq_rel));
+ GPR_DEBUG_ASSERT(prior > 0);
+ return prior == 1;
+ }
+ bool Unref(const DebugLocation& location, const char* reason) {
+#ifndef NDEBUG
+ if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+ const RefCount::Value old_refs = get();
+ gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
+ trace_flag_->name(), this, location.file(), location.line(),
+ old_refs, old_refs - 1, reason);
+ }
+#endif
+ return Unref();
+ }
- gpr_refcount refs_;
+ private:
+ Value get() const { return value_.load(std::memory_order_relaxed); }
+
+#ifndef NDEBUG
+ TraceFlag* trace_flag_;
+#endif
+ std::atomic<Value> value_;
};
-// An alternative version of the RefCounted base class that
-// supports tracing. This is intended to be used in cases where the
-// object will be handled both by idiomatic C++ code using smart
-// pointers and legacy code that is manually calling Ref() and Unref().
-// Once all of our code is converted to idiomatic C++, we may be able to
-// eliminate this class.
-template <typename Child>
-class RefCountedWithTracing {
+// A base class for reference-counted objects.
+// New objects should be created via New() and start with a refcount of 1.
+// When the refcount reaches 0, the object will be deleted via Delete().
+//
+// This will commonly be used by CRTP (curiously-recurring template pattern)
+// e.g., class MyClass : public RefCounted<MyClass>
+//
+// Use PolymorphicRefCount and NonPolymorphicRefCount to select between
+// different implementations of RefCounted.
+//
+// Note that NonPolymorphicRefCount does not support polymorphic destruction.
+// So, use NonPolymorphicRefCount only when both of the following conditions
+// are guaranteed to hold:
+// (a) Child is a concrete leaf class in RefCounted<Child>, and
+// (b) you are guaranteed to call Unref only on concrete leaf classes and not
+// their parents.
+//
+// The following example is illegal, because calling Unref() will not call
+// the dtor of Child.
+//
+// class Parent : public RefCounted<Parent, NonPolymorphicRefCount> {}
+// class Child : public Parent {}
+//
+// Child* ch;
+// ch->Unref();
+//
+template <typename Child, typename Impl = PolymorphicRefCount>
+class RefCounted : public Impl {
public:
RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {
IncrementRefCount();
@@ -97,69 +189,55 @@ class RefCountedWithTracing {
RefCountedPtr<Child> Ref(const DebugLocation& location,
const char* reason) GRPC_MUST_USE_RESULT {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs + 1, reason);
- }
- return Ref();
+ IncrementRefCount(location, reason);
+ return RefCountedPtr<Child>(static_cast<Child*>(this));
}
// TODO(roth): Once all of our code is converted to C++ and can use
- // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods
- // private, since they will only be used by RefCountedPtr<>, which is a
+ // RefCountedPtr<> instead of manual ref-counting, make this method
+ // private, since it will only be used by RefCountedPtr<>, which is a
// friend of this class.
-
void Unref() {
- if (gpr_unref(&refs_)) {
+ if (refs_.Unref()) {
Delete(static_cast<Child*>(this));
}
}
-
void Unref(const DebugLocation& location, const char* reason) {
- if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
- gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
- gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
- trace_flag_->name(), this, location.file(), location.line(),
- old_refs, old_refs - 1, reason);
+ if (refs_.Unref(location, reason)) {
+ Delete(static_cast<Child*>(this));
}
- Unref();
}
// Not copyable nor movable.
- RefCountedWithTracing(const RefCountedWithTracing&) = delete;
- RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete;
+ RefCounted(const RefCounted&) = delete;
+ RefCounted& operator=(const RefCounted&) = delete;
GRPC_ABSTRACT_BASE_CLASS
protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
- RefCountedWithTracing()
- : RefCountedWithTracing(static_cast<TraceFlag*>(nullptr)) {}
-
- explicit RefCountedWithTracing(TraceFlag* trace_flag)
- : trace_flag_(trace_flag) {
- gpr_ref_init(&refs_, 1);
- }
-
-#ifdef NDEBUG
- explicit RefCountedWithTracing(DebugOnlyTraceFlag* trace_flag)
- : RefCountedWithTracing() {}
-#endif
+ // TraceFlagT is defined to accept both DebugOnlyTraceFlag and TraceFlag.
+ // Note: RefCount tracing is only enabled on debug builds, even when a
+ // TraceFlag is used.
+ template <typename TraceFlagT = TraceFlag>
+ explicit RefCounted(TraceFlagT* trace_flag = nullptr)
+ : refs_(1, trace_flag) {}
- virtual ~RefCountedWithTracing() {}
+ // Note: Depending on the Impl used, this dtor can be implicitly virtual.
+ ~RefCounted() = default;
private:
// Allow RefCountedPtr<> to access IncrementRefCount().
template <typename T>
friend class RefCountedPtr;
- void IncrementRefCount() { gpr_ref(&refs_); }
+ void IncrementRefCount() { refs_.Ref(); }
+ void IncrementRefCount(const DebugLocation& location, const char* reason) {
+ refs_.Ref(location, reason);
+ }
- TraceFlag* trace_flag_ = nullptr;
- gpr_refcount refs_;
+ RefCount refs_;
};
} // namespace grpc_core
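Editor's note: the rewritten RefCounted is a CRTP base parameterized on a policy class, so a vtable (and virtual destruction) is only paid for when PolymorphicRefCount is chosen. The sketch below mirrors that layout in simplified form: no tracing, raw Child* instead of RefCountedPtr, and invented names (RefCountedSketch, Counter). It also shows the safe use of the non-polymorphic flavor on a concrete leaf class, which is exactly the restriction the comment block above spells out.

#include <atomic>
#include <cstdio>

class PolymorphicBase {
 protected:
  virtual ~PolymorphicBase() = default;  // makes derived destructors virtual
};

class NonPolymorphicBase {
 protected:
  ~NonPolymorphicBase() = default;       // no vtable, no virtual destructor
};

template <typename Child, typename Impl = PolymorphicBase>
class RefCountedSketch : public Impl {
 public:
  Child* Ref() {
    refs_.fetch_add(1, std::memory_order_relaxed);
    return static_cast<Child*>(this);
  }
  void Unref() {
    if (refs_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      delete static_cast<Child*>(this);  // delete as the leaf type
    }
  }
 protected:
  RefCountedSketch() = default;
  ~RefCountedSketch() = default;
 private:
  std::atomic<int> refs_{1};
};

// Safe use of the non-polymorphic flavor: a concrete leaf class that is
// always unreffed as that leaf type, never through a parent pointer.
class Counter : public RefCountedSketch<Counter, NonPolymorphicBase> {
 public:
  ~Counter() { std::printf("Counter destroyed\n"); }
};

int main() {
  Counter* c = new Counter();  // created with one reference
  c->Ref();
  c->Unref();
  c->Unref();  // last reference: Counter's destructor runs
}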
diff --git a/src/core/lib/gprpp/ref_counted_ptr.h b/src/core/lib/gprpp/ref_counted_ptr.h
index c2dfbdd90f..1ed5d584c7 100644
--- a/src/core/lib/gprpp/ref_counted_ptr.h
+++ b/src/core/lib/gprpp/ref_counted_ptr.h
@@ -21,8 +21,10 @@
#include <grpc/support/port_platform.h>
+#include <type_traits>
#include <utility>
+#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/memory.h"
namespace grpc_core {
@@ -54,15 +56,13 @@ class RefCountedPtr {
// Move assignment.
RefCountedPtr& operator=(RefCountedPtr&& other) {
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
other.value_ = nullptr;
return *this;
}
template <typename Y>
RefCountedPtr& operator=(RefCountedPtr<Y>&& other) {
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
other.value_ = nullptr;
return *this;
}
@@ -74,6 +74,8 @@ class RefCountedPtr {
}
template <typename Y>
RefCountedPtr(const RefCountedPtr<Y>& other) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
if (other.value_ != nullptr) other.value_->IncrementRefCount();
value_ = other.value_;
}
@@ -83,17 +85,17 @@ class RefCountedPtr {
// Note: Order of reffing and unreffing is important here in case value_
// and other.value_ are the same object.
if (other.value_ != nullptr) other.value_->IncrementRefCount();
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
return *this;
}
template <typename Y>
RefCountedPtr& operator=(const RefCountedPtr<Y>& other) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
// Note: Order of reffing and unreffing is important here in case value_
// and other.value_ are the same object.
if (other.value_ != nullptr) other.value_->IncrementRefCount();
- if (value_ != nullptr) value_->Unref();
- value_ = other.value_;
+ reset(other.value_);
return *this;
}
@@ -102,15 +104,29 @@ class RefCountedPtr {
}
// If value is non-null, we take ownership of a ref to it.
- template <typename Y>
- void reset(Y* value) {
+ void reset(T* value = nullptr) {
if (value_ != nullptr) value_->Unref();
value_ = value;
}
-
- void reset() {
+ void reset(const DebugLocation& location, const char* reason,
+ T* value = nullptr) {
+ if (value_ != nullptr) value_->Unref(location, reason);
+ value_ = value;
+ }
+ template <typename Y>
+ void reset(Y* value = nullptr) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
if (value_ != nullptr) value_->Unref();
- value_ = nullptr;
+ value_ = value;
+ }
+ template <typename Y>
+ void reset(const DebugLocation& location, const char* reason,
+ Y* value = nullptr) {
+ static_assert(std::has_virtual_destructor<T>::value,
+ "T does not have a virtual dtor");
+ if (value_ != nullptr) value_->Unref(location, reason);
+ value_ = value;
}
// TODO(roth): This method exists solely as a transition mechanism to allow
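Editor's note: the RefCountedPtr changes route move and copy assignment through reset(), and the key invariant they preserve is "ref the incoming object before unreffing the old one" so aliasing assignments cannot drop the last reference early. A simplified sketch of that ordering with a non-atomic, single-threaded handle type (Counted/Handle are invented names):

#include <cstdio>

struct Counted {
  int refs = 1;
  void IncrementRefCount() { ++refs; }
  void Unref() {
    if (--refs == 0) {
      std::printf("last ref dropped, deleting\n");
      delete this;
    }
  }
};

class Handle {
 public:
  Handle() = default;
  explicit Handle(Counted* value) : value_(value) {}  // adopts one reference
  Handle(const Handle& other) : value_(other.value_) {
    if (value_ != nullptr) value_->IncrementRefCount();
  }
  Handle& operator=(const Handle& other) {
    // Ref the incoming value first, then release the old one, so assigning
    // two handles to the same object never destroys it mid-assignment.
    if (other.value_ != nullptr) other.value_->IncrementRefCount();
    reset(other.value_);
    return *this;
  }
  ~Handle() { reset(); }
  // Takes ownership of a reference already held by the caller.
  void reset(Counted* value = nullptr) {
    if (value_ != nullptr) value_->Unref();
    value_ = value;
  }
 private:
  Counted* value_ = nullptr;
};

int main() {
  Handle a(new Counted());  // refs == 1
  Handle b = a;             // refs == 2
  a = b;                    // aliasing assignment is safe: refs stays 2
}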
diff --git a/src/core/lib/iomgr/buffer_list.cc b/src/core/lib/iomgr/buffer_list.cc
index 6ada23db1c..ace17a108d 100644
--- a/src/core/lib/iomgr/buffer_list.cc
+++ b/src/core/lib/iomgr/buffer_list.cc
@@ -35,6 +35,9 @@ void TracedBuffer::AddNewEntry(TracedBuffer** head, uint32_t seq_no,
TracedBuffer* new_elem = New<TracedBuffer>(seq_no, arg);
/* Store the current time as the sendmsg time. */
new_elem->ts_.sendmsg_time = gpr_now(GPR_CLOCK_REALTIME);
+ new_elem->ts_.scheduled_time = gpr_inf_past(GPR_CLOCK_REALTIME);
+ new_elem->ts_.sent_time = gpr_inf_past(GPR_CLOCK_REALTIME);
+ new_elem->ts_.acked_time = gpr_inf_past(GPR_CLOCK_REALTIME);
if (*head == nullptr) {
*head = new_elem;
return;
@@ -55,10 +58,16 @@ void fill_gpr_from_timestamp(gpr_timespec* gts, const struct timespec* ts) {
gts->clock_type = GPR_CLOCK_REALTIME;
}
+void default_timestamps_callback(void* arg, grpc_core::Timestamps* ts,
+ grpc_error* shutdown_err) {
+ gpr_log(GPR_DEBUG, "Timestamps callback has not been registered");
+}
+
/** The saved callback function that will be invoked when we get all the
* timestamps that we are going to get for a TracedBuffer. */
void (*timestamps_callback)(void*, grpc_core::Timestamps*,
- grpc_error* shutdown_err);
+ grpc_error* shutdown_err) =
+ default_timestamps_callback;
} /* namespace */
void TracedBuffer::ProcessTimestamp(TracedBuffer** head,
@@ -99,18 +108,20 @@ void TracedBuffer::ProcessTimestamp(TracedBuffer** head,
}
}
-void TracedBuffer::Shutdown(TracedBuffer** head, grpc_error* shutdown_err) {
+void TracedBuffer::Shutdown(TracedBuffer** head, void* remaining,
+ grpc_error* shutdown_err) {
GPR_DEBUG_ASSERT(head != nullptr);
TracedBuffer* elem = *head;
while (elem != nullptr) {
- if (timestamps_callback) {
- timestamps_callback(elem->arg_, &(elem->ts_), shutdown_err);
- }
+ timestamps_callback(elem->arg_, &(elem->ts_), shutdown_err);
auto* next = elem->next_;
Delete<TracedBuffer>(elem);
elem = next;
}
*head = nullptr;
+ if (remaining != nullptr) {
+ timestamps_callback(remaining, nullptr, shutdown_err);
+ }
GRPC_ERROR_UNREF(shutdown_err);
}
diff --git a/src/core/lib/iomgr/buffer_list.h b/src/core/lib/iomgr/buffer_list.h
index cbbf50a657..627f1bde99 100644
--- a/src/core/lib/iomgr/buffer_list.h
+++ b/src/core/lib/iomgr/buffer_list.h
@@ -37,6 +37,8 @@ struct Timestamps {
gpr_timespec scheduled_time;
gpr_timespec sent_time;
gpr_timespec acked_time;
+
+ uint32_t byte_offset; /* byte offset relative to the start of the RPC */
};
/** TracedBuffer is a class to keep track of timestamps for a specific buffer in
@@ -67,13 +69,13 @@ class TracedBuffer {
/** Cleans the list by calling the callback for each traced buffer in the list
* with timestamps that it has. */
- static void Shutdown(grpc_core::TracedBuffer** head,
+ static void Shutdown(grpc_core::TracedBuffer** head, void* remaining,
grpc_error* shutdown_err);
private:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_NEW
- TracedBuffer(int seq_no, void* arg)
+ TracedBuffer(uint32_t seq_no, void* arg)
: seq_no_(seq_no), arg_(arg), next_(nullptr) {}
uint32_t seq_no_; /* The sequence number for the last byte in the buffer */
@@ -82,7 +84,12 @@ class TracedBuffer {
grpc_core::TracedBuffer* next_; /* The next TracedBuffer in the list */
};
#else /* GRPC_LINUX_ERRQUEUE */
-class TracedBuffer {};
+class TracedBuffer {
+ public:
+ /* Dummy shutdown function */
+ static void Shutdown(grpc_core::TracedBuffer** head, void* remaining,
+ grpc_error* shutdown_err) {}
+};
#endif /* GRPC_LINUX_ERRQUEUE */
/** Sets the callback function to call when timestamps for a write are
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 00a839b64c..6b5759a036 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -39,8 +39,57 @@ static gpr_atm encode_cancel_state_error(grpc_error* error) {
return static_cast<gpr_atm>(1) | (gpr_atm)error;
}
+#ifdef GRPC_TSAN_ENABLED
+static void tsan_closure(void* user_data, grpc_error* error) {
+ grpc_call_combiner* call_combiner =
+ static_cast<grpc_call_combiner*>(user_data);
+ // We ref-count the lock, and check if it's already taken.
+ // If it was taken, we should do nothing. Otherwise, we will mark it as
+ // locked. Note that if two different threads try to do this, only one of
+ // them will be able to mark the lock as acquired, while they both run their
+ // callbacks. In such cases (which should never happen for call_combiner),
+ // TSAN will correctly produce an error.
+ //
+ // TODO(soheil): This only covers the callbacks scheduled by
+ // grpc_call_combiner_(start|finish). If in the future, a
+ // callback gets scheduled using other mechanisms, we will need
+ // to add APIs to externally lock call combiners.
+ grpc_core::RefCountedPtr<grpc_call_combiner::TsanLock> lock =
+ call_combiner->tsan_lock;
+ bool prev = false;
+ if (lock->taken.compare_exchange_strong(prev, true)) {
+ TSAN_ANNOTATE_RWLOCK_ACQUIRED(&lock->taken, true);
+ } else {
+ lock.reset();
+ }
+ GRPC_CLOSURE_RUN(call_combiner->original_closure, GRPC_ERROR_REF(error));
+ if (lock != nullptr) {
+ TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
+ bool prev = true;
+ GPR_ASSERT(lock->taken.compare_exchange_strong(prev, false));
+ }
+}
+#endif
+
+static void call_combiner_sched_closure(grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error) {
+#ifdef GRPC_TSAN_ENABLED
+ call_combiner->original_closure = closure;
+ GRPC_CLOSURE_SCHED(&call_combiner->tsan_closure, error);
+#else
+ GRPC_CLOSURE_SCHED(closure, error);
+#endif
+}
+
void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_atm_no_barrier_store(&call_combiner->cancel_state, 0);
+ gpr_atm_no_barrier_store(&call_combiner->size, 0);
gpr_mpscq_init(&call_combiner->queue);
+#ifdef GRPC_TSAN_ENABLED
+ GRPC_CLOSURE_INIT(&call_combiner->tsan_closure, tsan_closure, call_combiner,
+ grpc_schedule_on_exec_ctx);
+#endif
}
void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
@@ -85,7 +134,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
// Queue was empty, so execute this closure immediately.
- GRPC_CLOSURE_SCHED(closure, error);
+ call_combiner_sched_closure(call_combiner, closure, error);
} else {
if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_INFO, " QUEUING");
@@ -132,7 +181,8 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error));
}
- GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
+ call_combiner_sched_closure(call_combiner, closure,
+ closure->error_data.error);
break;
}
} else if (grpc_call_combiner_trace.enabled()) {
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 6f7ddd4043..4ec0044f05 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -27,7 +27,10 @@
#include "src/core/lib/gpr/mpscq.h"
#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/dynamic_annotations.h"
// A simple, lock-free mechanism for serializing activity related to a
// single call. This is similar to a combiner but is more lightweight.
@@ -40,14 +43,38 @@
extern grpc_core::TraceFlag grpc_call_combiner_trace;
-typedef struct {
- gpr_atm size; // size_t, num closures in queue or currently executing
+struct grpc_call_combiner {
+ gpr_atm size = 0; // size_t, num closures in queue or currently executing
gpr_mpscq queue;
// Either 0 (if not cancelled and no cancellation closure set),
// a grpc_closure* (if the lowest bit is 0),
// or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancel_state;
-} grpc_call_combiner;
+ gpr_atm cancel_state = 0;
+#ifdef GRPC_TSAN_ENABLED
+ // A fake ref-counted lock that is kept alive after the destruction of
+ // grpc_call_combiner, when we are running the original closure.
+ //
+ // Ideally we want to lock and unlock the call combiner as a pointer, when the
+ // callback is called. However, original_closure is free to trigger
+ // anything on the call combiner (including destruction of grpc_call).
+ // Thus, we need a ref-counted structure that can outlive the call combiner.
+ struct TsanLock
+ : public grpc_core::RefCounted<TsanLock,
+ grpc_core::NonPolymorphicRefCount> {
+ TsanLock() { TSAN_ANNOTATE_RWLOCK_CREATE(&taken); }
+ ~TsanLock() { TSAN_ANNOTATE_RWLOCK_DESTROY(&taken); }
+
+ // To avoid double-locking by the same thread, we should acquire/release
+ // the lock only when taken is false. On each acquire taken must be set to
+ // true.
+ std::atomic<bool> taken{false};
+ };
+ grpc_core::RefCountedPtr<TsanLock> tsan_lock =
+ grpc_core::MakeRefCounted<TsanLock>();
+ grpc_closure tsan_closure;
+ grpc_closure* original_closure;
+#endif
+};
// Assumes memory was initialized to zero.
void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
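Editor's note: under TSAN, the call combiner wraps each closure in a fake read-write lock whose "taken" flag is flipped with a single compare_exchange_strong, so of two racing callers exactly one appears to hold the lock while both still run, which surfaces the race to the sanitizer. The sketch below shows only that try-acquire/release flag pattern; the TSAN annotation macros and the ref-counted lifetime management from the diff are omitted.

#include <atomic>
#include <cstdio>

std::atomic<bool> taken{false};

void RunUnderFakeLock(void (*closure)()) {
  bool expected = false;
  // Exactly one of any set of racing callers wins this exchange.
  bool acquired = taken.compare_exchange_strong(expected, true);
  closure();  // the callback runs whether or not we won the flag
  if (acquired) {
    bool prev = true;
    // Release: only the winner flips the flag back to false.
    if (!taken.compare_exchange_strong(prev, false)) {
      std::printf("unexpected: flag was already released\n");
    }
  }
}

int main() {
  RunUnderFakeLock([] { std::printf("closure ran\n"); });
}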
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index f14c723844..bde3437c02 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -114,6 +114,7 @@ inline grpc_closure* grpc_closure_init(grpc_closure* closure,
closure->cb = cb;
closure->cb_arg = cb_arg;
closure->scheduler = scheduler;
+ closure->error_data.error = GRPC_ERROR_NONE;
#ifndef NDEBUG
closure->scheduled = false;
closure->file_initiated = nullptr;
diff --git a/src/core/lib/iomgr/dynamic_annotations.h b/src/core/lib/iomgr/dynamic_annotations.h
new file mode 100644
index 0000000000..713928023a
--- /dev/null
+++ b/src/core/lib/iomgr/dynamic_annotations.h
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+#define GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GRPC_TSAN_ENABLED
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr) \
+ AnnotateHappensBefore(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr) \
+ AnnotateHappensAfter(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, (void*)(addr))
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, (void*)(addr), (is_w))
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, (void*)(addr), (is_w))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void AnnotateHappensBefore(const char* file, int line, const volatile void* cv);
+void AnnotateHappensAfter(const char* file, int line, const volatile void* cv);
+void AnnotateRWLockCreate(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+ const volatile void* lock, long is_w);
+void AnnotateRWLockReleased(const char* file, int line,
+ const volatile void* lock, long is_w);
+#ifdef __cplusplus
+}
+#endif
+
+#else /* GRPC_TSAN_ENABLED */
+
+#define TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
+#define TSAN_ANNOTATE_HAPPENS_AFTER(addr)
+#define TSAN_ANNOTATE_RWLOCK_CREATE(addr)
+#define TSAN_ANNOTATE_RWLOCK_DESTROY(addr)
+#define TSAN_ANNOTATE_RWLOCK_ACQUIRED(addr, is_w)
+#define TSAN_ANNOTATE_RWLOCK_RELEASED(addr, is_w)
+
+#endif /* GRPC_TSAN_ENABLED */
+
+#endif /* GRPC_CORE_LIB_IOMGR_DYNAMIC_ANNOTATIONS_H */
diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc
index 44fb47e19d..06316c6031 100644
--- a/src/core/lib/iomgr/endpoint.cc
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -61,3 +61,7 @@ int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}
+
+bool grpc_endpoint_can_track_err(grpc_endpoint* ep) {
+ return ep->vtable->can_track_err(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 1f590a80ca..79c8ece263 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -47,6 +47,7 @@ struct grpc_endpoint_vtable {
grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
char* (*get_peer)(grpc_endpoint* ep);
int (*get_fd)(grpc_endpoint* ep);
+ bool (*can_track_err)(grpc_endpoint* ep);
};
/* When data is available on the connection, calls the callback with slices.
@@ -95,6 +96,8 @@ void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint);
+bool grpc_endpoint_can_track_err(grpc_endpoint* ep);
+
struct grpc_endpoint {
const grpc_endpoint_vtable* vtable;
};
diff --git a/src/core/lib/iomgr/endpoint_cfstream.cc b/src/core/lib/iomgr/endpoint_cfstream.cc
index df2cf508c8..7c4bc1ace2 100644
--- a/src/core/lib/iomgr/endpoint_cfstream.cc
+++ b/src/core/lib/iomgr/endpoint_cfstream.cc
@@ -315,6 +315,8 @@ char* CFStreamGetPeer(grpc_endpoint* ep) {
int CFStreamGetFD(grpc_endpoint* ep) { return 0; }
+bool CFStreamCanTrackErr(grpc_endpoint* ep) { return false; }
+
void CFStreamAddToPollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
void CFStreamAddToPollsetSet(grpc_endpoint* ep, grpc_pollset_set* pollset) {}
void CFStreamDeleteFromPollsetSet(grpc_endpoint* ep,
@@ -329,7 +331,8 @@ static const grpc_endpoint_vtable vtable = {CFStreamRead,
CFStreamDestroy,
CFStreamGetResourceUser,
CFStreamGetPeer,
- CFStreamGetFD};
+ CFStreamGetFD,
+ CFStreamCanTrackErr};
grpc_endpoint* grpc_cfstream_endpoint_create(
CFReadStreamRef read_stream, CFWriteStreamRef write_stream,
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc
index 3afbfd7254..5c5c246f99 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.cc
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
@@ -59,11 +59,11 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
grpc_core::ExecCtx exec_ctx;
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name, true), args,
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name, false), args,
"socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name, true), args,
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name, false), args,
"socketpair-client");
gpr_free(final_name);
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 38571b1957..4b8c891e9b 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -1242,6 +1242,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
* Event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
@@ -1255,6 +1257,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
true,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1284,6 +1287,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 06a382c556..7a4870db78 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -1604,6 +1604,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
* Event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
@@ -1612,6 +1614,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
true,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1641,6 +1644,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 16562538a6..67cbfbbd02 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1782,6 +1782,8 @@ static void global_cv_fd_table_shutdown() {
* event engine binding
*/
+static void shutdown_background_closure(void) {}
+
static void shutdown_engine(void) {
pollset_global_shutdown();
if (grpc_cv_wakeup_fds_enabled()) {
@@ -1796,6 +1798,7 @@ static void shutdown_engine(void) {
static const grpc_event_engine_vtable vtable = {
sizeof(grpc_pollset),
false,
+ false,
fd_create,
fd_wrapped_fd,
@@ -1825,6 +1828,7 @@ static const grpc_event_engine_vtable vtable = {
pollset_set_add_fd,
pollset_set_del_fd,
+ shutdown_background_closure,
shutdown_engine,
};
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 8a7dc7b004..32d1b6c43e 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
+#include "src/core/lib/iomgr/internal_errqueue.h"
grpc_core::TraceFlag grpc_polling_trace(false,
"polling"); /* Disabled by default */
@@ -236,19 +237,22 @@ void grpc_event_engine_shutdown(void) {
}
bool grpc_event_engine_can_track_errors(void) {
-/* Only track errors if platform supports errqueue. */
-#ifdef GRPC_LINUX_ERRQUEUE
- return g_event_engine->can_track_err;
-#else
+ /* Only track errors if platform supports errqueue. */
+ if (grpc_core::kernel_supports_errqueue()) {
+ return g_event_engine->can_track_err;
+ }
return false;
-#endif /* GRPC_LINUX_ERRQUEUE */
+}
+
+bool grpc_event_engine_run_in_background(void) {
+ return g_event_engine->run_in_background;
}
grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err) {
GRPC_POLLING_API_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
GRPC_FD_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
- return g_event_engine->fd_create(fd, name,
- track_err && g_event_engine->can_track_err);
+ return g_event_engine->fd_create(
+ fd, name, track_err && grpc_event_engine_can_track_errors());
}
int grpc_fd_wrapped_fd(grpc_fd* fd) {
@@ -395,4 +399,8 @@ void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
g_event_engine->pollset_set_del_fd(pollset_set, fd);
}
+void grpc_shutdown_background_closure(void) {
+ g_event_engine->shutdown_background_closure();
+}
+
#endif // GRPC_POSIX_SOCKET_EV
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index b8fb8f534b..812c7a0f0f 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -42,6 +42,7 @@ typedef struct grpc_fd grpc_fd;
typedef struct grpc_event_engine_vtable {
size_t pollset_size;
bool can_track_err;
+ bool run_in_background;
grpc_fd* (*fd_create)(int fd, const char* name, bool track_err);
int (*fd_wrapped_fd)(grpc_fd* fd);
@@ -79,6 +80,7 @@ typedef struct grpc_event_engine_vtable {
void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
+ void (*shutdown_background_closure)(void);
void (*shutdown_engine)(void);
} grpc_event_engine_vtable;
@@ -101,6 +103,11 @@ const char* grpc_get_poll_strategy_name();
*/
bool grpc_event_engine_can_track_errors();
+/* Returns true if the polling engine runs in the background, false otherwise.
+ * Currently only 'epollbg' runs in the background.
+ */
+bool grpc_event_engine_run_in_background();
+
/* Create a wrapped file descriptor.
Requires fd is a non-blocking file descriptor.
\a track_err if true means that error events would be tracked separately
@@ -174,6 +181,9 @@ void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
+/* Shut down all the closures registered in the background poller. */
+void grpc_shutdown_background_closure();
+
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
diff --git a/src/core/lib/iomgr/fork_posix.cc b/src/core/lib/iomgr/fork_posix.cc
index e957bad73d..05ecd2a49b 100644
--- a/src/core/lib/iomgr/fork_posix.cc
+++ b/src/core/lib/iomgr/fork_posix.cc
@@ -60,7 +60,7 @@ void grpc_prefork() {
}
if (strcmp(grpc_get_poll_strategy_name(), "epoll1") != 0 &&
strcmp(grpc_get_poll_strategy_name(), "poll") != 0) {
- gpr_log(GPR_ERROR,
+ gpr_log(GPR_INFO,
"Fork support is only compatible with the epoll1 and poll polling "
"strategies");
}
diff --git a/src/core/lib/iomgr/internal_errqueue.cc b/src/core/lib/iomgr/internal_errqueue.cc
index 99c22e9055..982d709f09 100644
--- a/src/core/lib/iomgr/internal_errqueue.cc
+++ b/src/core/lib/iomgr/internal_errqueue.cc
@@ -20,17 +20,50 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/impl/codegen/log.h>
#include "src/core/lib/iomgr/internal_errqueue.h"
#ifdef GRPC_POSIX_SOCKET_TCP
-bool kernel_supports_errqueue() {
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/utsname.h>
+
+namespace grpc_core {
+static bool errqueue_supported = false;
+
+bool kernel_supports_errqueue() { return errqueue_supported; }
+
+void grpc_errqueue_init() {
+/* Both compile-time and run-time Linux kernel versions should be at least 4.0.0
+ */
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
- return true;
+ struct utsname buffer;
+ if (uname(&buffer) != 0) {
+ gpr_log(GPR_ERROR, "uname: %s", strerror(errno));
+ return;
+ }
+ char* release = buffer.release;
+ if (release == nullptr) {
+ return;
+ }
+
+ if (strtol(release, nullptr, 10) >= 4) {
+ errqueue_supported = true;
+ } else {
+ gpr_log(GPR_DEBUG, "ERRQUEUE support not enabled");
+ }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE */
- return false;
}
+} /* namespace grpc_core */
+
+#else
+
+namespace grpc_core {
+void grpc_errqueue_init() {}
+} /* namespace grpc_core */
#endif /* GRPC_POSIX_SOCKET_TCP */
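Editor's note: the errqueue change moves the "does this kernel support SO_TIMESTAMPING error queues" decision from compile time to run time by reading the running kernel's release string and checking the major version. A POSIX-only sketch of that check, leaving out the compile-time LINUX_VERSION_CODE gating and gRPC's logging:

#include <sys/utsname.h>

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool KernelSupportsErrqueue() {
  struct utsname buffer;
  if (uname(&buffer) != 0) {
    std::fprintf(stderr, "uname: %s\n", std::strerror(errno));
    return false;
  }
  // buffer.release looks like "5.15.0-91-generic"; strtol stops at the dot,
  // so this compares only the major version against 4.
  return std::strtol(buffer.release, nullptr, 10) >= 4;
}

int main() {
  std::printf("errqueue support assumed: %s\n",
              KernelSupportsErrqueue() ? "yes" : "no");
}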
diff --git a/src/core/lib/iomgr/internal_errqueue.h b/src/core/lib/iomgr/internal_errqueue.h
index 9d122808f9..f8644c2536 100644
--- a/src/core/lib/iomgr/internal_errqueue.h
+++ b/src/core/lib/iomgr/internal_errqueue.h
@@ -76,8 +76,14 @@ constexpr uint32_t kTimestampingRecordingOptions =
* Currently allowing only linux kernels above 4.0.0
*/
bool kernel_supports_errqueue();
-} // namespace grpc_core
+
+} /* namespace grpc_core */
#endif /* GRPC_POSIX_SOCKET_TCP */
+namespace grpc_core {
+/* Initializes errqueue support */
+void grpc_errqueue_init();
+} /* namespace grpc_core */
+
#endif /* GRPC_CORE_LIB_IOMGR_INTERNAL_ERRQUEUE_H */
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index 46afda1774..eb29973514 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -33,8 +33,10 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/internal_errqueue.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/timer.h"
@@ -57,6 +59,7 @@ void grpc_iomgr_init() {
g_root_object.name = (char*)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
+ grpc_core::grpc_errqueue_init();
}
void grpc_iomgr_start() { grpc_timer_manager_init(); }
@@ -154,6 +157,10 @@ void grpc_iomgr_shutdown() {
gpr_cv_destroy(&g_rcv);
}
+void grpc_iomgr_shutdown_background_closure() {
+ grpc_iomgr_platform_shutdown_background_closure();
+}
+
void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name) {
obj->name = gpr_strdup(name);
gpr_mu_lock(&g_mu);
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 537ef8a6ff..8ea9289e06 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -35,6 +35,10 @@ void grpc_iomgr_start();
* exec_ctx. */
void grpc_iomgr_shutdown();
+/** Signals the intention to shut down all the closures registered in the
+ * background poller. */
+void grpc_iomgr_shutdown_background_closure();
+
/* Exposed only for testing */
size_t grpc_iomgr_count_objects_for_testing();
diff --git a/src/core/lib/iomgr/iomgr_custom.cc b/src/core/lib/iomgr/iomgr_custom.cc
index d34c8e7cd1..4b112c9097 100644
--- a/src/core/lib/iomgr/iomgr_custom.cc
+++ b/src/core/lib/iomgr/iomgr_custom.cc
@@ -40,9 +40,11 @@ static void iomgr_platform_init(void) {
}
static void iomgr_platform_flush(void) {}
static void iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+static void iomgr_platform_shutdown_background_closure(void) {}
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_custom_iomgr_init(grpc_socket_vtable* socket,
grpc_custom_resolver_vtable* resolver,
diff --git a/src/core/lib/iomgr/iomgr_internal.cc b/src/core/lib/iomgr/iomgr_internal.cc
index 32dbabb79d..b6c9211865 100644
--- a/src/core/lib/iomgr/iomgr_internal.cc
+++ b/src/core/lib/iomgr/iomgr_internal.cc
@@ -41,3 +41,7 @@ void grpc_iomgr_platform_init() { iomgr_platform_vtable->init(); }
void grpc_iomgr_platform_flush() { iomgr_platform_vtable->flush(); }
void grpc_iomgr_platform_shutdown() { iomgr_platform_vtable->shutdown(); }
+
+void grpc_iomgr_platform_shutdown_background_closure() {
+ iomgr_platform_vtable->shutdown_background_closure();
+}
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index b011d9c7b1..bca7409907 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -35,6 +35,7 @@ typedef struct grpc_iomgr_platform_vtable {
void (*init)(void);
void (*flush)(void);
void (*shutdown)(void);
+ void (*shutdown_background_closure)(void);
} grpc_iomgr_platform_vtable;
void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name);
@@ -52,6 +53,9 @@ void grpc_iomgr_platform_flush(void);
/** tear down all platform specific global iomgr structures */
void grpc_iomgr_platform_shutdown(void);
+/** shut down all the closures registered in the background poller */
+void grpc_iomgr_platform_shutdown_background_closure(void);
+
bool grpc_iomgr_abort_on_leaks(void);
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H */
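The vtable change above follows the usual pattern of growing a platform abstraction: every platform must now supply the new entry, even if it is a no-op. A hypothetical, simplified sketch of that pattern, with invented names purely for illustration:

// Simplified sketch of a platform vtable gaining a new hook. Every platform
// provides all entries; platforms without a background poller use a no-op.
#include <cstdio>

struct iomgr_platform_vtable {
  void (*init)();
  void (*flush)();
  void (*shutdown)();
  void (*shutdown_background_closure)();  // the newly added hook
};

static void posix_init() { std::puts("init"); }
static void posix_flush() { std::puts("flush"); }
static void posix_shutdown() { std::puts("shutdown"); }
static void posix_shutdown_background() { std::puts("shutdown background"); }
static void noop() {}  // used by platforms that have no background poller

static const iomgr_platform_vtable posix_vtable = {
    posix_init, posix_flush, posix_shutdown, posix_shutdown_background};
static const iomgr_platform_vtable windows_vtable = {posix_init, posix_flush,
                                                     posix_shutdown, noop};

int main() {
  const iomgr_platform_vtable* v = &posix_vtable;
  v->init();
  v->shutdown_background_closure();
  v->shutdown();
  (void)windows_vtable;
  return 0;
}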
diff --git a/src/core/lib/iomgr/iomgr_posix.cc b/src/core/lib/iomgr/iomgr_posix.cc
index ca7334c9a4..9386adf060 100644
--- a/src/core/lib/iomgr/iomgr_posix.cc
+++ b/src/core/lib/iomgr/iomgr_posix.cc
@@ -51,8 +51,13 @@ static void iomgr_platform_shutdown(void) {
grpc_wakeup_fd_global_destroy();
}
+static void iomgr_platform_shutdown_background_closure(void) {
+ grpc_shutdown_background_closure();
+}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
diff --git a/src/core/lib/iomgr/iomgr_posix_cfstream.cc b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
index 235a9e0712..552ef4309c 100644
--- a/src/core/lib/iomgr/iomgr_posix_cfstream.cc
+++ b/src/core/lib/iomgr/iomgr_posix_cfstream.cc
@@ -54,8 +54,13 @@ static void iomgr_platform_shutdown(void) {
grpc_wakeup_fd_global_destroy();
}
+static void iomgr_platform_shutdown_background_closure(void) {
+ grpc_shutdown_background_closure();
+}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
char* enable_cfstream = getenv(grpc_cfstream_env_var);
diff --git a/src/core/lib/iomgr/iomgr_windows.cc b/src/core/lib/iomgr/iomgr_windows.cc
index cdef89cbf0..24ef0dba7b 100644
--- a/src/core/lib/iomgr/iomgr_windows.cc
+++ b/src/core/lib/iomgr/iomgr_windows.cc
@@ -71,8 +71,11 @@ static void iomgr_platform_shutdown(void) {
winsock_shutdown();
}
+static void iomgr_platform_shutdown_background_closure(void) {}
+
static grpc_iomgr_platform_vtable vtable = {
- iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown};
+ iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown,
+ iomgr_platform_shutdown_background_closure};
void grpc_set_default_iomgr_platform() {
grpc_set_tcp_client_impl(&grpc_windows_tcp_client_vtable);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index a95e08524c..6f4c5bdd66 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -34,13 +34,13 @@ typedef enum grpc_pollset_tag {
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
-typedef struct grpc_polling_entity {
+struct grpc_polling_entity {
union {
- grpc_pollset* pollset;
+ grpc_pollset* pollset = nullptr;
grpc_pollset_set* pollset_set;
} pollent;
- grpc_pollset_tag tag;
-} grpc_polling_entity;
+ grpc_pollset_tag tag = GRPC_POLLS_NONE;
+};
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set* pollset_set);
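The typedef-to-struct conversion above relies on C++ default member initializers, including the (at most one) initializer allowed inside a union. A tiny sketch of the same idiom with made-up names:

// Sketch: converting a C-style typedef struct to a C++ struct with default
// member initializers, so default construction yields a well-defined state.
#include <cassert>

enum my_tag { TAG_NONE, TAG_A, TAG_B };

struct my_entity {
  union {
    void* a = nullptr;  // at most one union member may have an initializer
    int b;
  } u;
  my_tag tag = TAG_NONE;
};

int main() {
  my_entity e;  // members take their default initializers
  assert(e.tag == TAG_NONE && e.u.a == nullptr);
  return 0;
}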
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index bf56a7298d..c8046b21dc 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -62,8 +62,7 @@
#define GRPC_HAVE_UNIX_SOCKET 1
#ifdef LINUX_VERSION_CODE
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
-/* TODO(yashykt): Re-enable once Fathom changes are commited.
-#define GRPC_LINUX_ERRQUEUE 1 */
+#define GRPC_LINUX_ERRQUEUE 1
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE */
#define GRPC_LINUX_MULTIPOLL_WITH_EPOLL 1
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 6afe94a7a9..7016ffc31a 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -65,7 +65,7 @@ void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable);
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
-/* TODO(ctiller): add a timeout here */
+/* TODO(apolcyn): add a timeout here */
void grpc_resolve_address(const char* addr, const char* default_port,
grpc_pollset_set* interested_parties,
grpc_closure* on_done,
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index b6fc7579f7..7e4b3c9b2f 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -90,7 +90,8 @@ struct grpc_resource_user {
grpc_closure_list on_allocated;
/* True if we are currently trying to allocate from the quota, false if not */
bool allocating;
- /* How many bytes of allocations are outstanding */
+ /* The amount of memory (in bytes) that has been requested from this user
+ * asynchronously but hasn't been granted yet. */
int64_t outstanding_allocations;
/* True if we are currently trying to add ourselves to the non-free quota
list, false otherwise */
@@ -135,6 +136,9 @@ struct grpc_resource_quota {
int64_t size;
/* Amount of free memory in the resource quota */
int64_t free_pool;
+ /* Amount of memory (in bytes) currently in use from the resource quota.
+ * Updated as soon as resource users allocate or free memory. */
+ gpr_atm used;
gpr_atm last_size;
@@ -371,6 +375,7 @@ static bool rq_reclaim_from_per_user_free_pool(
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
gpr_mu_lock(&resource_user->mu);
+ resource_user->added_to_free_pool = false;
if (resource_user->free_pool > 0) {
int64_t amt = resource_user->free_pool;
resource_user->free_pool = 0;
@@ -386,6 +391,13 @@ static bool rq_reclaim_from_per_user_free_pool(
gpr_mu_unlock(&resource_user->mu);
return true;
} else {
+ if (grpc_resource_quota_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "RQ %s %s: failed to reclaim_from_per_user_free_pool; "
+ "free_pool = %" PRId64 "; rq_free_pool = %" PRId64,
+ resource_quota->name, resource_user->name,
+ resource_user->free_pool, resource_quota->free_pool);
+ }
gpr_mu_unlock(&resource_user->mu);
}
}
@@ -622,6 +634,7 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
resource_quota->size = INT64_MAX;
+ resource_quota->used = 0;
gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
gpr_mu_init(&resource_quota->thread_count_mu);
resource_quota->max_threads = INT_MAX;
@@ -712,7 +725,7 @@ size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
*/
grpc_resource_quota* grpc_resource_quota_from_channel_args(
- const grpc_channel_args* channel_args) {
+ const grpc_channel_args* channel_args, bool create) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
@@ -724,7 +737,7 @@ grpc_resource_quota* grpc_resource_quota_from_channel_args(
}
}
}
- return grpc_resource_quota_create(nullptr);
+ return create ? grpc_resource_quota_create(nullptr) : nullptr;
}
static void* rq_copy(void* rq) {
@@ -863,33 +876,68 @@ void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
}
-void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
- grpc_closure* optional_on_done) {
- gpr_mu_lock(&resource_user->mu);
+static void resource_user_alloc_locked(grpc_resource_user* resource_user,
+ size_t size,
+ grpc_closure* optional_on_done) {
ru_ref_by(resource_user, static_cast<gpr_atm>(size));
resource_user->free_pool -= static_cast<int64_t>(size);
- resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool);
}
if (resource_user->free_pool < 0) {
- grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
- GRPC_ERROR_NONE);
+ if (optional_on_done != nullptr) {
+ resource_user->outstanding_allocations += static_cast<int64_t>(size);
+ grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
+ GRPC_ERROR_NONE);
+ }
if (!resource_user->allocating) {
resource_user->allocating = true;
GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
}
} else {
- resource_user->outstanding_allocations -= static_cast<int64_t>(size);
GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE);
}
+}
+
+bool grpc_resource_user_safe_alloc(grpc_resource_user* resource_user,
+ size_t size) {
+ if (gpr_atm_no_barrier_load(&resource_user->shutdown)) return false;
+ gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ bool cas_success;
+ do {
+ gpr_atm used = gpr_atm_no_barrier_load(&resource_quota->used);
+ gpr_atm new_used = used + size;
+ if (static_cast<size_t>(new_used) >
+ grpc_resource_quota_peek_size(resource_quota)) {
+ gpr_mu_unlock(&resource_user->mu);
+ return false;
+ }
+ cas_success = gpr_atm_full_cas(&resource_quota->used, used, new_used);
+ } while (!cas_success);
+ resource_user_alloc_locked(resource_user, size, nullptr);
+ gpr_mu_unlock(&resource_user->mu);
+ return true;
+}
+
+void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
+ grpc_closure* optional_on_done) {
+ // TODO(juanlishen): Maybe return immediately if shutting down. Deferring this
+ // because some tests become flaky after the change.
+ gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ gpr_atm_no_barrier_fetch_add(&resource_quota->used, size);
+ resource_user_alloc_locked(resource_user, size, optional_on_done);
gpr_mu_unlock(&resource_user->mu);
}
void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ gpr_atm prior = gpr_atm_no_barrier_fetch_add(&resource_quota->used, -size);
+ GPR_ASSERT(prior >= static_cast<long>(size));
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) {
@@ -940,6 +988,12 @@ void grpc_resource_user_slice_allocator_init(
void grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest) {
+ if (gpr_atm_no_barrier_load(&slice_allocator->resource_user->shutdown)) {
+ GRPC_CLOSURE_SCHED(
+ &slice_allocator->on_allocated,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown"));
+ return;
+ }
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
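The new safe-alloc path above is a compare-and-swap loop over a shared "used" counter, so concurrent allocations can never push total usage past the quota. A minimal standalone sketch of that accounting scheme using std::atomic (illustrative only, not the gRPC types):

// Sketch: CAS-based admission check against a fixed quota, as used by the
// grpc_resource_user_safe_alloc() path above (simplified, standalone).
#include <atomic>
#include <cstdint>
#include <cstdio>

static constexpr int64_t kQuotaSize = 1024;
static std::atomic<int64_t> g_used{0};

// Returns true and reserves `size` bytes iff the quota will not be exceeded.
bool SafeAlloc(int64_t size) {
  int64_t used = g_used.load(std::memory_order_relaxed);
  while (true) {
    int64_t new_used = used + size;
    if (new_used > kQuotaSize) return false;  // would exceed the quota
    // On failure, `used` is reloaded with the current value and we retry.
    if (g_used.compare_exchange_weak(used, new_used,
                                     std::memory_order_relaxed)) {
      return true;
    }
  }
}

void Free(int64_t size) { g_used.fetch_sub(size, std::memory_order_relaxed); }

int main() {
  std::printf("alloc 1000: %d\n", SafeAlloc(1000));  // 1 (fits)
  std::printf("alloc 100:  %d\n", SafeAlloc(100));   // 0 (would exceed 1024)
  Free(1000);
  return 0;
}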
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 7b0ed7417a..1c79b52e3f 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -65,11 +65,16 @@
extern grpc_core::TraceFlag grpc_resource_quota_trace;
+// TODO(juanlishen): This is a hack. We need to do real accounting instead of
+// hard coding.
+constexpr size_t GRPC_RESOURCE_QUOTA_CALL_SIZE = 15 * 1024;
+constexpr size_t GRPC_RESOURCE_QUOTA_CHANNEL_SIZE = 50 * 1024;
+
grpc_resource_quota* grpc_resource_quota_ref_internal(
grpc_resource_quota* resource_quota);
void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota);
grpc_resource_quota* grpc_resource_quota_from_channel_args(
- const grpc_channel_args* channel_args);
+ const grpc_channel_args* channel_args, bool create = true);
/* Return a number indicating current memory pressure:
0.0 ==> no memory usage
@@ -109,11 +114,21 @@ bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
int thread_count);
-/* Allocate from the resource user (and its quota).
- If optional_on_done is NULL, then allocate immediately. This may push the
- quota over-limit, at which point reclamation will kick in.
- If optional_on_done is non-NULL, it will be scheduled when the allocation has
- been granted by the quota. */
+/* Allocates 'size' bytes of memory from the resource user if doing so would
+ * not exceed the resource quota's total size. Returns true if the allocation
+ * succeeded. On success, the caller is responsible for eventually freeing the
+ * memory. */
+bool grpc_resource_user_safe_alloc(grpc_resource_user* resource_user,
+ size_t size);
+/* Allocates 'size' bytes of memory from the resource user.
+ * If optional_on_done is NULL, then allocate immediately. This may push the
+ * quota over-limit, at which point reclamation will kick in. The caller is
+ * always responsible for eventually freeing the memory.
+ * If optional_on_done is non-NULL, it will be scheduled without error when the
+ * allocation has been granted by the quota, and the caller is then responsible
+ * for eventually freeing the memory. If it is scheduled with an error, the
+ * allocation has failed and the caller must not free the memory.
+ */
void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
grpc_closure* optional_on_done);
/* Release memory back to the quota */
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc
index bdfc1d70c3..4c337a0521 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -296,14 +296,17 @@ grpc_error* grpc_set_socket_tcp_user_timeout(
socklen_t len = sizeof(newval);
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout,
sizeof(timeout))) {
- return GRPC_OS_ERROR(errno, "setsockopt(TCP_USER_TIMEOUT)");
+ gpr_log(GPR_ERROR, "setsockopt(TCP_USER_TIMEOUT) %s", strerror(errno));
+ return GRPC_ERROR_NONE;
}
if (0 != getsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &newval, &len)) {
- return GRPC_OS_ERROR(errno, "getsockopt(TCP_USER_TIMEOUT)");
+ gpr_log(GPR_ERROR, "getsockopt(TCP_USER_TIMEOUT) %s", strerror(errno));
+ return GRPC_ERROR_NONE;
}
if (newval != timeout) {
- return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to set TCP_USER_TIMEOUT");
+ /* Do not fail on failing to set TCP_USER_TIMEOUT for now. */
+ gpr_log(GPR_ERROR, "Failed to set TCP_USER_TIMEOUT");
+ return GRPC_ERROR_NONE;
}
}
#else
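The change above makes TCP_USER_TIMEOUT best-effort: setsockopt/getsockopt failures are logged instead of failing socket setup. A minimal sketch of the same best-effort pattern (TCP_USER_TIMEOUT is a Linux-specific socket option; error handling simplified):

// Sketch: set TCP_USER_TIMEOUT on a socket, but only log on failure instead of
// treating it as a fatal error (best-effort, Linux-only option).
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) return 1;
  int timeout_ms = 20000;
#ifdef TCP_USER_TIMEOUT
  if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout_ms,
                 sizeof(timeout_ms)) != 0) {
    // Best effort: log and continue rather than failing connection setup.
    std::fprintf(stderr, "setsockopt(TCP_USER_TIMEOUT): %s\n", strerror(errno));
  }
#else
  (void)timeout_ms;  // option not available on this platform
#endif
  close(fd);
  return 0;
}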
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 8553ed0db4..0bff74e88b 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -76,6 +76,8 @@ static grpc_error* prepare_socket(const grpc_resolved_address* addr, int fd,
if (!grpc_is_unix_socket(addr)) {
err = grpc_set_socket_low_latency(fd, 1);
if (err != GRPC_ERROR_NONE) goto error;
+ err = grpc_set_socket_reuse_addr(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
err = grpc_set_socket_tcp_user_timeout(fd, channel_args,
true /* is_client */);
if (err != GRPC_ERROR_NONE) goto error;
diff --git a/src/core/lib/iomgr/tcp_custom.cc b/src/core/lib/iomgr/tcp_custom.cc
index e02a1898f2..f7a5f36cdc 100644
--- a/src/core/lib/iomgr/tcp_custom.cc
+++ b/src/core/lib/iomgr/tcp_custom.cc
@@ -326,6 +326,8 @@ static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
static int endpoint_get_fd(grpc_endpoint* ep) { return -1; }
+static bool endpoint_can_track_err(grpc_endpoint* ep) { return false; }
+
static grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_add_to_pollset,
@@ -335,7 +337,8 @@ static grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
- endpoint_get_fd};
+ endpoint_get_fd,
+ endpoint_can_track_err};
grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_resource_quota* resource_quota,
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index aa2704ce26..cfcb190d60 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -260,10 +260,17 @@ static void notify_on_write(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
}
- cover_self(tcp);
- GRPC_CLOSURE_INIT(&tcp->write_done_closure,
- tcp_drop_uncovered_then_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
+ if (grpc_event_engine_run_in_background()) {
+ // If there is a polling engine always running in the background, there is
+ // no need to run the backup poller.
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ } else {
+ cover_self(tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ }
grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
}
@@ -384,6 +391,12 @@ static void tcp_destroy(grpc_endpoint* ep) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
if (grpc_event_engine_can_track_errors()) {
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
grpc_fd_set_error(tcp->em_fd);
}
@@ -621,7 +634,7 @@ static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
if (sending_length == static_cast<size_t>(length)) {
gpr_mu_lock(&tcp->tb_mu);
grpc_core::TracedBuffer::AddNewEntry(
- &tcp->tb_head, static_cast<int>(tcp->bytes_counter + length),
+ &tcp->tb_head, static_cast<uint32_t>(tcp->bytes_counter + length),
tcp->outgoing_buffer_arg);
gpr_mu_unlock(&tcp->tb_mu);
tcp->outgoing_buffer_arg = nullptr;
@@ -673,11 +686,9 @@ struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
}
/** For linux platforms, reads the socket's error queue and processes error
- * messages from the queue. Returns true if all the errors processed were
- * timestamps. Returns false if any of the errors were not timestamps. For
- * non-linux platforms, error processing is not used/enabled currently.
+ * messages from the queue.
*/
-static bool process_errors(grpc_tcp* tcp) {
+static void process_errors(grpc_tcp* tcp) {
while (true) {
struct iovec iov;
iov.iov_base = nullptr;
@@ -706,10 +717,10 @@ static bool process_errors(grpc_tcp* tcp) {
} while (r < 0 && saved_errno == EINTR);
if (r == -1 && saved_errno == EAGAIN) {
- return true; /* No more errors to process */
+ return; /* No more errors to process */
}
if (r == -1) {
- return false;
+ return;
}
if (grpc_tcp_trace.enabled()) {
if ((msg.msg_flags & MSG_CTRUNC) == 1) {
@@ -719,8 +730,9 @@ static bool process_errors(grpc_tcp* tcp) {
if (msg.msg_controllen == 0) {
/* There was no control message found. It was probably spurious. */
- return true;
+ return;
}
+ bool seen = false;
for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level != SOL_SOCKET ||
@@ -732,9 +744,13 @@ static bool process_errors(grpc_tcp* tcp) {
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
}
- return false;
+ return;
}
- process_timestamp(tcp, &msg, cmsg);
+ cmsg = process_timestamp(tcp, &msg, cmsg);
+ seen = true;
+ }
+ if (!seen) {
+ return;
}
}
}
@@ -749,20 +765,17 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
/* We aren't going to register to hear on error anymore, so it is safe to
* unref. */
- grpc_core::TracedBuffer::Shutdown(&tcp->tb_head, GRPC_ERROR_REF(error));
TCP_UNREF(tcp, "error-tracking");
return;
}
/* We are still interested in collecting timestamps, so let's try reading
* them. */
- if (!process_errors(tcp)) {
- /* This was not a timestamps error. This was an actual error. Set the
- * read and write closures to be ready.
- */
- grpc_fd_set_readable(tcp->em_fd);
- grpc_fd_set_writable(tcp->em_fd);
- }
+ process_errors(tcp);
+ /* This might not be a timestamp error. Set the read and write closures to be
+ * ready. */
+ grpc_fd_set_readable(tcp->em_fd);
+ grpc_fd_set_writable(tcp->em_fd);
GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
@@ -784,6 +797,19 @@ static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
}
#endif /* GRPC_LINUX_ERRQUEUE */
+/* If outgoing_buffer_arg is filled, shuts down the list early, so that any
+ * release operations needed can be performed on the arg */
+void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
+ if (tcp->outgoing_buffer_arg) {
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
+ }
+}
+
/* returns true if done, false if pending; if returning true, *error is set */
#if defined(IOV_MAX) && IOV_MAX < 1000
#define MAX_WRITE_IOVEC IOV_MAX
@@ -831,8 +857,10 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
msg.msg_flags = 0;
if (tcp->outgoing_buffer_arg != nullptr) {
if (!tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
- error))
+ error)) {
+ tcp_shutdown_buffer_list(tcp);
return true; /* something went wrong with timestamps */
+ }
} else {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
@@ -856,10 +884,12 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
} else if (errno == EPIPE) {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ tcp_shutdown_buffer_list(tcp);
return true;
} else {
*error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
+ tcp_shutdown_buffer_list(tcp);
return true;
}
}
@@ -936,17 +966,18 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
GPR_ASSERT(tcp->write_cb == nullptr);
+ tcp->outgoing_buffer_arg = arg;
if (buf->length == 0) {
GRPC_CLOSURE_SCHED(
cb, grpc_fd_is_shutdown(tcp->em_fd)
? tcp_annotate_error(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
: GRPC_ERROR_NONE);
+ tcp_shutdown_buffer_list(tcp);
return;
}
tcp->outgoing_buffer = buf;
tcp->outgoing_byte_idx = 0;
- tcp->outgoing_buffer_arg = arg;
if (arg) {
GPR_ASSERT(grpc_event_engine_can_track_errors());
}
@@ -999,6 +1030,22 @@ static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
return tcp->resource_user;
}
+static bool tcp_can_track_err(grpc_endpoint* ep) {
+ grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
+ if (!grpc_event_engine_can_track_errors()) {
+ return false;
+ }
+ struct sockaddr addr;
+ socklen_t len = sizeof(addr);
+ if (getsockname(tcp->fd, &addr, &len) < 0) {
+ return false;
+ }
+ if (addr.sa_family == AF_INET || addr.sa_family == AF_INET6) {
+ return true;
+ }
+ return false;
+}
+
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_add_to_pollset,
@@ -1008,7 +1055,8 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_destroy,
tcp_get_resource_user,
tcp_get_peer,
- tcp_get_fd};
+ tcp_get_fd,
+ tcp_can_track_err};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
@@ -1069,6 +1117,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->is_first_read = true;
tcp->bytes_counter = -1;
tcp->socket_ts_enabled = false;
+ tcp->outgoing_buffer_arg = nullptr;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
@@ -1113,6 +1162,12 @@ void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
if (grpc_event_engine_can_track_errors()) {
/* Stop errors notification. */
+ gpr_mu_lock(&tcp->tb_mu);
+ grpc_core::TracedBuffer::Shutdown(
+ &tcp->tb_head, tcp->outgoing_buffer_arg,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+ gpr_mu_unlock(&tcp->tb_mu);
+ tcp->outgoing_buffer_arg = nullptr;
gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
grpc_fd_set_error(tcp->em_fd);
}
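process_errors() above drains the socket error queue until it is empty. The core of that loop is recvmsg() with MSG_ERRQUEUE; a reduced sketch of the drain loop (Linux-only, with the per-message control-message handling omitted):

// Sketch: drain a socket's error queue with recvmsg(MSG_ERRQUEUE) until there
// is nothing left to read, as process_errors() above does. gRPC uses
// non-blocking fds, so an empty queue yields EAGAIN.
#include <sys/socket.h>
#include <sys/types.h>

#include <cerrno>

void DrainErrorQueue(int fd) {
  while (true) {
    char control[512];
    struct msghdr msg = {};
    msg.msg_control = control;
    msg.msg_controllen = sizeof(control);
    ssize_t r;
    do {
      r = recvmsg(fd, &msg, MSG_ERRQUEUE);
    } while (r < 0 && errno == EINTR);
    if (r < 0 && errno == EAGAIN) return;  // no more queued errors
    if (r < 0) return;                     // unexpected error; give up
    // A real implementation would walk CMSG_FIRSTHDR/CMSG_NXTHDR here and
    // process the timestamp control messages it finds.
  }
}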
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 64c4a56ae9..86ee1010cf 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -42,6 +42,7 @@
#include "src/core/lib/iomgr/tcp_windows.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#if defined(__MSYS__) && defined(GPR_ARCH_64)
/* Nasty workaround for nasty bug when using the 64 bits msys compiler
@@ -112,7 +113,10 @@ typedef struct grpc_tcp {
grpc_closure* read_cb;
grpc_closure* write_cb;
- grpc_slice read_slice;
+
+ /* garbage after the last read */
+ grpc_slice_buffer last_read_buffer;
+
grpc_slice_buffer* write_slices;
grpc_slice_buffer* read_slices;
@@ -131,6 +135,7 @@ static void tcp_free(grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
+ grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
grpc_resource_user_unref(tcp->resource_user);
if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
gpr_free(tcp);
@@ -179,9 +184,12 @@ static void on_read(void* tcpp, grpc_error* error) {
grpc_tcp* tcp = (grpc_tcp*)tcpp;
grpc_closure* cb = tcp->read_cb;
grpc_winsocket* socket = tcp->socket;
- grpc_slice sub;
grpc_winsocket_callback_info* info = &socket->read_info;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
+ }
+
GRPC_ERROR_REF(error);
if (error == GRPC_ERROR_NONE) {
@@ -189,13 +197,35 @@ static void on_read(void* tcpp, grpc_error* error) {
char* utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
gpr_free(utf8_message);
- grpc_slice_unref_internal(tcp->read_slice);
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
} else {
if (info->bytes_transfered != 0 && !tcp->shutting_down) {
- sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
- grpc_slice_buffer_add(tcp->read_slices, sub);
+ GPR_ASSERT((size_t)info->bytes_transfered <= tcp->read_slices->length);
+ if (static_cast<size_t>(info->bytes_transfered) !=
+ tcp->read_slices->length) {
+ grpc_slice_buffer_trim_end(
+ tcp->read_slices,
+ tcp->read_slices->length -
+ static_cast<size_t>(info->bytes_transfered),
+ &tcp->last_read_buffer);
+ }
+ GPR_ASSERT((size_t)info->bytes_transfered == tcp->read_slices->length);
+
+ if (grpc_tcp_trace.enabled()) {
+ size_t i;
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+ dump);
+ gpr_free(dump);
+ }
+ }
} else {
- grpc_slice_unref_internal(tcp->read_slice);
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
+ }
+ grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
error = tcp->shutting_down
? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -209,6 +239,8 @@ static void on_read(void* tcpp, grpc_error* error) {
GRPC_CLOSURE_SCHED(cb, error);
}
+#define DEFAULT_TARGET_READ_SIZE 8192
+#define MAX_WSABUF_COUNT 16
static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
grpc_closure* cb) {
grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -217,7 +249,12 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
int status;
DWORD bytes_read = 0;
DWORD flags = 0;
- WSABUF buffer;
+ WSABUF buffers[MAX_WSABUF_COUNT];
+ size_t i;
+
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
+ }
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
@@ -229,18 +266,27 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
tcp->read_cb = cb;
tcp->read_slices = read_slices;
grpc_slice_buffer_reset_and_unref_internal(read_slices);
+ grpc_slice_buffer_swap(read_slices, &tcp->last_read_buffer);
- tcp->read_slice = GRPC_SLICE_MALLOC(8192);
+ if (tcp->read_slices->length < DEFAULT_TARGET_READ_SIZE / 2 &&
+ tcp->read_slices->count < MAX_WSABUF_COUNT) {
+ // TODO(jtattermusch): slice should be allocated using resource quota
+ grpc_slice_buffer_add(tcp->read_slices,
+ GRPC_SLICE_MALLOC(DEFAULT_TARGET_READ_SIZE));
+ }
- buffer.len = (ULONG)GRPC_SLICE_LENGTH(
- tcp->read_slice); // we know slice size fits in 32bit.
- buffer.buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slice);
+ GPR_ASSERT(tcp->read_slices->count <= MAX_WSABUF_COUNT);
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ buffers[i].len = (ULONG)GRPC_SLICE_LENGTH(
+ tcp->read_slices->slices[i]); // we know slice size fits in 32bit.
+ buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[i]);
+ }
TCP_REF(tcp, "read");
/* First let's try a synchronous, non-blocking read. */
- status =
- WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
+ status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+ &bytes_read, &flags, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* Did we get data immediately ? Yay. */
@@ -252,8 +298,8 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
/* Otherwise, let's retry, by queuing a read. */
memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
- status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
- &info->overlapped, NULL);
+ status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+ &bytes_read, &flags, &info->overlapped, NULL);
if (status != 0) {
int wsa_error = WSAGetLastError();
@@ -275,6 +321,10 @@ static void on_write(void* tcpp, grpc_error* error) {
grpc_winsocket_callback_info* info = &handle->write_info;
grpc_closure* cb;
+ if (grpc_tcp_trace.enabled()) {
+ gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
+ }
+
GRPC_ERROR_REF(error);
gpr_mu_lock(&tcp->mu);
@@ -303,11 +353,21 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
unsigned i;
DWORD bytes_sent;
int status;
- WSABUF local_buffers[16];
+ WSABUF local_buffers[MAX_WSABUF_COUNT];
WSABUF* allocated = NULL;
WSABUF* buffers = local_buffers;
size_t len;
+ if (grpc_tcp_trace.enabled()) {
+ size_t i;
+ for (i = 0; i < slices->count; i++) {
+ char* data =
+ grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+ gpr_free(data);
+ }
+ }
+
if (tcp->shutting_down) {
GRPC_CLOSURE_SCHED(
cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -412,6 +472,7 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
static void win_destroy(grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
TCP_UNREF(tcp, "destroy");
}
@@ -427,6 +488,8 @@ static grpc_resource_user* win_get_resource_user(grpc_endpoint* ep) {
static int win_get_fd(grpc_endpoint* ep) { return -1; }
+static bool win_can_track_err(grpc_endpoint* ep) { return false; }
+
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_add_to_pollset,
@@ -436,7 +499,8 @@ static grpc_endpoint_vtable vtable = {win_read,
win_destroy,
win_get_resource_user,
win_get_peer,
- win_get_fd};
+ win_get_fd,
+ win_can_track_err};
grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
grpc_channel_args* channel_args,
@@ -460,6 +524,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
tcp->peer_string = gpr_strdup(peer_string);
+ grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
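The Windows read path above now posts several fixed-size buffers and, after completion, trims the slice buffer down to the bytes actually transferred, keeping the surplus for the next read. A simplified, portable sketch of that bookkeeping, with std::string standing in for slices (not the grpc_slice_buffer API):

// Sketch: after a read that filled `bytes_transferred` bytes across several
// pre-allocated buffers, move the unused tail into `leftover` so it can be
// reused by the next read (rough stand-in for grpc_slice_buffer_trim_end).
#include <cassert>
#include <string>
#include <vector>

void TrimEnd(std::vector<std::string>* bufs, size_t bytes_transferred,
             std::vector<std::string>* leftover) {
  size_t kept = 0;
  for (size_t i = 0; i < bufs->size(); ++i) {
    std::string& b = (*bufs)[i];
    if (kept + b.size() <= bytes_transferred) {
      kept += b.size();
      continue;
    }
    size_t used = bytes_transferred - kept;
    // Split the partially filled buffer: its unused tail goes to leftover.
    leftover->push_back(b.substr(used));
    b.resize(used);
    // Whole buffers after this one are entirely unused.
    for (size_t j = i + 1; j < bufs->size(); ++j) {
      leftover->push_back(std::move((*bufs)[j]));
    }
    bufs->resize(used == 0 ? i : i + 1);
    break;
  }
}

int main() {
  std::vector<std::string> bufs = {std::string(8, 'x'), std::string(8, 'x')};
  std::vector<std::string> leftover;
  TrimEnd(&bufs, 10, &leftover);  // 10 of 16 bytes were filled
  assert(bufs.size() == 2 && bufs[1].size() == 2 && leftover.size() == 1);
  return 0;
}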
diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc
index 94c9c69fcd..16f40b4f55 100644
--- a/src/core/lib/security/context/security_context.cc
+++ b/src/core/lib/security/context/security_context.cc
@@ -81,38 +81,45 @@ void grpc_auth_context_release(grpc_auth_context* context) {
}
/* --- grpc_client_security_context --- */
+grpc_client_security_context::~grpc_client_security_context() {
+ grpc_call_credentials_unref(creds);
+ GRPC_AUTH_CONTEXT_UNREF(auth_context, "client_security_context");
+ if (extension.instance != nullptr && extension.destroy != nullptr) {
+ extension.destroy(extension.instance);
+ }
+}
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena) {
- return static_cast<grpc_client_security_context*>(
- gpr_arena_alloc(arena, sizeof(grpc_client_security_context)));
+ return new (gpr_arena_alloc(arena, sizeof(grpc_client_security_context)))
+ grpc_client_security_context();
}
void grpc_client_security_context_destroy(void* ctx) {
grpc_core::ExecCtx exec_ctx;
grpc_client_security_context* c =
static_cast<grpc_client_security_context*>(ctx);
- grpc_call_credentials_unref(c->creds);
- GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
- if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
- c->extension.destroy(c->extension.instance);
- }
+ c->~grpc_client_security_context();
}
/* --- grpc_server_security_context --- */
+grpc_server_security_context::~grpc_server_security_context() {
+ GRPC_AUTH_CONTEXT_UNREF(auth_context, "server_security_context");
+ if (extension.instance != nullptr && extension.destroy != nullptr) {
+ extension.destroy(extension.instance);
+ }
+}
+
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena) {
- return static_cast<grpc_server_security_context*>(
- gpr_arena_alloc(arena, sizeof(grpc_server_security_context)));
+ return new (gpr_arena_alloc(arena, sizeof(grpc_server_security_context)))
+ grpc_server_security_context();
}
void grpc_server_security_context_destroy(void* ctx) {
grpc_server_security_context* c =
static_cast<grpc_server_security_context*>(ctx);
- GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
- if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
- c->extension.destroy(c->extension.instance);
- }
+ c->~grpc_server_security_context();
}
/* --- grpc_auth_context --- */
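The security-context change above replaces plain gpr_arena_alloc() casts with placement new plus an explicit destructor call, so C++ default member initializers and destructors run even though the memory itself is arena-owned. A tiny generic sketch of that pattern (MyContext and the malloc-backed "arena" are hypothetical):

// Sketch: construct an object in caller-provided (arena-style) memory with
// placement new, and destroy it with an explicit destructor call; the raw
// memory is released by whoever owns the arena, not by delete.
#include <cstdio>
#include <cstdlib>
#include <new>

struct MyContext {
  MyContext() : value(42) { std::puts("constructed"); }
  ~MyContext() { std::puts("destroyed"); }
  int value;
};

int main() {
  void* storage = std::malloc(sizeof(MyContext));  // stand-in for the arena
  MyContext* ctx = new (storage) MyContext();      // placement new: ctor runs
  std::printf("value = %d\n", ctx->value);
  ctx->~MyContext();   // explicit destructor call; does NOT free the memory
  std::free(storage);  // the "arena" owner frees the raw storage
  return 0;
}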
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index a8e1c3fd64..e45415f63b 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -34,18 +34,20 @@ struct gpr_arena;
/* Property names are always NULL terminated. */
-typedef struct {
- grpc_auth_property* array;
- size_t count;
- size_t capacity;
-} grpc_auth_property_array;
+struct grpc_auth_property_array {
+ grpc_auth_property* array = nullptr;
+ size_t count = 0;
+ size_t capacity = 0;
+};
struct grpc_auth_context {
- struct grpc_auth_context* chained;
+ grpc_auth_context() { gpr_ref_init(&refcount, 0); }
+
+ struct grpc_auth_context* chained = nullptr;
grpc_auth_property_array properties;
gpr_refcount refcount;
- const char* peer_identity_property_name;
- grpc_pollset* pollset;
+ const char* peer_identity_property_name = nullptr;
+ grpc_pollset* pollset = nullptr;
};
/* Creation. */
@@ -76,20 +78,23 @@ void grpc_auth_property_reset(grpc_auth_property* property);
Extension to the security context that may be set in a filter and accessed
later by a higher level method on a grpc_call object. */
-typedef struct {
- void* instance;
- void (*destroy)(void*);
-} grpc_security_context_extension;
+struct grpc_security_context_extension {
+ void* instance = nullptr;
+ void (*destroy)(void*) = nullptr;
+};
/* --- grpc_client_security_context ---
Internal client-side security context. */
-typedef struct {
- grpc_call_credentials* creds;
- grpc_auth_context* auth_context;
+struct grpc_client_security_context {
+ grpc_client_security_context() = default;
+ ~grpc_client_security_context();
+
+ grpc_call_credentials* creds = nullptr;
+ grpc_auth_context* auth_context = nullptr;
grpc_security_context_extension extension;
-} grpc_client_security_context;
+};
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena);
@@ -99,10 +104,13 @@ void grpc_client_security_context_destroy(void* ctx);
Internal server-side security context. */
-typedef struct {
- grpc_auth_context* auth_context;
+struct grpc_server_security_context {
+ grpc_server_security_context() = default;
+ ~grpc_server_security_context();
+
+ grpc_auth_context* auth_context = nullptr;
grpc_security_context_extension extension;
-} grpc_server_security_context;
+};
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena);
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index b486d25ab2..3878958b38 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -142,8 +142,8 @@ grpc_channel_credentials* grpc_channel_credentials_find_in_args(
/* --- grpc_credentials_mdelem_array. --- */
typedef struct {
- grpc_mdelem* md;
- size_t size;
+ grpc_mdelem* md = nullptr;
+ size_t size = 0;
} grpc_credentials_mdelem_array;
/// Takes a new ref to \a md.
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index c456ffaf5d..0674540d01 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -49,9 +49,16 @@
/* -- Default credentials. -- */
-static grpc_channel_credentials* g_default_credentials = nullptr;
-static int g_compute_engine_detection_done = 0;
+/* A sticky bit that is set only if the result of metadata server detection is
+ * positive. We do not set the bit on a negative result, because a negative
+ * result may come from an unreliable network test and should not be reused by
+ * subsequent calls. */
+static int g_metadata_server_available = 0;
+static int g_is_on_gce = 0;
static gpr_mu g_state_mu;
+/* Protects a metadata_server_detector instance that can be modified by more
+ * than one gRPC thread. */
+static gpr_mu* g_polling_mu;
static gpr_once g_once = GPR_ONCE_INIT;
static grpc_core::internal::grpc_gce_tenancy_checker g_gce_tenancy_checker =
grpc_alts_is_running_on_gcp;
@@ -63,7 +70,7 @@ typedef struct {
int is_done;
int success;
grpc_http_response response;
-} compute_engine_detector;
+} metadata_server_detector;
static void google_default_credentials_destruct(
grpc_channel_credentials* creds) {
@@ -89,15 +96,21 @@ static grpc_security_status google_default_create_security_connector(
bool use_alts =
is_grpclb_load_balancer || is_backend_from_grpclb_load_balancer;
grpc_security_status status = GRPC_SECURITY_ERROR;
+ /* Return failure if ALTS is selected but not running on GCE. */
+ if (use_alts && !g_is_on_gce) {
+ gpr_log(GPR_ERROR, "ALTS is selected, but not running on GCE.");
+ goto end;
+ }
status = use_alts ? c->alts_creds->vtable->create_security_connector(
c->alts_creds, call_creds, target, args, sc, new_args)
: c->ssl_creds->vtable->create_security_connector(
c->ssl_creds, call_creds, target, args, sc, new_args);
- /* grpclb-specific channel args are removed from the channel args set
- * to ensure backends and fallback adresses will have the same set of channel
- * args. By doing that, it guarantees the connections to backends will not be
- * torn down and re-connected when switching in and out of fallback mode.
- */
+/* grpclb-specific channel args are removed from the channel args set
+ * to ensure backends and fallback addresses will have the same set of channel
+ * args. This guarantees that the connections to backends will not be
+ * torn down and re-connected when switching in and out of fallback mode.
+ */
+end:
if (use_alts) {
static const char* args_to_remove[] = {
GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER,
@@ -113,6 +126,93 @@ static grpc_channel_credentials_vtable google_default_credentials_vtable = {
google_default_credentials_destruct,
google_default_create_security_connector, nullptr};
+static void on_metadata_server_detection_http_response(void* user_data,
+ grpc_error* error) {
+ metadata_server_detector* detector =
+ static_cast<metadata_server_detector*>(user_data);
+ if (error == GRPC_ERROR_NONE && detector->response.status == 200 &&
+ detector->response.hdr_count > 0) {
+ /* Internet providers can return a generic response to all requests, so
+ it is necessary to check that the metadata header is also present. */
+ size_t i;
+ for (i = 0; i < detector->response.hdr_count; i++) {
+ grpc_http_header* header = &detector->response.hdrs[i];
+ if (strcmp(header->key, "Metadata-Flavor") == 0 &&
+ strcmp(header->value, "Google") == 0) {
+ detector->success = 1;
+ break;
+ }
+ }
+ }
+ gpr_mu_lock(g_polling_mu);
+ detector->is_done = 1;
+ GRPC_LOG_IF_ERROR(
+ "Pollset kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent),
+ nullptr));
+ gpr_mu_unlock(g_polling_mu);
+}
+
+static void destroy_pollset(void* p, grpc_error* e) {
+ grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
+}
+
+static int is_metadata_server_reachable() {
+ metadata_server_detector detector;
+ grpc_httpcli_request request;
+ grpc_httpcli_context context;
+ grpc_closure destroy_closure;
+ /* The http call is local. If it takes more than one second, we are almost
+ certainly not running on Compute Engine. */
+ grpc_millis max_detection_delay = GPR_MS_PER_SEC;
+ grpc_pollset* pollset =
+ static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+ grpc_pollset_init(pollset, &g_polling_mu);
+ detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
+ detector.is_done = 0;
+ detector.success = 0;
+ memset(&detector.response, 0, sizeof(detector.response));
+ memset(&request, 0, sizeof(grpc_httpcli_request));
+ request.host = (char*)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
+ request.http.path = (char*)"/";
+ grpc_httpcli_context_init(&context);
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_create("google_default_credentials");
+ grpc_httpcli_get(
+ &context, &detector.pollent, resource_quota, &request,
+ grpc_core::ExecCtx::Get()->Now() + max_detection_delay,
+ GRPC_CLOSURE_CREATE(on_metadata_server_detection_http_response, &detector,
+ grpc_schedule_on_exec_ctx),
+ &detector.response);
+ grpc_resource_quota_unref_internal(resource_quota);
+ grpc_core::ExecCtx::Get()->Flush();
+ /* Block until we get the response. This is not ideal, but it should only be
+ called once per process lifetime by the default credentials. */
+ gpr_mu_lock(g_polling_mu);
+ while (!detector.is_done) {
+ grpc_pollset_worker* worker = nullptr;
+ if (!GRPC_LOG_IF_ERROR(
+ "pollset_work",
+ grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent),
+ &worker, GRPC_MILLIS_INF_FUTURE))) {
+ detector.is_done = 1;
+ detector.success = 0;
+ }
+ }
+ gpr_mu_unlock(g_polling_mu);
+ grpc_httpcli_context_destroy(&context);
+ GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset,
+ grpc_polling_entity_pollset(&detector.pollent),
+ grpc_schedule_on_exec_ctx);
+ grpc_pollset_shutdown(grpc_polling_entity_pollset(&detector.pollent),
+ &destroy_closure);
+ g_polling_mu = nullptr;
+ grpc_core::ExecCtx::Get()->Flush();
+ gpr_free(grpc_polling_entity_pollset(&detector.pollent));
+ grpc_http_response_destroy(&detector.response);
+ return detector.success;
+}
+
/* Takes ownership of creds_path if not NULL. */
static grpc_error* create_default_creds_from_path(
char* creds_path, grpc_call_credentials** creds) {
@@ -188,13 +288,6 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
gpr_once_init(&g_once, init_default_credentials);
- gpr_mu_lock(&g_state_mu);
-
- if (g_default_credentials != nullptr) {
- result = grpc_channel_credentials_ref(g_default_credentials);
- goto end;
- }
-
/* First, try the environment variable. */
err = create_default_creds_from_path(
gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds);
@@ -207,55 +300,55 @@ grpc_channel_credentials* grpc_google_default_credentials_create(void) {
if (err == GRPC_ERROR_NONE) goto end;
error = grpc_error_add_child(error, err);
- /* At last try to see if we're on compute engine (do the detection only once
- since it requires a network test). */
- if (!g_compute_engine_detection_done) {
- int need_compute_engine_creds = g_gce_tenancy_checker();
- g_compute_engine_detection_done = 1;
- if (need_compute_engine_creds) {
- call_creds = grpc_google_compute_engine_credentials_create(nullptr);
- if (call_creds == nullptr) {
- error = grpc_error_add_child(
- error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Failed to get credentials from network"));
- }
- }
+ gpr_mu_lock(&g_state_mu);
+
+ /* Try a platform-provided hint for GCE. */
+ if (!g_metadata_server_available) {
+ g_is_on_gce = g_gce_tenancy_checker();
+ g_metadata_server_available = g_is_on_gce;
}
+ /* TODO: Add a platform-provided hint for GAE. */
-end:
- if (result == nullptr) {
- if (call_creds != nullptr) {
- /* Create google default credentials. */
- auto creds = static_cast<grpc_google_default_channel_credentials*>(
- gpr_zalloc(sizeof(grpc_google_default_channel_credentials)));
- creds->base.vtable = &google_default_credentials_vtable;
- creds->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT;
- gpr_ref_init(&creds->base.refcount, 1);
- creds->ssl_creds =
- grpc_ssl_credentials_create(nullptr, nullptr, nullptr, nullptr);
- GPR_ASSERT(creds->ssl_creds != nullptr);
- grpc_alts_credentials_options* options =
- grpc_alts_credentials_client_options_create();
- creds->alts_creds = grpc_alts_credentials_create(options);
- grpc_alts_credentials_options_destroy(options);
- /* Add a global reference so that it can be cached and re-served. */
- g_default_credentials = grpc_composite_channel_credentials_create(
- &creds->base, call_creds, nullptr);
- GPR_ASSERT(g_default_credentials != nullptr);
- grpc_channel_credentials_unref(&creds->base);
- grpc_call_credentials_unref(call_creds);
- result = grpc_channel_credentials_ref(g_default_credentials);
- } else {
- gpr_log(GPR_ERROR, "Could not create google default credentials.");
- }
+ /* Do a network test for metadata server. */
+ if (!g_metadata_server_available) {
+ g_metadata_server_available = is_metadata_server_reachable();
}
gpr_mu_unlock(&g_state_mu);
- if (result == nullptr) {
- GRPC_LOG_IF_ERROR("grpc_google_default_credentials_create", error);
- } else {
- GRPC_ERROR_UNREF(error);
+
+ if (g_metadata_server_available) {
+ call_creds = grpc_google_compute_engine_credentials_create(nullptr);
+ if (call_creds == nullptr) {
+ error = grpc_error_add_child(
+ error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to get credentials from network"));
+ }
}
+end:
+ if (call_creds != nullptr) {
+ /* Create google default credentials. */
+ auto creds = static_cast<grpc_google_default_channel_credentials*>(
+ gpr_zalloc(sizeof(grpc_google_default_channel_credentials)));
+ creds->base.vtable = &google_default_credentials_vtable;
+ creds->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_GOOGLE_DEFAULT;
+ gpr_ref_init(&creds->base.refcount, 1);
+ creds->ssl_creds =
+ grpc_ssl_credentials_create(nullptr, nullptr, nullptr, nullptr);
+ GPR_ASSERT(creds->ssl_creds != nullptr);
+ grpc_alts_credentials_options* options =
+ grpc_alts_credentials_client_options_create();
+ creds->alts_creds = grpc_alts_credentials_create(options);
+ grpc_alts_credentials_options_destroy(options);
+ result = grpc_composite_channel_credentials_create(&creds->base, call_creds,
+ nullptr);
+ GPR_ASSERT(result != nullptr);
+ grpc_channel_credentials_unref(&creds->base);
+ grpc_call_credentials_unref(call_creds);
+ } else {
+ gpr_log(GPR_ERROR, "Could not create google default credentials: %s",
+ grpc_error_string(error));
+ }
+ GRPC_ERROR_UNREF(error);
return result;
}
@@ -266,21 +359,17 @@ void set_gce_tenancy_checker_for_testing(grpc_gce_tenancy_checker checker) {
g_gce_tenancy_checker = checker;
}
-} // namespace internal
-} // namespace grpc_core
-
void grpc_flush_cached_google_default_credentials(void) {
grpc_core::ExecCtx exec_ctx;
gpr_once_init(&g_once, init_default_credentials);
gpr_mu_lock(&g_state_mu);
- if (g_default_credentials != nullptr) {
- grpc_channel_credentials_unref(g_default_credentials);
- g_default_credentials = nullptr;
- }
- g_compute_engine_detection_done = 0;
+ g_metadata_server_available = 0;
gpr_mu_unlock(&g_state_mu);
}
+} // namespace internal
+} // namespace grpc_core
+
/* -- Well known credentials path. -- */
static grpc_well_known_credentials_path_getter creds_path_getter = nullptr;
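is_metadata_server_reachable() above treats the probe as positive only when the response carries the "Metadata-Flavor: Google" header, since some networks answer every request with a generic page. A standalone sketch of just that header check (hypothetical Header type; the real code walks grpc_http_response::hdrs):

// Sketch: decide whether an HTTP response came from the GCE metadata server by
// checking for the "Metadata-Flavor: Google" header.
#include <cstdio>
#include <cstring>

struct Header {
  const char* key;
  const char* value;
};

bool LooksLikeMetadataServer(int status, const Header* hdrs, size_t hdr_count) {
  if (status != 200) return false;
  for (size_t i = 0; i < hdr_count; ++i) {
    if (strcmp(hdrs[i].key, "Metadata-Flavor") == 0 &&
        strcmp(hdrs[i].value, "Google") == 0) {
      return true;
    }
  }
  return false;  // generic responses from captive portals etc. land here
}

int main() {
  Header hdrs[] = {{"Content-Type", "text/html"}, {"Metadata-Flavor", "Google"}};
  std::printf("%d\n", LooksLikeMetadataServer(200, hdrs, 2));  // prints 1
  return 0;
}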
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index a7dd0ea8ae..b9e2efb04f 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -45,8 +45,6 @@ typedef struct {
grpc_channel_credentials* ssl_creds;
} grpc_google_default_channel_credentials;
-void grpc_flush_cached_google_default_credentials(void);
-
namespace grpc_core {
namespace internal {
@@ -54,6 +52,9 @@ typedef bool (*grpc_gce_tenancy_checker)(void);
void set_gce_tenancy_checker_for_testing(grpc_gce_tenancy_checker checker);
+// TEST-ONLY. Reset the internal global state.
+void grpc_flush_cached_google_default_credentials(void);
+
} // namespace internal
} // namespace grpc_core
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index e34eacc8d7..6955e8698e 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -43,20 +43,39 @@
namespace {
/* We can have a per-call credentials. */
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : arena(args.arena),
+ owning_call(args.call_stack),
+ call_combiner(args.call_combiner) {}
+
+ // This method is effectively the dtor of this class. However, since
+ // `get_request_metadata_cancel_closure` can run in parallel with
+ // `destroy_call_elem`, we cannot call the dtor from either of them;
+ // otherwise fields could be accessed after the dtor has run, and MSan
+ // correctly complains that the memory is uninitialized.
+ void destroy() {
+ grpc_credentials_mdelem_array_destroy(&md_array);
+ grpc_call_credentials_unref(creds);
+ grpc_slice_unref_internal(host);
+ grpc_slice_unref_internal(method);
+ grpc_auth_metadata_context_reset(&auth_md_context);
+ }
+
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
- grpc_call_credentials* creds;
- grpc_slice host;
- grpc_slice method;
+ grpc_call_credentials* creds = nullptr;
+ grpc_slice host = grpc_empty_slice();
+ grpc_slice method = grpc_empty_slice();
/* pollset{_set} bound to this call; if we need to make external
network requests, they should be done under a pollset added to this
pollset_set so that work can progress when this call wants work to progress
*/
- grpc_polling_entity* pollent;
+ grpc_polling_entity* pollent = nullptr;
grpc_credentials_mdelem_array md_array;
- grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
- grpc_auth_metadata_context auth_md_context;
+ grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT] = {};
+ grpc_auth_metadata_context auth_md_context =
+ grpc_auth_metadata_context(); // Zero-initialize the C struct.
grpc_closure async_result_closure;
grpc_closure check_call_host_cancel_closure;
grpc_closure get_request_metadata_cancel_closure;
@@ -334,12 +353,7 @@ static void auth_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- calld->arena = args->arena;
- calld->owning_call = args->call_stack;
- calld->call_combiner = args->call_combiner;
- calld->host = grpc_empty_slice();
- calld->method = grpc_empty_slice();
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -354,11 +368,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- grpc_credentials_mdelem_array_destroy(&calld->md_array);
- grpc_call_credentials_unref(calld->creds);
- grpc_slice_unref_internal(calld->host);
- grpc_slice_unref_internal(calld->method);
- grpc_auth_metadata_context_reset(&calld->auth_md_context);
+ calld->destroy();
}
/* Constructor for channel_data */
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index f40f969bb7..14fb55884f 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -22,6 +22,8 @@
headers. Therefore, sockaddr.h must always be included first */
#include <grpc/support/port_platform.h>
+#include <new>
+
#include "src/core/lib/iomgr/sockaddr.h"
#include <grpc/slice.h>
@@ -31,6 +33,7 @@
#include <grpc/support/sync.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
@@ -40,44 +43,68 @@
#define STAGING_BUFFER_SIZE 8192
-typedef struct {
+static void on_read(void* user_data, grpc_error* error);
+
+namespace {
+struct secure_endpoint {
+ secure_endpoint(const grpc_endpoint_vtable* vtable,
+ tsi_frame_protector* protector,
+ tsi_zero_copy_grpc_protector* zero_copy_protector,
+ grpc_endpoint* transport, grpc_slice* leftover_slices,
+ size_t leftover_nslices)
+ : wrapped_ep(transport),
+ protector(protector),
+ zero_copy_protector(zero_copy_protector) {
+ base.vtable = vtable;
+ gpr_mu_init(&protector_mu);
+ GRPC_CLOSURE_INIT(&on_read, ::on_read, this, grpc_schedule_on_exec_ctx);
+ grpc_slice_buffer_init(&source_buffer);
+ grpc_slice_buffer_init(&leftover_bytes);
+ for (size_t i = 0; i < leftover_nslices; i++) {
+ grpc_slice_buffer_add(&leftover_bytes,
+ grpc_slice_ref_internal(leftover_slices[i]));
+ }
+ grpc_slice_buffer_init(&output_buffer);
+ gpr_ref_init(&ref, 1);
+ }
+
+ ~secure_endpoint() {
+ grpc_endpoint_destroy(wrapped_ep);
+ tsi_frame_protector_destroy(protector);
+ tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
+ grpc_slice_buffer_destroy_internal(&source_buffer);
+ grpc_slice_buffer_destroy_internal(&leftover_bytes);
+ grpc_slice_unref_internal(read_staging_buffer);
+ grpc_slice_unref_internal(write_staging_buffer);
+ grpc_slice_buffer_destroy_internal(&output_buffer);
+ gpr_mu_destroy(&protector_mu);
+ }
+
grpc_endpoint base;
grpc_endpoint* wrapped_ep;
struct tsi_frame_protector* protector;
struct tsi_zero_copy_grpc_protector* zero_copy_protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
- grpc_closure* read_cb;
- grpc_closure* write_cb;
+ grpc_closure* read_cb = nullptr;
+ grpc_closure* write_cb = nullptr;
grpc_closure on_read;
- grpc_slice_buffer* read_buffer;
+ grpc_slice_buffer* read_buffer = nullptr;
grpc_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
grpc_slice_buffer leftover_bytes;
/* buffers for read and write */
- grpc_slice read_staging_buffer;
-
- grpc_slice write_staging_buffer;
+ grpc_slice read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
+ grpc_slice write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
grpc_slice_buffer output_buffer;
gpr_refcount ref;
-} secure_endpoint;
+};
+} // namespace
grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint");
-static void destroy(secure_endpoint* secure_ep) {
- secure_endpoint* ep = secure_ep;
- grpc_endpoint_destroy(ep->wrapped_ep);
- tsi_frame_protector_destroy(ep->protector);
- tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector);
- grpc_slice_buffer_destroy_internal(&ep->leftover_bytes);
- grpc_slice_unref_internal(ep->read_staging_buffer);
- grpc_slice_unref_internal(ep->write_staging_buffer);
- grpc_slice_buffer_destroy_internal(&ep->output_buffer);
- grpc_slice_buffer_destroy_internal(&ep->source_buffer);
- gpr_mu_destroy(&ep->protector_mu);
- gpr_free(ep);
-}
+static void destroy(secure_endpoint* ep) { grpc_core::Delete(ep); }
#ifndef NDEBUG
#define SECURE_ENDPOINT_UNREF(ep, reason) \
@@ -389,6 +416,11 @@ static grpc_resource_user* endpoint_get_resource_user(
return grpc_endpoint_get_resource_user(ep->wrapped_ep);
}
+static bool endpoint_can_track_err(grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
+ return grpc_endpoint_can_track_err(ep->wrapped_ep);
+}
+
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_add_to_pollset,
@@ -398,32 +430,16 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
- endpoint_get_fd};
+ endpoint_get_fd,
+ endpoint_can_track_err};
grpc_endpoint* grpc_secure_endpoint_create(
struct tsi_frame_protector* protector,
struct tsi_zero_copy_grpc_protector* zero_copy_protector,
grpc_endpoint* transport, grpc_slice* leftover_slices,
size_t leftover_nslices) {
- size_t i;
- secure_endpoint* ep =
- static_cast<secure_endpoint*>(gpr_malloc(sizeof(secure_endpoint)));
- ep->base.vtable = &vtable;
- ep->wrapped_ep = transport;
- ep->protector = protector;
- ep->zero_copy_protector = zero_copy_protector;
- grpc_slice_buffer_init(&ep->leftover_bytes);
- for (i = 0; i < leftover_nslices; i++) {
- grpc_slice_buffer_add(&ep->leftover_bytes,
- grpc_slice_ref_internal(leftover_slices[i]));
- }
- ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
- ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
- grpc_slice_buffer_init(&ep->output_buffer);
- grpc_slice_buffer_init(&ep->source_buffer);
- ep->read_buffer = nullptr;
- GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
- gpr_mu_init(&ep->protector_mu);
- gpr_ref_init(&ep->ref, 1);
+ secure_endpoint* ep = grpc_core::New<secure_endpoint>(
+ &vtable, protector, zero_copy_protector, transport, leftover_slices,
+ leftover_nslices);
return &ep->base;
}
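secure_endpoint above moves from a gpr_malloc'd C struct with hand-written init and destroy code to a C++ struct whose constructor and destructor own setup and teardown, created through grpc_core::New and released through grpc_core::Delete. The helpers below are simplified stand-ins written for this sketch, assumed to behave like a placement-new plus explicit-destructor pair; they are not the library's definitions:

    #include <cstdio>
    #include <new>
    #include <utility>

    // Simplified allocation helpers in the spirit of grpc_core::New / Delete.
    template <typename T, typename... Args>
    T* New(Args&&... args) {
      void* p = ::operator new(sizeof(T));
      return new (p) T(std::forward<Args>(args)...);  // construct in place
    }

    template <typename T>
    void Delete(T* p) {
      if (p == nullptr) return;
      p->~T();               // run the destructor explicitly
      ::operator delete(p);  // then release the raw storage
    }

    // An object that owns its members via ctor/dtor instead of an init/destroy
    // pair of free functions, mirroring the secure_endpoint change.
    struct Endpoint {
      explicit Endpoint(const char* name) : name(name) {
        std::printf("endpoint %s created\n", name);
      }
      ~Endpoint() { std::printf("endpoint %s destroyed\n", name); }
      const char* name;
      int pending_reads = 0;  // default member initializer instead of memset
    };

    int main() {
      Endpoint* ep = New<Endpoint>("secure");
      Delete(ep);
      return 0;
    }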
diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc
index 4d6b133809..854a1c4af9 100644
--- a/src/core/lib/security/transport/security_handshaker.cc
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -275,9 +275,6 @@ static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void* user_data, const unsigned char* bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result) {
security_handshaker* h = static_cast<security_handshaker*>(user_data);
- // This callback will be invoked by TSI in a non-grpc thread, so it's
- // safe to create our own exec_ctx here.
- grpc_core::ExecCtx exec_ctx;
gpr_mu_lock(&h->mu);
grpc_error* error = on_handshake_next_done_locked(
h, result, bytes_to_send, bytes_to_send_size, handshaker_result);
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index b99fc5e178..362f49a584 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -28,6 +28,9 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+static void recv_initial_metadata_ready(void* arg, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+
namespace {
enum async_state {
STATE_INIT = 0,
@@ -35,28 +38,55 @@ enum async_state {
STATE_CANCELLED,
};
+struct channel_data {
+ grpc_auth_context* auth_context;
+ grpc_server_credentials* creds;
+};
+
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call_combiner(args.call_combiner), owning_call(args.call_stack) {
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+ ::recv_initial_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ ::recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ // Create server security context. Set its auth context from channel
+ // data and save it in the call context.
+ grpc_server_security_context* server_ctx =
+ grpc_server_security_context_create(args.arena);
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ server_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
+ if (args.context[GRPC_CONTEXT_SECURITY].value != nullptr) {
+ args.context[GRPC_CONTEXT_SECURITY].destroy(
+ args.context[GRPC_CONTEXT_SECURITY].value);
+ }
+ args.context[GRPC_CONTEXT_SECURITY].value = server_ctx;
+ args.context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_server_security_context_destroy;
+ }
+
+ ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
grpc_call_combiner* call_combiner;
grpc_call_stack* owning_call;
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
- grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ bool seen_recv_trailing_metadata_ready = false;
grpc_metadata_array md;
const grpc_metadata* consumed_md;
size_t num_consumed_md;
grpc_closure cancel_closure;
- gpr_atm state; // async_state
+ gpr_atm state = STATE_INIT; // async_state
};
-struct channel_data {
- grpc_auth_context* auth_context;
- grpc_server_credentials* creds;
-};
} // namespace
static grpc_metadata_array metadata_batch_to_md_array(
@@ -244,29 +274,7 @@ static void auth_start_transport_stream_op_batch(
/* Constructor for call_data */
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- calld->call_combiner = args->call_combiner;
- calld->owning_call = args->call_stack;
- GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
- recv_initial_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
- // Create server security context. Set its auth context from channel
- // data and save it in the call context.
- grpc_server_security_context* server_ctx =
- grpc_server_security_context_create(args->arena);
- server_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
- if (args->context[GRPC_CONTEXT_SECURITY].value != nullptr) {
- args->context[GRPC_CONTEXT_SECURITY].destroy(
- args->context[GRPC_CONTEXT_SECURITY].value);
- }
- args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
- args->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_server_security_context_destroy;
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
@@ -275,7 +283,7 @@ static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+ calld->~call_data();
}
/* Constructor for channel_data */
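As in the client-side filter, the callbacks here are forward-declared as file-scope functions before the anonymous namespace so the struct's constructor can still name them with a leading :: even though its own closure members reuse the same identifiers. A small illustrative sketch of that lookup trick, with invented names (Closure, CallState, recv_ready):

    #include <cstdio>

    // Free callbacks are forward-declared before the anonymous namespace so the
    // struct below can refer to them with `::name`, even though its members
    // use the same names (mirroring recv_initial_metadata_ready above).
    static void recv_ready(void* arg);

    namespace {
    struct Closure {
      void (*fn)(void*) = nullptr;
      void* arg = nullptr;
      void Run() { fn(arg); }
    };

    struct CallState {
      CallState() {
        // Without `::`, plain `recv_ready` would name the member closure below.
        recv_ready.fn = ::recv_ready;
        recv_ready.arg = this;
      }
      Closure recv_ready;  // member that shadows the free function's name
      int events_seen = 0;
    };
    }  // namespace

    static void recv_ready(void* arg) {
      auto* st = static_cast<CallState*>(arg);
      st->events_seen++;
      std::printf("recv_ready fired, events_seen=%d\n", st->events_seen);
    }

    int main() {
      CallState st;
      st.recv_ready.Run();
      return 0;
    }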
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a9349afa68..89b3f77822 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -72,8 +72,11 @@
// Used to create arena for the first call.
#define ESTIMATED_MDELEM_COUNT 16
-typedef struct batch_control {
- grpc_call* call;
+struct batch_control {
+ batch_control() { gpr_ref_init(&steps_to_complete, 0); }
+
+ grpc_call* call = nullptr;
+ grpc_transport_stream_op_batch op;
/* Share memory for cq_completion and notify_tag as they are never needed
simultaneously. Each byte used in this data structure counts as six bytes
per call, so any savings we can make are worthwhile,
@@ -96,84 +99,110 @@ typedef struct batch_control {
grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
- gpr_atm batch_error;
- grpc_transport_stream_op_batch op;
-} batch_control;
+ gpr_atm batch_error = reinterpret_cast<gpr_atm>(GRPC_ERROR_NONE);
+};
+
+struct parent_call {
+ parent_call() { gpr_mu_init(&child_list_mu); }
+ ~parent_call() { gpr_mu_destroy(&child_list_mu); }
-typedef struct {
gpr_mu child_list_mu;
- grpc_call* first_child;
-} parent_call;
+ grpc_call* first_child = nullptr;
+};
-typedef struct {
+struct child_call {
+ child_call(grpc_call* parent) : parent(parent) {}
grpc_call* parent;
/** siblings: children of the same parent form a list, and this list is
protected under
parent->mu */
- grpc_call* sibling_next;
- grpc_call* sibling_prev;
-} child_call;
+ grpc_call* sibling_next = nullptr;
+ grpc_call* sibling_prev = nullptr;
+};
#define RECV_NONE ((gpr_atm)0)
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
struct grpc_call {
+ grpc_call(gpr_arena* arena, const grpc_call_create_args& args)
+ : arena(arena),
+ cq(args.cq),
+ channel(args.channel),
+ is_client(args.server_transport_data == nullptr),
+ stream_op_payload(context) {
+ gpr_ref_init(&ext_ref, 1);
+ grpc_call_combiner_init(&call_combiner);
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 2; j++) {
+ metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
+ }
+ }
+ }
+
+ ~grpc_call() {
+ gpr_free(static_cast<void*>(const_cast<char*>(final_info.error_string)));
+ grpc_call_combiner_destroy(&call_combiner);
+ }
+
gpr_refcount ext_ref;
gpr_arena* arena;
grpc_call_combiner call_combiner;
grpc_completion_queue* cq;
grpc_polling_entity pollent;
grpc_channel* channel;
- gpr_timespec start_time;
- /* parent_call* */ gpr_atm parent_call_atm;
- child_call* child;
+ gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+ /* parent_call* */ gpr_atm parent_call_atm = 0;
+ child_call* child = nullptr;
/* client or server call */
bool is_client;
/** has grpc_call_unref been called */
- bool destroy_called;
+ bool destroy_called = false;
/** flag indicating that cancellation is inherited */
- bool cancellation_is_inherited;
+ bool cancellation_is_inherited = false;
/** which ops are in-flight */
- bool sent_initial_metadata;
- bool sending_message;
- bool sent_final_op;
- bool received_initial_metadata;
- bool receiving_message;
- bool requested_final_op;
- gpr_atm any_ops_sent_atm;
- gpr_atm received_final_op_atm;
-
- batch_control* active_batches[MAX_CONCURRENT_BATCHES];
+ bool sent_initial_metadata = false;
+ bool sending_message = false;
+ bool sent_final_op = false;
+ bool received_initial_metadata = false;
+ bool receiving_message = false;
+ bool requested_final_op = false;
+ gpr_atm any_ops_sent_atm = 0;
+ gpr_atm received_final_op_atm = 0;
+
+ batch_control* active_batches[MAX_CONCURRENT_BATCHES] = {};
grpc_transport_stream_op_batch_payload stream_op_payload;
/* first idx: is_receiving, second idx: is_trailing */
- grpc_metadata_batch metadata_batch[2][2];
+ grpc_metadata_batch metadata_batch[2][2] = {};
/* Buffered read metadata waiting to be returned to the application.
Element 0 is initial metadata, element 1 is trailing metadata. */
- grpc_metadata_array* buffered_metadata[2];
+ grpc_metadata_array* buffered_metadata[2] = {};
grpc_metadata compression_md;
// A char* indicating the peer name.
- gpr_atm peer_string;
+ gpr_atm peer_string = 0;
/* Call data useful for reporting. Only valid after the call has
* completed */
grpc_call_final_info final_info;
/* Compression algorithm for *incoming* data */
- grpc_message_compression_algorithm incoming_message_compression_algorithm;
+ grpc_message_compression_algorithm incoming_message_compression_algorithm =
+ GRPC_MESSAGE_COMPRESS_NONE;
/* Stream compression algorithm for *incoming* data */
- grpc_stream_compression_algorithm incoming_stream_compression_algorithm;
- /* Supported encodings (compression algorithms), a bitset */
- uint32_t encodings_accepted_by_peer;
+ grpc_stream_compression_algorithm incoming_stream_compression_algorithm =
+ GRPC_STREAM_COMPRESS_NONE;
+ /* Supported encodings (compression algorithms), a bitset.
+ * Always support no compression. */
+ uint32_t encodings_accepted_by_peer = 1 << GRPC_MESSAGE_COMPRESS_NONE;
/* Supported stream encodings (stream compression algorithms), a bitset */
- uint32_t stream_encodings_accepted_by_peer;
+ uint32_t stream_encodings_accepted_by_peer = 0;
/* Contexts for various subsystems (security, tracing, ...). */
- grpc_call_context_element context[GRPC_CONTEXT_COUNT];
+ grpc_call_context_element context[GRPC_CONTEXT_COUNT] = {};
/* for the client, extra metadata is initial metadata; for the
server, it's trailing metadata */
@@ -184,14 +213,14 @@ struct grpc_call {
grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sending_stream;
grpc_core::OrphanablePtr<grpc_core::ByteStream> receiving_stream;
- grpc_byte_buffer** receiving_buffer;
- grpc_slice receiving_slice;
+ grpc_byte_buffer** receiving_buffer = nullptr;
+ grpc_slice receiving_slice = grpc_empty_slice();
grpc_closure receiving_slice_ready;
grpc_closure receiving_stream_ready;
grpc_closure receiving_initial_metadata_ready;
grpc_closure receiving_trailing_metadata_ready;
- uint32_t test_only_last_message_flags;
- gpr_atm cancelled;
+ uint32_t test_only_last_message_flags = 0;
+ gpr_atm cancelled = 0;
grpc_closure release_call;
@@ -207,7 +236,7 @@ struct grpc_call {
grpc_server* server;
} server;
} final_op;
- gpr_atm status_error;
+ gpr_atm status_error = 0;
/* recv_state can contain one of the following values:
RECV_NONE : : no initial metadata and messages received
@@ -225,7 +254,7 @@ struct grpc_call {
For 1, 4: See receiving_initial_metadata_ready() function
For 2, 3: See receiving_stream_ready() function */
- gpr_atm recv_state;
+ gpr_atm recv_state = 0;
};
grpc_core::TraceFlag grpc_call_error_trace(false, "call_error");
@@ -269,11 +298,10 @@ void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
if (p == nullptr) {
- p = static_cast<parent_call*>(gpr_arena_alloc(call->arena, sizeof(*p)));
- gpr_mu_init(&p->child_list_mu);
+ p = new (gpr_arena_alloc(call->arena, sizeof(*p))) parent_call();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
- gpr_mu_destroy(&p->child_list_mu);
+ p->~parent_call();
p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
}
}
@@ -292,7 +320,9 @@ size_t grpc_call_get_initial_size_estimate() {
grpc_error* grpc_call_create(const grpc_call_create_args* args,
grpc_call** out_call) {
GPR_TIMER_SCOPE("grpc_call_create", 0);
- size_t i, j;
+
+ GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
+
grpc_error* error = GRPC_ERROR_NONE;
grpc_channel_stack* channel_stack =
grpc_channel_get_channel_stack(args->channel);
@@ -300,27 +330,19 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
gpr_arena* arena = gpr_arena_create(initial_size);
- call = static_cast<grpc_call*>(
- gpr_arena_alloc(arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
- channel_stack->call_stack_size));
- gpr_ref_init(&call->ext_ref, 1);
- gpr_atm_no_barrier_store(&call->cancelled, 0);
- call->arena = arena;
- grpc_call_combiner_init(&call->call_combiner);
+ call = new (gpr_arena_alloc(
+ arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
+ channel_stack->call_stack_size)) grpc_call(arena, *args);
*out_call = call;
- call->channel = args->channel;
- call->cq = args->cq;
- call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
- /* Always support no compression */
- GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_MESSAGE_COMPRESS_NONE);
- call->is_client = args->server_transport_data == nullptr;
- call->stream_op_payload.context = call->context;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
+ call->final_op.client.status_details = nullptr;
+ call->final_op.client.status = nullptr;
+ call->final_op.client.error_string = nullptr;
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
GPR_ASSERT(args->add_initial_metadata_count <
MAX_SEND_EXTRA_METADATA_COUNT);
- for (i = 0; i < args->add_initial_metadata_count; i++) {
+ for (size_t i = 0; i < args->add_initial_metadata_count; i++) {
call->send_extra_metadata[i].md = args->add_initial_metadata[i];
if (grpc_slice_eq(GRPC_MDKEY(args->add_initial_metadata[i]),
GRPC_MDSTR_PATH)) {
@@ -332,23 +354,18 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
static_cast<int>(args->add_initial_metadata_count);
} else {
GRPC_STATS_INC_SERVER_CALLS_CREATED();
+ call->final_op.server.cancelled = nullptr;
call->final_op.server.server = args->server;
GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 2; j++) {
- call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
- }
- }
- grpc_millis send_deadline = args->send_deadline;
+ grpc_millis send_deadline = args->send_deadline;
bool immediately_cancel = false;
if (args->parent != nullptr) {
- call->child =
- static_cast<child_call*>(gpr_arena_alloc(arena, sizeof(child_call)));
- call->child->parent = args->parent;
+ call->child = new (gpr_arena_alloc(arena, sizeof(child_call)))
+ child_call(args->parent);
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
@@ -382,10 +399,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
}
}
}
-
call->send_deadline = send_deadline;
-
- GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_unref */
grpc_call_element_args call_args = {CALL_STACK_FROM_CALL(call),
args->server_transport_data,
@@ -413,6 +427,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
}
gpr_mu_unlock(&pc->child_list_mu);
}
+
if (error != GRPC_ERROR_NONE) {
cancel_with_error(call, GRPC_ERROR_REF(error));
}
@@ -487,9 +502,9 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
static void release_call(void* call, grpc_error* error) {
grpc_call* c = static_cast<grpc_call*>(call);
grpc_channel* channel = c->channel;
- gpr_free(static_cast<void*>(const_cast<char*>(c->final_info.error_string)));
- grpc_call_combiner_destroy(&c->call_combiner);
- grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
+ gpr_arena* arena = c->arena;
+ c->~grpc_call();
+ grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(arena));
GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
}
@@ -505,7 +520,7 @@ static void destroy_call(void* call, grpc_error* error) {
c->receiving_stream.reset();
parent_call* pc = get_parent_call(c);
if (pc != nullptr) {
- gpr_mu_destroy(&pc->child_list_mu);
+ pc->~parent_call();
}
for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
@@ -679,6 +694,10 @@ static void cancel_with_error(grpc_call* c, grpc_error* error) {
execute_batch(c, op, &state->start_batch);
}
+void grpc_call_cancel_internal(grpc_call* call) {
+ cancel_with_error(call, GRPC_ERROR_CANCELLED);
+}
+
static grpc_error* error_from_status(grpc_status_code status,
const char* description) {
// copying 'description' is needed to ensure the grpc_call_cancel_with_status
@@ -1100,10 +1119,11 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
if (bctl->call != nullptr) {
return nullptr;
}
- memset(bctl, 0, sizeof(*bctl));
+ bctl->~batch_control();
+ bctl->op = {};
} else {
- bctl = static_cast<batch_control*>(
- gpr_arena_alloc(call->arena, sizeof(batch_control)));
+ bctl = new (gpr_arena_alloc(call->arena, sizeof(batch_control)))
+ batch_control();
*pslot = bctl;
}
bctl->call = call;
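reuse_or_allocate_batch_control above now resets a reused slot by running the destructor and then placement-new'ing a fresh object instead of memset-ing it to zero, which is what lets batch_control carry default member initializers. A compact sketch of that reset-by-reconstruction idea with a hypothetical BatchControl type:

    #include <cstdio>
    #include <new>

    // Hypothetical stand-in for batch_control: state with default initializers.
    struct BatchControl {
      void* call = nullptr;
      int completed_ops = 0;
      long batch_error = 0;  // default initializer instead of memset(0)
    };

    // Reuse one storage slot: if a previous object lives there, end its lifetime
    // with an explicit dtor call, then start a new one with placement new. This
    // replaces the old memset(bctl, 0, sizeof(*bctl)) reset.
    BatchControl* ReuseOrAllocate(void* slot, bool already_constructed) {
      if (already_constructed) {
        static_cast<BatchControl*>(slot)->~BatchControl();
      }
      return new (slot) BatchControl();
    }

    int main() {
      alignas(BatchControl) unsigned char storage[sizeof(BatchControl)];
      BatchControl* b = ReuseOrAllocate(storage, /*already_constructed=*/false);
      b->completed_ops = 3;
      b = ReuseOrAllocate(storage, /*already_constructed=*/true);  // reset slot
      std::printf("completed_ops after reuse: %d\n", b->completed_ops);  // 0
      return 0;
    }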
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index b34260505a..bd7295fe11 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -81,6 +81,10 @@ grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call,
size_t nops,
grpc_closure* closure);
+/* gRPC core internal version of grpc_call_cancel that does not create
+ * an exec_ctx. */
+void grpc_call_cancel_internal(grpc_call* call);
+
/* Given the top call_element, get the call object. */
grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index d7095c24d4..e47cb4360e 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -39,6 +39,7 @@
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -63,6 +64,7 @@ struct grpc_channel {
grpc_compression_options compression_options;
gpr_atm call_size_estimate;
+ grpc_resource_user* resource_user;
gpr_mu registered_call_mu;
registered_call* registered_calls;
@@ -82,6 +84,8 @@ grpc_channel* grpc_channel_create_with_builder(
char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
grpc_channel_args* args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
+ grpc_resource_user* resource_user =
+ grpc_channel_stack_builder_get_resource_user(builder);
grpc_channel* channel;
if (channel_stack_type == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
@@ -101,9 +105,11 @@ grpc_channel* grpc_channel_create_with_builder(
}
channel->target = target;
+ channel->resource_user = resource_user;
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
bool channelz_enabled = GRPC_ENABLE_CHANNELZ_DEFAULT;
- size_t channel_tracer_max_memory = 0; // default to off
+ size_t channel_tracer_max_memory =
+ GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT;
bool internal_channel = false;
// this creates the default ChannelNode. Different types of channels may
// override this to ensure a correct ChannelNode is created.
@@ -142,7 +148,6 @@ grpc_channel* grpc_channel_create_with_builder(
0x1; /* always support no compression */
} else if (0 == strcmp(args->args[i].key,
GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE)) {
- GPR_ASSERT(channel_tracer_max_memory == 0);
const grpc_integer_options options = {
GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX};
channel_tracer_max_memory =
@@ -217,7 +222,8 @@ grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
grpc_channel* grpc_channel_create(const char* target,
const grpc_channel_args* input_args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport* optional_transport) {
+ grpc_transport* optional_transport,
+ grpc_resource_user* resource_user) {
grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
const grpc_core::UniquePtr<char> default_authority =
get_default_authority(input_args);
@@ -227,11 +233,17 @@ grpc_channel* grpc_channel_create(const char* target,
grpc_channel_args_destroy(args);
grpc_channel_stack_builder_set_target(builder, target);
grpc_channel_stack_builder_set_transport(builder, optional_transport);
+ grpc_channel_stack_builder_set_resource_user(builder, resource_user);
if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
grpc_channel_stack_builder_destroy(builder);
+ if (resource_user != nullptr) {
+ grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
return nullptr;
}
- return grpc_channel_create_with_builder(builder, channel_stack_type);
+ grpc_channel* channel =
+ grpc_channel_create_with_builder(builder, channel_stack_type);
+ return channel;
}
size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
@@ -310,8 +322,8 @@ static grpc_call* grpc_channel_create_call_internal(
}
grpc_call_create_args args;
- memset(&args, 0, sizeof(args));
args.channel = channel;
+ args.server = nullptr;
args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
@@ -441,6 +453,10 @@ static void destroy_channel(void* arg, grpc_error* error) {
GRPC_MDELEM_UNREF(rc->authority);
gpr_free(rc);
}
+ if (channel->resource_user != nullptr) {
+ grpc_resource_user_free(channel->resource_user,
+ GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
+ }
gpr_mu_destroy(&channel->registered_call_mu);
gpr_free(channel->target);
gpr_free(channel);
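grpc_channel_create now accepts an optional resource user and takes ownership of it on both paths: it is released immediately if building the channel stack fails, and otherwise stored on the channel and released in destroy_channel. A simplified sketch of that ownership contract, with invented ResourceUser and Channel types standing in for the gRPC ones:

    #include <cstdio>

    // Hypothetical resource token; a stand-in for grpc_resource_user above.
    struct ResourceUser {
      const char* name;
    };

    static void FreeResourceUser(ResourceUser* ru) {
      if (ru != nullptr) std::printf("freeing resource user %s\n", ru->name);
    }

    struct Channel {
      ResourceUser* resource_user = nullptr;
    };

    // Takes ownership of `ru` whether or not creation succeeds: on failure the
    // token is released here, so the caller never has to special-case it.
    Channel* CreateChannel(ResourceUser* ru, bool stack_init_ok) {
      if (!stack_init_ok) {
        FreeResourceUser(ru);
        return nullptr;
      }
      Channel* c = new Channel;
      c->resource_user = ru;  // released later when the channel is destroyed
      return c;
    }

    void DestroyChannel(Channel* c) {
      FreeResourceUser(c->resource_user);
      delete c;
    }

    int main() {
      ResourceUser ru{"default"};
      if (Channel* c = CreateChannel(&ru, /*stack_init_ok=*/true)) {
        DestroyChannel(c);
      }
      CreateChannel(&ru, /*stack_init_ok=*/false);  // failure path also frees
      return 0;
    }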
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 4ac76b8a29..ab00b8e94f 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -29,7 +29,8 @@
grpc_channel* grpc_channel_create(const char* target,
const grpc_channel_args* args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport* optional_transport);
+ grpc_transport* optional_transport,
+ grpc_resource_user* resource_user = nullptr);
grpc_channel* grpc_channel_create_with_builder(
grpc_channel_stack_builder* builder,
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index b81ae73b4d..661022ec5f 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -859,8 +859,8 @@ static void cq_end_op_for_callback(
gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
- cq_finish_shutdown_callback(cq);
gpr_mu_unlock(cq->mu);
+ cq_finish_shutdown_callback(cq);
} else {
gpr_mu_unlock(cq->mu);
}
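The completion-queue change above releases the mutex before invoking cq_finish_shutdown_callback instead of after, so the shutdown callback can safely re-enter the queue. A small sketch of why the unlock-then-notify ordering matters, using a plain std::mutex and an invented Queue type:

    #include <cstdio>
    #include <functional>
    #include <mutex>

    // A completion-queue-like object whose shutdown callback must run without
    // the queue's mutex held, so the callback may call back into the queue.
    struct Queue {
      std::mutex mu;
      int pending = 1;
      std::function<void()> on_shutdown;

      void FinishOp() {
        bool shut_down = false;
        {
          std::lock_guard<std::mutex> lock(mu);
          shut_down = (--pending == 0);
        }  // release the lock first ...
        if (shut_down && on_shutdown) on_shutdown();  // ... then notify
      }
    };

    int main() {
      Queue q;
      q.on_shutdown = [&q] {
        // Re-entering the queue here would deadlock if mu were still held.
        std::lock_guard<std::mutex> lock(q.mu);
        std::printf("queue shut down, pending=%d\n", q.pending);
      };
      q.FinishOp();
      return 0;
    }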
diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc
index 0ad82fed99..67cf5d89bf 100644
--- a/src/core/lib/surface/init.cc
+++ b/src/core/lib/surface/init.cc
@@ -123,6 +123,7 @@ void grpc_init(void) {
grpc_core::Fork::GlobalInit();
grpc_fork_handlers_auto_register();
gpr_time_init();
+ gpr_arena_init();
grpc_stats_init();
grpc_slice_intern_init();
grpc_mdctx_global_init();
@@ -160,6 +161,7 @@ void grpc_shutdown(void) {
if (--g_initializations == 0) {
{
grpc_core::ExecCtx exec_ctx(0);
+ grpc_iomgr_shutdown_background_closure();
{
grpc_timer_manager_set_threading(
false); // shutdown timer_manager thread
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 35ab2c3bce..67b38e6f0c 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -28,6 +28,8 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <utility>
+
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/debug/stats.h"
@@ -47,6 +49,10 @@
grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
+static void server_on_recv_initial_metadata(void* ptr, grpc_error* error);
+static void server_recv_trailing_metadata_ready(void* user_data,
+ grpc_error* error);
+
namespace {
struct listener {
void* arg;
@@ -105,7 +111,7 @@ struct channel_data {
uint32_t registered_method_max_probes;
grpc_closure finish_destroy_channel_closure;
grpc_closure channel_connectivity_changed;
- intptr_t socket_uuid;
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node;
};
typedef struct shutdown_tag {
@@ -128,46 +134,73 @@ typedef enum {
typedef struct request_matcher request_matcher;
struct call_data {
+ call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+ : call(grpc_call_from_top_element(elem)),
+ call_combiner(args.call_combiner) {
+ GRPC_CLOSURE_INIT(&server_on_recv_initial_metadata,
+ ::server_on_recv_initial_metadata, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+ server_recv_trailing_metadata_ready, elem,
+ grpc_schedule_on_exec_ctx);
+ }
+ ~call_data() {
+ GPR_ASSERT(state != PENDING);
+ GRPC_ERROR_UNREF(recv_initial_metadata_error);
+ if (host_set) {
+ grpc_slice_unref_internal(host);
+ }
+ if (path_set) {
+ grpc_slice_unref_internal(path);
+ }
+ grpc_metadata_array_destroy(&initial_metadata);
+ grpc_byte_buffer_destroy(payload);
+ }
+
grpc_call* call;
- gpr_atm state;
+ gpr_atm state = NOT_STARTED;
- bool path_set;
- bool host_set;
+ bool path_set = false;
+ bool host_set = false;
grpc_slice path;
grpc_slice host;
- grpc_millis deadline;
+ grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
- grpc_completion_queue* cq_new;
+ grpc_completion_queue* cq_new = nullptr;
- grpc_metadata_batch* recv_initial_metadata;
- uint32_t recv_initial_metadata_flags;
- grpc_metadata_array initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
+ uint32_t recv_initial_metadata_flags = 0;
+ grpc_metadata_array initial_metadata =
+ grpc_metadata_array(); // Zero-initialize the C struct.
- request_matcher* matcher;
- grpc_byte_buffer* payload;
+ request_matcher* matcher = nullptr;
+ grpc_byte_buffer* payload = nullptr;
grpc_closure got_initial_metadata;
grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
grpc_closure* on_done_recv_initial_metadata;
grpc_closure recv_trailing_metadata_ready;
- grpc_error* recv_initial_metadata_error;
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
grpc_closure* original_recv_trailing_metadata_ready;
- grpc_error* recv_trailing_metadata_error;
- bool seen_recv_trailing_metadata_ready;
+ grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
+ bool seen_recv_trailing_metadata_ready = false;
grpc_closure publish;
- call_data* pending_next;
+ call_data* pending_next = nullptr;
grpc_call_combiner* call_combiner;
};
struct request_matcher {
+ request_matcher(grpc_server* server);
+ ~request_matcher();
+
grpc_server* server;
- call_data* pending_head;
- call_data* pending_tail;
- gpr_locked_mpscq* requests_per_cq;
+ std::atomic<call_data*> pending_head{nullptr};
+ call_data* pending_tail = nullptr;
+ gpr_locked_mpscq* requests_per_cq = nullptr;
};
struct registered_method {
@@ -189,6 +222,8 @@ typedef struct {
struct grpc_server {
grpc_channel_args* channel_args;
+ grpc_resource_user* default_resource_user;
+
grpc_completion_queue** cqs;
grpc_pollset** pollsets;
size_t cq_count;
@@ -314,22 +349,30 @@ static void channel_broadcaster_shutdown(channel_broadcaster* cb,
* request_matcher
*/
-static void request_matcher_init(request_matcher* rm, grpc_server* server) {
- memset(rm, 0, sizeof(*rm));
- rm->server = server;
- rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
- gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
+namespace {
+request_matcher::request_matcher(grpc_server* server) : server(server) {
+ requests_per_cq = static_cast<gpr_locked_mpscq*>(
+ gpr_malloc(sizeof(*requests_per_cq) * server->cq_count));
for (size_t i = 0; i < server->cq_count; i++) {
- gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
+ gpr_locked_mpscq_init(&requests_per_cq[i]);
}
}
-static void request_matcher_destroy(request_matcher* rm) {
- for (size_t i = 0; i < rm->server->cq_count; i++) {
- GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr);
- gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
+request_matcher::~request_matcher() {
+ for (size_t i = 0; i < server->cq_count; i++) {
+ GPR_ASSERT(gpr_locked_mpscq_pop(&requests_per_cq[i]) == nullptr);
+ gpr_locked_mpscq_destroy(&requests_per_cq[i]);
}
- gpr_free(rm->requests_per_cq);
+ gpr_free(requests_per_cq);
+}
+} // namespace
+
+static void request_matcher_init(request_matcher* rm, grpc_server* server) {
+ new (rm) request_matcher(server);
+}
+
+static void request_matcher_destroy(request_matcher* rm) {
+ rm->~request_matcher();
}
static void kill_zombie(void* elem, grpc_error* error) {
@@ -338,9 +381,10 @@ static void kill_zombie(void* elem, grpc_error* error) {
}
static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
- while (rm->pending_head) {
- call_data* calld = rm->pending_head;
- rm->pending_head = calld->pending_next;
+ call_data* calld;
+ while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
+ nullptr) {
+ rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
GRPC_CLOSURE_INIT(
&calld->kill_zombie_closure, kill_zombie,
@@ -538,8 +582,9 @@ static void publish_new_rpc(void* arg, grpc_error* error) {
}
gpr_atm_no_barrier_store(&calld->state, PENDING);
- if (rm->pending_head == nullptr) {
- rm->pending_tail = rm->pending_head = calld;
+ if (rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
+ rm->pending_head.store(calld, std::memory_order_relaxed);
+ rm->pending_tail = calld;
} else {
rm->pending_tail->pending_next = calld;
rm->pending_tail = calld;
@@ -822,11 +867,16 @@ static void accept_stream(void* cd, grpc_transport* transport,
channel_data* chand = static_cast<channel_data*>(cd);
/* create a call */
grpc_call_create_args args;
- memset(&args, 0, sizeof(args));
args.channel = chand->channel;
+ args.server = chand->server;
+ args.parent = nullptr;
+ args.propagation_mask = 0;
+ args.cq = nullptr;
+ args.pollset_set_alternative = nullptr;
args.server_transport_data = transport_server_data;
+ args.add_initial_metadata = nullptr;
+ args.add_initial_metadata_count = 0;
args.send_deadline = GRPC_MILLIS_INF_FUTURE;
- args.server = chand->server;
grpc_call* call;
grpc_error* error = grpc_call_create(&args, &call);
grpc_call_element* elem =
@@ -838,8 +888,9 @@ static void accept_stream(void* cd, grpc_transport* transport,
}
call_data* calld = static_cast<call_data*>(elem->call_data);
grpc_op op;
- memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
+ op.flags = 0;
+ op.reserved = nullptr;
op.data.recv_initial_metadata.recv_initial_metadata =
&calld->initial_metadata;
GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
@@ -867,40 +918,18 @@ static void channel_connectivity_changed(void* cd, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
- memset(calld, 0, sizeof(call_data));
- calld->deadline = GRPC_MILLIS_INF_FUTURE;
- calld->call = grpc_call_from_top_element(elem);
- calld->call_combiner = args->call_combiner;
-
- GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
- server_on_recv_initial_metadata, elem,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
- server_recv_trailing_metadata_ready, elem,
- grpc_schedule_on_exec_ctx);
server_ref(chand->server);
+ new (elem->call_data) call_data(elem, *args);
return GRPC_ERROR_NONE;
}
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
-
- GPR_ASSERT(calld->state != PENDING);
- GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
- if (calld->host_set) {
- grpc_slice_unref_internal(calld->host);
- }
- if (calld->path_set) {
- grpc_slice_unref_internal(calld->path);
- }
- grpc_metadata_array_destroy(&calld->initial_metadata);
- grpc_byte_buffer_destroy(calld->payload);
-
+ calld->~call_data();
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
server_unref(chand->server);
}
@@ -923,6 +952,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
static void destroy_channel_elem(grpc_channel_element* elem) {
size_t i;
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ chand->socket_node.reset();
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(chand->registered_methods[i].method);
@@ -1024,6 +1054,15 @@ grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
grpc_slice_from_static_string("Server created"));
}
+ if (args != nullptr) {
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_from_channel_args(args, false /* create */);
+ if (resource_quota != nullptr) {
+ server->default_resource_user =
+ grpc_resource_user_create(resource_quota, "default");
+ }
+ }
+
return server;
}
@@ -1119,10 +1158,11 @@ void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
*pollsets = server->pollsets;
}
-void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
- grpc_pollset* accepting_pollset,
- const grpc_channel_args* args,
- intptr_t socket_uuid) {
+void grpc_server_setup_transport(
+ grpc_server* s, grpc_transport* transport, grpc_pollset* accepting_pollset,
+ const grpc_channel_args* args,
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node,
+ grpc_resource_user* resource_user) {
size_t num_registered_methods;
size_t alloc;
registered_method* rm;
@@ -1135,14 +1175,15 @@ void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
uint32_t max_probes = 0;
grpc_transport_op* op = nullptr;
- channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport);
+ channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport,
+ resource_user);
chand = static_cast<channel_data*>(
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0)
->channel_data);
chand->server = s;
server_ref(s);
chand->channel = channel;
- chand->socket_uuid = socket_uuid;
+ chand->socket_node = std::move(socket_node);
size_t cq_idx;
for (cq_idx = 0; cq_idx < s->cq_count; cq_idx++) {
@@ -1218,14 +1259,13 @@ void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
}
void grpc_server_populate_server_sockets(
- grpc_server* s, grpc_core::channelz::ChildRefsList* server_sockets,
+ grpc_server* s, grpc_core::channelz::ChildSocketsList* server_sockets,
intptr_t start_idx) {
gpr_mu_lock(&s->mu_global);
channel_data* c = nullptr;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
- intptr_t socket_uuid = c->socket_uuid;
- if (socket_uuid >= start_idx) {
- server_sockets->push_back(socket_uuid);
+ if (c->socket_node != nullptr && c->socket_node->uuid() >= start_idx) {
+ server_sockets->push_back(c->socket_node.get());
}
}
gpr_mu_unlock(&s->mu_global);
@@ -1330,6 +1370,13 @@ void grpc_server_shutdown_and_notify(grpc_server* server,
channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */,
GRPC_ERROR_NONE);
+
+ if (server->default_resource_user != nullptr) {
+ grpc_resource_quota_unref(
+ grpc_resource_user_quota(server->default_resource_user));
+ grpc_resource_user_shutdown(server->default_resource_user);
+ grpc_resource_user_unref(server->default_resource_user);
+ }
}
void grpc_server_cancel_all_calls(grpc_server* server) {
@@ -1401,30 +1448,39 @@ static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
rm = &rc->data.registered.method->matcher;
break;
}
- if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
- /* this was the first queued request: we need to lock and start
- matching calls */
- gpr_mu_lock(&server->mu_call);
- while ((calld = rm->pending_head) != nullptr) {
- rc = reinterpret_cast<requested_call*>(
- gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
- if (rc == nullptr) break;
- rm->pending_head = calld->pending_next;
- gpr_mu_unlock(&server->mu_call);
- if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
- // Zombied Call
- GRPC_CLOSURE_INIT(
- &calld->kill_zombie_closure, kill_zombie,
- grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
- } else {
- publish_call(server, calld, cq_idx, rc);
- }
- gpr_mu_lock(&server->mu_call);
- }
+
+ // Fast path: if there is no pending request to be processed, immediately
+ // return.
+ if (!gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link) ||
+ // Note: We are reading the pending_head without holding the server's call
+ // mutex. Even if we read a non-null value here due to reordering,
+ // we will check it below again after grabbing the lock.
+ rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
+ return GRPC_CALL_OK;
+ }
+  // Slow path: this was the first queued request and there are pending
+  // calls: we need to lock and start matching calls.
+ gpr_mu_lock(&server->mu_call);
+ while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
+ nullptr) {
+ rc = reinterpret_cast<requested_call*>(
+ gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
+ if (rc == nullptr) break;
+ rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
gpr_mu_unlock(&server->mu_call);
+ if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
+ // Zombied Call
+ GRPC_CLOSURE_INIT(
+ &calld->kill_zombie_closure, kill_zombie,
+ grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+ grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
+ } else {
+ publish_call(server, calld, cq_idx, rc);
+ }
+ gpr_mu_lock(&server->mu_call);
}
+ gpr_mu_unlock(&server->mu_call);
return GRPC_CALL_OK;
}
@@ -1546,6 +1602,10 @@ const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
return server->channel_args;
}
+grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server) {
+ return server->default_resource_user;
+}
+
int grpc_server_has_open_connections(grpc_server* server) {
int r;
gpr_mu_lock(&server->mu_global);
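queue_call_request above splits into a lock-free fast path that inspects the atomic pending_head and a mutex-protected slow path that actually matches pending calls. The sketch below shows the shape of that split with invented types; it is single-threaded and does not reproduce the memory-ordering subtleties noted in the diff:

    #include <atomic>
    #include <cstdio>
    #include <mutex>

    struct PendingCall {
      PendingCall* next = nullptr;
      int id = 0;
    };

    struct Matcher {
      std::atomic<PendingCall*> pending_head{nullptr};
      std::mutex mu_call;

      void QueueRequest() {
        // Fast path: skip taking the lock when nothing is pending; the slow
        // path below re-checks the head under the lock before consuming.
        if (pending_head.load(std::memory_order_relaxed) == nullptr) return;

        std::lock_guard<std::mutex> lock(mu_call);
        PendingCall* calld;
        while ((calld = pending_head.load(std::memory_order_relaxed)) !=
               nullptr) {
          pending_head.store(calld->next, std::memory_order_relaxed);
          std::printf("matched pending call %d\n", calld->id);
        }
      }
    };

    int main() {
      Matcher m;
      m.QueueRequest();  // fast path: nothing pending, no lock taken
      PendingCall c;
      c.id = 1;
      m.pending_head.store(&c, std::memory_order_relaxed);
      m.QueueRequest();  // slow path: matches call 1 under the lock
      return 0;
    }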
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index 33c205417e..393bb24214 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -44,14 +44,15 @@ void grpc_server_add_listener(grpc_server* server, void* listener,
/* Setup a transport - creates a channel stack, binds the transport to the
server */
-void grpc_server_setup_transport(grpc_server* server, grpc_transport* transport,
- grpc_pollset* accepting_pollset,
- const grpc_channel_args* args,
- intptr_t socket_uuid);
+void grpc_server_setup_transport(
+ grpc_server* server, grpc_transport* transport,
+ grpc_pollset* accepting_pollset, const grpc_channel_args* args,
+ grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket_node,
+ grpc_resource_user* resource_user = nullptr);
/* fills in the uuids of all sockets used for connections on this server */
void grpc_server_populate_server_sockets(
- grpc_server* server, grpc_core::channelz::ChildRefsList* server_sockets,
+ grpc_server* server, grpc_core::channelz::ChildSocketsList* server_sockets,
intptr_t start_idx);
/* fills in the uuids of all listen sockets on this server */
@@ -63,6 +64,8 @@ grpc_core::channelz::ServerNode* grpc_server_get_channelz_node(
const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server);
+grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server);
+
int grpc_server_has_open_connections(grpc_server* server);
/* Do not call this before grpc_server_start. Returns the pollsets and the
diff --git a/src/core/lib/surface/version.cc b/src/core/lib/surface/version.cc
index 66890ce65a..4829cc80a5 100644
--- a/src/core/lib/surface/version.cc
+++ b/src/core/lib/surface/version.cc
@@ -25,4 +25,4 @@
const char* grpc_version_string(void) { return "7.0.0-dev"; }
-const char* grpc_g_stands_for(void) { return "gizmo"; }
+const char* grpc_g_stands_for(void) { return "goose"; }
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 0bcbb32d1f..f6e8bbf205 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -31,9 +31,11 @@
#include "src/core/lib/transport/static_metadata.h"
typedef struct grpc_linked_mdelem {
+ grpc_linked_mdelem() {}
+
grpc_mdelem md;
- struct grpc_linked_mdelem* next;
- struct grpc_linked_mdelem* prev;
+ struct grpc_linked_mdelem* next = nullptr;
+ struct grpc_linked_mdelem* prev = nullptr;
void* reserved;
} grpc_linked_mdelem;
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
index 4ebe73f82a..3dfaaaad5c 100644
--- a/src/core/lib/transport/static_metadata.cc
+++ b/src/core/lib/transport/static_metadata.cc
@@ -65,51 +65,56 @@ static uint8_t g_bytes[] = {
97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111,
97, 100, 47, 103, 114, 112, 99, 46, 104, 101, 97, 108, 116, 104, 46,
118, 49, 46, 72, 101, 97, 108, 116, 104, 47, 87, 97, 116, 99, 104,
- 100, 101, 102, 108, 97, 116, 101, 103, 122, 105, 112, 115, 116, 114, 101,
- 97, 109, 47, 103, 122, 105, 112, 71, 69, 84, 80, 79, 83, 84, 47,
- 47, 105, 110, 100, 101, 120, 46, 104, 116, 109, 108, 104, 116, 116, 112,
- 104, 116, 116, 112, 115, 50, 48, 48, 50, 48, 52, 50, 48, 54, 51,
- 48, 52, 52, 48, 48, 52, 48, 52, 53, 48, 48, 97, 99, 99, 101,
- 112, 116, 45, 99, 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44,
- 32, 100, 101, 102, 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45,
- 108, 97, 110, 103, 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45,
- 114, 97, 110, 103, 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99,
- 101, 115, 115, 45, 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108,
- 111, 119, 45, 111, 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108,
- 111, 119, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110,
- 99, 97, 99, 104, 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111,
- 110, 116, 101, 110, 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105,
- 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117,
- 97, 103, 101, 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103,
- 116, 104, 99, 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116,
- 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103,
- 101, 99, 111, 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103,
- 101, 120, 112, 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114,
- 111, 109, 105, 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111,
- 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45,
- 110, 111, 110, 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97,
- 110, 103, 101, 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101,
- 100, 45, 115, 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100,
- 105, 102, 105, 101, 100, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105,
- 111, 110, 109, 97, 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112,
- 114, 111, 120, 121, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
- 116, 101, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105,
- 122, 97, 116, 105, 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101,
- 114, 101, 114, 114, 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121,
- 45, 97, 102, 116, 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116,
- 45, 99, 111, 111, 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116,
- 114, 97, 110, 115, 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105,
- 116, 121, 116, 114, 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111,
- 100, 105, 110, 103, 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45,
- 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 48, 105, 100,
- 101, 110, 116, 105, 116, 121, 116, 114, 97, 105, 108, 101, 114, 115, 97,
- 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99,
- 103, 114, 112, 99, 80, 85, 84, 108, 98, 45, 99, 111, 115, 116, 45,
- 98, 105, 110, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102,
- 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103, 122,
- 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112, 105,
- 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101,
- 44, 103, 122, 105, 112};
+ 47, 101, 110, 118, 111, 121, 46, 115, 101, 114, 118, 105, 99, 101, 46,
+ 100, 105, 115, 99, 111, 118, 101, 114, 121, 46, 118, 50, 46, 65, 103,
+ 103, 114, 101, 103, 97, 116, 101, 100, 68, 105, 115, 99, 111, 118, 101,
+ 114, 121, 83, 101, 114, 118, 105, 99, 101, 47, 83, 116, 114, 101, 97,
+ 109, 65, 103, 103, 114, 101, 103, 97, 116, 101, 100, 82, 101, 115, 111,
+ 117, 114, 99, 101, 115, 100, 101, 102, 108, 97, 116, 101, 103, 122, 105,
+ 112, 115, 116, 114, 101, 97, 109, 47, 103, 122, 105, 112, 71, 69, 84,
+ 80, 79, 83, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104, 116, 109,
+ 108, 104, 116, 116, 112, 104, 116, 116, 112, 115, 50, 48, 48, 50, 48,
+ 52, 50, 48, 54, 51, 48, 52, 52, 48, 48, 52, 48, 52, 53, 48,
+ 48, 97, 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115, 101, 116,
+ 103, 122, 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101, 97, 99,
+ 99, 101, 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 97, 99,
+ 99, 101, 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99, 99, 101,
+ 112, 116, 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116, 114, 111,
+ 108, 45, 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105, 110, 97,
+ 103, 101, 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114, 105, 122,
+ 97, 116, 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111, 110, 116,
+ 114, 111, 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105, 115, 112,
+ 111, 115, 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, 45,
+ 108, 97, 110, 103, 117, 97, 103, 101, 99, 111, 110, 116, 101, 110, 116,
+ 45, 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, 45,
+ 108, 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116,
+ 45, 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, 116,
+ 101, 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, 105,
+ 114, 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, 104,
+ 105, 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110,
+ 99, 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, 104,
+ 105, 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, 111,
+ 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, 115,
+ 116, 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 105, 110, 107, 108,
+ 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111, 114, 119,
+ 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, 101,
+ 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97, 110, 103,
+ 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101, 115, 104,
+ 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101, 114, 118,
+ 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115, 116, 114,
+ 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116, 45, 115,
+ 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102, 101, 114,
+ 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121, 118, 105,
+ 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97,
+ 116, 101, 48, 105, 100, 101, 110, 116, 105, 116, 121, 116, 114, 97, 105,
+ 108, 101, 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110,
+ 47, 103, 114, 112, 99, 103, 114, 112, 99, 80, 85, 84, 108, 98, 45,
+ 99, 111, 115, 116, 45, 98, 105, 110, 105, 100, 101, 110, 116, 105, 116,
+ 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105,
+ 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44,
+ 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
+ 102, 108, 97, 116, 101, 44, 103, 122, 105, 112};
static void static_ref(void* unused) {}
static void static_unref(void* unused) {}
@@ -227,6 +232,7 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
{&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
};
const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
@@ -266,76 +272,77 @@ const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
{&grpc_static_metadata_refcounts[33], {{g_bytes + 415, 31}}},
{&grpc_static_metadata_refcounts[34], {{g_bytes + 446, 36}}},
{&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 28}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}},
- {&grpc_static_metadata_refcounts[38], {{g_bytes + 521, 11}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 532, 3}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 535, 4}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 539, 1}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 540, 11}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 551, 4}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 555, 5}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 560, 3}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 563, 3}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 566, 3}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 569, 3}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 572, 3}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 575, 3}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 578, 3}}},
- {&grpc_static_metadata_refcounts[52], {{g_bytes + 581, 14}}},
- {&grpc_static_metadata_refcounts[53], {{g_bytes + 595, 13}}},
- {&grpc_static_metadata_refcounts[54], {{g_bytes + 608, 15}}},
- {&grpc_static_metadata_refcounts[55], {{g_bytes + 623, 13}}},
- {&grpc_static_metadata_refcounts[56], {{g_bytes + 636, 6}}},
- {&grpc_static_metadata_refcounts[57], {{g_bytes + 642, 27}}},
- {&grpc_static_metadata_refcounts[58], {{g_bytes + 669, 3}}},
- {&grpc_static_metadata_refcounts[59], {{g_bytes + 672, 5}}},
- {&grpc_static_metadata_refcounts[60], {{g_bytes + 677, 13}}},
- {&grpc_static_metadata_refcounts[61], {{g_bytes + 690, 13}}},
- {&grpc_static_metadata_refcounts[62], {{g_bytes + 703, 19}}},
- {&grpc_static_metadata_refcounts[63], {{g_bytes + 722, 16}}},
- {&grpc_static_metadata_refcounts[64], {{g_bytes + 738, 14}}},
- {&grpc_static_metadata_refcounts[65], {{g_bytes + 752, 16}}},
- {&grpc_static_metadata_refcounts[66], {{g_bytes + 768, 13}}},
- {&grpc_static_metadata_refcounts[67], {{g_bytes + 781, 6}}},
- {&grpc_static_metadata_refcounts[68], {{g_bytes + 787, 4}}},
- {&grpc_static_metadata_refcounts[69], {{g_bytes + 791, 4}}},
- {&grpc_static_metadata_refcounts[70], {{g_bytes + 795, 6}}},
- {&grpc_static_metadata_refcounts[71], {{g_bytes + 801, 7}}},
- {&grpc_static_metadata_refcounts[72], {{g_bytes + 808, 4}}},
- {&grpc_static_metadata_refcounts[73], {{g_bytes + 812, 8}}},
- {&grpc_static_metadata_refcounts[74], {{g_bytes + 820, 17}}},
- {&grpc_static_metadata_refcounts[75], {{g_bytes + 837, 13}}},
- {&grpc_static_metadata_refcounts[76], {{g_bytes + 850, 8}}},
- {&grpc_static_metadata_refcounts[77], {{g_bytes + 858, 19}}},
- {&grpc_static_metadata_refcounts[78], {{g_bytes + 877, 13}}},
- {&grpc_static_metadata_refcounts[79], {{g_bytes + 890, 4}}},
- {&grpc_static_metadata_refcounts[80], {{g_bytes + 894, 8}}},
- {&grpc_static_metadata_refcounts[81], {{g_bytes + 902, 12}}},
- {&grpc_static_metadata_refcounts[82], {{g_bytes + 914, 18}}},
- {&grpc_static_metadata_refcounts[83], {{g_bytes + 932, 19}}},
- {&grpc_static_metadata_refcounts[84], {{g_bytes + 951, 5}}},
- {&grpc_static_metadata_refcounts[85], {{g_bytes + 956, 7}}},
- {&grpc_static_metadata_refcounts[86], {{g_bytes + 963, 7}}},
- {&grpc_static_metadata_refcounts[87], {{g_bytes + 970, 11}}},
- {&grpc_static_metadata_refcounts[88], {{g_bytes + 981, 6}}},
- {&grpc_static_metadata_refcounts[89], {{g_bytes + 987, 10}}},
- {&grpc_static_metadata_refcounts[90], {{g_bytes + 997, 25}}},
- {&grpc_static_metadata_refcounts[91], {{g_bytes + 1022, 17}}},
- {&grpc_static_metadata_refcounts[92], {{g_bytes + 1039, 4}}},
- {&grpc_static_metadata_refcounts[93], {{g_bytes + 1043, 3}}},
- {&grpc_static_metadata_refcounts[94], {{g_bytes + 1046, 16}}},
- {&grpc_static_metadata_refcounts[95], {{g_bytes + 1062, 1}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1071, 8}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1079, 16}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1095, 4}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1099, 3}}},
- {&grpc_static_metadata_refcounts[101], {{g_bytes + 1102, 11}}},
- {&grpc_static_metadata_refcounts[102], {{g_bytes + 1113, 16}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}},
- {&grpc_static_metadata_refcounts[104], {{g_bytes + 1142, 12}}},
- {&grpc_static_metadata_refcounts[105], {{g_bytes + 1154, 21}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 80}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 601, 11}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}},
+ {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}},
+ {&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}},
+ {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}},
+ {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}},
};
uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -345,17 +352,17 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
static const int8_t elems_r[] = {
- 16, 11, -8, 0, 3, -42, -81, -43, 0, 6, -8, 0, 0, 0, -7,
- -3, -10, 0, 0, 0, -1, -2, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, -63, 0, -47, -68, -69, -70, 0, 33,
- 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 20,
- 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
- 4, 4, 4, 3, 10, 9, 0, 0, 0, 0, 0, 0, -3, 0};
+ 15, 10, -8, 0, 2, -42, -81, -43, 0, 6, -8, 0, 0, 0, 2,
+ -3, -10, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, -64, 0, -67, -68, -69, -70, 0,
+ 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21,
+ 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 5, 4, 4, 8, 8, 0, 0, 0, 0, 0, 0, -5, 0};
static uint32_t elems_phash(uint32_t i) {
- i -= 41;
- uint32_t x = i % 104;
- uint32_t y = i / 104;
+ i -= 42;
+ uint32_t x = i % 105;
+ uint32_t y = i / 105;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
@@ -365,29 +372,29 @@ static uint32_t elems_phash(uint32_t i) {
}
static const uint16_t elem_keys[] = {
- 257, 258, 259, 260, 261, 262, 263, 1096, 1097, 1513, 1725, 145,
- 146, 467, 468, 1619, 41, 42, 1733, 990, 991, 767, 768, 1627,
- 627, 837, 2043, 2149, 2255, 5541, 5859, 5965, 6071, 6177, 1749, 6283,
- 6389, 6495, 6601, 6707, 6813, 6919, 7025, 7131, 7237, 7343, 7449, 7555,
- 7661, 5753, 7767, 7873, 7979, 8085, 8191, 8297, 8403, 8509, 8615, 8721,
- 8827, 8933, 9039, 9145, 9251, 9357, 9463, 1156, 9569, 523, 9675, 9781,
- 206, 1162, 1163, 1164, 1165, 1792, 1582, 1050, 9887, 9993, 1686, 10735,
- 1799, 0, 0, 0, 0, 0, 347, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 260, 261, 262, 263, 264, 265, 266, 1107, 1108, 1741, 147, 148,
+ 472, 473, 1634, 42, 43, 1527, 1750, 1000, 1001, 774, 775, 1643,
+ 633, 845, 2062, 2169, 2276, 5700, 5914, 6021, 6128, 6235, 1766, 6342,
+ 6449, 6556, 6663, 6770, 6877, 6984, 7091, 7198, 7305, 7412, 7519, 7626,
+ 7733, 7840, 7947, 8054, 8161, 8268, 8375, 8482, 8589, 8696, 8803, 8910,
+ 9017, 9124, 9231, 9338, 9445, 9552, 9659, 1167, 528, 9766, 9873, 208,
+ 9980, 1173, 1174, 1175, 1176, 1809, 10087, 1060, 10194, 10943, 1702, 0,
+ 1816, 0, 0, 1597, 0, 0, 350, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0};
static const uint8_t elem_idxs[] = {
- 7, 8, 9, 10, 11, 12, 13, 77, 79, 30, 71, 1, 2, 5, 6, 25,
- 3, 4, 84, 66, 65, 62, 63, 73, 67, 61, 57, 37, 74, 14, 17, 18,
- 19, 20, 15, 21, 22, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34, 35,
- 36, 16, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 76, 55, 69, 56, 58, 70, 78, 80, 81, 82, 83, 68, 64,
- 59, 60, 72, 75, 85, 255, 255, 255, 255, 255, 0};
+ 7, 8, 9, 10, 11, 12, 13, 77, 79, 71, 1, 2, 5, 6, 25, 3,
+ 4, 30, 84, 66, 65, 62, 63, 73, 67, 61, 57, 37, 74, 14, 16, 17,
+ 18, 19, 15, 20, 21, 22, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34,
+ 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 76, 69, 55, 56, 70, 58, 78, 80, 81, 82, 83, 59, 64,
+ 60, 75, 72, 255, 85, 255, 255, 68, 255, 255, 0};
grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 106 + b);
+ uint32_t k = (uint32_t)(a * 107 + b);
uint32_t h = elems_phash(k);
return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
elem_idxs[h] != 255
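
The regenerated tables above form a displacement-style perfect hash: a static metadata element is found by computing k = key_index * 107 + value_index, folding k through elems_phash() (which adds a per-row delta from elems_r[], relying on unsigned wraparound for negative deltas), and then confirming the candidate slot against elem_keys[] and elem_idxs[]. A self-contained C++ sketch of the same scheme with tiny made-up tables — the values below are illustrative only, not the generated ones, and the real elems_phash() additionally subtracts the smallest key before hashing:

#include <cstdint>
#include <cstdio>

namespace {
// Hypothetical toy tables -- NOT the generated gRPC ones.
constexpr uint32_t kStrCount = 7;  // stands in for the 107 static strings
constexpr int8_t kRowDelta[] = {0, 2, -1, 3};                   // like elems_r[]
constexpr uint16_t kKeys[] = {0, 16, 0, 0, 9, 23, 0, 0, 0, 0};  // like elem_keys[]
constexpr uint8_t kIdxs[] = {255, 1, 255, 255, 0, 2, 255, 255, 255, 255};

uint32_t toy_phash(uint32_t i) {
  uint32_t x = i % kStrCount;
  uint32_t y = i / kStrCount;
  uint32_t h = x;
  // Negative deltas rely on unsigned wraparound, exactly as in elems_phash().
  if (y < sizeof(kRowDelta) / sizeof(kRowDelta[0])) h += (uint32_t)kRowDelta[y];
  return h;
}

// Returns the mdelem index for the (key, value) string-index pair, or -1.
int toy_lookup(int a, int b) {
  uint32_t k = (uint32_t)(a * kStrCount + b);
  uint32_t h = toy_phash(k);
  if (h < sizeof(kKeys) / sizeof(kKeys[0]) && kKeys[h] == k && kIdxs[h] != 255) {
    return kIdxs[h];
  }
  return -1;  // the real code returns GRPC_MDNULL here
}
}  // namespace

int main() {
  printf("%d %d %d\n", toy_lookup(1, 2), toy_lookup(2, 2), toy_lookup(0, 2));
  // prints "0 1 -1"
}

Adding one static string (here, the ADS method path) shifts every key, which is why the multiplier, the displacement row, and both lookup tables are all regenerated together in this hunk.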
@@ -400,175 +407,175 @@ grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
{{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[39], {{g_bytes + 532, 3}}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[40], {{g_bytes + 535, 4}}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[41], {{g_bytes + 539, 1}}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}}},
{{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
- {&grpc_static_metadata_refcounts[42], {{g_bytes + 540, 11}}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[43], {{g_bytes + 551, 4}}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[44], {{g_bytes + 555, 5}}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[45], {{g_bytes + 560, 3}}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[46], {{g_bytes + 563, 3}}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[47], {{g_bytes + 566, 3}}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[48], {{g_bytes + 569, 3}}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[49], {{g_bytes + 572, 3}}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[50], {{g_bytes + 575, 3}}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}}},
{{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
- {&grpc_static_metadata_refcounts[51], {{g_bytes + 578, 3}}}},
- {{&grpc_static_metadata_refcounts[52], {{g_bytes + 581, 14}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}}},
+ {{&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[53], {{g_bytes + 595, 13}}}},
- {{&grpc_static_metadata_refcounts[54], {{g_bytes + 608, 15}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}}},
+ {{&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[55], {{g_bytes + 623, 13}}},
+ {{&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[56], {{g_bytes + 636, 6}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[57], {{g_bytes + 642, 27}}},
+ {{&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[58], {{g_bytes + 669, 3}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[59], {{g_bytes + 672, 5}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[60], {{g_bytes + 677, 13}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[61], {{g_bytes + 690, 13}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[62], {{g_bytes + 703, 19}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[63], {{g_bytes + 722, 16}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[64], {{g_bytes + 738, 14}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[65], {{g_bytes + 752, 16}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[66], {{g_bytes + 768, 13}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[67], {{g_bytes + 781, 6}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[68], {{g_bytes + 787, 4}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[69], {{g_bytes + 791, 4}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[70], {{g_bytes + 795, 6}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[71], {{g_bytes + 801, 7}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[72], {{g_bytes + 808, 4}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[73], {{g_bytes + 812, 8}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[74], {{g_bytes + 820, 17}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[75], {{g_bytes + 837, 13}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[76], {{g_bytes + 850, 8}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[77], {{g_bytes + 858, 19}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[78], {{g_bytes + 877, 13}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[79], {{g_bytes + 890, 4}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[80], {{g_bytes + 894, 8}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[81], {{g_bytes + 902, 12}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[82], {{g_bytes + 914, 18}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[83], {{g_bytes + 932, 19}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[84], {{g_bytes + 951, 5}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[85], {{g_bytes + 956, 7}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[86], {{g_bytes + 963, 7}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[87], {{g_bytes + 970, 11}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[88], {{g_bytes + 981, 6}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[89], {{g_bytes + 987, 10}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[90], {{g_bytes + 997, 25}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[91], {{g_bytes + 1022, 17}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[92], {{g_bytes + 1039, 4}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1043, 3}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1046, 16}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
- {&grpc_static_metadata_refcounts[95], {{g_bytes + 1062, 1}}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}},
{{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
{&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
{{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
- {&grpc_static_metadata_refcounts[97], {{g_bytes + 1071, 8}}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}}},
{{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
- {&grpc_static_metadata_refcounts[98], {{g_bytes + 1079, 16}}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}}},
{{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
- {&grpc_static_metadata_refcounts[99], {{g_bytes + 1095, 4}}}},
+ {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}}},
{{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
- {&grpc_static_metadata_refcounts[100], {{g_bytes + 1099, 3}}}},
+ {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
- {{&grpc_static_metadata_refcounts[101], {{g_bytes + 1102, 11}}},
+ {{&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
{&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 7}}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[102], {{g_bytes + 1113, 16}}}},
+ {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[104], {{g_bytes + 1142, 12}}}},
+ {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}}},
{{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
- {&grpc_static_metadata_refcounts[105], {{g_bytes + 1154, 21}}}},
+ {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[96], {{g_bytes + 1063, 8}}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[37], {{g_bytes + 517, 4}}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
{{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
- {&grpc_static_metadata_refcounts[103], {{g_bytes + 1129, 13}}}},
+ {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
};
const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
79, 80, 81, 82};
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 2bb9f72838..4f9670232c 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -31,7 +31,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 106
+#define GRPC_STATIC_MDSTR_COUNT 107
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
@@ -110,147 +110,151 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* "/grpc.health.v1.Health/Watch" */
#define GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH \
(grpc_static_slice_table[35])
+/* "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources"
+ */
+#define GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_DISCOVERY_DOT_V2_DOT_AGGREGATEDDISCOVERYSERVICE_SLASH_STREAMAGGREGATEDRESOURCES \
+ (grpc_static_slice_table[36])
/* "deflate" */
-#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[36])
+#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[37])
/* "gzip" */
-#define GRPC_MDSTR_GZIP (grpc_static_slice_table[37])
+#define GRPC_MDSTR_GZIP (grpc_static_slice_table[38])
/* "stream/gzip" */
-#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[38])
+#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[39])
/* "GET" */
-#define GRPC_MDSTR_GET (grpc_static_slice_table[39])
+#define GRPC_MDSTR_GET (grpc_static_slice_table[40])
/* "POST" */
-#define GRPC_MDSTR_POST (grpc_static_slice_table[40])
+#define GRPC_MDSTR_POST (grpc_static_slice_table[41])
/* "/" */
-#define GRPC_MDSTR_SLASH (grpc_static_slice_table[41])
+#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[42])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43])
/* "http" */
-#define GRPC_MDSTR_HTTP (grpc_static_slice_table[43])
+#define GRPC_MDSTR_HTTP (grpc_static_slice_table[44])
/* "https" */
-#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[44])
+#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[45])
/* "200" */
-#define GRPC_MDSTR_200 (grpc_static_slice_table[45])
+#define GRPC_MDSTR_200 (grpc_static_slice_table[46])
/* "204" */
-#define GRPC_MDSTR_204 (grpc_static_slice_table[46])
+#define GRPC_MDSTR_204 (grpc_static_slice_table[47])
/* "206" */
-#define GRPC_MDSTR_206 (grpc_static_slice_table[47])
+#define GRPC_MDSTR_206 (grpc_static_slice_table[48])
/* "304" */
-#define GRPC_MDSTR_304 (grpc_static_slice_table[48])
+#define GRPC_MDSTR_304 (grpc_static_slice_table[49])
/* "400" */
-#define GRPC_MDSTR_400 (grpc_static_slice_table[49])
+#define GRPC_MDSTR_400 (grpc_static_slice_table[50])
/* "404" */
-#define GRPC_MDSTR_404 (grpc_static_slice_table[50])
+#define GRPC_MDSTR_404 (grpc_static_slice_table[51])
/* "500" */
-#define GRPC_MDSTR_500 (grpc_static_slice_table[51])
+#define GRPC_MDSTR_500 (grpc_static_slice_table[52])
/* "accept-charset" */
-#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[52])
+#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[53])
/* "gzip, deflate" */
-#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[53])
+#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[54])
/* "accept-language" */
-#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[54])
+#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[55])
/* "accept-ranges" */
-#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[55])
+#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[56])
/* "accept" */
-#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[56])
+#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[57])
/* "access-control-allow-origin" */
-#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[57])
+#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[58])
/* "age" */
-#define GRPC_MDSTR_AGE (grpc_static_slice_table[58])
+#define GRPC_MDSTR_AGE (grpc_static_slice_table[59])
/* "allow" */
-#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[59])
+#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[60])
/* "authorization" */
-#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[60])
+#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[61])
/* "cache-control" */
-#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[61])
+#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[62])
/* "content-disposition" */
-#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[62])
+#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[63])
/* "content-language" */
-#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[63])
+#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[64])
/* "content-length" */
-#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[64])
+#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[65])
/* "content-location" */
-#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[65])
+#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[66])
/* "content-range" */
-#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[66])
+#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[67])
/* "cookie" */
-#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[67])
+#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[68])
/* "date" */
-#define GRPC_MDSTR_DATE (grpc_static_slice_table[68])
+#define GRPC_MDSTR_DATE (grpc_static_slice_table[69])
/* "etag" */
-#define GRPC_MDSTR_ETAG (grpc_static_slice_table[69])
+#define GRPC_MDSTR_ETAG (grpc_static_slice_table[70])
/* "expect" */
-#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[70])
+#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[71])
/* "expires" */
-#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[71])
+#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[72])
/* "from" */
-#define GRPC_MDSTR_FROM (grpc_static_slice_table[72])
+#define GRPC_MDSTR_FROM (grpc_static_slice_table[73])
/* "if-match" */
-#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[73])
+#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[74])
/* "if-modified-since" */
-#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[74])
+#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[75])
/* "if-none-match" */
-#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[75])
+#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[76])
/* "if-range" */
-#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[76])
+#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[77])
/* "if-unmodified-since" */
-#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[77])
+#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[78])
/* "last-modified" */
-#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[78])
+#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[79])
/* "link" */
-#define GRPC_MDSTR_LINK (grpc_static_slice_table[79])
+#define GRPC_MDSTR_LINK (grpc_static_slice_table[80])
/* "location" */
-#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[80])
+#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[81])
+#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[82])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[83])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[83])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84])
/* "range" */
-#define GRPC_MDSTR_RANGE (grpc_static_slice_table[84])
+#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85])
/* "referer" */
-#define GRPC_MDSTR_REFERER (grpc_static_slice_table[85])
+#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[86])
+#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[87])
+#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88])
/* "server" */
-#define GRPC_MDSTR_SERVER (grpc_static_slice_table[88])
+#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[89])
+#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[90])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[90])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[91])
+#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92])
/* "vary" */
-#define GRPC_MDSTR_VARY (grpc_static_slice_table[92])
+#define GRPC_MDSTR_VARY (grpc_static_slice_table[93])
/* "via" */
-#define GRPC_MDSTR_VIA (grpc_static_slice_table[93])
+#define GRPC_MDSTR_VIA (grpc_static_slice_table[94])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[94])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95])
/* "0" */
-#define GRPC_MDSTR_0 (grpc_static_slice_table[95])
+#define GRPC_MDSTR_0 (grpc_static_slice_table[96])
/* "identity" */
-#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[96])
+#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[97])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[97])
+#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[98])
/* "application/grpc" */
-#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[98])
+#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[99])
/* "grpc" */
-#define GRPC_MDSTR_GRPC (grpc_static_slice_table[99])
+#define GRPC_MDSTR_GRPC (grpc_static_slice_table[100])
/* "PUT" */
-#define GRPC_MDSTR_PUT (grpc_static_slice_table[100])
+#define GRPC_MDSTR_PUT (grpc_static_slice_table[101])
/* "lb-cost-bin" */
-#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[101])
+#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[102])
/* "identity,deflate" */
-#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[102])
+#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[103])
/* "identity,gzip" */
-#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[103])
+#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[104])
/* "deflate,gzip" */
-#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[104])
+#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[105])
/* "identity,deflate,gzip" */
#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \
- (grpc_static_slice_table[105])
+ (grpc_static_slice_table[106])
extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;
extern grpc_slice_refcount
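
The net effect of the renumbering in this header is a single new interned string at index 36, the xDS ADS streaming method path. A hypothetical caller can compare an incoming path against it using the public slice API; the macro name is taken verbatim from the header above, while the helper below is illustrative only:

#include <grpc/slice.h>

#include "src/core/lib/transport/static_metadata.h"

// Returns true if `path` is the AggregatedDiscoveryService streaming method.
static bool is_ads_method(grpc_slice path) {
  return grpc_slice_eq(
             path,
             GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_DISCOVERY_DOT_V2_DOT_AGGREGATEDDISCOVERYSERVICE_SLASH_STREAMAGGREGATEDRESOURCES) != 0;
}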
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index cbdb77c844..b32f9c6ec1 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -27,6 +27,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
+#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -149,7 +150,7 @@ void grpc_transport_move_stats(grpc_transport_stream_stats* from,
}
size_t grpc_transport_stream_size(grpc_transport* transport) {
- return transport->vtable->sizeof_stream;
+ return GPR_ROUND_UP_TO_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
}
void grpc_transport_destroy(grpc_transport* transport) {
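
grpc_transport_stream_size() now rounds the per-stream size up to the platform's maximum alignment (GPR_ROUND_UP_TO_ALIGNMENT_SIZE from the newly included src/core/lib/gpr/alloc.h), presumably so that data a caller co-allocates immediately after the stream object stays properly aligned. A minimal sketch of that rounding, assuming a 16-byte power-of-two alignment:

#include <cstddef>

constexpr size_t kMaxAlignment = 16;  // assumed power-of-two alignment

constexpr size_t RoundUpToAlignment(size_t n) {
  return (n + kMaxAlignment - 1) & ~(kMaxAlignment - 1);
}

static_assert(RoundUpToAlignment(1) == 16, "pads small sizes up");
static_assert(RoundUpToAlignment(16) == 16, "exact multiples unchanged");
static_assert(RoundUpToAlignment(17) == 32, "otherwise the next multiple");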
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 9e784635c6..5ce568834e 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -81,16 +81,16 @@ void grpc_stream_unref(grpc_stream_refcount* refcount);
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
void* buffer, size_t length);
-typedef struct {
- uint64_t framing_bytes;
- uint64_t data_bytes;
- uint64_t header_bytes;
-} grpc_transport_one_way_stats;
+struct grpc_transport_one_way_stats {
+ uint64_t framing_bytes = 0;
+ uint64_t data_bytes = 0;
+ uint64_t header_bytes = 0;
+};
-typedef struct grpc_transport_stream_stats {
+struct grpc_transport_stream_stats {
grpc_transport_one_way_stats incoming;
grpc_transport_one_way_stats outgoing;
-} grpc_transport_stream_stats;
+};
void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
grpc_transport_one_way_stats* to);
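
Converting the typedef'd C structs into C++ structs with in-class initializers means a default-constructed stats object is already zeroed, so call sites no longer depend on an explicit memset; the bit-field members of grpc_transport_stream_op_batch below still need the hand-written constructor because bit-fields cannot take in-class initializers before C++20. A standalone sketch of what the initializers buy:

#include <cassert>
#include <cstdint>

struct one_way_stats {  // mirrors grpc_transport_one_way_stats
  uint64_t framing_bytes = 0;
  uint64_t data_bytes = 0;
  uint64_t header_bytes = 0;
};

int main() {
  one_way_stats stats;  // no memset(&stats, 0, sizeof(stats)) needed
  assert(stats.framing_bytes == 0 && stats.data_bytes == 0 &&
         stats.header_bytes == 0);
}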
@@ -121,7 +121,17 @@ typedef struct grpc_transport_stream_op_batch_payload
/* Transport stream op: a set of operations to perform on a transport
against a single stream */
-typedef struct grpc_transport_stream_op_batch {
+struct grpc_transport_stream_op_batch {
+ grpc_transport_stream_op_batch()
+ : send_initial_metadata(false),
+ send_trailing_metadata(false),
+ send_message(false),
+ recv_initial_metadata(false),
+ recv_message(false),
+ recv_trailing_metadata(false),
+ cancel_stream(false),
+ is_traced(false) {}
+
/** Should be scheduled when all of the non-recv operations in the batch
are complete.
@@ -131,10 +141,10 @@ typedef struct grpc_transport_stream_op_batch {
scheduled as soon as the non-recv ops are complete, regardless of
whether or not the recv ops are complete. If a batch contains
only recv ops, on_complete can be null. */
- grpc_closure* on_complete;
+ grpc_closure* on_complete = nullptr;
/** Values for the stream op (fields set are determined by flags above) */
- grpc_transport_stream_op_batch_payload* payload;
+ grpc_transport_stream_op_batch_payload* payload = nullptr;
/** Send initial metadata to the peer, from the provided metadata batch. */
bool send_initial_metadata : 1;
@@ -158,29 +168,41 @@ typedef struct grpc_transport_stream_op_batch {
/** Cancel this stream with the provided error */
bool cancel_stream : 1;
+ /** Is this stream traced */
+ bool is_traced : 1;
+
/***************************************************************************
* remaining fields are initialized and used at the discretion of the
* current handler of the op */
grpc_handler_private_op_data handler_private;
-} grpc_transport_stream_op_batch;
+};
struct grpc_transport_stream_op_batch_payload {
+ explicit grpc_transport_stream_op_batch_payload(
+ grpc_call_context_element* context)
+ : context(context) {}
+ ~grpc_transport_stream_op_batch_payload() {
+    // We do not own `send_message`; release ownership and let the real owner
+    // clean up the data.
+ send_message.send_message.release();
+ }
+
struct {
- grpc_metadata_batch* send_initial_metadata;
+ grpc_metadata_batch* send_initial_metadata = nullptr;
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
- uint32_t send_initial_metadata_flags;
+ uint32_t send_initial_metadata_flags = 0;
// If non-NULL, will be set by the transport to the peer string (a char*).
// The transport retains ownership of the string.
// Note: This pointer may be used by the transport after the
// send_initial_metadata op is completed. It must remain valid
// until the call is destroyed.
- gpr_atm* peer_string;
+ gpr_atm* peer_string = nullptr;
} send_initial_metadata;
struct {
- grpc_metadata_batch* send_trailing_metadata;
+ grpc_metadata_batch* send_trailing_metadata = nullptr;
} send_trailing_metadata;
struct {
@@ -192,39 +214,39 @@ struct grpc_transport_stream_op_batch_payload {
} send_message;
struct {
- grpc_metadata_batch* recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata = nullptr;
// Flags are used only on the server side. If non-null, will be set to
// a bitfield of the GRPC_INITIAL_METADATA_xxx macros (e.g., to
// indicate if the call is idempotent).
- uint32_t* recv_flags;
+ uint32_t* recv_flags = nullptr;
/** Should be enqueued when initial metadata is ready to be processed. */
- grpc_closure* recv_initial_metadata_ready;
+ grpc_closure* recv_initial_metadata_ready = nullptr;
// If not NULL, will be set to true if trailing metadata is
// immediately available. This may be a signal that we received a
// Trailers-Only response.
- bool* trailing_metadata_available;
+ bool* trailing_metadata_available = nullptr;
// If non-NULL, will be set by the transport to the peer string (a char*).
// The transport retains ownership of the string.
// Note: This pointer may be used by the transport after the
// recv_initial_metadata op is completed. It must remain valid
// until the call is destroyed.
- gpr_atm* peer_string;
+ gpr_atm* peer_string = nullptr;
} recv_initial_metadata;
struct {
// Will be set by the transport to point to the byte stream
// containing a received message.
// Will be NULL if trailing metadata is received instead of a message.
- grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+ grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
/** Should be enqueued when one message is ready to be processed. */
- grpc_closure* recv_message_ready;
+ grpc_closure* recv_message_ready = nullptr;
} recv_message;
struct {
- grpc_metadata_batch* recv_trailing_metadata;
- grpc_transport_stream_stats* collect_stats;
+ grpc_metadata_batch* recv_trailing_metadata = nullptr;
+ grpc_transport_stream_stats* collect_stats = nullptr;
/** Should be enqueued when initial metadata is ready to be processed. */
- grpc_closure* recv_trailing_metadata_ready;
+ grpc_closure* recv_trailing_metadata_ready = nullptr;
} recv_trailing_metadata;
/** Forcefully close this stream.
@@ -240,7 +262,7 @@ struct grpc_transport_stream_op_batch_payload {
struct {
// Error contract: the transport that gets this op must cause cancel_error
// to be unref'ed after processing it
- grpc_error* cancel_error;
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
} cancel_stream;
/* Indexes correspond to grpc_context_index enum values */
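
The new payload destructor calls release() instead of letting the OrphanablePtr destroy the byte stream, because the payload only borrows it from the call that owns it. A std::unique_ptr analogy of that borrow-then-release pattern — a sketch, not the gRPC types:

#include <memory>

struct ByteStreamLike {};  // stand-in; the real object is owned by the call

// The borrower must not run the deleter on something it does not own.
void drop_borrowed_reference(std::unique_ptr<ByteStreamLike>& borrowed) {
  borrowed.release();  // forget the pointer; the real owner will destroy it
  // borrowed.reset() here would instead delete an object we merely borrow.
}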
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/lib/uri/uri_parser.cc
index 0572034a9c..f212c7d2c0 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/lib/uri/uri_parser.cc
@@ -18,7 +18,7 @@
#include <grpc/support/port_platform.h>
-#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/uri/uri_parser.h"
#include <string.h>
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/lib/uri/uri_parser.h
index d749f23308..b6771bbde3 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/lib/uri/uri_parser.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H
+#ifndef GRPC_CORE_LIB_URI_URI_PARSER_H
+#define GRPC_CORE_LIB_URI_URI_PARSER_H
#include <grpc/support/port_platform.h>
@@ -47,4 +47,4 @@ const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key);
/** destroy a uri */
void grpc_uri_destroy(grpc_uri* uri);
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
+#endif /* GRPC_CORE_LIB_URI_URI_PARSER_H */
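
Only the file location and include guard change here; callers keep the same API through the new header path. A hypothetical usage sketch — the (text, suppress_errors) shape of grpc_uri_parse() and the scheme/path fields are assumptions based on the era's API and may differ:

#include <grpc/support/log.h>

#include "src/core/lib/uri/uri_parser.h"

static void log_target(const char* target) {
  grpc_uri* uri = grpc_uri_parse(target, /*suppress_errors=*/false);
  if (uri == nullptr) return;  // parse failure already logged unless suppressed
  gpr_log(GPR_INFO, "scheme=%s path=%s", uri->scheme, uri->path);
  grpc_uri_destroy(uri);
}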
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.cc b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
index c0c17b0a4b..92085d31d7 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
@@ -28,8 +28,6 @@ void grpc_deadline_filter_init(void);
void grpc_deadline_filter_shutdown(void);
void grpc_client_channel_init(void);
void grpc_client_channel_shutdown(void);
-void grpc_tsi_alts_init(void);
-void grpc_tsi_alts_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_http_filters_init,
@@ -40,6 +38,4 @@ void grpc_register_built_in_plugins(void) {
grpc_deadline_filter_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
- grpc_register_plugin(grpc_tsi_alts_init,
- grpc_tsi_alts_shutdown);
}
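
Dropping the grpc_tsi_alts pair is the whole change to these registries; the registry itself is just an ordered list of (init, shutdown) hooks that grpc_init() and grpc_shutdown() walk. A sketch of how any plugin hooks in — the plugin names below are hypothetical:

#include <grpc/grpc.h>

void my_plugin_init(void);      // hypothetical
void my_plugin_shutdown(void);  // hypothetical

void register_my_plugin(void) {
  // Must happen before grpc_init(): init hooks run in registration order,
  // shutdown hooks in reverse order during grpc_shutdown().
  grpc_register_plugin(my_plugin_init, my_plugin_shutdown);
}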
diff --git a/src/core/plugin_registry/grpc_plugin_registry.cc b/src/core/plugin_registry/grpc_plugin_registry.cc
index 94c2493d5e..cde40ef65c 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.cc
+++ b/src/core/plugin_registry/grpc_plugin_registry.cc
@@ -28,8 +28,6 @@ void grpc_deadline_filter_init(void);
void grpc_deadline_filter_shutdown(void);
void grpc_client_channel_init(void);
void grpc_client_channel_shutdown(void);
-void grpc_tsi_alts_init(void);
-void grpc_tsi_alts_shutdown(void);
void grpc_inproc_plugin_init(void);
void grpc_inproc_plugin_shutdown(void);
void grpc_resolver_fake_init(void);
@@ -66,8 +64,6 @@ void grpc_register_built_in_plugins(void) {
grpc_deadline_filter_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
- grpc_register_plugin(grpc_tsi_alts_init,
- grpc_tsi_alts_shutdown);
grpc_register_plugin(grpc_inproc_plugin_init,
grpc_inproc_plugin_shutdown);
grpc_register_plugin(grpc_resolver_fake_init,
diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc
index 17e8026096..43d0979f4b 100644
--- a/src/core/tsi/alts/handshaker/alts_handshaker_client.cc
+++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.cc
@@ -25,30 +25,172 @@
#include <grpc/support/log.h>
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel.h"
#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api.h"
+#include "src/core/tsi/alts/handshaker/alts_shared_resource.h"
+#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h"
+#include "src/core/tsi/alts/handshaker/alts_tsi_utils.h"
+
+#define TSI_ALTS_INITIAL_BUFFER_SIZE 256
const int kHandshakerClientOpNum = 4;
+struct alts_handshaker_client {
+ const alts_handshaker_client_vtable* vtable;
+};
+
typedef struct alts_grpc_handshaker_client {
alts_handshaker_client base;
+ alts_tsi_handshaker* handshaker;
grpc_call* call;
+  /* A pointer to the function that handles interaction with the handshaker
+   * service: it points to grpc_call_start_batch_and_execute in normal use, and
+   * to a custom function that validates the data sent to the handshaker
+   * service when the client is used in tests. */
alts_grpc_caller grpc_caller;
+  /* A callback provided by gRPC to handle the response returned from the
+   * handshaker service. It also brings control safely back to the application
+   * when a dedicated CQ and thread are used. */
+ grpc_iomgr_cb_func grpc_cb;
+ /* A gRPC closure to be scheduled when the response from handshaker service
+ * is received. It will be initialized with grpc_cb. */
+ grpc_closure on_handshaker_service_resp_recv;
+  /* Buffers holding the data sent to and received from the handshaker
+   * service. */
+ grpc_byte_buffer* send_buffer;
+ grpc_byte_buffer* recv_buffer;
+ grpc_status_code status;
+ /* Initial metadata to be received from handshaker service. */
+ grpc_metadata_array recv_initial_metadata;
+  /* A callback provided by the application, invoked when a response is
+   * received from the handshaker service. */
+ tsi_handshaker_on_next_done_cb cb;
+ void* user_data;
+ /* ALTS credential options passed in from the caller. */
+ grpc_alts_credentials_options* options;
+  /* Target name passed to the handshaker service for the server authorization
+   * check. */
+ grpc_slice target_name;
+  /* Boolean flag indicating whether the handshaker client is used on the
+   * client (is_client = true) or the server (is_client = false) side. */
+ bool is_client;
+  /* Temporary store for data received from the handshaker service, used to
+   * extract unused bytes. */
+ grpc_slice recv_bytes;
+  /* Buffer holding data to be sent to the gRPC client's or server's peer. */
+ unsigned char* buffer;
+ size_t buffer_size;
} alts_grpc_handshaker_client;
-static grpc_call_error grpc_start_batch(grpc_call* call, const grpc_op* ops,
- size_t nops, void* tag) {
- return grpc_call_start_batch(call, ops, nops, tag, nullptr);
+static void handshaker_client_send_buffer_destroy(
+ alts_grpc_handshaker_client* client) {
+ GPR_ASSERT(client != nullptr);
+ grpc_byte_buffer_destroy(client->send_buffer);
+ client->send_buffer = nullptr;
+}
+
+static bool is_handshake_finished_properly(grpc_gcp_handshaker_resp* resp) {
+ GPR_ASSERT(resp != nullptr);
+ if (resp->has_result) {
+ return true;
+ }
+ return false;
+}
+
+void alts_handshaker_client_handle_response(alts_handshaker_client* c,
+ bool is_ok) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ grpc_byte_buffer* recv_buffer = client->recv_buffer;
+ grpc_status_code status = client->status;
+ tsi_handshaker_on_next_done_cb cb = client->cb;
+ void* user_data = client->user_data;
+ alts_tsi_handshaker* handshaker = client->handshaker;
+
+ /* Invalid input check. */
+ if (cb == nullptr) {
+ gpr_log(GPR_ERROR,
+ "cb is nullptr in alts_tsi_handshaker_handle_response()");
+ return;
+ }
+ if (handshaker == nullptr) {
+ gpr_log(GPR_ERROR,
+ "handshaker is nullptr in alts_tsi_handshaker_handle_response()");
+ cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr);
+ return;
+ }
+ /* TSI handshake has been shutdown. */
+ if (alts_tsi_handshaker_has_shutdown(handshaker)) {
+ gpr_log(GPR_ERROR, "TSI handshake shutdown");
+ cb(TSI_HANDSHAKE_SHUTDOWN, user_data, nullptr, 0, nullptr);
+ return;
+ }
+ /* Failed grpc call check. */
+ if (!is_ok || status != GRPC_STATUS_OK) {
+ gpr_log(GPR_ERROR, "grpc call made to handshaker service failed");
+ cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr);
+ return;
+ }
+ if (recv_buffer == nullptr) {
+ gpr_log(GPR_ERROR,
+ "recv_buffer is nullptr in alts_tsi_handshaker_handle_response()");
+ cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr);
+ return;
+ }
+ grpc_gcp_handshaker_resp* resp =
+ alts_tsi_utils_deserialize_response(recv_buffer);
+ grpc_byte_buffer_destroy(client->recv_buffer);
+ client->recv_buffer = nullptr;
+ /* Invalid handshaker response check. */
+ if (resp == nullptr) {
+ gpr_log(GPR_ERROR, "alts_tsi_utils_deserialize_response() failed");
+ cb(TSI_DATA_CORRUPTED, user_data, nullptr, 0, nullptr);
+ return;
+ }
+ grpc_slice* slice = static_cast<grpc_slice*>(resp->out_frames.arg);
+ unsigned char* bytes_to_send = nullptr;
+ size_t bytes_to_send_size = 0;
+ if (slice != nullptr) {
+ bytes_to_send_size = GRPC_SLICE_LENGTH(*slice);
+ while (bytes_to_send_size > client->buffer_size) {
+ client->buffer_size *= 2;
+ client->buffer = static_cast<unsigned char*>(
+ gpr_realloc(client->buffer, client->buffer_size));
+ }
+ memcpy(client->buffer, GRPC_SLICE_START_PTR(*slice), bytes_to_send_size);
+ bytes_to_send = client->buffer;
+ }
+ tsi_handshaker_result* result = nullptr;
+ if (is_handshake_finished_properly(resp)) {
+ alts_tsi_handshaker_result_create(resp, client->is_client, &result);
+ alts_tsi_handshaker_result_set_unused_bytes(result, &client->recv_bytes,
+ resp->bytes_consumed);
+ }
+ grpc_status_code code = static_cast<grpc_status_code>(resp->status.code);
+ if (code != GRPC_STATUS_OK) {
+ grpc_slice* details = static_cast<grpc_slice*>(resp->status.details.arg);
+ if (details != nullptr) {
+ char* error_details = grpc_slice_to_c_string(*details);
+ gpr_log(GPR_ERROR, "Error from handshaker service:%s", error_details);
+ gpr_free(error_details);
+ }
+ }
+ grpc_gcp_handshaker_resp_destroy(resp);
+ cb(alts_tsi_utils_convert_to_tsi_result(code), user_data, bytes_to_send,
+ bytes_to_send_size, result);
}
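// The handler above grows a single reusable scratch buffer rather than
// allocating per response; a standalone sketch of the same doubling strategy,
// with scratch_buffer and ensure_capacity() as illustrative names and the
// initial capacity standing in for TSI_ALTS_INITIAL_BUFFER_SIZE:
#include <grpc/support/alloc.h>
#include <cstddef>

struct scratch_buffer {
  unsigned char* data;  // allocated with gpr_malloc/gpr_realloc
  size_t capacity;      // starts at the initial buffer size, never shrinks
};

static void ensure_capacity(scratch_buffer* buf, size_t needed) {
  while (needed > buf->capacity) {
    buf->capacity *= 2;  // geometric growth keeps reallocations to O(log n)
    buf->data =
        static_cast<unsigned char*>(gpr_realloc(buf->data, buf->capacity));
  }
}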
/**
- * Populate grpc operation data with the fields of ALTS TSI event and make a
- * grpc call.
+ * Populate grpc operation data with the fields of ALTS handshaker client and
+ * make a grpc call.
*/
-static tsi_result make_grpc_call(alts_handshaker_client* client,
- alts_tsi_event* event, bool is_start) {
- GPR_ASSERT(client != nullptr && event != nullptr);
- alts_grpc_handshaker_client* grpc_client =
- reinterpret_cast<alts_grpc_handshaker_client*>(client);
+static tsi_result make_grpc_call(alts_handshaker_client* c, bool is_start) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
grpc_op ops[kHandshakerClientOpNum];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
@@ -59,22 +201,22 @@ static tsi_result make_grpc_call(alts_handshaker_client* client,
GPR_ASSERT(op - ops <= kHandshakerClientOpNum);
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
- &event->initial_metadata;
+ &client->recv_initial_metadata;
op++;
GPR_ASSERT(op - ops <= kHandshakerClientOpNum);
}
op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message.send_message = event->send_buffer;
+ op->data.send_message.send_message = client->send_buffer;
op++;
GPR_ASSERT(op - ops <= kHandshakerClientOpNum);
op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message.recv_message = &event->recv_buffer;
+ op->data.recv_message.recv_message = &client->recv_buffer;
op++;
GPR_ASSERT(op - ops <= kHandshakerClientOpNum);
- GPR_ASSERT(grpc_client->grpc_caller != nullptr);
- if (grpc_client->grpc_caller(grpc_client->call, ops,
- static_cast<size_t>(op - ops),
- (void*)event) != GRPC_CALL_OK) {
+ GPR_ASSERT(client->grpc_caller != nullptr);
+ if (client->grpc_caller(client->call, ops, static_cast<size_t>(op - ops),
+ &client->on_handshaker_service_resp_recv) !=
+ GRPC_CALL_OK) {
gpr_log(GPR_ERROR, "Start batch operation failed");
return TSI_INTERNAL_ERROR;
}
@@ -82,7 +224,11 @@ static tsi_result make_grpc_call(alts_handshaker_client* client,
}
/* Create and populate a client_start handshaker request, then serialize it. */
-static grpc_byte_buffer* get_serialized_start_client(alts_tsi_event* event) {
+static grpc_byte_buffer* get_serialized_start_client(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
bool ok = true;
grpc_gcp_handshaker_req* req =
grpc_gcp_handshaker_req_create(CLIENT_START_REQ);
@@ -91,14 +237,14 @@ static grpc_byte_buffer* get_serialized_start_client(alts_tsi_event* event) {
ok &= grpc_gcp_handshaker_req_add_application_protocol(
req, ALTS_APPLICATION_PROTOCOL);
ok &= grpc_gcp_handshaker_req_add_record_protocol(req, ALTS_RECORD_PROTOCOL);
- grpc_gcp_rpc_protocol_versions* versions = &event->options->rpc_versions;
+ grpc_gcp_rpc_protocol_versions* versions = &client->options->rpc_versions;
ok &= grpc_gcp_handshaker_req_set_rpc_versions(
req, versions->max_rpc_version.major, versions->max_rpc_version.minor,
versions->min_rpc_version.major, versions->min_rpc_version.minor);
- char* target_name = grpc_slice_to_c_string(event->target_name);
+ char* target_name = grpc_slice_to_c_string(client->target_name);
ok &= grpc_gcp_handshaker_req_set_target_name(req, target_name);
target_service_account* ptr =
- (reinterpret_cast<grpc_alts_credentials_client_options*>(event->options))
+ (reinterpret_cast<grpc_alts_credentials_client_options*>(client->options))
->target_account_list_head;
while (ptr != nullptr) {
grpc_gcp_handshaker_req_add_target_identity_service_account(req, ptr->data);
@@ -116,19 +262,21 @@ static grpc_byte_buffer* get_serialized_start_client(alts_tsi_event* event) {
return buffer;
}
-static tsi_result handshaker_client_start_client(alts_handshaker_client* client,
- alts_tsi_event* event) {
- if (client == nullptr || event == nullptr) {
- gpr_log(GPR_ERROR, "Invalid arguments to handshaker_client_start_client()");
+static tsi_result handshaker_client_start_client(alts_handshaker_client* c) {
+ if (c == nullptr) {
+ gpr_log(GPR_ERROR, "client is nullptr in handshaker_client_start_client()");
return TSI_INVALID_ARGUMENT;
}
- grpc_byte_buffer* buffer = get_serialized_start_client(event);
+ grpc_byte_buffer* buffer = get_serialized_start_client(c);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
if (buffer == nullptr) {
gpr_log(GPR_ERROR, "get_serialized_start_client() failed");
return TSI_INTERNAL_ERROR;
}
- event->send_buffer = buffer;
- tsi_result result = make_grpc_call(client, event, true /* is_start */);
+ handshaker_client_send_buffer_destroy(client);
+ client->send_buffer = buffer;
+ tsi_result result = make_grpc_call(&client->base, true /* is_start */);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "make_grpc_call() failed");
}
@@ -137,8 +285,11 @@ static tsi_result handshaker_client_start_client(alts_handshaker_client* client,
/* Create and populate a start_server handshaker request, then serialize it. */
static grpc_byte_buffer* get_serialized_start_server(
- alts_tsi_event* event, grpc_slice* bytes_received) {
+ alts_handshaker_client* c, grpc_slice* bytes_received) {
+ GPR_ASSERT(c != nullptr);
GPR_ASSERT(bytes_received != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
grpc_gcp_handshaker_req* req =
grpc_gcp_handshaker_req_create(SERVER_START_REQ);
bool ok = grpc_gcp_handshaker_req_add_application_protocol(
@@ -148,7 +299,7 @@ static grpc_byte_buffer* get_serialized_start_server(
ok &= grpc_gcp_handshaker_req_set_in_bytes(
req, reinterpret_cast<const char*> GRPC_SLICE_START_PTR(*bytes_received),
GRPC_SLICE_LENGTH(*bytes_received));
- grpc_gcp_rpc_protocol_versions* versions = &event->options->rpc_versions;
+ grpc_gcp_rpc_protocol_versions* versions = &client->options->rpc_versions;
ok &= grpc_gcp_handshaker_req_set_rpc_versions(
req, versions->max_rpc_version.major, versions->max_rpc_version.minor,
versions->min_rpc_version.major, versions->min_rpc_version.minor);
@@ -163,20 +314,22 @@ static grpc_byte_buffer* get_serialized_start_server(
return buffer;
}
-static tsi_result handshaker_client_start_server(alts_handshaker_client* client,
- alts_tsi_event* event,
+static tsi_result handshaker_client_start_server(alts_handshaker_client* c,
grpc_slice* bytes_received) {
- if (client == nullptr || event == nullptr || bytes_received == nullptr) {
+ if (c == nullptr || bytes_received == nullptr) {
gpr_log(GPR_ERROR, "Invalid arguments to handshaker_client_start_server()");
return TSI_INVALID_ARGUMENT;
}
- grpc_byte_buffer* buffer = get_serialized_start_server(event, bytes_received);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ grpc_byte_buffer* buffer = get_serialized_start_server(c, bytes_received);
if (buffer == nullptr) {
gpr_log(GPR_ERROR, "get_serialized_start_server() failed");
return TSI_INTERNAL_ERROR;
}
- event->send_buffer = buffer;
- tsi_result result = make_grpc_call(client, event, true /* is_start */);
+ handshaker_client_send_buffer_destroy(client);
+ client->send_buffer = buffer;
+ tsi_result result = make_grpc_call(&client->base, true /* is_start */);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "make_grpc_call() failed");
}
@@ -201,40 +354,48 @@ static grpc_byte_buffer* get_serialized_next(grpc_slice* bytes_received) {
return buffer;
}
-static tsi_result handshaker_client_next(alts_handshaker_client* client,
- alts_tsi_event* event,
+static tsi_result handshaker_client_next(alts_handshaker_client* c,
grpc_slice* bytes_received) {
- if (client == nullptr || event == nullptr || bytes_received == nullptr) {
+ if (c == nullptr || bytes_received == nullptr) {
gpr_log(GPR_ERROR, "Invalid arguments to handshaker_client_next()");
return TSI_INVALID_ARGUMENT;
}
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ grpc_slice_unref_internal(client->recv_bytes);
+ client->recv_bytes = grpc_slice_ref(*bytes_received);
grpc_byte_buffer* buffer = get_serialized_next(bytes_received);
if (buffer == nullptr) {
gpr_log(GPR_ERROR, "get_serialized_next() failed");
return TSI_INTERNAL_ERROR;
}
- event->send_buffer = buffer;
- tsi_result result = make_grpc_call(client, event, false /* is_start */);
+ handshaker_client_send_buffer_destroy(client);
+ client->send_buffer = buffer;
+ tsi_result result = make_grpc_call(&client->base, false /* is_start */);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "make_grpc_call() failed");
}
return result;
}
-static void handshaker_client_shutdown(alts_handshaker_client* client) {
- GPR_ASSERT(client != nullptr);
- alts_grpc_handshaker_client* grpc_client =
- reinterpret_cast<alts_grpc_handshaker_client*>(client);
- GPR_ASSERT(grpc_call_cancel(grpc_client->call, nullptr) == GRPC_CALL_OK);
+static void handshaker_client_shutdown(alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ if (client->call != nullptr) {
+ grpc_call_cancel_internal(client->call);
+ }
}
-static void handshaker_client_destruct(alts_handshaker_client* client) {
- if (client == nullptr) {
+static void handshaker_client_destruct(alts_handshaker_client* c) {
+ if (c == nullptr) {
return;
}
- alts_grpc_handshaker_client* grpc_client =
- reinterpret_cast<alts_grpc_handshaker_client*>(client);
- grpc_call_unref(grpc_client->call);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ if (client->call != nullptr) {
+ grpc_call_unref(client->call);
+ }
}
static const alts_handshaker_client_vtable vtable = {
@@ -243,22 +404,45 @@ static const alts_handshaker_client_vtable vtable = {
handshaker_client_destruct};
alts_handshaker_client* alts_grpc_handshaker_client_create(
- grpc_channel* channel, grpc_completion_queue* queue,
- const char* handshaker_service_url) {
- if (channel == nullptr || queue == nullptr ||
- handshaker_service_url == nullptr) {
+ alts_tsi_handshaker* handshaker, grpc_channel* channel,
+ const char* handshaker_service_url, grpc_pollset_set* interested_parties,
+ grpc_alts_credentials_options* options, grpc_slice target_name,
+ grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
+ void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
+ bool is_client) {
+ if (channel == nullptr || handshaker_service_url == nullptr) {
gpr_log(GPR_ERROR, "Invalid arguments to alts_handshaker_client_create()");
return nullptr;
}
alts_grpc_handshaker_client* client =
static_cast<alts_grpc_handshaker_client*>(gpr_zalloc(sizeof(*client)));
- client->grpc_caller = grpc_start_batch;
+ client->grpc_caller = grpc_call_start_batch_and_execute;
+ client->handshaker = handshaker;
+ client->cb = cb;
+ client->user_data = user_data;
+ client->send_buffer = nullptr;
+ client->recv_buffer = nullptr;
+ client->options = grpc_alts_credentials_options_copy(options);
+ client->target_name = grpc_slice_copy(target_name);
+ client->recv_bytes = grpc_empty_slice();
+ grpc_metadata_array_init(&client->recv_initial_metadata);
+ client->grpc_cb = grpc_cb;
+ client->is_client = is_client;
+ client->buffer_size = TSI_ALTS_INITIAL_BUFFER_SIZE;
+ client->buffer = static_cast<unsigned char*>(gpr_zalloc(client->buffer_size));
grpc_slice slice = grpc_slice_from_copied_string(handshaker_service_url);
- client->call = grpc_channel_create_call(
- channel, nullptr, GRPC_PROPAGATE_DEFAULTS, queue,
- grpc_slice_from_static_string(ALTS_SERVICE_METHOD), &slice,
- gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
- client->base.vtable = &vtable;
+ client->call =
+ strcmp(handshaker_service_url, ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING) ==
+ 0
+ ? nullptr
+ : grpc_channel_create_pollset_set_call(
+ channel, nullptr, GRPC_PROPAGATE_DEFAULTS, interested_parties,
+ grpc_slice_from_static_string(ALTS_SERVICE_METHOD), &slice,
+ GRPC_MILLIS_INF_FUTURE, nullptr);
+ client->base.vtable =
+ vtable_for_testing == nullptr ? &vtable : vtable_for_testing;
+ GRPC_CLOSURE_INIT(&client->on_handshaker_service_resp_recv, client->grpc_cb,
+ client, grpc_schedule_on_exec_ctx);
grpc_slice_unref_internal(slice);
return &client->base;
}
@@ -267,21 +451,114 @@ namespace grpc_core {
namespace internal {
void alts_handshaker_client_set_grpc_caller_for_testing(
- alts_handshaker_client* client, alts_grpc_caller caller) {
- GPR_ASSERT(client != nullptr && caller != nullptr);
- alts_grpc_handshaker_client* grpc_client =
- reinterpret_cast<alts_grpc_handshaker_client*>(client);
- grpc_client->grpc_caller = caller;
+ alts_handshaker_client* c, alts_grpc_caller caller) {
+ GPR_ASSERT(c != nullptr && caller != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ client->grpc_caller = caller;
+}
+
+grpc_byte_buffer* alts_handshaker_client_get_send_buffer_for_testing(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ return client->send_buffer;
+}
+
+grpc_byte_buffer** alts_handshaker_client_get_recv_buffer_addr_for_testing(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ return &client->recv_buffer;
+}
+
+grpc_metadata_array* alts_handshaker_client_get_initial_metadata_for_testing(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ return &client->recv_initial_metadata;
+}
+
+void alts_handshaker_client_set_recv_bytes_for_testing(
+ alts_handshaker_client* c, grpc_slice* recv_bytes) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ client->recv_bytes = grpc_slice_ref(*recv_bytes);
+}
+
+void alts_handshaker_client_set_fields_for_testing(
+ alts_handshaker_client* c, alts_tsi_handshaker* handshaker,
+ tsi_handshaker_on_next_done_cb cb, void* user_data,
+ grpc_byte_buffer* recv_buffer, grpc_status_code status) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ client->handshaker = handshaker;
+ client->cb = cb;
+ client->user_data = user_data;
+ client->recv_buffer = recv_buffer;
+ client->status = status;
+}
+
+void alts_handshaker_client_check_fields_for_testing(
+ alts_handshaker_client* c, tsi_handshaker_on_next_done_cb cb,
+ void* user_data, bool has_sent_start_message, grpc_slice* recv_bytes) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ GPR_ASSERT(client->cb == cb);
+ GPR_ASSERT(client->user_data == user_data);
+ if (recv_bytes != nullptr) {
+ GPR_ASSERT(grpc_slice_cmp(client->recv_bytes, *recv_bytes) == 0);
+ }
+ GPR_ASSERT(alts_tsi_handshaker_get_has_sent_start_message_for_testing(
+ client->handshaker) == has_sent_start_message);
+}
+
+void alts_handshaker_client_set_vtable_for_testing(
+ alts_handshaker_client* c, alts_handshaker_client_vtable* vtable) {
+ GPR_ASSERT(c != nullptr);
+ GPR_ASSERT(vtable != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ client->base.vtable = vtable;
+}
+
+alts_tsi_handshaker* alts_handshaker_client_get_handshaker_for_testing(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ return client->handshaker;
+}
+
+void alts_handshaker_client_set_cb_for_testing(
+ alts_handshaker_client* c, tsi_handshaker_on_next_done_cb cb) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ client->cb = cb;
+}
+
+grpc_closure* alts_handshaker_client_get_closure_for_testing(
+ alts_handshaker_client* c) {
+ GPR_ASSERT(c != nullptr);
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ return &client->on_handshaker_service_resp_recv;
}
} // namespace internal
} // namespace grpc_core
-tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client,
- alts_tsi_event* event) {
+tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client) {
if (client != nullptr && client->vtable != nullptr &&
client->vtable->client_start != nullptr) {
- return client->vtable->client_start(client, event);
+ return client->vtable->client_start(client);
}
gpr_log(GPR_ERROR,
"client or client->vtable has not been initialized properly");
@@ -289,11 +566,10 @@ tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client,
}
tsi_result alts_handshaker_client_start_server(alts_handshaker_client* client,
- alts_tsi_event* event,
grpc_slice* bytes_received) {
if (client != nullptr && client->vtable != nullptr &&
client->vtable->server_start != nullptr) {
- return client->vtable->server_start(client, event, bytes_received);
+ return client->vtable->server_start(client, bytes_received);
}
gpr_log(GPR_ERROR,
"client or client->vtable has not been initialized properly");
@@ -301,11 +577,10 @@ tsi_result alts_handshaker_client_start_server(alts_handshaker_client* client,
}
tsi_result alts_handshaker_client_next(alts_handshaker_client* client,
- alts_tsi_event* event,
grpc_slice* bytes_received) {
if (client != nullptr && client->vtable != nullptr &&
client->vtable->next != nullptr) {
- return client->vtable->next(client, event, bytes_received);
+ return client->vtable->next(client, bytes_received);
}
gpr_log(GPR_ERROR,
"client or client->vtable has not been initialized properly");
@@ -319,11 +594,22 @@ void alts_handshaker_client_shutdown(alts_handshaker_client* client) {
}
}
-void alts_handshaker_client_destroy(alts_handshaker_client* client) {
- if (client != nullptr) {
- if (client->vtable != nullptr && client->vtable->destruct != nullptr) {
- client->vtable->destruct(client);
+void alts_handshaker_client_destroy(alts_handshaker_client* c) {
+ if (c != nullptr) {
+ if (c->vtable != nullptr && c->vtable->destruct != nullptr) {
+ c->vtable->destruct(c);
}
+ alts_grpc_handshaker_client* client =
+ reinterpret_cast<alts_grpc_handshaker_client*>(c);
+ grpc_byte_buffer_destroy(client->send_buffer);
+ grpc_byte_buffer_destroy(client->recv_buffer);
+ client->send_buffer = nullptr;
+ client->recv_buffer = nullptr;
+ grpc_metadata_array_destroy(&client->recv_initial_metadata);
+ grpc_slice_unref_internal(client->recv_bytes);
+ grpc_slice_unref_internal(client->target_name);
+ grpc_alts_credentials_options_destroy(client->options);
+ gpr_free(client->buffer);
gpr_free(client);
}
}
diff --git a/src/core/tsi/alts/handshaker/alts_handshaker_client.h b/src/core/tsi/alts/handshaker/alts_handshaker_client.h
index 8dd8fe440d..4b489875f3 100644
--- a/src/core/tsi/alts/handshaker/alts_handshaker_client.h
+++ b/src/core/tsi/alts/handshaker/alts_handshaker_client.h
@@ -21,16 +21,24 @@
#include <grpc/support/port_platform.h>
+#include <grpc/byte_buffer.h>
+#include <grpc/byte_buffer_reader.h>
#include <grpc/grpc.h>
-#include "src/core/tsi/alts/handshaker/alts_tsi_event.h"
+#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h"
+#include "src/core/tsi/transport_security_interface.h"
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/pollset_set.h"
#define ALTS_SERVICE_METHOD "/grpc.gcp.HandshakerService/DoHandshake"
#define ALTS_APPLICATION_PROTOCOL "grpc"
#define ALTS_RECORD_PROTOCOL "ALTSRP_GCM_AES128_REKEY"
+#define ALTS_HANDSHAKER_SERVICE_URL_FOR_TESTING "lame"
const size_t kAltsAes128GcmRekeyKeyLength = 44;
+typedef struct alts_tsi_handshaker alts_tsi_handshaker;
/**
* A ALTS handshaker client interface. It is used to communicate with
* ALTS handshaker service by scheduling a handshaker request that could be one
@@ -41,63 +49,52 @@ typedef struct alts_handshaker_client alts_handshaker_client;
/* A function that makes the grpc call to the handshaker service. */
typedef grpc_call_error (*alts_grpc_caller)(grpc_call* call, const grpc_op* ops,
- size_t nops, void* tag);
+ size_t nops, grpc_closure* tag);
/* V-table for ALTS handshaker client operations. */
typedef struct alts_handshaker_client_vtable {
- tsi_result (*client_start)(alts_handshaker_client* client,
- alts_tsi_event* event);
+ tsi_result (*client_start)(alts_handshaker_client* client);
tsi_result (*server_start)(alts_handshaker_client* client,
- alts_tsi_event* event, grpc_slice* bytes_received);
- tsi_result (*next)(alts_handshaker_client* client, alts_tsi_event* event,
+ grpc_slice* bytes_received);
+ tsi_result (*next)(alts_handshaker_client* client,
grpc_slice* bytes_received);
void (*shutdown)(alts_handshaker_client* client);
void (*destruct)(alts_handshaker_client* client);
} alts_handshaker_client_vtable;
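
For orientation only: a test that wants to bypass the real handshaker service could inject a stub vtable through the new vtable_for_testing parameter of alts_grpc_handshaker_client_create declared below. This is a minimal sketch, not part of the patch; the stub_* names are made up, and the initializer order follows the typedef above.

// Sketch of a test-only stub vtable (hypothetical names, not in this patch).
static tsi_result stub_client_start(alts_handshaker_client* client) {
  return TSI_OK;  // pretend the client_start request was scheduled
}
static tsi_result stub_server_start(alts_handshaker_client* client,
                                    grpc_slice* bytes_received) {
  return TSI_OK;
}
static tsi_result stub_next(alts_handshaker_client* client,
                            grpc_slice* bytes_received) {
  return TSI_OK;
}
static void stub_shutdown(alts_handshaker_client* client) {}
static void stub_destruct(alts_handshaker_client* client) {}
static alts_handshaker_client_vtable stub_vtable = {
    stub_client_start, stub_server_start, stub_next, stub_shutdown,
    stub_destruct};
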
-struct alts_handshaker_client {
- const alts_handshaker_client_vtable* vtable;
-};
-
/**
* This method schedules a client_start handshaker request to ALTS handshaker
* service.
*
* - client: ALTS handshaker client instance.
- * - event: ALTS TSI event instance.
*
* It returns TSI_OK on success and an error status code on failure.
*/
-tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client,
- alts_tsi_event* event);
+tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client);
/**
* This method schedules a server_start handshaker request to ALTS handshaker
* service.
*
* - client: ALTS handshaker client instance.
- * - event: ALTS TSI event instance.
* - bytes_received: bytes in out_frames returned from the peer's handshaker
* response.
*
* It returns TSI_OK on success and an error status code on failure.
*/
tsi_result alts_handshaker_client_start_server(alts_handshaker_client* client,
- alts_tsi_event* event,
grpc_slice* bytes_received);
/**
* This method schedules a next handshaker request to ALTS handshaker service.
*
* - client: ALTS handshaker client instance.
- * - event: ALTS TSI event instance.
* - bytes_received: bytes in out_frames returned from the peer's handshaker
* response.
*
* It returns TSI_OK on success and an error status code on failure.
*/
tsi_result alts_handshaker_client_next(alts_handshaker_client* client,
- alts_tsi_event* event,
grpc_slice* bytes_received);
/**
@@ -110,38 +107,51 @@ tsi_result alts_handshaker_client_next(alts_handshaker_client* client,
void alts_handshaker_client_shutdown(alts_handshaker_client* client);
/**
- * This method destroys a ALTS handshaker client.
+ * This method destroys an ALTS handshaker client.
*
- * - client: a ALTS handshaker client instance.
+ * - client: an ALTS handshaker client instance.
*/
void alts_handshaker_client_destroy(alts_handshaker_client* client);
/**
- * This method creates a ALTS handshaker client.
+ * This method creates an ALTS handshaker client.
*
+ * - handshaker: ALTS TSI handshaker to which the created handshaker client
+ *   belongs.
* - channel: grpc channel to ALTS handshaker service.
- * - queue: grpc completion queue.
* - handshaker_service_url: address of ALTS handshaker service in the format of
* "host:port".
- *
- * It returns the created ALTS handshaker client on success, and NULL on
- * failure.
+ * - interested_parties: set of pollsets interested in this connection.
+ * - options: ALTS credentials options containing information passed from the
+ *   TSI caller (e.g., rpc protocol versions).
+ * - target_name: the name of the endpoint that the channel is connecting to,
+ *   and will be used for the secure naming check.
+ * - grpc_cb: gRPC-provided callback passed from the TSI handshaker.
+ * - cb: callback to be executed when the tsi_handshaker_next API completes.
+ * - user_data: argument passed to cb.
+ * - vtable_for_testing: ALTS handshaker client vtable instance used for
+ *   testing purposes only.
+ * - is_client: a boolean value indicating if the created handshaker client is
+ *   used at the client (is_client = true) or server (is_client = false) side.
+ *
+ * It returns the created ALTS handshaker client on success, and NULL on
+ * failure.
*/
alts_handshaker_client* alts_grpc_handshaker_client_create(
- grpc_channel* channel, grpc_completion_queue* queue,
- const char* handshaker_service_url);
-
-namespace grpc_core {
-namespace internal {
+ alts_tsi_handshaker* handshaker, grpc_channel* channel,
+ const char* handshaker_service_url, grpc_pollset_set* interested_parties,
+ grpc_alts_credentials_options* options, grpc_slice target_name,
+ grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
+ void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
+ bool is_client);
/**
- * Unsafe, use for testing only. It allows the caller to change the way that
- * GRPC calls are made to the handshaker service.
+ * This method handles the handshaker response returned from the ALTS
+ * handshaker service. Note that the only reason this API is exposed is that it
+ * is used in alts_shared_resource.cc.
+ *
+ * - client: an ALTS handshaker client instance.
+ * - is_ok: a boolean value indicating if the handshaker response is ok to read.
*/
-void alts_handshaker_client_set_grpc_caller_for_testing(
- alts_handshaker_client* client, alts_grpc_caller caller);
-
-} // namespace internal
-} // namespace grpc_core
+void alts_handshaker_client_handle_response(alts_handshaker_client* client,
+ bool is_ok);
#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_CLIENT_H */
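
For readers following the API change, here is a condensed, illustrative sketch of how the reworked client is driven, mirroring the call that handshaker_next() makes later in this diff. It is not a complete program; handshaker, channel, handshaker_service_url, interested_parties, options, target_name, grpc_cb, cb, and user_data are assumed to be set up by the caller.

// Creation mirrors the call in handshaker_next() (see alts_tsi_handshaker.cc
// below); all arguments here are caller-provided placeholders.
alts_handshaker_client* client = alts_grpc_handshaker_client_create(
    handshaker, channel, handshaker_service_url, interested_parties, options,
    target_name, grpc_cb, cb, user_data, /*vtable_for_testing=*/nullptr,
    /*is_client=*/true);
GPR_ASSERT(client != nullptr);
// First round: client_start on the client side (server_start on the server
// side, with the bytes received from the peer).
GPR_ASSERT(alts_handshaker_client_start_client(client) == TSI_OK);
// Later rounds feed peer bytes through alts_handshaker_client_next(); the
// service's responses come back via alts_handshaker_client_handle_response()
// on whichever thread model was selected at creation time.
alts_handshaker_client_shutdown(client);
alts_handshaker_client_destroy(client);
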
diff --git a/src/core/tsi/alts/handshaker/alts_shared_resource.cc b/src/core/tsi/alts/handshaker/alts_shared_resource.cc
new file mode 100644
index 0000000000..3501257f05
--- /dev/null
+++ b/src/core/tsi/alts/handshaker/alts_shared_resource.cc
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/tsi/alts/handshaker/alts_shared_resource.h"
+
+#include <grpc/support/log.h>
+
+#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h"
+
+static alts_shared_resource_dedicated g_alts_resource_dedicated;
+
+alts_shared_resource_dedicated* grpc_alts_get_shared_resource_dedicated(void) {
+ return &g_alts_resource_dedicated;
+}
+
+static void thread_worker(void* arg) {
+ while (true) {
+ grpc_event event =
+ grpc_completion_queue_next(g_alts_resource_dedicated.cq,
+ gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
+ GPR_ASSERT(event.type != GRPC_QUEUE_TIMEOUT);
+ if (event.type == GRPC_QUEUE_SHUTDOWN) {
+ break;
+ }
+ GPR_ASSERT(event.type == GRPC_OP_COMPLETE);
+ alts_handshaker_client* client =
+ static_cast<alts_handshaker_client*>(event.tag);
+ alts_handshaker_client_handle_response(client, event.success);
+ }
+}
+
+void grpc_alts_shared_resource_dedicated_init() {
+ g_alts_resource_dedicated.cq = nullptr;
+ gpr_mu_init(&g_alts_resource_dedicated.mu);
+}
+
+void grpc_alts_shared_resource_dedicated_start(
+ const char* handshaker_service_url) {
+ gpr_mu_lock(&g_alts_resource_dedicated.mu);
+ if (g_alts_resource_dedicated.cq == nullptr) {
+ g_alts_resource_dedicated.channel =
+ grpc_insecure_channel_create(handshaker_service_url, nullptr, nullptr);
+ g_alts_resource_dedicated.cq =
+ grpc_completion_queue_create_for_next(nullptr);
+ g_alts_resource_dedicated.thread =
+ grpc_core::Thread("alts_tsi_handshaker", &thread_worker, nullptr);
+ g_alts_resource_dedicated.interested_parties = grpc_pollset_set_create();
+ grpc_pollset_set_add_pollset(g_alts_resource_dedicated.interested_parties,
+ grpc_cq_pollset(g_alts_resource_dedicated.cq));
+ g_alts_resource_dedicated.thread.Start();
+ }
+ gpr_mu_unlock(&g_alts_resource_dedicated.mu);
+}
+
+void grpc_alts_shared_resource_dedicated_shutdown() {
+ if (g_alts_resource_dedicated.cq != nullptr) {
+ grpc_pollset_set_del_pollset(g_alts_resource_dedicated.interested_parties,
+ grpc_cq_pollset(g_alts_resource_dedicated.cq));
+ grpc_completion_queue_shutdown(g_alts_resource_dedicated.cq);
+ g_alts_resource_dedicated.thread.Join();
+ grpc_pollset_set_destroy(g_alts_resource_dedicated.interested_parties);
+ grpc_completion_queue_destroy(g_alts_resource_dedicated.cq);
+ grpc_channel_destroy(g_alts_resource_dedicated.channel);
+ }
+ gpr_mu_destroy(&g_alts_resource_dedicated.mu);
+}
diff --git a/src/core/tsi/alts/handshaker/alts_shared_resource.h b/src/core/tsi/alts/handshaker/alts_shared_resource.h
new file mode 100644
index 0000000000..8ae0089a11
--- /dev/null
+++ b/src/core/tsi/alts/handshaker/alts_shared_resource.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_SHARED_RESOURCE_H
+#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_SHARED_RESOURCE_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+/**
+ * Main struct containing ALTS shared resources used when
+ * employing the dedicated completion queue and thread.
+ */
+typedef struct alts_shared_resource_dedicated {
+ grpc_core::Thread thread;
+ grpc_completion_queue* cq;
+ grpc_pollset_set* interested_parties;
+ grpc_cq_completion storage;
+ gpr_mu mu;
+ grpc_channel* channel;
+} alts_shared_resource_dedicated;
+
+/* This method returns the address of alts_shared_resource_dedicated
+ * object shared by all TSI handshakes.
+ */
+alts_shared_resource_dedicated* grpc_alts_get_shared_resource_dedicated(void);
+
+/**
+ * This method destroys the alts_shared_resource_dedicated object
+ * shared by all TSI handshakes. The application is responsible for
+ * invoking the API before calling grpc_shutdown().
+ */
+void grpc_alts_shared_resource_dedicated_shutdown();
+
+/**
+ * This method initializes the alts_shared_resource_dedicated object
+ * shared by all TSI handshakes. The application is responsible for
+ * invoking the API after calling grpc_init().
+ */
+void grpc_alts_shared_resource_dedicated_init();
+
+/**
+ * This method populates various fields of the alts_shared_resource_dedicated
+ * object shared by all TSI handshakes and starts the dedicated thread.
+ * The API is invoked by the caller in a lazy manner; that is, it only gets
+ * invoked when an ALTS TSI handshake occurs for the first time.
+ */
+void grpc_alts_shared_resource_dedicated_start(
+ const char* handshaker_service_url);
+
+#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_SHARED_RESOURCE_H \
+ */
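
To summarize the lifecycle described in the comments above, here is a minimal ordering sketch. It is illustrative only; the handshaker_service_url value is a placeholder supplied by the caller.

grpc_init();
grpc_alts_shared_resource_dedicated_init();
// ... the first ALTS TSI handshake that uses the dedicated thread model
// triggers, internally:
//   grpc_alts_shared_resource_dedicated_start(handshaker_service_url);
// ... handshakes complete ...
grpc_alts_shared_resource_dedicated_shutdown();
grpc_shutdown();
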
diff --git a/src/core/tsi/alts/handshaker/alts_tsi_event.cc b/src/core/tsi/alts/handshaker/alts_tsi_event.cc
deleted file mode 100644
index cb36d5ebd1..0000000000
--- a/src/core/tsi/alts/handshaker/alts_tsi_event.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/tsi/alts/handshaker/alts_tsi_event.h"
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/slice/slice_internal.h"
-
-tsi_result alts_tsi_event_create(alts_tsi_handshaker* handshaker,
- tsi_handshaker_on_next_done_cb cb,
- void* user_data,
- grpc_alts_credentials_options* options,
- grpc_slice target_name,
- alts_tsi_event** event) {
- if (event == nullptr || handshaker == nullptr || cb == nullptr) {
- gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_event_create()");
- return TSI_INVALID_ARGUMENT;
- }
- alts_tsi_event* e = static_cast<alts_tsi_event*>(gpr_zalloc(sizeof(*e)));
- e->handshaker = handshaker;
- e->cb = cb;
- e->user_data = user_data;
- e->options = grpc_alts_credentials_options_copy(options);
- e->target_name = grpc_slice_copy(target_name);
- grpc_metadata_array_init(&e->initial_metadata);
- grpc_metadata_array_init(&e->trailing_metadata);
- *event = e;
- return TSI_OK;
-}
-
-void alts_tsi_event_dispatch_to_handshaker(alts_tsi_event* event, bool is_ok) {
- if (event == nullptr) {
- gpr_log(
- GPR_ERROR,
- "ALTS TSI event is nullptr in alts_tsi_event_dispatch_to_handshaker()");
- return;
- }
- alts_tsi_handshaker_handle_response(event->handshaker, event->recv_buffer,
- event->status, &event->details, event->cb,
- event->user_data, is_ok);
-}
-
-void alts_tsi_event_destroy(alts_tsi_event* event) {
- if (event == nullptr) {
- return;
- }
- grpc_byte_buffer_destroy(event->send_buffer);
- grpc_byte_buffer_destroy(event->recv_buffer);
- grpc_metadata_array_destroy(&event->initial_metadata);
- grpc_metadata_array_destroy(&event->trailing_metadata);
- grpc_slice_unref_internal(event->details);
- grpc_slice_unref_internal(event->target_name);
- grpc_alts_credentials_options_destroy(event->options);
- gpr_free(event);
-}
diff --git a/src/core/tsi/alts/handshaker/alts_tsi_event.h b/src/core/tsi/alts/handshaker/alts_tsi_event.h
deleted file mode 100644
index 043e75d4a9..0000000000
--- a/src/core/tsi/alts/handshaker/alts_tsi_event.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H
-#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H
-
-#include <grpc/support/port_platform.h>
-
-#include <grpc/byte_buffer.h>
-#include <grpc/byte_buffer_reader.h>
-
-#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h"
-#include "src/core/tsi/transport_security_interface.h"
-
-/**
- * A ALTS TSI event interface. In asynchronous implementation of
- * tsi_handshaker_next(), the function will exit after scheduling a handshaker
- * request to ALTS handshaker service without waiting for response to return.
- * The event is used to link the scheduled handshaker request with the
- * corresponding response so that enough context information can be inferred
- * from it to handle the response. All APIs in the header are thread-compatible.
- */
-
-/**
- * Main struct for ALTS TSI event. It retains ownership on send_buffer and
- * recv_buffer, but not on handshaker.
- */
-typedef struct alts_tsi_event {
- alts_tsi_handshaker* handshaker;
- grpc_byte_buffer* send_buffer;
- grpc_byte_buffer* recv_buffer;
- grpc_status_code status;
- grpc_slice details;
- grpc_metadata_array initial_metadata;
- grpc_metadata_array trailing_metadata;
- tsi_handshaker_on_next_done_cb cb;
- void* user_data;
- grpc_alts_credentials_options* options;
- grpc_slice target_name;
-} alts_tsi_event;
-
-/**
- * This method creates a ALTS TSI event.
- *
- * - handshaker: ALTS TSI handshaker instance associated with the event to be
- * created. The created event does not own the handshaker instance.
- * - cb: callback function to be called when handling data received from ALTS
- * handshaker service.
- * - user_data: argument to callback function.
- * - options: ALTS credentials options.
- * - target_name: name of endpoint used for secure naming check.
- * - event: address of ALTS TSI event instance to be returned from the method.
- *
- * It returns TSI_OK on success and an error status code on failure.
- */
-tsi_result alts_tsi_event_create(alts_tsi_handshaker* handshaker,
- tsi_handshaker_on_next_done_cb cb,
- void* user_data,
- grpc_alts_credentials_options* options,
- grpc_slice target_name,
- alts_tsi_event** event);
-
-/**
- * This method dispatches a ALTS TSI event received from the handshaker service,
- * and a boolean flag indicating if the event is valid to read to ALTS TSI
- * handshaker to process. It is called by TSI thread.
- *
- * - event: ALTS TSI event instance.
- * - is_ok: a boolean value indicating if the event is valid to read.
- */
-void alts_tsi_event_dispatch_to_handshaker(alts_tsi_event* event, bool is_ok);
-
-/**
- * This method destroys the ALTS TSI event.
- */
-void alts_tsi_event_destroy(alts_tsi_event* event);
-
-#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H */
diff --git a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
index dfdd659b87..1b7e58d3ce 100644
--- a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
+++ b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc
@@ -26,34 +26,34 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd_id.h>
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/alts/frame_protector/alts_frame_protector.h"
#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h"
+#include "src/core/tsi/alts/handshaker/alts_shared_resource.h"
#include "src/core/tsi/alts/handshaker/alts_tsi_utils.h"
#include "src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h"
-#include "src/core/tsi/alts_transport_security.h"
-
-#define TSI_ALTS_INITIAL_BUFFER_SIZE 256
-
-static alts_shared_resource* kSharedResource = alts_get_shared_resource();
/* Main struct for ALTS TSI handshaker. */
-typedef struct alts_tsi_handshaker {
+struct alts_tsi_handshaker {
tsi_handshaker base;
alts_handshaker_client* client;
- grpc_slice recv_bytes;
grpc_slice target_name;
- unsigned char* buffer;
- size_t buffer_size;
bool is_client;
bool has_sent_start_message;
+ bool has_created_handshaker_client;
+ char* handshaker_service_url;
+ grpc_pollset_set* interested_parties;
grpc_alts_credentials_options* options;
-} alts_tsi_handshaker;
+ alts_handshaker_client_vtable* client_vtable_for_testing;
+ grpc_channel* channel;
+};
/* Main struct for ALTS TSI handshaker result. */
typedef struct alts_tsi_handshaker_result {
@@ -193,9 +193,9 @@ static const tsi_handshaker_result_vtable result_vtable = {
handshaker_result_create_frame_protector,
handshaker_result_get_unused_bytes, handshaker_result_destroy};
-static tsi_result create_handshaker_result(grpc_gcp_handshaker_resp* resp,
- bool is_client,
- tsi_handshaker_result** self) {
+tsi_result alts_tsi_handshaker_result_create(grpc_gcp_handshaker_resp* resp,
+ bool is_client,
+ tsi_handshaker_result** self) {
if (self == nullptr || resp == nullptr) {
gpr_log(GPR_ERROR, "Invalid arguments to create_handshaker_result()");
return TSI_INVALID_ARGUMENT;
@@ -234,6 +234,27 @@ static tsi_result create_handshaker_result(grpc_gcp_handshaker_resp* resp,
return TSI_OK;
}
+/* gRPC-provided callback used when the gRPC thread model is applied. */
+static void on_handshaker_service_resp_recv(void* arg, grpc_error* error) {
+ alts_handshaker_client* client = static_cast<alts_handshaker_client*>(arg);
+ if (client == nullptr) {
+ gpr_log(GPR_ERROR, "ALTS handshaker client is nullptr");
+ return;
+ }
+ alts_handshaker_client_handle_response(client, true);
+}
+
+/* gRPC-provided callback used when a dedicated CQ and thread are used.
+ * It serves to safely bring control back to the application. */
+static void on_handshaker_service_resp_recv_dedicated(void* arg,
+ grpc_error* error) {
+ alts_shared_resource_dedicated* resource =
+ grpc_alts_get_shared_resource_dedicated();
+ grpc_cq_end_op(resource->cq, arg, GRPC_ERROR_NONE,
+ [](void* done_arg, grpc_cq_completion* storage) {}, nullptr,
+ &resource->storage);
+}
+
static tsi_result handshaker_next(
tsi_handshaker* self, const unsigned char* received_bytes,
size_t received_bytes_size, const unsigned char** bytes_to_send,
@@ -250,12 +271,36 @@ static tsi_result handshaker_next(
alts_tsi_handshaker* handshaker =
reinterpret_cast<alts_tsi_handshaker*>(self);
tsi_result ok = TSI_OK;
- alts_tsi_event* event = nullptr;
- ok = alts_tsi_event_create(handshaker, cb, user_data, handshaker->options,
- handshaker->target_name, &event);
- if (ok != TSI_OK) {
- gpr_log(GPR_ERROR, "Failed to create ALTS TSI event");
- return ok;
+ if (!handshaker->has_created_handshaker_client) {
+ if (handshaker->channel == nullptr) {
+ grpc_alts_shared_resource_dedicated_start(
+ handshaker->handshaker_service_url);
+ handshaker->interested_parties =
+ grpc_alts_get_shared_resource_dedicated()->interested_parties;
+ GPR_ASSERT(handshaker->interested_parties != nullptr);
+ }
+ grpc_iomgr_cb_func grpc_cb = handshaker->channel == nullptr
+ ? on_handshaker_service_resp_recv_dedicated
+ : on_handshaker_service_resp_recv;
+ grpc_channel* channel =
+ handshaker->channel == nullptr
+ ? grpc_alts_get_shared_resource_dedicated()->channel
+ : handshaker->channel;
+ handshaker->client = alts_grpc_handshaker_client_create(
+ handshaker, channel, handshaker->handshaker_service_url,
+ handshaker->interested_parties, handshaker->options,
+ handshaker->target_name, grpc_cb, cb, user_data,
+ handshaker->client_vtable_for_testing, handshaker->is_client);
+ if (handshaker->client == nullptr) {
+ gpr_log(GPR_ERROR, "Failed to create ALTS handshaker client");
+ return TSI_FAILED_PRECONDITION;
+ }
+ handshaker->has_created_handshaker_client = true;
+ }
+ if (handshaker->channel == nullptr &&
+ handshaker->client_vtable_for_testing == nullptr) {
+ GPR_ASSERT(grpc_cq_begin_op(grpc_alts_get_shared_resource_dedicated()->cq,
+ handshaker->client));
}
grpc_slice slice = (received_bytes == nullptr || received_bytes_size == 0)
? grpc_empty_slice()
@@ -264,16 +309,11 @@ static tsi_result handshaker_next(
received_bytes_size);
if (!handshaker->has_sent_start_message) {
ok = handshaker->is_client
- ? alts_handshaker_client_start_client(handshaker->client, event)
- : alts_handshaker_client_start_server(handshaker->client, event,
- &slice);
+ ? alts_handshaker_client_start_client(handshaker->client)
+ : alts_handshaker_client_start_server(handshaker->client, &slice);
handshaker->has_sent_start_message = true;
} else {
- if (!GRPC_SLICE_IS_EMPTY(handshaker->recv_bytes)) {
- grpc_slice_unref_internal(handshaker->recv_bytes);
- }
- handshaker->recv_bytes = grpc_slice_ref(slice);
- ok = alts_handshaker_client_next(handshaker->client, event, &slice);
+ ok = alts_handshaker_client_next(handshaker->client, &slice);
}
grpc_slice_unref_internal(slice);
if (ok != TSI_OK) {
@@ -283,6 +323,22 @@ static tsi_result handshaker_next(
return TSI_ASYNC;
}
+/*
+ * This API will be invoked by a non-gRPC application, so an ExecCtx needs
+ * to be explicitly created in order to invoke ALTS handshaker client APIs
+ * that assume the caller is inside gRPC core.
+ */
+static tsi_result handshaker_next_dedicated(
+ tsi_handshaker* self, const unsigned char* received_bytes,
+ size_t received_bytes_size, const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size, tsi_handshaker_result** result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data) {
+ grpc_core::ExecCtx exec_ctx;
+ return handshaker_next(self, received_bytes, received_bytes_size,
+ bytes_to_send, bytes_to_send_size, result, cb,
+ user_data);
+}
+
static void handshaker_shutdown(tsi_handshaker* self) {
GPR_ASSERT(self != nullptr);
if (self->handshake_shutdown) {
@@ -300,10 +356,12 @@ static void handshaker_destroy(tsi_handshaker* self) {
alts_tsi_handshaker* handshaker =
reinterpret_cast<alts_tsi_handshaker*>(self);
alts_handshaker_client_destroy(handshaker->client);
- grpc_slice_unref_internal(handshaker->recv_bytes);
grpc_slice_unref_internal(handshaker->target_name);
grpc_alts_credentials_options_destroy(handshaker->options);
- gpr_free(handshaker->buffer);
+ if (handshaker->channel != nullptr) {
+ grpc_channel_destroy(handshaker->channel);
+ }
+ gpr_free(handshaker->handshaker_service_url);
gpr_free(handshaker);
}
@@ -313,36 +371,19 @@ static const tsi_handshaker_vtable handshaker_vtable = {
nullptr, handshaker_destroy,
handshaker_next, handshaker_shutdown};
-static void thread_worker(void* arg) {
- while (true) {
- grpc_event event = grpc_completion_queue_next(
- kSharedResource->cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
- GPR_ASSERT(event.type != GRPC_QUEUE_TIMEOUT);
- if (event.type == GRPC_QUEUE_SHUTDOWN) {
- /* signal alts_tsi_shutdown() to destroy completion queue. */
- grpc_tsi_alts_signal_for_cq_destroy();
- break;
- }
- /* event.type == GRPC_OP_COMPLETE. */
- alts_tsi_event* alts_event = static_cast<alts_tsi_event*>(event.tag);
- alts_tsi_event_dispatch_to_handshaker(alts_event, event.success);
- alts_tsi_event_destroy(alts_event);
- }
-}
-
-static void init_shared_resources(const char* handshaker_service_url) {
- GPR_ASSERT(handshaker_service_url != nullptr);
- gpr_mu_lock(&kSharedResource->mu);
- if (kSharedResource->channel == nullptr) {
- gpr_cv_init(&kSharedResource->cv);
- kSharedResource->channel =
- grpc_insecure_channel_create(handshaker_service_url, nullptr, nullptr);
- kSharedResource->cq = grpc_completion_queue_create_for_next(nullptr);
- kSharedResource->thread =
- grpc_core::Thread("alts_tsi_handshaker", &thread_worker, nullptr);
- kSharedResource->thread.Start();
- }
- gpr_mu_unlock(&kSharedResource->mu);
+static const tsi_handshaker_vtable handshaker_vtable_dedicated = {
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ handshaker_destroy,
+ handshaker_next_dedicated,
+ handshaker_shutdown};
+
+bool alts_tsi_handshaker_has_shutdown(alts_tsi_handshaker* handshaker) {
+ GPR_ASSERT(handshaker != nullptr);
+ return handshaker->base.handshake_shutdown;
}
tsi_result alts_tsi_handshaker_create(
@@ -354,40 +395,33 @@ tsi_result alts_tsi_handshaker_create(
gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_handshaker_create()");
return TSI_INVALID_ARGUMENT;
}
- init_shared_resources(handshaker_service_url);
- alts_handshaker_client* client = alts_grpc_handshaker_client_create(
- kSharedResource->channel, kSharedResource->cq, handshaker_service_url);
- if (client == nullptr) {
- gpr_log(GPR_ERROR, "Failed to create ALTS handshaker client");
- return TSI_FAILED_PRECONDITION;
- }
alts_tsi_handshaker* handshaker =
static_cast<alts_tsi_handshaker*>(gpr_zalloc(sizeof(*handshaker)));
- handshaker->client = client;
- handshaker->buffer_size = TSI_ALTS_INITIAL_BUFFER_SIZE;
- handshaker->buffer =
- static_cast<unsigned char*>(gpr_zalloc(handshaker->buffer_size));
+ bool use_dedicated_cq = interested_parties == nullptr;
+ handshaker->client = nullptr;
handshaker->is_client = is_client;
handshaker->has_sent_start_message = false;
handshaker->target_name = target_name == nullptr
? grpc_empty_slice()
: grpc_slice_from_static_string(target_name);
+ handshaker->interested_parties = interested_parties;
+ handshaker->has_created_handshaker_client = false;
+ handshaker->handshaker_service_url = gpr_strdup(handshaker_service_url);
handshaker->options = grpc_alts_credentials_options_copy(options);
- handshaker->base.vtable = &handshaker_vtable;
+ handshaker->base.vtable =
+ use_dedicated_cq ? &handshaker_vtable_dedicated : &handshaker_vtable;
+ handshaker->channel =
+ use_dedicated_cq
+ ? nullptr
+ : grpc_insecure_channel_create(handshaker->handshaker_service_url,
+ nullptr, nullptr);
*self = &handshaker->base;
return TSI_OK;
}
-static bool is_handshake_finished_properly(grpc_gcp_handshaker_resp* resp) {
- GPR_ASSERT(resp != nullptr);
- if (resp->has_result) {
- return true;
- }
- return false;
-}
-
-static void set_unused_bytes(tsi_handshaker_result* self,
- grpc_slice* recv_bytes, size_t bytes_consumed) {
+void alts_tsi_handshaker_result_set_unused_bytes(tsi_handshaker_result* self,
+ grpc_slice* recv_bytes,
+ size_t bytes_consumed) {
GPR_ASSERT(recv_bytes != nullptr && self != nullptr);
if (GRPC_SLICE_LENGTH(*recv_bytes) == bytes_consumed) {
return;
@@ -402,81 +436,6 @@ static void set_unused_bytes(tsi_handshaker_result* self,
result->unused_bytes_size);
}
-void alts_tsi_handshaker_handle_response(alts_tsi_handshaker* handshaker,
- grpc_byte_buffer* recv_buffer,
- grpc_status_code status,
- grpc_slice* details,
- tsi_handshaker_on_next_done_cb cb,
- void* user_data, bool is_ok) {
- /* Invalid input check. */
- if (cb == nullptr) {
- gpr_log(GPR_ERROR,
- "cb is nullptr in alts_tsi_handshaker_handle_response()");
- return;
- }
- if (handshaker == nullptr || recv_buffer == nullptr) {
- gpr_log(GPR_ERROR,
- "Invalid arguments to alts_tsi_handshaker_handle_response()");
- cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr);
- return;
- }
- if (handshaker->base.handshake_shutdown) {
- gpr_log(GPR_ERROR, "TSI handshake shutdown");
- cb(TSI_HANDSHAKE_SHUTDOWN, user_data, nullptr, 0, nullptr);
- return;
- }
- /* Failed grpc call check. */
- if (!is_ok || status != GRPC_STATUS_OK) {
- gpr_log(GPR_ERROR, "grpc call made to handshaker service failed");
- if (details != nullptr) {
- char* error_details = grpc_slice_to_c_string(*details);
- gpr_log(GPR_ERROR, "error details:%s", error_details);
- gpr_free(error_details);
- }
- cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr);
- return;
- }
- grpc_gcp_handshaker_resp* resp =
- alts_tsi_utils_deserialize_response(recv_buffer);
- /* Invalid handshaker response check. */
- if (resp == nullptr) {
- gpr_log(GPR_ERROR, "alts_tsi_utils_deserialize_response() failed");
- cb(TSI_DATA_CORRUPTED, user_data, nullptr, 0, nullptr);
- return;
- }
- grpc_slice* slice = static_cast<grpc_slice*>(resp->out_frames.arg);
- unsigned char* bytes_to_send = nullptr;
- size_t bytes_to_send_size = 0;
- if (slice != nullptr) {
- bytes_to_send_size = GRPC_SLICE_LENGTH(*slice);
- while (bytes_to_send_size > handshaker->buffer_size) {
- handshaker->buffer_size *= 2;
- handshaker->buffer = static_cast<unsigned char*>(
- gpr_realloc(handshaker->buffer, handshaker->buffer_size));
- }
- memcpy(handshaker->buffer, GRPC_SLICE_START_PTR(*slice),
- bytes_to_send_size);
- bytes_to_send = handshaker->buffer;
- }
- tsi_handshaker_result* result = nullptr;
- if (is_handshake_finished_properly(resp)) {
- create_handshaker_result(resp, handshaker->is_client, &result);
- set_unused_bytes(result, &handshaker->recv_bytes, resp->bytes_consumed);
- }
- grpc_status_code code = static_cast<grpc_status_code>(resp->status.code);
- if (code != GRPC_STATUS_OK) {
- grpc_slice* details = static_cast<grpc_slice*>(resp->status.details.arg);
- if (details != nullptr) {
- char* error_details = grpc_slice_to_c_string(*details);
- gpr_log(GPR_ERROR, "Error from handshaker service:%s", error_details);
- gpr_free(error_details);
- }
- }
- grpc_gcp_handshaker_resp_destroy(resp);
- cb(alts_tsi_utils_convert_to_tsi_result(code), user_data, bytes_to_send,
- bytes_to_send_size, result);
-}
-
namespace grpc_core {
namespace internal {
@@ -486,29 +445,16 @@ bool alts_tsi_handshaker_get_has_sent_start_message_for_testing(
return handshaker->has_sent_start_message;
}
-bool alts_tsi_handshaker_get_is_client_for_testing(
- alts_tsi_handshaker* handshaker) {
+void alts_tsi_handshaker_set_client_vtable_for_testing(
+ alts_tsi_handshaker* handshaker, alts_handshaker_client_vtable* vtable) {
GPR_ASSERT(handshaker != nullptr);
- return handshaker->is_client;
-}
-
-void alts_tsi_handshaker_set_recv_bytes_for_testing(
- alts_tsi_handshaker* handshaker, grpc_slice* slice) {
- GPR_ASSERT(handshaker != nullptr && slice != nullptr);
- handshaker->recv_bytes = grpc_slice_ref(*slice);
+ handshaker->client_vtable_for_testing = vtable;
}
-grpc_slice alts_tsi_handshaker_get_recv_bytes_for_testing(
+bool alts_tsi_handshaker_get_is_client_for_testing(
alts_tsi_handshaker* handshaker) {
GPR_ASSERT(handshaker != nullptr);
- return handshaker->recv_bytes;
-}
-
-void alts_tsi_handshaker_set_client_for_testing(
- alts_tsi_handshaker* handshaker, alts_handshaker_client* client) {
- GPR_ASSERT(handshaker != nullptr && client != nullptr);
- alts_handshaker_client_destroy(handshaker->client);
- handshaker->client = client;
+ return handshaker->is_client;
}
alts_handshaker_client* alts_tsi_handshaker_get_client_for_testing(
diff --git a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h
index 48ce69b1da..32f94bc9d3 100644
--- a/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h
+++ b/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h
@@ -25,7 +25,8 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h"
-#include "src/core/tsi/alts_transport_security.h"
+#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h"
+#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h"
#include "src/core/tsi/transport_security.h"
#include "src/core/tsi/transport_security_interface.h"
@@ -35,10 +36,6 @@
const size_t kTsiAltsNumOfPeerProperties = 3;
-/**
- * Main struct for ALTS TSI handshaker. All APIs in the header are
- * thread-comptabile.
- */
typedef struct alts_tsi_handshaker alts_tsi_handshaker;
/**
@@ -56,7 +53,9 @@ typedef struct alts_tsi_handshaker alts_tsi_handshaker;
* - self: address of ALTS TSI handshaker instance to be returned from the
* method.
*
- * It returns TSI_OK on success and an error status code on failure.
+ * It returns TSI_OK on success and an error status code on failure. Note that
+ * if interested_parties is nullptr, a dedicated TSI thread will be created and
+ * used.
*/
tsi_result alts_tsi_handshaker_create(
const grpc_alts_credentials_options* options, const char* target_name,
@@ -64,23 +63,32 @@ tsi_result alts_tsi_handshaker_create(
grpc_pollset_set* interested_parties, tsi_handshaker** self);
/**
- * This method handles handshaker response returned from ALTS handshaker
- * service.
+ * This method creates an ALTS TSI handshaker result instance.
*
- * - handshaker: ALTS TSI handshaker instance.
- * - recv_buffer: buffer holding data received from the handshaker service.
- * - status: status of the grpc call made to the handshaker service.
- * - details: error details of the grpc call made to the handshaker service.
- * - cb: callback function of ALTS TSI event.
- * - user_data: argument of callback function.
- * - is_ok: a boolean value indicating if the handshaker response is ok to read.
+ * - resp: data received from the handshaker service.
+ * - is_client: a boolean value indicating if the result belongs to a
+ * client or not.
+ * - result: address of ALTS TSI handshaker result instance.
+ */
+tsi_result alts_tsi_handshaker_result_create(grpc_gcp_handshaker_resp* resp,
+ bool is_client,
+ tsi_handshaker_result** result);
+
+/**
+ * This method sets unused bytes of ALTS TSI handshaker result instance.
*
+ * - result: an ALTS TSI handshaker result instance.
+ * - recv_bytes: data received from the handshaker service.
+ * - bytes_consumed: size of data consumed by the handshaker service.
+ */
+void alts_tsi_handshaker_result_set_unused_bytes(tsi_handshaker_result* result,
+ grpc_slice* recv_bytes,
+ size_t bytes_consumed);
+
+/**
+ * This method returns a boolean value indicating if an ALTS TSI handshaker
+ * has been shut down or not.
*/
-void alts_tsi_handshaker_handle_response(alts_tsi_handshaker* handshaker,
- grpc_byte_buffer* recv_buffer,
- grpc_status_code status,
- grpc_slice* details,
- tsi_handshaker_on_next_done_cb cb,
- void* user_data, bool is_ok);
+bool alts_tsi_handshaker_has_shutdown(alts_tsi_handshaker* handshaker);
#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_H */
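
As a usage sketch of the creation API documented above (not part of the patch): passing nullptr for interested_parties selects the dedicated completion-queue/thread path. The options object and service URL are placeholders, and the parameter order assumes the full declaration in this header (options, target_name, handshaker_service_url, is_client, interested_parties, self), part of which is elided by the hunk above.

// Sketch only: create a client-side ALTS TSI handshaker on the dedicated
// thread model; "handshaker.example.com:80" is a hypothetical service URL.
grpc_alts_credentials_options* opts =
    grpc_alts_credentials_client_options_create();
tsi_handshaker* handshaker = nullptr;
tsi_result ok = alts_tsi_handshaker_create(
    opts, /*target_name=*/nullptr, "handshaker.example.com:80",
    /*is_client=*/true, /*interested_parties=*/nullptr, &handshaker);
GPR_ASSERT(ok == TSI_OK && handshaker != nullptr);
// The handshaker copies the options, so the caller's copy can be released.
grpc_alts_credentials_options_destroy(opts);
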
diff --git a/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h b/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h
index 9612071407..ec2616e95f 100644
--- a/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h
+++ b/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h
@@ -27,27 +27,55 @@ namespace grpc_core {
namespace internal {
/**
- * Unsafe, use for testing only. It allows the caller to change the way the
- * ALTS TSI handshaker schedules handshaker requests.
- */
-void alts_tsi_handshaker_set_client_for_testing(alts_tsi_handshaker* handshaker,
- alts_handshaker_client* client);
+ * Unsafe, use for testing only.
+ */
alts_handshaker_client* alts_tsi_handshaker_get_client_for_testing(
alts_tsi_handshaker* handshaker);
-/* For testing only. */
bool alts_tsi_handshaker_get_has_sent_start_message_for_testing(
alts_tsi_handshaker* handshaker);
+void alts_tsi_handshaker_set_client_vtable_for_testing(
+ alts_tsi_handshaker* handshaker, alts_handshaker_client_vtable* vtable);
+
bool alts_tsi_handshaker_get_is_client_for_testing(
alts_tsi_handshaker* handshaker);
-void alts_tsi_handshaker_set_recv_bytes_for_testing(
- alts_tsi_handshaker* handshaker, grpc_slice* slice);
+void alts_handshaker_client_set_grpc_caller_for_testing(
+ alts_handshaker_client* client, alts_grpc_caller caller);
-grpc_slice alts_tsi_handshaker_get_recv_bytes_for_testing(
- alts_tsi_handshaker* handshaker);
+grpc_byte_buffer* alts_handshaker_client_get_send_buffer_for_testing(
+ alts_handshaker_client* client);
+
+grpc_byte_buffer** alts_handshaker_client_get_recv_buffer_addr_for_testing(
+ alts_handshaker_client* client);
+
+grpc_metadata_array* alts_handshaker_client_get_initial_metadata_for_testing(
+ alts_handshaker_client* client);
+
+void alts_handshaker_client_set_recv_bytes_for_testing(
+ alts_handshaker_client* client, grpc_slice* recv_bytes);
+
+void alts_handshaker_client_check_fields_for_testing(
+ alts_handshaker_client* client, tsi_handshaker_on_next_done_cb cb,
+ void* user_data, bool has_sent_start_message, grpc_slice* recv_bytes);
+
+void alts_handshaker_client_set_fields_for_testing(
+ alts_handshaker_client* client, alts_tsi_handshaker* handshaker,
+ tsi_handshaker_on_next_done_cb cb, void* user_data,
+ grpc_byte_buffer* recv_buffer, grpc_status_code status);
+
+void alts_handshaker_client_set_vtable_for_testing(
+ alts_handshaker_client* client, alts_handshaker_client_vtable* vtable);
+
+alts_tsi_handshaker* alts_handshaker_client_get_handshaker_for_testing(
+ alts_handshaker_client* client);
+
+void alts_handshaker_client_set_cb_for_testing(
+ alts_handshaker_client* client, tsi_handshaker_on_next_done_cb cb);
+
+grpc_closure* alts_handshaker_client_get_closure_for_testing(
+ alts_handshaker_client* client);
} // namespace internal
} // namespace grpc_core
diff --git a/src/core/tsi/alts_transport_security.cc b/src/core/tsi/alts_transport_security.cc
deleted file mode 100644
index dac23bbf7a..0000000000
--- a/src/core/tsi/alts_transport_security.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/tsi/alts_transport_security.h"
-
-#include <string.h>
-
-static alts_shared_resource g_alts_resource;
-
-alts_shared_resource* alts_get_shared_resource(void) {
- return &g_alts_resource;
-}
-
-static void grpc_tsi_alts_wait_for_cq_drain() {
- gpr_mu_lock(&g_alts_resource.mu);
- while (!g_alts_resource.is_cq_drained) {
- gpr_cv_wait(&g_alts_resource.cv, &g_alts_resource.mu,
- gpr_inf_future(GPR_CLOCK_REALTIME));
- }
- gpr_mu_unlock(&g_alts_resource.mu);
-}
-
-void grpc_tsi_alts_signal_for_cq_destroy() {
- gpr_mu_lock(&g_alts_resource.mu);
- g_alts_resource.is_cq_drained = true;
- gpr_cv_signal(&g_alts_resource.cv);
- gpr_mu_unlock(&g_alts_resource.mu);
-}
-
-void grpc_tsi_alts_init() {
- g_alts_resource.channel = nullptr;
- g_alts_resource.cq = nullptr;
- g_alts_resource.is_cq_drained = false;
- gpr_mu_init(&g_alts_resource.mu);
- gpr_cv_init(&g_alts_resource.cv);
-}
-
-void grpc_tsi_alts_shutdown() {
- if (g_alts_resource.cq != nullptr) {
- grpc_completion_queue_shutdown(g_alts_resource.cq);
- grpc_tsi_alts_wait_for_cq_drain();
- grpc_completion_queue_destroy(g_alts_resource.cq);
- grpc_channel_destroy(g_alts_resource.channel);
- g_alts_resource.thread.Join();
- }
- gpr_cv_destroy(&g_alts_resource.cv);
- gpr_mu_destroy(&g_alts_resource.mu);
-}
diff --git a/src/core/tsi/alts_transport_security.h b/src/core/tsi/alts_transport_security.h
deleted file mode 100644
index d6b8e11137..0000000000
--- a/src/core/tsi/alts_transport_security.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H
-#define GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H
-
-#include <grpc/support/port_platform.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/sync.h>
-
-#include "src/core/lib/gprpp/thd.h"
-
-typedef struct alts_shared_resource {
- grpc_core::Thread thread;
- grpc_channel* channel;
- grpc_completion_queue* cq;
- gpr_mu mu;
- gpr_cv cv;
- bool is_cq_drained;
-} alts_shared_resource;
-
-/* This method returns the address of alts_shared_resource object shared by all
- * TSI handshakes. */
-alts_shared_resource* alts_get_shared_resource(void);
-
-/* This method signals the thread that invokes grpc_tsi_alts_shutdown() to
- * continue with destroying the cq as a part of shutdown process. */
-
-void grpc_tsi_alts_signal_for_cq_destroy(void);
-
-#endif /* GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/transport_security.cc b/src/core/tsi/transport_security.cc
index ca861b52de..078a917bba 100644
--- a/src/core/tsi/transport_security.cc
+++ b/src/core/tsi/transport_security.cc
@@ -213,10 +213,10 @@ tsi_result tsi_handshaker_next(
void tsi_handshaker_shutdown(tsi_handshaker* self) {
if (self == nullptr || self->vtable == nullptr) return;
- self->handshake_shutdown = true;
if (self->vtable->shutdown != nullptr) {
self->vtable->shutdown(self);
}
+ self->handshake_shutdown = true;
}
void tsi_handshaker_destroy(tsi_handshaker* self) {
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc
index 2cab41b3f5..a31d0b30b1 100644
--- a/src/cpp/client/channel_cc.cc
+++ b/src/cpp/client/channel_cc.cc
@@ -33,6 +33,7 @@
#include <grpcpp/client_context.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/impl/call.h>
+#include <grpcpp/impl/codegen/call_op_set.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
#include <grpcpp/impl/grpc_library.h>
#include <grpcpp/impl/rpc_method.h>
@@ -53,14 +54,11 @@ namespace grpc {
static internal::GrpcLibraryInitializer g_gli_initializer;
Channel::Channel(
const grpc::string& host, grpc_channel* channel,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators)
: host_(host), c_channel_(channel) {
- auto* vector = interceptor_creators.release();
- if (vector != nullptr) {
- interceptor_creators_ = std::move(*vector);
- }
+ interceptor_creators_ = std::move(interceptor_creators);
g_gli_initializer.summon();
}
@@ -112,9 +110,10 @@ void ChannelResetConnectionBackoff(Channel* channel) {
} // namespace experimental
-internal::Call Channel::CreateCall(const internal::RpcMethod& method,
- ClientContext* context,
- CompletionQueue* cq) {
+internal::Call Channel::CreateCallInternal(const internal::RpcMethod& method,
+ ClientContext* context,
+ CompletionQueue* cq,
+ size_t interceptor_pos) {
const bool kRegistered = method.channel_tag() && context->authority().empty();
grpc_call* c_call = nullptr;
if (kRegistered) {
@@ -146,18 +145,28 @@ internal::Call Channel::CreateCall(const internal::RpcMethod& method,
}
}
grpc_census_call_set_context(c_call, context->census_context());
+
+ // ClientRpcInfo should be set before call because set_call also checks
+ // whether the call has been cancelled, and if the call was cancelled, we
+ // should notify the interceptors too.
+ auto* info =
+ context->set_client_rpc_info(method.name(), method.method_type(), this,
+ interceptor_creators_, interceptor_pos);
context->set_call(c_call, shared_from_this());
- return internal::Call(c_call, this, cq);
+
+ return internal::Call(c_call, this, cq, info);
+}
+
+internal::Call Channel::CreateCall(const internal::RpcMethod& method,
+ ClientContext* context,
+ CompletionQueue* cq) {
+ return CreateCallInternal(method, context, cq, 0);
}
void Channel::PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) {
- static const size_t MAX_OPS = 8;
- size_t nops = 0;
- grpc_op cops[MAX_OPS];
- ops->FillOps(call->call(), cops, &nops);
- GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call->call(), cops, nops,
- ops->cq_tag(), nullptr));
+ ops->FillOps(
+ call); // Make a copy of call. It's fine since Call just has pointers
}
void* Channel::RegisterMethod(const char* method) {
@@ -219,7 +228,7 @@ class ShutdownCallback : public grpc_experimental_completion_queue_functor {
static void Run(grpc_experimental_completion_queue_functor* cb, int) {
auto* callback = static_cast<ShutdownCallback*>(cb);
delete callback->cq_;
- grpc_core::Delete(callback);
+ delete callback;
}
private:
@@ -232,7 +241,7 @@ CompletionQueue* Channel::CallbackCQ() {
// if there is no explicit per-channel CQ registered
std::lock_guard<std::mutex> l(mu_);
if (callback_cq_ == nullptr) {
- auto* shutdown_callback = grpc_core::New<ShutdownCallback>();
+ auto* shutdown_callback = new ShutdownCallback;
callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
shutdown_callback});
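For readers skimming the hunk above: the callback completion queue is created lazily under the channel mutex, and its teardown is delegated to a single-shot functor that owns it. A condensed sketch of that ownership pattern, using only the core functor struct already referenced elsewhere in this diff; OwnedCqShutdown is a hypothetical name.

// Runs exactly once when the callback CQ finishes shutting down; it destroys
// the CQ it was handed and then frees itself, mirroring ShutdownCallback.
class OwnedCqShutdown : public grpc_experimental_completion_queue_functor {
 public:
  OwnedCqShutdown() { functor_run = &OwnedCqShutdown::Run; }
  void TakeCQ(grpc::CompletionQueue* cq) { cq_ = cq; }

 private:
  static void Run(grpc_experimental_completion_queue_functor* cb, int /*ok*/) {
    auto* self = static_cast<OwnedCqShutdown*>(cb);
    delete self->cq_;  // tear down the completion queue handed over via TakeCQ
    delete self;       // single-shot functor, so it releases itself last
  }
  grpc::CompletionQueue* cq_ = nullptr;
};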
diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc
index 07a04e4268..c9ea3e5f83 100644
--- a/src/cpp/client/client_context.cc
+++ b/src/cpp/client/client_context.cc
@@ -24,6 +24,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpcpp/impl/codegen/interceptor_common.h>
#include <grpcpp/impl/grpc_library.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/server_context.h>
@@ -40,9 +41,10 @@ class DefaultGlobalClientCallbacks final
};
static internal::GrpcLibraryInitializer g_gli_initializer;
-static DefaultGlobalClientCallbacks g_default_client_callbacks;
+static DefaultGlobalClientCallbacks* g_default_client_callbacks =
+ new DefaultGlobalClientCallbacks();
static ClientContext::GlobalCallbacks* g_client_callbacks =
- &g_default_client_callbacks;
+ g_default_client_callbacks;
ClientContext::ClientContext()
: initial_metadata_received_(false),
@@ -86,10 +88,13 @@ void ClientContext::set_call(grpc_call* call,
call_ = call;
channel_ = channel;
if (creds_ && !creds_->ApplyToCall(call_)) {
+ // TODO(yashykt): should interceptors also see this status?
+ SendCancelToInterceptors();
grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED,
"Failed to set credentials to rpc.", nullptr);
}
if (call_canceled_) {
+ SendCancelToInterceptors();
grpc_call_cancel(call_, nullptr);
}
}
@@ -110,12 +115,20 @@ void ClientContext::set_compression_algorithm(
void ClientContext::TryCancel() {
std::unique_lock<std::mutex> lock(mu_);
if (call_) {
+ SendCancelToInterceptors();
grpc_call_cancel(call_, nullptr);
} else {
call_canceled_ = true;
}
}
+void ClientContext::SendCancelToInterceptors() {
+ internal::CancelInterceptorBatchMethods cancel_methods;
+ for (size_t i = 0; i < rpc_info_.interceptors_.size(); i++) {
+ rpc_info_.RunInterceptor(&cancel_methods, i);
+ }
+}
+
grpc::string ClientContext::peer() const {
grpc::string peer;
if (call_) {
@@ -127,9 +140,9 @@ grpc::string ClientContext::peer() const {
}
void ClientContext::SetGlobalCallbacks(GlobalCallbacks* client_callbacks) {
- GPR_ASSERT(g_client_callbacks == &g_default_client_callbacks);
+ GPR_ASSERT(g_client_callbacks == g_default_client_callbacks);
GPR_ASSERT(client_callbacks != nullptr);
- GPR_ASSERT(client_callbacks != &g_default_client_callbacks);
+ GPR_ASSERT(client_callbacks != g_default_client_callbacks);
g_client_callbacks = client_callbacks;
}
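The new SendCancelToInterceptors() hook fires on both cancellation paths above: an early TryCancel recorded via call_canceled_, and a live grpc_call_cancel. A small usage sketch, assuming the RPC itself is started elsewhere and that ctx outlives the helper thread.

#include <thread>

#include <grpcpp/client_context.h>

// Cancels an in-flight RPC from another thread. With this change, every
// registered client interceptor is run with a CancelInterceptorBatchMethods
// batch before the core cancel is issued.
void CancelLater(grpc::ClientContext* ctx) {
  std::thread([ctx] { ctx->TryCancel(); }).detach();
}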
diff --git a/src/cpp/client/client_interceptor.cc b/src/cpp/client/client_interceptor.cc
new file mode 100644
index 0000000000..3a5cac9830
--- /dev/null
+++ b/src/cpp/client/client_interceptor.cc
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/client_interceptor.h>
+
+namespace grpc {
+
+namespace internal {
+experimental::ClientInterceptorFactoryInterface*
+ g_global_client_interceptor_factory = nullptr;
+}
+
+namespace experimental {
+void RegisterGlobalClientInterceptorFactory(
+ ClientInterceptorFactoryInterface* factory) {
+ internal::g_global_client_interceptor_factory = factory;
+}
+} // namespace experimental
+} // namespace grpc
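A usage sketch for the registration entry point added in this new file. The interceptor base classes come from the experimental headers rather than from this translation unit, and LoggingInterceptor plus its factory are hypothetical names used only for illustration.

#include <grpcpp/impl/codegen/client_interceptor.h>

// Hypothetical interceptor: lets every batch through unchanged.
class LoggingInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    // Inspect hook points here if desired, then hand the batch back.
    methods->Proceed();
  }
};

class LoggingInterceptorFactory
    : public grpc::experimental::ClientInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateClientInterceptor(
      grpc::experimental::ClientRpcInfo* /*info*/) override {
    return new LoggingInterceptor;
  }
};

// Called once before any channel is created; the registered factory is never
// reset, so it must outlive the process.
void InstallGlobalClientInterceptor() {
  grpc::experimental::RegisterGlobalClientInterceptorFactory(
      new LoggingInterceptorFactory);
}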
diff --git a/src/cpp/client/create_channel.cc b/src/cpp/client/create_channel.cc
index efdff6c265..457daa674c 100644
--- a/src/cpp/client/create_channel.cc
+++ b/src/cpp/client/create_channel.cc
@@ -39,13 +39,14 @@ std::shared_ptr<Channel> CreateCustomChannel(
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args) {
GrpcLibraryCodegen init_lib; // We need to call init in case of a bad creds.
- return creds
- ? creds->CreateChannel(target, args)
- : CreateChannelInternal("",
- grpc_lame_client_channel_create(
- nullptr, GRPC_STATUS_INVALID_ARGUMENT,
- "Invalid credentials."),
- nullptr);
+ return creds ? creds->CreateChannel(target, args)
+ : CreateChannelInternal(
+ "",
+ grpc_lame_client_channel_create(
+ nullptr, GRPC_STATUS_INVALID_ARGUMENT,
+ "Invalid credentials."),
+ std::vector<std::unique_ptr<
+ experimental::ClientInterceptorFactoryInterface>>());
}
namespace experimental {
@@ -64,17 +65,18 @@ std::shared_ptr<Channel> CreateCustomChannelWithInterceptors(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) {
- return creds
- ? creds->CreateChannelWithInterceptors(
- target, args, std::move(interceptor_creators))
- : CreateChannelInternal("",
- grpc_lame_client_channel_create(
- nullptr, GRPC_STATUS_INVALID_ARGUMENT,
- "Invalid credentials."),
- nullptr);
+ return creds ? creds->CreateChannelWithInterceptors(
+ target, args, std::move(interceptor_creators))
+ : CreateChannelInternal(
+ "",
+ grpc_lame_client_channel_create(
+ nullptr, GRPC_STATUS_INVALID_ARGUMENT,
+ "Invalid credentials."),
+ std::vector<std::unique_ptr<
+ experimental::ClientInterceptorFactoryInterface>>());
}
} // namespace experimental
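With the signature change above, callers hand the factories over as a plain vector moved into the call. A sketch of the updated call shape, reusing the hypothetical LoggingInterceptorFactory from the earlier sketch; the target address is illustrative.

#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeInterceptedChannel() {
  std::vector<
      std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
      creators;
  creators.push_back(
      std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>(
          new LoggingInterceptorFactory));
  // An empty vector (as the lame-channel branch above passes) simply means
  // "no interceptors"; the vector is consumed by the call.
  return grpc::experimental::CreateCustomChannelWithInterceptors(
      "localhost:50051", grpc::InsecureChannelCredentials(),
      grpc::ChannelArguments(), std::move(creators));
}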
diff --git a/src/cpp/client/create_channel_internal.cc b/src/cpp/client/create_channel_internal.cc
index 313d682aae..a0efb97f7e 100644
--- a/src/cpp/client/create_channel_internal.cc
+++ b/src/cpp/client/create_channel_internal.cc
@@ -26,8 +26,8 @@ namespace grpc {
std::shared_ptr<Channel> CreateChannelInternal(
const grpc::string& host, grpc_channel* c_channel,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) {
return std::shared_ptr<Channel>(
new Channel(host, c_channel, std::move(interceptor_creators)));
diff --git a/src/cpp/client/create_channel_internal.h b/src/cpp/client/create_channel_internal.h
index 512fc22866..a90c92c518 100644
--- a/src/cpp/client/create_channel_internal.h
+++ b/src/cpp/client/create_channel_internal.h
@@ -31,8 +31,8 @@ class Channel;
std::shared_ptr<Channel> CreateChannelInternal(
const grpc::string& host, grpc_channel* c_channel,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators);
} // namespace grpc
diff --git a/src/cpp/client/create_channel_posix.cc b/src/cpp/client/create_channel_posix.cc
index 8d775e7a87..3affc1ef39 100644
--- a/src/cpp/client/create_channel_posix.cc
+++ b/src/cpp/client/create_channel_posix.cc
@@ -34,7 +34,8 @@ std::shared_ptr<Channel> CreateInsecureChannelFromFd(const grpc::string& target,
init_lib.init();
return CreateChannelInternal(
"", grpc_insecure_channel_create_from_fd(target.c_str(), fd, nullptr),
- nullptr);
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
}
std::shared_ptr<Channel> CreateCustomInsecureChannelFromFd(
@@ -46,15 +47,16 @@ std::shared_ptr<Channel> CreateCustomInsecureChannelFromFd(
return CreateChannelInternal(
"",
grpc_insecure_channel_create_from_fd(target.c_str(), fd, &channel_args),
- nullptr);
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
}
namespace experimental {
std::shared_ptr<Channel> CreateCustomInsecureChannelWithInterceptorsFromFd(
const grpc::string& target, int fd, const ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) {
internal::GrpcLibrary init_lib;
init_lib.init();
diff --git a/src/cpp/client/cronet_credentials.cc b/src/cpp/client/cronet_credentials.cc
index 09a76b428c..b2801764f2 100644
--- a/src/cpp/client/cronet_credentials.cc
+++ b/src/cpp/client/cronet_credentials.cc
@@ -31,7 +31,10 @@ class CronetChannelCredentialsImpl final : public ChannelCredentials {
std::shared_ptr<grpc::Channel> CreateChannel(
const string& target, const grpc::ChannelArguments& args) override {
- return CreateChannelWithInterceptors(target, args, nullptr);
+ return CreateChannelWithInterceptors(
+ target, args,
+ std::vector<std::unique_ptr<
+ experimental::ClientInterceptorFactoryInterface>>());
}
SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
@@ -39,8 +42,8 @@ class CronetChannelCredentialsImpl final : public ChannelCredentials {
private:
std::shared_ptr<grpc::Channel> CreateChannelWithInterceptors(
const string& target, const grpc::ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
diff --git a/src/cpp/client/generic_stub.cc b/src/cpp/client/generic_stub.cc
index 87902b26f0..f61c1b5317 100644
--- a/src/cpp/client/generic_stub.cc
+++ b/src/cpp/client/generic_stub.cc
@@ -72,4 +72,13 @@ void GenericStub::experimental_type::UnaryCall(
context, request, response, std::move(on_completion));
}
+void GenericStub::experimental_type::PrepareBidiStreamingCall(
+ ClientContext* context, const grpc::string& method,
+ experimental::ClientBidiReactor<ByteBuffer, ByteBuffer>* reactor) {
+ internal::ClientCallbackReaderWriterFactory<ByteBuffer, ByteBuffer>::Create(
+ stub_->channel_.get(),
+ internal::RpcMethod(method.c_str(), internal::RpcMethod::BIDI_STREAMING),
+ context, reactor);
+}
+
} // namespace grpc
diff --git a/src/cpp/client/insecure_credentials.cc b/src/cpp/client/insecure_credentials.cc
index b816e0c59a..241ce91803 100644
--- a/src/cpp/client/insecure_credentials.cc
+++ b/src/cpp/client/insecure_credentials.cc
@@ -32,13 +32,16 @@ class InsecureChannelCredentialsImpl final : public ChannelCredentials {
public:
std::shared_ptr<grpc::Channel> CreateChannel(
const string& target, const grpc::ChannelArguments& args) override {
- return CreateChannelWithInterceptors(target, args, nullptr);
+ return CreateChannelWithInterceptors(
+ target, args,
+ std::vector<std::unique_ptr<
+ experimental::ClientInterceptorFactoryInterface>>());
}
std::shared_ptr<grpc::Channel> CreateChannelWithInterceptors(
const string& target, const grpc::ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc
index d1cd78e755..d0abe441a6 100644
--- a/src/cpp/client/secure_credentials.cc
+++ b/src/cpp/client/secure_credentials.cc
@@ -36,14 +36,17 @@ SecureChannelCredentials::SecureChannelCredentials(
std::shared_ptr<grpc::Channel> SecureChannelCredentials::CreateChannel(
const string& target, const grpc::ChannelArguments& args) {
- return CreateChannelWithInterceptors(target, args, nullptr);
+ return CreateChannelWithInterceptors(
+ target, args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
}
std::shared_ptr<grpc::Channel>
SecureChannelCredentials::CreateChannelWithInterceptors(
const string& target, const grpc::ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
@@ -228,9 +231,10 @@ int MetadataCredentialsPluginWrapper::GetMetadata(
}
if (w->plugin_->IsBlocking()) {
// Asynchronous return.
- w->thread_pool_->Add(
- std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context,
- cb, user_data, nullptr, nullptr, nullptr, nullptr));
+ w->thread_pool_->Add([w, context, cb, user_data] {
+ w->MetadataCredentialsPluginWrapper::InvokePlugin(
+ context, cb, user_data, nullptr, nullptr, nullptr, nullptr);
+ });
return 0;
} else {
// Synchronous return.
diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h
index bfb6e17ee9..613f1d6dc2 100644
--- a/src/cpp/client/secure_credentials.h
+++ b/src/cpp/client/secure_credentials.h
@@ -42,8 +42,8 @@ class SecureChannelCredentials final : public ChannelCredentials {
private:
std::shared_ptr<grpc::Channel> CreateChannelWithInterceptors(
const string& target, const grpc::ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) override;
grpc_channel_credentials* const c_creds_;
};
diff --git a/src/cpp/common/completion_queue_cc.cc b/src/cpp/common/completion_queue_cc.cc
index 6893201e2e..d93a54aed7 100644
--- a/src/cpp/common/completion_queue_cc.cc
+++ b/src/cpp/common/completion_queue_cc.cc
@@ -60,10 +60,10 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
case GRPC_QUEUE_SHUTDOWN:
return SHUTDOWN;
case GRPC_OP_COMPLETE:
- auto cq_tag = static_cast<internal::CompletionQueueTag*>(ev.tag);
+ auto core_cq_tag = static_cast<internal::CompletionQueueTag*>(ev.tag);
*ok = ev.success != 0;
- *tag = cq_tag;
- if (cq_tag->FinalizeResult(tag, ok)) {
+ *tag = core_cq_tag;
+ if (core_cq_tag->FinalizeResult(tag, ok)) {
return GOT_EVENT;
}
break;
@@ -87,9 +87,9 @@ bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
flushed_ = true;
if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag,
&res)) {
- auto cq_tag = static_cast<internal::CompletionQueueTag*>(res_tag);
+ auto core_cq_tag = static_cast<internal::CompletionQueueTag*>(res_tag);
*ok = res == 1;
- if (cq_tag->FinalizeResult(tag, ok)) {
+ if (core_cq_tag->FinalizeResult(tag, ok)) {
return true;
}
}
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index 619aacadaa..cfaa2e7b19 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -102,6 +102,13 @@ size_t CoreCodegen::grpc_byte_buffer_length(grpc_byte_buffer* bb) {
return ::grpc_byte_buffer_length(bb);
}
+grpc_call_error CoreCodegen::grpc_call_start_batch(grpc_call* call,
+ const grpc_op* ops,
+ size_t nops, void* tag,
+ void* reserved) {
+ return ::grpc_call_start_batch(call, ops, nops, tag, reserved);
+}
+
grpc_call_error CoreCodegen::grpc_call_cancel_with_status(
grpc_call* call, grpc_status_code status, const char* description,
void* reserved) {
diff --git a/src/cpp/common/version_cc.cc b/src/cpp/common/version_cc.cc
index 8abd45efb7..55da89e6c8 100644
--- a/src/cpp/common/version_cc.cc
+++ b/src/cpp/common/version_cc.cc
@@ -22,5 +22,5 @@
#include <grpcpp/grpcpp.h>
namespace grpc {
-grpc::string Version() { return "1.17.0-dev"; }
+grpc::string Version() { return "1.18.0-dev"; }
} // namespace grpc
diff --git a/src/cpp/ext/filters/census/context.cc b/src/cpp/ext/filters/census/context.cc
index 4b3250236d..78fc69a805 100644
--- a/src/cpp/ext/filters/census/context.cc
+++ b/src/cpp/ext/filters/census/context.cc
@@ -29,9 +29,15 @@ void GenerateServerContext(absl::string_view tracing, absl::string_view stats,
absl::string_view primary_role,
absl::string_view method, CensusContext* context) {
GrpcTraceContext trace_ctxt;
- TraceContextEncoding::Decode(tracing, &trace_ctxt);
- SpanContext parent_ctx = trace_ctxt.ToSpanContext();
- new (context) CensusContext(method, parent_ctx);
+ if (TraceContextEncoding::Decode(tracing, &trace_ctxt) !=
+ TraceContextEncoding::kEncodeDecodeFailure) {
+ SpanContext parent_ctx = trace_ctxt.ToSpanContext();
+ if (parent_ctx.IsValid()) {
+ new (context) CensusContext(method, parent_ctx);
+ return;
+ }
+ }
+ new (context) CensusContext(method);
}
void GenerateClientContext(absl::string_view method, CensusContext* ctxt,
diff --git a/src/cpp/server/channelz/channelz_service.cc b/src/cpp/server/channelz/channelz_service.cc
index 428893f277..9ecb9de7e4 100644
--- a/src/cpp/server/channelz/channelz_service.cc
+++ b/src/cpp/server/channelz/channelz_service.cc
@@ -59,6 +59,23 @@ Status ChannelzService::GetServers(
return Status::OK;
}
+Status ChannelzService::GetServer(ServerContext* unused,
+ const channelz::v1::GetServerRequest* request,
+ channelz::v1::GetServerResponse* response) {
+ char* json_str = grpc_channelz_get_server(request->server_id());
+ if (json_str == nullptr) {
+ return Status(StatusCode::INTERNAL,
+ "grpc_channelz_get_server returned null");
+ }
+ grpc::protobuf::util::Status s =
+ grpc::protobuf::json::JsonStringToMessage(json_str, response);
+ gpr_free(json_str);
+ if (!s.ok()) {
+ return Status(StatusCode::INTERNAL, s.ToString());
+ }
+ return Status::OK;
+}
+
Status ChannelzService::GetServerSockets(
ServerContext* unused, const channelz::v1::GetServerSocketsRequest* request,
channelz::v1::GetServerSocketsResponse* response) {
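For context, the handler added above answers requests shaped like the client-side sketch below. The stub setup is assumed to exist elsewhere, the include path follows the in-tree proto layout, and the server id is illustrative; real ids come from a prior GetServers call.

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/channelz/channelz.grpc.pb.h"

// Queries the new GetServer RPC for one server's channelz data.
grpc::Status QueryServer(grpc::channelz::v1::Channelz::Stub* stub,
                         grpc::channelz::v1::GetServerResponse* response) {
  grpc::channelz::v1::GetServerRequest request;
  request.set_server_id(1);  // illustrative id
  grpc::ClientContext context;
  return stub->GetServer(&context, request, response);
}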
diff --git a/src/cpp/server/channelz/channelz_service.h b/src/cpp/server/channelz/channelz_service.h
index 590b5d492e..b4a66ba1c6 100644
--- a/src/cpp/server/channelz/channelz_service.h
+++ b/src/cpp/server/channelz/channelz_service.h
@@ -36,6 +36,10 @@ class ChannelzService final : public channelz::v1::Channelz::Service {
Status GetServers(ServerContext* unused,
const channelz::v1::GetServersRequest* request,
channelz::v1::GetServersResponse* response) override;
+ // implementation of GetServer rpc
+ Status GetServer(ServerContext* unused,
+ const channelz::v1::GetServerRequest* request,
+ channelz::v1::GetServerResponse* response) override;
// implementation of GetServerSockets rpc
Status GetServerSockets(
ServerContext* unused,
diff --git a/src/cpp/server/health/default_health_check_service.cc b/src/cpp/server/health/default_health_check_service.cc
index c951c69d51..44aebd2f9d 100644
--- a/src/cpp/server/health/default_health_check_service.cc
+++ b/src/cpp/server/health/default_health_check_service.cc
@@ -42,18 +42,37 @@ DefaultHealthCheckService::DefaultHealthCheckService() {
void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) {
std::unique_lock<std::mutex> lock(mu_);
+ if (shutdown_) {
+ // Set to NOT_SERVING in case service_name is not in the map.
+ serving = false;
+ }
services_map_[service_name].SetServingStatus(serving ? SERVING : NOT_SERVING);
}
void DefaultHealthCheckService::SetServingStatus(bool serving) {
const ServingStatus status = serving ? SERVING : NOT_SERVING;
std::unique_lock<std::mutex> lock(mu_);
+ if (shutdown_) {
+ return;
+ }
for (auto& p : services_map_) {
ServiceData& service_data = p.second;
service_data.SetServingStatus(status);
}
}
+void DefaultHealthCheckService::Shutdown() {
+ std::unique_lock<std::mutex> lock(mu_);
+ if (shutdown_) {
+ return;
+ }
+ shutdown_ = true;
+ for (auto& p : services_map_) {
+ ServiceData& service_data = p.second;
+ service_data.SetServingStatus(NOT_SERVING);
+ }
+}
+
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const {
diff --git a/src/cpp/server/health/default_health_check_service.h b/src/cpp/server/health/default_health_check_service.h
index 450bd543f5..9551cd2e2c 100644
--- a/src/cpp/server/health/default_health_check_service.h
+++ b/src/cpp/server/health/default_health_check_service.h
@@ -237,6 +237,8 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
bool serving) override;
void SetServingStatus(bool serving) override;
+ void Shutdown() override;
+
ServingStatus GetServingStatus(const grpc::string& service_name) const;
HealthCheckServiceImpl* GetHealthCheckService(
@@ -272,6 +274,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler);
mutable std::mutex mu_;
+ bool shutdown_ = false; // Guarded by mu_.
std::map<grpc::string, ServiceData> services_map_; // Guarded by mu_.
std::unique_ptr<HealthCheckServiceImpl> impl_;
};
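A usage sketch for the new Shutdown() override, assuming the default health check service was enabled (grpc::EnableDefaultHealthCheckService(true)) before the server was built; the service name is hypothetical.

#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/server.h>

// Marks one service NOT_SERVING, then takes the whole health service down.
void DrainHealthChecks(grpc::Server* server) {
  grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
  if (health == nullptr) return;  // default health checking not enabled
  health->SetServingStatus("my.package.EchoService", /*serving=*/false);
  // After Shutdown(), every registered service reports NOT_SERVING and later
  // attempts to set SERVING are ignored, matching the guards added above.
  health->Shutdown();
}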
diff --git a/src/cpp/server/secure_server_credentials.cc b/src/cpp/server/secure_server_credentials.cc
index 536bf022dd..ebb17def32 100644
--- a/src/cpp/server/secure_server_credentials.cc
+++ b/src/cpp/server/secure_server_credentials.cc
@@ -43,9 +43,10 @@ void AuthMetadataProcessorAyncWrapper::Process(
return;
}
if (w->processor_->IsBlocking()) {
- w->thread_pool_->Add(
- std::bind(&AuthMetadataProcessorAyncWrapper::InvokeProcessor, w,
- context, md, num_md, cb, user_data));
+ w->thread_pool_->Add([w, context, md, num_md, cb, user_data] {
+ w->AuthMetadataProcessorAyncWrapper::InvokeProcessor(context, md, num_md,
+ cb, user_data);
+ });
} else {
// invoke directly.
w->InvokeProcessor(context, md, num_md, cb, user_data);
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index 8417c45e64..0dc03b6876 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -71,7 +71,9 @@ ServerBuilder::~ServerBuilder() {
std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
bool is_frequently_polled) {
ServerCompletionQueue* cq = new ServerCompletionQueue(
- is_frequently_polled ? GRPC_CQ_DEFAULT_POLLING : GRPC_CQ_NON_LISTENING);
+ GRPC_CQ_NEXT,
+ is_frequently_polled ? GRPC_CQ_DEFAULT_POLLING : GRPC_CQ_NON_LISTENING,
+ nullptr);
cqs_.push_back(cq);
return std::unique_ptr<ServerCompletionQueue>(cq);
}
@@ -256,14 +258,22 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
// Create completion queues to listen to incoming rpc requests
for (int i = 0; i < sync_server_settings_.num_cqs; i++) {
- sync_server_cqs->emplace_back(new ServerCompletionQueue(polling_type));
+ sync_server_cqs->emplace_back(
+ new ServerCompletionQueue(GRPC_CQ_NEXT, polling_type, nullptr));
}
}
- std::unique_ptr<Server> server(new Server(
- max_receive_message_size_, &args, sync_server_cqs,
- sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
- sync_server_settings_.cq_timeout_msec, resource_quota_));
+ // == Determine if the server has any callback methods ==
+ bool has_callback_methods = false;
+ for (auto it = services_.begin(); it != services_.end(); ++it) {
+ if ((*it)->service->has_callback_methods()) {
+ has_callback_methods = true;
+ break;
+ }
+ }
+
+ // TODO(vjpai): Add a section here for plugins once they can support callback
+ // methods
if (has_sync_methods) {
// This is a Sync server
@@ -275,6 +285,16 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
sync_server_settings_.cq_timeout_msec);
}
+ if (has_callback_methods) {
+ gpr_log(GPR_INFO, "Callback server.");
+ }
+
+ std::unique_ptr<Server> server(new Server(
+ max_receive_message_size_, &args, sync_server_cqs,
+ sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
+ sync_server_settings_.cq_timeout_msec, resource_quota_,
+ std::move(interceptor_creators_)));
+
ServerInitializer* initializer = server->initializer();
// Register all the completion queues with the server. i.e
@@ -288,6 +308,12 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
num_frequently_polled_cqs++;
}
+ if (has_callback_methods) {
+ auto* cq = server->CallbackCQ();
+ grpc_server_register_completion_queue(server->server_, cq->cq(), nullptr);
+ num_frequently_polled_cqs++;
+ }
+
// cqs_ contains the completion queue added by calling the ServerBuilder's
// AddCompletionQueue() API. Some of them may not be frequently polled (i.e by
// calling Next() or AsyncNext()) and hence are not safe to be used for
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 7aeddff643..1e3c57446f 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -27,7 +27,9 @@
#include <grpcpp/completion_queue.h>
#include <grpcpp/generic/async_generic_service.h>
#include <grpcpp/impl/codegen/async_unary_call.h>
+#include <grpcpp/impl/codegen/call.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
+#include <grpcpp/impl/codegen/server_interceptor.h>
#include <grpcpp/impl/grpc_library.h>
#include <grpcpp/impl/method_handler_impl.h>
#include <grpcpp/impl/rpc_service_method.h>
@@ -38,8 +40,10 @@
#include <grpcpp/support/time.h>
#include "src/core/ext/transport/inproc/inproc_transport.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/completion_queue.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"
@@ -54,6 +58,9 @@ namespace {
// max-threads set) to the server builder.
#define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX
+// How many callback requests of each method should we pre-register at start
+#define DEFAULT_CALLBACK_REQS_PER_METHOD 32
+
class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
public:
~DefaultGlobalCallbacks() override {}
@@ -77,10 +84,7 @@ class ShutdownTag : public internal::CompletionQueueTag {
class DummyTag : public internal::CompletionQueueTag {
public:
- bool FinalizeResult(void** tag, bool* status) {
- *status = true;
- return true;
- }
+ bool FinalizeResult(void** tag, bool* status) { return true; }
};
class UnimplementedAsyncRequestContext {
@@ -127,10 +131,13 @@ class Server::UnimplementedAsyncResponse final
~UnimplementedAsyncResponse() { delete request_; }
bool FinalizeResult(void** tag, bool* status) override {
- internal::CallOpSet<
- internal::CallOpSendInitialMetadata,
- internal::CallOpServerSendStatus>::FinalizeResult(tag, status);
- delete this;
+ if (internal::CallOpSet<
+ internal::CallOpSendInitialMetadata,
+ internal::CallOpServerSendStatus>::FinalizeResult(tag, status)) {
+ delete this;
+ } else {
+ // The tag was swallowed due to interception. We will see it again.
+ }
return false;
}
@@ -140,9 +147,9 @@ class Server::UnimplementedAsyncResponse final
class Server::SyncRequest final : public internal::CompletionQueueTag {
public:
- SyncRequest(internal::RpcServiceMethod* method, void* tag)
+ SyncRequest(internal::RpcServiceMethod* method, void* method_tag)
: method_(method),
- tag_(tag),
+ method_tag_(method_tag),
in_flight_(false),
has_request_payload_(
method->method_type() == internal::RpcMethod::NORMAL_RPC ||
@@ -169,10 +176,10 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
GPR_ASSERT(cq_ && !in_flight_);
in_flight_ = true;
- if (tag_) {
+ if (method_tag_) {
if (GRPC_CALL_OK !=
grpc_server_request_registered_call(
- server, tag_, &call_, &deadline_, &request_metadata_,
+ server, method_tag_, &call_, &deadline_, &request_metadata_,
has_request_payload_ ? &request_payload_ : nullptr, cq_,
notify_cq, this)) {
TeardownRequest();
@@ -192,9 +199,21 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
}
}
+ void PostShutdownCleanup() {
+ if (call_) {
+ grpc_call_unref(call_);
+ call_ = nullptr;
+ }
+ if (cq_) {
+ grpc_completion_queue_destroy(cq_);
+ cq_ = nullptr;
+ }
+ }
+
bool FinalizeResult(void** tag, bool* status) override {
if (!*status) {
grpc_completion_queue_destroy(cq_);
+ cq_ = nullptr;
}
if (call_details_) {
deadline_ = call_details_->deadline;
@@ -204,17 +223,26 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
return true;
}
+ // The CallData class represents a call that is "active" as opposed
+ // to just being requested. It wraps and takes ownership of the cq from
+ // the call request
class CallData final {
public:
explicit CallData(Server* server, SyncRequest* mrd)
: cq_(mrd->cq_),
- call_(mrd->call_, server, &cq_, server->max_receive_message_size()),
ctx_(mrd->deadline_, &mrd->request_metadata_),
has_request_payload_(mrd->has_request_payload_),
request_payload_(has_request_payload_ ? mrd->request_payload_
: nullptr),
+ request_(nullptr),
method_(mrd->method_),
- server_(server) {
+ call_(
+ mrd->call_, server, &cq_, server->max_receive_message_size(),
+ ctx_.set_server_rpc_info(method_->name(), method_->method_type(),
+ server->interceptor_creators_)),
+ server_(server),
+ global_callbacks_(nullptr),
+ resources_(false) {
ctx_.set_call(mrd->call_);
ctx_.cq_ = &cq_;
GPR_ASSERT(mrd->in_flight_);
@@ -230,38 +258,79 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
void Run(const std::shared_ptr<GlobalCallbacks>& global_callbacks,
bool resources) {
- ctx_.BeginCompletionOp(&call_);
- global_callbacks->PreSynchronousRequest(&ctx_);
- auto* handler = resources ? method_->handler()
- : server_->resource_exhausted_handler_.get();
- handler->RunHandler(internal::MethodHandler::HandlerParameter(
- &call_, &ctx_, request_payload_));
- global_callbacks->PostSynchronousRequest(&ctx_);
- request_payload_ = nullptr;
-
- cq_.Shutdown();
+ global_callbacks_ = global_callbacks;
+ resources_ = resources;
+
+ interceptor_methods_.SetCall(&call_);
+ interceptor_methods_.SetReverse();
+ // Set interception point for RECV INITIAL METADATA
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
+ interceptor_methods_.SetRecvInitialMetadata(&ctx_.client_metadata_);
+
+ if (has_request_payload_) {
+ // Set interception point for RECV MESSAGE
+ auto* handler = resources_ ? method_->handler()
+ : server_->resource_exhausted_handler_.get();
+ request_ = handler->Deserialize(call_.call(), request_payload_,
+ &request_status_);
+
+ request_payload_ = nullptr;
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
+ interceptor_methods_.SetRecvMessage(request_);
+ }
- internal::CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
- cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
+ if (interceptor_methods_.RunInterceptors(
+ [this]() { ContinueRunAfterInterception(); })) {
+ ContinueRunAfterInterception();
+ } else {
+ // There were interceptors to be run, so ContinueRunAfterInterception
+ // will be run when interceptors are done.
+ }
+ }
- /* Ensure the cq_ is shutdown */
- DummyTag ignored_tag;
- GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);
+ void ContinueRunAfterInterception() {
+ {
+ ctx_.BeginCompletionOp(&call_, nullptr, nullptr);
+ global_callbacks_->PreSynchronousRequest(&ctx_);
+ auto* handler = resources_ ? method_->handler()
+ : server_->resource_exhausted_handler_.get();
+ handler->RunHandler(internal::MethodHandler::HandlerParameter(
+ &call_, &ctx_, request_, request_status_, nullptr));
+ request_ = nullptr;
+ global_callbacks_->PostSynchronousRequest(&ctx_);
+
+ cq_.Shutdown();
+
+ internal::CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
+ cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
+
+ /* Ensure the cq_ is shutdown */
+ DummyTag ignored_tag;
+ GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);
+ }
+ delete this;
}
private:
CompletionQueue cq_;
- internal::Call call_;
ServerContext ctx_;
const bool has_request_payload_;
grpc_byte_buffer* request_payload_;
+ void* request_;
+ Status request_status_;
internal::RpcServiceMethod* const method_;
+ internal::Call call_;
Server* server_;
+ std::shared_ptr<GlobalCallbacks> global_callbacks_;
+ bool resources_;
+ internal::InterceptorBatchMethodsImpl interceptor_methods_;
};
private:
internal::RpcServiceMethod* const method_;
- void* const tag_;
+ void* const method_tag_;
bool in_flight_;
const bool has_request_payload_;
grpc_call* call_;
@@ -272,6 +341,176 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
grpc_completion_queue* cq_;
};
+class Server::CallbackRequest final : public internal::CompletionQueueTag {
+ public:
+ CallbackRequest(Server* server, internal::RpcServiceMethod* method,
+ void* method_tag)
+ : server_(server),
+ method_(method),
+ method_tag_(method_tag),
+ has_request_payload_(
+ method->method_type() == internal::RpcMethod::NORMAL_RPC ||
+ method->method_type() == internal::RpcMethod::SERVER_STREAMING),
+ cq_(server->CallbackCQ()),
+ tag_(this) {
+ Setup();
+ }
+
+ ~CallbackRequest() { Clear(); }
+
+ void Request() {
+ if (method_tag_) {
+ if (GRPC_CALL_OK !=
+ grpc_server_request_registered_call(
+ server_->c_server(), method_tag_, &call_, &deadline_,
+ &request_metadata_,
+ has_request_payload_ ? &request_payload_ : nullptr, cq_->cq(),
+ cq_->cq(), static_cast<void*>(&tag_))) {
+ return;
+ }
+ } else {
+ if (!call_details_) {
+ call_details_ = new grpc_call_details;
+ grpc_call_details_init(call_details_);
+ }
+ if (grpc_server_request_call(server_->c_server(), &call_, call_details_,
+ &request_metadata_, cq_->cq(), cq_->cq(),
+ static_cast<void*>(&tag_)) != GRPC_CALL_OK) {
+ return;
+ }
+ }
+ }
+
+ bool FinalizeResult(void** tag, bool* status) override { return false; }
+
+ private:
+ class CallbackCallTag : public grpc_experimental_completion_queue_functor {
+ public:
+ CallbackCallTag(Server::CallbackRequest* req) : req_(req) {
+ functor_run = &CallbackCallTag::StaticRun;
+ }
+
+ // force_run cannot be performed on a tag if operations using this tag
+ // have been sent to PerformOpsOnCall. It is intended for error conditions
+ // that are detected before the operations are internally processed.
+ void force_run(bool ok) { Run(ok); }
+
+ private:
+ Server::CallbackRequest* req_;
+ internal::Call* call_;
+
+ static void StaticRun(grpc_experimental_completion_queue_functor* cb,
+ int ok) {
+ static_cast<CallbackCallTag*>(cb)->Run(static_cast<bool>(ok));
+ }
+ void Run(bool ok) {
+ void* ignored = req_;
+ bool new_ok = ok;
+ GPR_ASSERT(!req_->FinalizeResult(&ignored, &new_ok));
+ GPR_ASSERT(ignored == req_);
+
+ if (!ok) {
+ // The call has been shutdown
+ req_->Clear();
+ return;
+ }
+
+ // Bind the call, deadline, and metadata from what we got
+ req_->ctx_.set_call(req_->call_);
+ req_->ctx_.cq_ = req_->cq_;
+ req_->ctx_.BindDeadlineAndMetadata(req_->deadline_,
+ &req_->request_metadata_);
+ req_->request_metadata_.count = 0;
+
+ // Create a C++ Call to control the underlying core call
+ call_ = new (grpc_call_arena_alloc(req_->call_, sizeof(internal::Call)))
+ internal::Call(
+ req_->call_, req_->server_, req_->cq_,
+ req_->server_->max_receive_message_size(),
+ req_->ctx_.set_server_rpc_info(
+ req_->method_->name(), req_->method_->method_type(),
+ req_->server_->interceptor_creators_));
+
+ req_->interceptor_methods_.SetCall(call_);
+ req_->interceptor_methods_.SetReverse();
+ // Set interception point for RECV INITIAL METADATA
+ req_->interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
+ req_->interceptor_methods_.SetRecvInitialMetadata(
+ &req_->ctx_.client_metadata_);
+
+ if (req_->has_request_payload_) {
+ // Set interception point for RECV MESSAGE
+ req_->request_ = req_->method_->handler()->Deserialize(
+ req_->call_, req_->request_payload_, &req_->request_status_);
+ req_->request_payload_ = nullptr;
+ req_->interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
+ req_->interceptor_methods_.SetRecvMessage(req_->request_);
+ }
+
+ if (req_->interceptor_methods_.RunInterceptors(
+ [this] { ContinueRunAfterInterception(); })) {
+ ContinueRunAfterInterception();
+ } else {
+ // There were interceptors to be run, so ContinueRunAfterInterception
+ // will be run when interceptors are done.
+ }
+ }
+ void ContinueRunAfterInterception() {
+ req_->method_->handler()->RunHandler(
+ internal::MethodHandler::HandlerParameter(
+ call_, &req_->ctx_, req_->request_, req_->request_status_,
+ [this] {
+ req_->Reset();
+ req_->Request();
+ }));
+ }
+ };
+
+ void Reset() {
+ Clear();
+ Setup();
+ }
+
+ void Clear() {
+ if (call_details_) {
+ delete call_details_;
+ call_details_ = nullptr;
+ }
+ grpc_metadata_array_destroy(&request_metadata_);
+ if (has_request_payload_ && request_payload_) {
+ grpc_byte_buffer_destroy(request_payload_);
+ }
+ ctx_.Clear();
+ interceptor_methods_.ClearState();
+ }
+
+ void Setup() {
+ grpc_metadata_array_init(&request_metadata_);
+ ctx_.Setup(gpr_inf_future(GPR_CLOCK_REALTIME));
+ request_payload_ = nullptr;
+ request_ = nullptr;
+ request_status_ = Status();
+ }
+
+ Server* const server_;
+ internal::RpcServiceMethod* const method_;
+ void* const method_tag_;
+ const bool has_request_payload_;
+ grpc_byte_buffer* request_payload_;
+ void* request_;
+ Status request_status_;
+ grpc_call_details* call_details_ = nullptr;
+ grpc_call* call_;
+ gpr_timespec deadline_;
+ grpc_metadata_array request_metadata_;
+ CompletionQueue* cq_;
+ CallbackCallTag tag_;
+ ServerContext ctx_;
+ internal::InterceptorBatchMethodsImpl interceptor_methods_;
+};
+
// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
// manages a pool of threads that poll for incoming Sync RPCs and call the
// appropriate RPC handlers
@@ -318,8 +557,9 @@ class Server::SyncRequestThreadManager : public ThreadManager {
}
if (ok) {
- // Calldata takes ownership of the completion queue inside sync_req
- SyncRequest::CallData cd(server_, sync_req);
+ // Calldata takes ownership of the completion queue and interceptors
+ // inside sync_req
+ auto* cd = new SyncRequest::CallData(server_, sync_req);
// Prepare for the next request
if (!IsShutdown()) {
sync_req->SetupRequest(); // Create new completion queue for sync_req
@@ -327,7 +567,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
}
GPR_TIMER_SCOPE("cd.Run()", 0);
- cd.Run(global_callbacks_, resources);
+ cd->Run(global_callbacks_, resources);
}
// TODO (sreek) If ok is false here (which it isn't in case of
// grpc_request_registered_call), we should still re-queue the request
@@ -359,7 +599,17 @@ class Server::SyncRequestThreadManager : public ThreadManager {
void* tag;
bool ok;
while (server_cq_->Next(&tag, &ok)) {
- // Do nothing
+ if (ok) {
+ // If a request was pulled off the queue, it means that the thread
+ // handling the request added it to the completion queue after shutdown
+ // was called - because the thread had already started and checked the
+ // shutdown flag before shutdown was called. In this case, we simply
+ // clean it up here, *after* calling wait on all the worker threads, at
+ // which point we are certain no in-flight requests will add more to the
+ // queue. This fixes an intermittent memory leak on shutdown.
+ SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
+ sync_req->PostShutdownCleanup();
+ }
}
}
@@ -389,8 +639,12 @@ Server::Server(
std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
sync_server_cqs,
int min_pollers, int max_pollers, int sync_cq_timeout_msec,
- grpc_resource_quota* server_rq)
- : max_receive_message_size_(max_receive_message_size),
+ grpc_resource_quota* server_rq,
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ interceptor_creators)
+ : interceptor_creators_(std::move(interceptor_creators)),
+ max_receive_message_size_(max_receive_message_size),
sync_server_cqs_(std::move(sync_server_cqs)),
started_(false),
shutdown_(false),
@@ -446,6 +700,9 @@ Server::Server(
Server::~Server() {
{
std::unique_lock<std::mutex> lock(mu_);
+ if (callback_cq_ != nullptr) {
+ callback_cq_->Shutdown();
+ }
if (started_ && !shutdown_) {
lock.unlock();
Shutdown();
@@ -473,14 +730,15 @@ std::shared_ptr<Channel> Server::InProcessChannel(
grpc_channel_args channel_args = args.c_channel_args();
return CreateChannelInternal(
"inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr),
- nullptr);
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>());
}
std::shared_ptr<Channel>
Server::experimental_type::InProcessChannelWithInterceptors(
const ChannelArguments& args,
- std::unique_ptr<std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>>
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
interceptor_creators) {
grpc_channel_args channel_args = args.c_channel_args();
return CreateChannelInternal(
@@ -518,21 +776,31 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
}
internal::RpcServiceMethod* method = it->get();
- void* tag = grpc_server_register_method(
+ void* method_registration_tag = grpc_server_register_method(
server_, method->name(), host ? host->c_str() : nullptr,
PayloadHandlingForMethod(method), 0);
- if (tag == nullptr) {
+ if (method_registration_tag == nullptr) {
gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
method->name());
return false;
}
- if (method->handler() == nullptr) { // Async method
- method->set_server_tag(tag);
- } else {
+ if (method->handler() == nullptr) { // Async method without handler
+ method->set_server_tag(method_registration_tag);
+ } else if (method->api_type() ==
+ internal::RpcServiceMethod::ApiType::SYNC) {
for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
- (*it)->AddSyncMethod(method, tag);
+ (*it)->AddSyncMethod(method, method_registration_tag);
+ }
+ } else {
+ // a callback method. Register at least some callback requests
+ // TODO(vjpai): Register these dynamically based on need
+ for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
+ auto* req = new CallbackRequest(this, method, method_registration_tag);
+ callback_reqs_.emplace_back(req);
}
+ // Enqueue it so that it will be Request'ed later once
+ // all request matchers are created at core server startup
}
method_name = method->name();
@@ -583,7 +851,8 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
// performance. This ensures that we don't introduce thread hops
// for application requests that wind up on this CQ, which is polled
// in its own thread.
- health_check_cq = new ServerCompletionQueue(GRPC_CQ_NON_POLLING);
+ health_check_cq =
+ new ServerCompletionQueue(GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING, nullptr);
grpc_server_register_completion_queue(server_, health_check_cq->cq(),
nullptr);
default_health_check_service_impl =
@@ -620,6 +889,10 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
(*it)->Start();
}
+ for (auto& cbreq : callback_reqs_) {
+ cbreq->Request();
+ }
+
if (default_health_check_service_impl != nullptr) {
default_health_check_service_impl->StartServingThread();
}
@@ -681,31 +954,27 @@ void Server::Wait() {
void Server::PerformOpsOnCall(internal::CallOpSetInterface* ops,
internal::Call* call) {
- static const size_t MAX_OPS = 8;
- size_t nops = 0;
- grpc_op cops[MAX_OPS];
- ops->FillOps(call->call(), cops, &nops);
- auto result =
- grpc_call_start_batch(call->call(), cops, nops, ops->cq_tag(), nullptr);
- if (result != GRPC_CALL_OK) {
- gpr_log(GPR_ERROR, "Fatal: grpc_call_start_batch returned %d", result);
- grpc_call_log_batch(__FILE__, __LINE__, GPR_LOG_SEVERITY_ERROR,
- call->call(), cops, nops, ops);
- abort();
- }
+ ops->FillOps(call);
}
ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
- void* tag, bool delete_on_finalize)
+ ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
: server_(server),
context_(context),
stream_(stream),
call_cq_(call_cq),
+ notification_cq_(notification_cq),
tag_(tag),
delete_on_finalize_(delete_on_finalize),
- call_(nullptr) {
+ call_(nullptr),
+ done_intercepting_(false) {
+ /* Set up interception state partially for the receive ops. call_wrapper_ is
+ * not filled at this point, but it will be filled before the interceptors are
+ * run. */
+ interceptor_methods_.SetCall(&call_wrapper_);
+ interceptor_methods_.SetReverse();
call_cq_->RegisterAvalanching(); // This op will trigger more ops
}
@@ -715,15 +984,43 @@ ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
bool* status) {
+ if (done_intercepting_) {
+ *tag = tag_;
+ if (delete_on_finalize_) {
+ delete this;
+ }
+ return true;
+ }
context_->set_call(call_);
context_->cq_ = call_cq_;
- internal::Call call(call_, server_, call_cq_,
- server_->max_receive_message_size());
- if (*status && call_) {
- context_->BeginCompletionOp(&call);
+ if (call_wrapper_.call() == nullptr) {
+ // Fill it since it is empty.
+ call_wrapper_ = internal::Call(
+ call_, server_, call_cq_, server_->max_receive_message_size(), nullptr);
}
+
// just the pointers inside call are copied here
- stream_->BindCall(&call);
+ stream_->BindCall(&call_wrapper_);
+
+ if (*status && call_ && call_wrapper_.server_rpc_info()) {
+ done_intercepting_ = true;
+ // Set interception point for RECV INITIAL METADATA
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
+ interceptor_methods_.SetRecvInitialMetadata(&context_->client_metadata_);
+ if (interceptor_methods_.RunInterceptors(
+ [this]() { ContinueFinalizeResultAfterInterception(); })) {
+ // There are no interceptors to run. Continue
+ } else {
+ // There were interceptors to be run, so
+ // ContinueFinalizeResultAfterInterception will be run when interceptors
+ // are done.
+ return false;
+ }
+ }
+ if (*status && call_) {
+ context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
+ }
*tag = tag_;
if (delete_on_finalize_) {
delete this;
@@ -731,11 +1028,27 @@ bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
return true;
}
+void ServerInterface::BaseAsyncRequest::
+ ContinueFinalizeResultAfterInterception() {
+ context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
+ // Queue a tag which will be returned immediately
+ grpc_core::ExecCtx exec_ctx;
+ grpc_cq_begin_op(notification_cq_->cq(), this);
+ grpc_cq_end_op(
+ notification_cq_->cq(), this, GRPC_ERROR_NONE,
+ [](void* arg, grpc_cq_completion* completion) { delete completion; },
+ nullptr, new grpc_cq_completion());
+}
+
ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
ServerInterface* server, ServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
- void* tag)
- : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}
+ ServerCompletionQueue* notification_cq, void* tag, const char* name,
+ internal::RpcMethod::RpcType type)
+ : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
+ true),
+ name_(name),
+ type_(type) {}
void ServerInterface::RegisteredAsyncRequest::IssueRequest(
void* registered_method, grpc_byte_buffer** payload,
@@ -751,7 +1064,7 @@ ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
ServerInterface* server, GenericServerContext* context,
internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
- : BaseAsyncRequest(server, context, stream, call_cq, tag,
+ : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
delete_on_finalize) {
grpc_call_details_init(&call_details_);
GPR_ASSERT(notification_cq);
@@ -764,6 +1077,10 @@ ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
bool* status) {
+ // If we are done intercepting, there is nothing more for us to do
+ if (done_intercepting_) {
+ return BaseAsyncRequest::FinalizeResult(tag, status);
+ }
// TODO(yangg) remove the copy here.
if (*status) {
static_cast<GenericServerContext*>(context_)->method_ =
@@ -774,16 +1091,27 @@ bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
}
grpc_slice_unref(call_details_.method);
grpc_slice_unref(call_details_.host);
+ call_wrapper_ = internal::Call(
+ call_, server_, call_cq_, server_->max_receive_message_size(),
+ context_->set_server_rpc_info(
+ static_cast<GenericServerContext*>(context_)->method_.c_str(),
+ internal::RpcMethod::BIDI_STREAMING,
+ *server_->interceptor_creators()));
return BaseAsyncRequest::FinalizeResult(tag, status);
}
bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
bool* status) {
- if (GenericAsyncRequest::FinalizeResult(tag, status) && *status) {
- new UnimplementedAsyncRequest(server_, cq_);
- new UnimplementedAsyncResponse(this);
+ if (GenericAsyncRequest::FinalizeResult(tag, status)) {
+ // We either had no interceptors run or we are done intercepting
+ if (*status) {
+ new UnimplementedAsyncRequest(server_, cq_);
+ new UnimplementedAsyncResponse(this);
+ } else {
+ delete this;
+ }
} else {
- delete this;
+ // The tag was swallowed due to interception. We will see it again.
}
return false;
}
@@ -798,4 +1126,41 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
ServerInitializer* Server::initializer() { return server_initializer_.get(); }
+namespace {
+class ShutdownCallback : public grpc_experimental_completion_queue_functor {
+ public:
+ ShutdownCallback() { functor_run = &ShutdownCallback::Run; }
+ // TakeCQ takes ownership of the cq into the shutdown callback
+ // so that the shutdown callback will be responsible for destroying it
+ void TakeCQ(CompletionQueue* cq) { cq_ = cq; }
+
+ // The Run function will get invoked by the completion queue library
+ // when the shutdown is actually complete
+ static void Run(grpc_experimental_completion_queue_functor* cb, int) {
+ auto* callback = static_cast<ShutdownCallback*>(cb);
+ delete callback->cq_;
+ delete callback;
+ }
+
+ private:
+ CompletionQueue* cq_ = nullptr;
+};
+} // namespace
+
+CompletionQueue* Server::CallbackCQ() {
+ // TODO(vjpai): Consider using a single global CQ for the default CQ
+ // if there is no explicit per-server CQ registered
+ std::lock_guard<std::mutex> l(mu_);
+ if (callback_cq_ == nullptr) {
+ auto* shutdown_callback = new ShutdownCallback;
+ callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{
+ GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
+ shutdown_callback});
+
+ // Transfer ownership of the new cq to its own shutdown callback
+ shutdown_callback->TakeCQ(callback_cq_);
+ }
+ return callback_cq_;
+};
+
} // namespace grpc
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index bd532a968d..1b524bc3e8 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -17,6 +17,7 @@
*/
#include <grpcpp/server_context.h>
+#include <grpcpp/support/server_callback.h>
#include <algorithm>
#include <mutex>
@@ -41,13 +42,30 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
public:
// initial refs: one in the server context, one in the cq
// must ref the call before calling constructor and after deleting this
- CompletionOp(grpc_call* call)
- : call_(call),
+ CompletionOp(internal::Call* call, internal::ServerReactor* reactor)
+ : call_(*call),
+ reactor_(reactor),
has_tag_(false),
tag_(nullptr),
+ core_cq_tag_(this),
refs_(2),
finalized_(false),
- cancelled_(0) {}
+ cancelled_(0),
+ done_intercepting_(false) {}
+
+ // CompletionOp isn't copyable or movable
+ CompletionOp(const CompletionOp&) = delete;
+ CompletionOp& operator=(const CompletionOp&) = delete;
+ CompletionOp(CompletionOp&&) = delete;
+ CompletionOp& operator=(CompletionOp&&) = delete;
+
+ ~CompletionOp() {
+ if (call_.server_rpc_info()) {
+ call_.server_rpc_info()->Unref();
+ }
+ }
+
+ void FillOps(internal::Call* call) override;
// This should always be arena allocated in the call, so override delete.
// But this class is not trivially destructible, so must actually call delete
@@ -63,7 +81,6 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { assert(0); }
- void FillOps(grpc_call* call, grpc_op* ops, size_t* nops) override;
bool FinalizeResult(void** tag, bool* status) override;
bool CheckCancelled(CompletionQueue* cq) {
@@ -77,107 +94,207 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
tag_ = tag;
}
- /// TODO(vjpai): Allow override of cq_tag if appropriate for callback API
- void* cq_tag() override { return this; }
+ void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }
+
+ void* core_cq_tag() override { return core_cq_tag_; }
void Unref();
+ // This will be called while interceptors are run if the RPC is a hijacked
+ // RPC. This should set hijacking state for each of the ops.
+ void SetHijackingState() override {
+ /* Servers don't allow hijacking */
+ GPR_CODEGEN_ASSERT(false);
+ }
+
+ /* Should be called after interceptors are done running */
+ void ContinueFillOpsAfterInterception() override {}
+
+ /* Should be called after interceptors are done running on the finalize result
+ * path */
+ void ContinueFinalizeResultAfterInterception() override {
+ done_intercepting_ = true;
+ if (!has_tag_) {
+ /* We don't have a tag to return. */
+ std::unique_lock<std::mutex> lock(mu_);
+ if (--refs_ == 0) {
+ lock.unlock();
+ grpc_call* call = call_.call();
+ delete this;
+ grpc_call_unref(call);
+ }
+ return;
+ }
+ /* Start a dummy op so that we can return the tag */
+ GPR_CODEGEN_ASSERT(
+ GRPC_CALL_OK ==
+ grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag_, nullptr));
+ }
+
private:
bool CheckCancelledNoPluck() {
std::lock_guard<std::mutex> g(mu_);
return finalized_ ? (cancelled_ != 0) : false;
}
- grpc_call* call_;
+ internal::Call call_;
+ internal::ServerReactor* reactor_;
bool has_tag_;
void* tag_;
+ void* core_cq_tag_;
std::mutex mu_;
int refs_;
bool finalized_;
- int cancelled_;
+ int cancelled_; // This is an int (not bool) because it is passed to core
+ bool done_intercepting_;
+ internal::InterceptorBatchMethodsImpl interceptor_methods_;
};
void ServerContext::CompletionOp::Unref() {
std::unique_lock<std::mutex> lock(mu_);
if (--refs_ == 0) {
lock.unlock();
- // Save aside the call pointer before deleting for later unref
- grpc_call* call = call_;
+ grpc_call* call = call_.call();
delete this;
grpc_call_unref(call);
}
}
-void ServerContext::CompletionOp::FillOps(grpc_call* call, grpc_op* ops,
- size_t* nops) {
- ops->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
- ops->data.recv_close_on_server.cancelled = &cancelled_;
- ops->flags = 0;
- ops->reserved = nullptr;
- *nops = 1;
+void ServerContext::CompletionOp::FillOps(internal::Call* call) {
+ grpc_op ops;
+ ops.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+ ops.data.recv_close_on_server.cancelled = &cancelled_;
+ ops.flags = 0;
+ ops.reserved = nullptr;
+ interceptor_methods_.SetCall(&call_);
+ interceptor_methods_.SetReverse();
+ interceptor_methods_.SetCallOpSetInterface(this);
+ GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call->call(), &ops, 1,
+ core_cq_tag_, nullptr));
+ /* No interceptors to run here */
}
bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
+ bool ret = false;
std::unique_lock<std::mutex> lock(mu_);
+ if (done_intercepting_) {
+ /* We are done intercepting. */
+ if (has_tag_) {
+ *tag = tag_;
+ ret = true;
+ }
+ if (--refs_ == 0) {
+ lock.unlock();
+ grpc_call* call = call_.call();
+ delete this;
+ grpc_call_unref(call);
+ }
+ return ret;
+ }
finalized_ = true;
- bool ret = false;
- if (has_tag_) {
- *tag = tag_;
- ret = true;
+
+ // If for some reason the incoming status is false, mark that as a
+ // cancellation.
+ // TODO(vjpai): does this ever happen?
+ if (!*status) {
+ cancelled_ = 1;
}
- if (!*status) cancelled_ = 1;
- if (--refs_ == 0) {
- lock.unlock();
- // Save aside the call pointer before deleting for later unref
- grpc_call* call = call_;
- delete this;
- grpc_call_unref(call);
+
+ if (cancelled_ && (reactor_ != nullptr)) {
+ reactor_->OnCancel();
}
- return ret;
+ /* Release the lock since we are going to be running through interceptors now
+ */
+ lock.unlock();
+ /* Add interception point and run through interceptors */
+ interceptor_methods_.AddInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_CLOSE);
+ if (interceptor_methods_.RunInterceptors()) {
+ /* No interceptors were run */
+ if (has_tag_) {
+ *tag = tag_;
+ ret = true;
+ }
+ lock.lock();
+ if (--refs_ == 0) {
+ lock.unlock();
+ grpc_call* call = call_.call();
+ delete this;
+ grpc_call_unref(call);
+ }
+ return ret;
+ }
+ /* There are interceptors to be run. Return false for now */
+ return false;
}
// ServerContext body
-ServerContext::ServerContext()
- : completion_op_(nullptr),
- has_notify_when_done_tag_(false),
- async_notify_when_done_tag_(nullptr),
- deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)),
- call_(nullptr),
- cq_(nullptr),
- sent_initial_metadata_(false),
- compression_level_set_(false),
- has_pending_ops_(false) {}
-
-ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
- : completion_op_(nullptr),
- has_notify_when_done_tag_(false),
- async_notify_when_done_tag_(nullptr),
- deadline_(deadline),
- call_(nullptr),
- cq_(nullptr),
- sent_initial_metadata_(false),
- compression_level_set_(false),
- has_pending_ops_(false) {
+ServerContext::ServerContext() { Setup(gpr_inf_future(GPR_CLOCK_REALTIME)); }
+
+ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr) {
+ Setup(deadline);
std::swap(*client_metadata_.arr(), *arr);
}
-ServerContext::~ServerContext() {
- if (call_) {
- grpc_call_unref(call_);
- }
+void ServerContext::Setup(gpr_timespec deadline) {
+ completion_op_ = nullptr;
+ has_notify_when_done_tag_ = false;
+ async_notify_when_done_tag_ = nullptr;
+ deadline_ = deadline;
+ call_ = nullptr;
+ cq_ = nullptr;
+ sent_initial_metadata_ = false;
+ compression_level_set_ = false;
+ has_pending_ops_ = false;
+ rpc_info_ = nullptr;
+}
+
+void ServerContext::BindDeadlineAndMetadata(gpr_timespec deadline,
+ grpc_metadata_array* arr) {
+ deadline_ = deadline;
+ std::swap(*client_metadata_.arr(), *arr);
+}
+
+ServerContext::~ServerContext() { Clear(); }
+
+void ServerContext::Clear() {
+ auth_context_.reset();
+ initial_metadata_.clear();
+ trailing_metadata_.clear();
+ client_metadata_.Reset();
if (completion_op_) {
completion_op_->Unref();
+ completion_op_ = nullptr;
+ completion_tag_.Clear();
+ }
+ if (rpc_info_) {
+ rpc_info_->Unref();
+ rpc_info_ = nullptr;
+ }
+ if (call_) {
+ auto* call = call_;
+ call_ = nullptr;
+ grpc_call_unref(call);
}
}
-void ServerContext::BeginCompletionOp(internal::Call* call) {
+void ServerContext::BeginCompletionOp(internal::Call* call,
+ std::function<void(bool)> callback,
+ internal::ServerReactor* reactor) {
GPR_ASSERT(!completion_op_);
+ if (rpc_info_) {
+ rpc_info_->Ref();
+ }
grpc_call_ref(call->call());
completion_op_ =
new (grpc_call_arena_alloc(call->call(), sizeof(CompletionOp)))
- CompletionOp(call->call());
- if (has_notify_when_done_tag_) {
+ CompletionOp(call, reactor);
+ if (callback != nullptr) {
+ completion_tag_.Set(call->call(), std::move(callback), completion_op_);
+ completion_op_->set_core_cq_tag(&completion_tag_);
+ completion_op_->set_tag(completion_op_);
+ } else if (has_notify_when_done_tag_) {
completion_op_->set_tag(async_notify_when_done_tag_);
}
call->PerformOps(completion_op_);
@@ -198,6 +315,12 @@ void ServerContext::AddTrailingMetadata(const grpc::string& key,
}
void ServerContext::TryCancel() const {
+ internal::CancelInterceptorBatchMethods cancel_methods;
+ if (rpc_info_) {
+ for (size_t i = 0; i < rpc_info_->interceptors_.size(); i++) {
+ rpc_info_->RunInterceptor(&cancel_methods, i);
+ }
+ }
grpc_call_error err = grpc_call_cancel_with_status(
call_, GRPC_STATUS_CANCELLED, "Cancelled on the server side", nullptr);
if (err != GRPC_CALL_OK) {
@@ -206,12 +329,15 @@ void ServerContext::TryCancel() const {
}
bool ServerContext::IsCancelled() const {
- if (has_notify_when_done_tag_) {
- // when using async API, but the result is only valid
+ if (completion_tag_) {
+ // When using callback API, this result is always valid.
+ return completion_op_->CheckCancelledAsync();
+ } else if (has_notify_when_done_tag_) {
+ // When using async API, the result is only valid
// if the tag has already been delivered at the completion queue
return completion_op_ && completion_op_->CheckCancelledAsync();
} else {
- // when using sync API
+ // when using sync API, the result is always valid
return completion_op_ && completion_op_->CheckCancelled(cq_);
}
}
diff --git a/src/csharp/BUILD-INTEGRATION.md b/src/csharp/BUILD-INTEGRATION.md
new file mode 100644
index 0000000000..3addc2403c
--- /dev/null
+++ b/src/csharp/BUILD-INTEGRATION.md
@@ -0,0 +1,357 @@
+Protocol Buffers/gRPC Integration Into .NET Build
+=================================================
+
+With Grpc.Tools package version 1.17 we made it easier to compile .proto files
+in your project using the `dotnet build` command, Visual Studio, or command-line
+MSBuild. You need to configure the .csproj project according to the way you want
+to integrate Protocol Buffer files into your build. If you are upgrading an
+existing project, read through this list of common scenarios and decide if any
+one of them matches your approach. Migrating an existing protoc command line is
+explained near the end of this document; that migration may be the quickest path,
+but it is not a long-term solution.
+
+There is also a Reference section at the end of the file.
+
+Reporting issues
+----------------
+
+First things first: if you find a bug in this new build system, or have a
+scenario that is not easily covered, please open an [issue in the gRPC
+repository](https://github.com/grpc/grpc/issues), and **tag the user @kkm000**
+somewhere in the text (for example, include `/cc @kkm000` at the end of the issue
+text) to get his immediate attention.
+
+Common scenarios
+----------------
+
+### I just want to compile .proto files into my library
+
+This is the approach taken by the examples in the `examples/csharp` directory.
+Protoc output files (for example, `Helloworld.cs` and `HelloworldGrpc.cs`
+compiled from `helloworld.proto`) are placed among *object* and other temporary
+files of your project, and automatically provided as inputs to the C# compiler.
+As with other automatically generated .cs files, they are included in the source
+and symbols NuGet package, if you build one.
+
+Simply reference your .proto files in a `<Protobuf>` item group. The following
+example will add all .proto files in a project and all its subdirectories
+(excluding special directories such as `bin` and `obj`):
+
+```xml
+ <ItemGroup>
+ <Protobuf Include="**/*.proto" />
+ </ItemGroup>
+```
+
+You must add a reference to the NuGet packages Grpc.Tools and Grpc (the latter
+is a meta-package, in turn referencing Grpc.Core and Google.Protobuf packages).
+It is **very important** to mark Grpc.Tools as a development-only dependency, so
+that the *users* of your library do not fetch the tools package:
+
+* "Classic" .csproj with `packages.config` (Visual Studio, Mono): This is
+ handled automatically by NuGet. See the attribute added by Visual Studio to the
+ [packages.config](../../examples/csharp/HelloworldLegacyCsproj/Greeter/packages.config#L6)
+ file in the HelloworldLegacyCsproj/Greeter example.
+
+* "SDK" .csproj (Visual Studio, `dotnet new`): Add an attribute
+ `PrivateAssets="All"` to the Grpc.Tools package reference. See an example in the
+ [Greeter.csproj](../../examples/csharp/Helloworld/Greeter/Greeter.csproj#L10)
+  project in this repository; a minimal sketch also follows this list. If adding
+  a package reference in Visual Studio, edit the project file and add this
+  attribute by hand. [This is a bug in the NuGet client](https://github.com/NuGet/Home/issues/4125).
+
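+For an "SDK" project, the resulting package references would look roughly like
+the following sketch (the version numbers are only illustrative; use the
+versions you actually depend on):
+
+```xml
+  <ItemGroup>
+    <PackageReference Include="Grpc" Version="1.17.0" />
+    <!-- Development-only dependency: consumers of your library will not fetch it. -->
+    <PackageReference Include="Grpc.Tools" Version="1.17.0" PrivateAssets="All" />
+  </ItemGroup>
+```
+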
+If you build a NuGet package from your library with the nuget command line tool
+and a .nuspec file, the spec file may (and probably should) reference the Grpc
+metapackage, but **do not add a reference to Grpc.Tools** to it. .NET "SDK"
+projects handle this automatically when packed with `dotnet pack`, by excluding
+any packages with private assets, such as Grpc.Tools when marked as above.
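+
+For illustration only (package ids and versions below are placeholders), the
+dependency section of such a .nuspec would list just the runtime packages:
+
+```xml
+  <dependencies>
+    <!-- Runtime dependencies only; note that Grpc.Tools is deliberately absent. -->
+    <dependency id="Grpc" version="1.17.0" />
+    <dependency id="Google.Protobuf" version="3.6.1" />
+  </dependencies>
+```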
+
+#### Per-file options that can be set in Visual Studio
+
+For a "classic" project, you can only add .proto files with all options set to
+default (if you find it necessary to modify these options, then hand-edit the
+.csproj file). Click on the "show all files" button, add files to project, then
+change file type of the .proto files to "Protobuf" in the Properties window
+drop-down. This menu item will appear after you import the Grpc.Tools package:
+
+![Properties in a classic project](doc/integration.md-fig.1-classic.png)
+
+For an "SDK" project, you have more control of some frequently used options.
+**You may need to open and close Visual Studio** for this form to appear in the
+properties window after adding a reference to Grpc.Tools package (we do not know
+whether this is a bug or by design, but it looks like a bug):
+
+![Properties in an SDK project](doc/integration.md-fig.2-sdk.png)
+
+You can also change the options of multiple files at once by selecting them
+together in Solution Explorer.
+
+See the Reference section at the end of this file for options that can be set
+per file by modifying the source .csproj directly.
+
+#### My .proto files are in a directory outside the project
+
+Refer to the example files
+[RouteGuide.csproj](../../examples/csharp/RouteGuide/RouteGuide/RouteGuide.csproj#L58-L60)
+and [Greeter.csproj](../../examples/csharp/Helloworld/Greeter/Greeter.csproj#L11)
+in this repository. For the files to show up properly in Visual Studio, add a
+`Link` attribute with just a filename to the `<Protobuf>` item; this will be the
+display name of the file. In the `Include` attribute, specify the complete path
+to the file. A relative path is resolved against the project directory.
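+
+For instance (the path below is purely illustrative), an item for a .proto file
+that lives one level above the project might look like this:
+
+```xml
+  <ItemGroup>
+    <Protobuf Include="../protos/greet.proto" Link="greet.proto" />
+  </ItemGroup>
+```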
+
+Or, if using Visual Studio, add the files _as links_ from the outside directory.
+In the Add Files dialog, there is a little [down arrow near the Open
+button](https://stackoverflow.com/a/9770061). Click on it and choose "Add as
+link". If you do not select this option, Visual Studio will copy the files into
+the project directory instead.
+
+### I just want to generate proto and gRPC C# sources from my .proto files (no C# compile)
+
+Suppose you want to place the generated files right beside each respective source
+.proto file. Create a .csproj library file in the common root of your .proto
+tree, and add a reference to the Grpc.Tools package (this works on Windows too;
+`$` below stands for a command prompt on either platform):
+
+```
+/myproject/myprotofiles$ dotnet new classlib
+ . . .
+ Restoring packages for /myproject/myprotofiles/myprotofiles.csproj...
+ . . .
+/myproject/myprotofiles$ rm *.cs <-- remove all *.cs files from template;
+C:\myproject\myprotofiles> del /q *.cs   <-- on Windows, use the del command instead.
+/myproject/myprotofiles$ dotnet add package Grpc.Tools
+```
+
+(the latter command also accepts an optional `--version X.Y` switch for a
+specific version of package, should you need one). Next open the generated
+.csproj file in a text editor.
+
+Since you are not building a package, you do not need to worry about adding the
+`PrivateAssets="All"` attribute, but it will not hurt, in case you repurpose the
+project at some later time. The important parts are to (1) tell the gRPC tools
+to select the whole directory of files, (2) place each output beside its source,
+and (3) not compile the generated .cs files. Add the following stanza under the
+`<Project>` xml node:
+
+```xml
+ <ItemGroup>
+    <Protobuf Include="**/*.proto" OutputDir="%(RelativeDir)" CompileOutputs="false" />
+ </ItemGroup>
+```
+
+The `Include` tells the build system to recursively examine the project directory
+and its subdirectories (`**`) and include all files matching the wildcard `*.proto`.
+You can instead selectively include your files or selectively exclude files from
+the glob pattern; the [MSBuild documentation explains
+that](https://docs.microsoft.com/visualstudio/msbuild/msbuild-items). The
+`OutputDir="%(RelativeDir)"` sets the output directory for each .cs file to be the
+same as the corresponding .proto file's directory. Finally, `CompileOutputs="false"`
+prevents compiling the generated files into an assembly.
+
+Note that an empty assembly is still generated, but you should ignore it. As
+with any build system, it is used to detect out-of-date dependencies and
+recompile them.
+
+#### I am getting a warning about a missing expected file!
+
+When the compile is being prepared, there is no way to know whether a given proto
+file will produce a *Grpc.cs output or not. If the proto file has a `service`
+clause, it will; otherwise, it won't, but the build script cannot know that in
+advance. When the generated .cs files are treated as temporary, this is fine, but
+when they are generated for you, creating empty files is probably not. You need to
+tell the compiler which files should be compiled with gRPC services, and which
+only contain protocol buffer message definitions.
+
+One option is to just ignore the warning. Another is to silence it by setting the
+property `Protobuf_NoWarnMissingExpected` to `true`:
+
+```xml
+<PropertyGroup>
+ <Protobuf_NoWarnMissingExpected>true</Protobuf_NoWarnMissingExpected>
+</PropertyGroup>
+```
+
+For small to medium projects this is sufficient. But because of the missing
+output dependency, the corresponding .proto file will be recompiled on every
+build. If your project is large, or if other large builds depend on the generated
+files and are also needlessly recompiled, you'll want to prevent these rebuilds
+when the files have not in fact changed, as follows:
+
+##### Explicitly tell protoc for which files it should use the gRPC plugin
+
+You need to set the `Protobuf` item property `GrpcServices` to `None` for those
+.proto inputs which do not have a `service` declared (or, optionally, those
+which do but you do not want a service/client stub for). The default value of
+`GrpcServices` is `Both` (both the client and server stubs are generated). This
+is easy enough to do with glob patterns if your files are laid out in
+directories according to their service use, for example:
+
+```xml
+ <ItemGroup>
+    <Protobuf Include="**/*.proto" OutputDir="%(RelativeDir)"
+ CompileOutputs="false" GrpcServices="None" />
+ <Protobuf Update="**/hello/*.proto;**/bye/*.proto" GrpcServices="Both" />
+ </ItemGroup>
+```
+
+In this sample, all .proto files are compiled with `GrpcServices="None"`, except
+for .proto files in subdirectories named `hello/` or `bye/` at any tree level,
+which will take `GrpcServices="Both"`. Note the use of the `Update` attribute
+instead of `Include`. If you write `Include` by mistake, the files will be added
+to the compile *twice*, once with and once without gRPC services. Be careful not
+to do that!
+
+Another example would be the use of globbing if your service .proto files are
+named according to a pattern, for example `*_services.proto`. In this case, the
+`Update` attribute can be written as `Update="**/*_services.proto"`, to set the
+attribute `GrpcServices="Both"` only on these files, as shown in the sketch below.
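+
+A minimal sketch of that variant, assuming the service files really are named
+`*_services.proto`:
+
+```xml
+  <ItemGroup>
+    <Protobuf Include="**/*.proto" OutputDir="%(RelativeDir)"
+              CompileOutputs="false" GrpcServices="None" />
+    <Protobuf Update="**/*_services.proto" GrpcServices="Both" />
+  </ItemGroup>
+```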
+
+But what if no patterns work, and you cannot sort a large set of .proto files
+into those containing a service and those that do not? As a last resort,
+
+##### Force creating empty .cs files for missing outputs.
+
+Naturally, this results in a dirtier compiler output tree, but you may clean it
+up in other ways (for example, by not copying zero-length .cs files to their
+final destination). Remember, though, that the files are still important to keep
+in their output locations to prevent needless recompilation. You may force
+generating empty files by setting the property `Protobuf_TouchMissingExpected`
+to `true`:
+
+```xml
+ <PropertyGroup>
+ <Protobuf_TouchMissingExpected>true</Protobuf_TouchMissingExpected>
+ </PropertyGroup>
+```
+
+#### But I do not use gRPC at all, I only need protocol buffer messages compiled
+
+Set `GrpcServices="None"` on all proto files:
+
+```xml
+ <ItemGroup>
+ <Protobuf Include="**/*.proto" OutputDir="%(RelativeDir)"
+ CompileOutputs="false" GrpcServices="None" />
+ </ItemGroup>
+```
+
+#### That's good so far, but I do not want the `bin` and `obj` directories in my tree
+
+You may create the project in a subdirectory of the root of your files, for
+example `.build`. In this case, you want to refer to the proto files relative
+to that `.build/` directory, as follows:
+
+```xml
+ <ItemGroup>
+ <Protobuf Include="../**/*.proto" ProtoRoot=".."
+ OutputDir="%(RelativeDir)" CompileOutputs="false" />
+ </ItemGroup>
+```
+
+Pay attention to the `ProtoRoot` property. It needs to be set to the directory
+where `import` declarations in the .proto files are looking for files, since the
+project root is no longer the same as the proto root.
+
+Alternatively, you may place the project in a directory *above* your proto root,
+and refer to the files with a subdirectory name:
+
+```xml
+ <ItemGroup>
+ <Protobuf Include="proto_root/**/*.proto" ProtoRoot="proto_root"
+ OutputDir="%(RelativeDir)" CompileOutputs="false" />
+ </ItemGroup>
+```
+
+### Alas, this all is nice, but my scenario is more complex, -OR-
+### I'll investigate that when I have time. I just want to run protoc as I did before.
+
+One option is to examine our [.targets and .props files](Grpc.Tools/build/) and
+see if you can create your own build sequence from the provided targets so that
+it fits your needs. Also, please open an issue (and tag @kkm000 in it!) with your
+scenario. We'll try to support it if it appears general enough.
+
+But if you just want to run `protoc` using the MSBuild `<Exec>` task, as you
+probably did before version 1.17 of Grpc.Tools, we have a few build variables
+that point to the resolved names of the tools and the common protoc imports.
+You'll have to roll your own dependency checking (or go with a full
+recompilation each time, if that works for you), but at the very least each
+version of the Tools package will point to the correct location of the files,
+and resolve the compiler and plugin executables appropriate for the host system.
+These property variables are:
+
+* `Protobuf_ProtocFullPath` points to the full path and filename of the protoc executable, e.g.,
+ "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\bin\windows\protoc.exe".
+
+* `gRPC_PluginFullPath` points to the full path and filename of the gRPC plugin, such as
+ "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\bin\windows\grpc_csharp_plugin.exe"
+
+* `Protobuf_StandardImportsPath` points to the standard proto import directory, for example,
+ "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\include". This is
+ the directory where a declaration such as `import "google/protobuf/wrappers.proto";`
+ in a proto file would find its target.
+
+Use MSBuild property expansion syntax `$(VariableName)` in your protoc command
+line to substitute these variables, for instance,
+
+```xml
+ <Target Name="MyProtoCompile">
+ <PropertyGroup>
+ <ProtoCCommand>$(Protobuf_ProtocFullPath) --plugin=protoc-gen-grpc=$(gRPC_PluginFullPath) -I $(Protobuf_StandardImportsPath) ....rest of your command.... </ProtoCCommand>
+ </PropertyGroup>
+ <Message Importance="high" Text="$(ProtoCCommand)" />
+ <Exec Command="$(ProtoCCommand)" />
+ </Target>
+```
+
+Also make sure *not* to add any file names to the `Protobuf` item collection;
+otherwise they will be compiled by default. If, by any chance, you have used
+that item name in your own build scripting, you must rename it.
+
+### What about C++ projects?
+
+This is in the works. Currently, the same variables as above are set to point to
+the protoc binary, the C++ gRPC plugin, and the standard imports, but nothing else.
+Do not use the `Protobuf` item collection name, so that your project remains
+future-proof; we'll use it for C++ projects too.
+
+Reference
+---------
+
+### Protobuf item metadata reference
+
+The following metadata are recognized on the `<Protobuf>` items.
+
+| Name | Default | Value | Synopsis |
+|----------------|-----------|----------------------|----------------------------------|
+| Access | `public` | `public`, `internal` | Generated class access |
+| ProtoCompile | `true` | `true`, `false` | Pass files to protoc? |
+| ProtoRoot | See notes | A directory | Common root for set of files |
+| CompileOutputs | `true` | `true`, `false` | C#-compile generated files? |
+| OutputDir | See notes | A directory | Directory for generated C# files |
+| GrpcOutputDir | See notes | A directory | Directory for generated stubs |
+| GrpcServices   | `both`    | `none`, `client`, `server`, `both` | Generated gRPC stubs |
+
+__Notes__
+
+* __ProtoRoot__
+For files _inside_ the project cone, `ProtoRoot` is set by default to the
+project directory. For every file _outside_ of the project directory, the value
+is set to that file's containing directory, individually per file. If you
+include a subtree of proto files that lies outside of the project directory, you
+need to set this metadatum explicitly; there is an example earlier in this file.
+The path in this metadatum is relative to the project directory.
+
+* __OutputDir__
+The default value for this metadatum is the value of the property
+`Protobuf_OutputPath`. This property, in turn, unless you set it in your
+project, will be set to the value of the standard MSBuild property
+`IntermediateOutputPath`, which points to the location of compilation object
+outputs, such as "obj/Release/netstandard1.5/". The path in this property is
+considered relative to the project directory.
+
+* __GrpcOutputDir__
+Unless explicitly set, will follow `OutputDir` for any given file.
+
+* __Access__
+Sets generated class access on _both_ generated message and gRPC stub classes.
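+
+As a closing illustration (file and directory names are placeholders), several
+of these metadata can be combined on individual items:
+
+```xml
+  <ItemGroup>
+    <!-- Messages only; generated classes are internal to this assembly. -->
+    <Protobuf Include="protos/common.proto" ProtoRoot="protos"
+              Access="internal" GrpcServices="None" />
+    <!-- Full service: generate both the client and the server stubs. -->
+    <Protobuf Include="protos/greeter_services.proto" ProtoRoot="protos"
+              OutputDir="Generated" GrpcOutputDir="Generated" GrpcServices="Both" />
+  </ItemGroup>
+```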
diff --git a/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
index 3a161763fd..b10d7a1045 100644
--- a/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
+++ b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
@@ -25,7 +25,7 @@ namespace Grpc.Core.Tests
{
public class AppDomainUnloadTest
{
-#if NETCOREAPP1_0
+#if NETCOREAPP1_1 || NETCOREAPP2_1
[Test]
[Ignore("Not supported for CoreCLR")]
public void AppDomainUnloadHookCanCleanupAbandonedCall()
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index d58f046824..178931a3d7 100755
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Core.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Core.Tests</PackageId>
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
index e7d8939978..5c7d48f786 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
@@ -49,7 +49,7 @@ namespace Grpc.Core.Internal.Tests
fakeCall = new FakeNativeCall();
asyncCallServer = new AsyncCallServer<string, string>(
- Marshallers.StringMarshaller.Serializer, Marshallers.StringMarshaller.Deserializer,
+ Marshallers.StringMarshaller.ContextualSerializer, Marshallers.StringMarshaller.ContextualDeserializer,
server);
asyncCallServer.InitializeForTesting(fakeCall);
}
diff --git a/src/csharp/Grpc.Core.Tests/MarshallerTest.cs b/src/csharp/Grpc.Core.Tests/MarshallerTest.cs
index 97f64a0575..ad3e81d61f 100644
--- a/src/csharp/Grpc.Core.Tests/MarshallerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/MarshallerTest.cs
@@ -69,11 +69,8 @@ namespace Grpc.Core.Tests
Assert.AreSame(contextualSerializer, marshaller.ContextualSerializer);
Assert.AreSame(contextualDeserializer, marshaller.ContextualDeserializer);
-
- // test that emulated serializer and deserializer work
- var origMsg = "abc";
- var serialized = marshaller.Serializer(origMsg);
- Assert.AreEqual(origMsg, marshaller.Deserializer(serialized));
+ Assert.Throws(typeof(NotImplementedException), () => marshaller.Serializer("abc"));
+ Assert.Throws(typeof(NotImplementedException), () => marshaller.Deserializer(new byte[] {1, 2, 3}));
}
class FakeSerializationContext : SerializationContext
diff --git a/src/csharp/Grpc.Core.Tests/NUnitMain.cs b/src/csharp/Grpc.Core.Tests/NUnitMain.cs
index 49cb8cd3b9..3b206603f1 100644
--- a/src/csharp/Grpc.Core.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Core.Tests/NUnitMain.cs
@@ -34,11 +34,7 @@ namespace Grpc.Core.Tests
{
// Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
GrpcEnvironment.SetLogger(new ConsoleLogger());
-#if NETCOREAPP1_0
- return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
-#else
- return new AutoRun().Execute(args);
-#endif
+ return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
}
}
}
diff --git a/src/csharp/Grpc.Core.Tests/SanityTest.cs b/src/csharp/Grpc.Core.Tests/SanityTest.cs
index 0904453b6e..f785f70f4c 100644
--- a/src/csharp/Grpc.Core.Tests/SanityTest.cs
+++ b/src/csharp/Grpc.Core.Tests/SanityTest.cs
@@ -31,7 +31,7 @@ namespace Grpc.Core.Tests
public class SanityTest
{
// TODO: make sanity test work for CoreCLR as well
-#if !NETCOREAPP1_0
+#if !NETCOREAPP1_1 && !NETCOREAPP2_1
/// <summary>
/// Because we depend on a native library, sometimes when things go wrong, the
/// entire NUnit test process crashes. To be able to track down problems better,
@@ -44,7 +44,7 @@ namespace Grpc.Core.Tests
public void TestsJsonUpToDate()
{
var discoveredTests = DiscoverAllTestClasses();
- var testsFromFile
+ var testsFromFile
= JsonConvert.DeserializeObject<Dictionary<string, List<string>>>(ReadTestsJson());
Assert.AreEqual(discoveredTests, testsFromFile);
diff --git a/src/csharp/Grpc.Core/DeserializationContext.cs b/src/csharp/Grpc.Core/DeserializationContext.cs
index 5b6372ef85..d69e0db5bd 100644
--- a/src/csharp/Grpc.Core/DeserializationContext.cs
+++ b/src/csharp/Grpc.Core/DeserializationContext.cs
@@ -16,6 +16,8 @@
#endregion
+using System;
+
namespace Grpc.Core
{
/// <summary>
@@ -41,6 +43,9 @@ namespace Grpc.Core
/// (as there is no practical reason for doing so) and <c>DeserializationContext</c> implementations are free to assume so.
/// </summary>
/// <returns>byte array containing the entire payload.</returns>
- public abstract byte[] PayloadAsNewBuffer();
+ public virtual byte[] PayloadAsNewBuffer()
+ {
+ throw new NotImplementedException();
+ }
}
}
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index 4cdf0ee6a7..b6d687f71e 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -54,7 +54,7 @@ namespace Grpc.Core.Internal
ClientSideStatus? finishedStatus;
public AsyncCall(CallInvocationDetails<TRequest, TResponse> callDetails)
- : base(callDetails.RequestMarshaller.Serializer, callDetails.ResponseMarshaller.Deserializer)
+ : base(callDetails.RequestMarshaller.ContextualSerializer, callDetails.ResponseMarshaller.ContextualDeserializer)
{
this.details = callDetails.WithOptions(callDetails.Options.Normalize());
this.initialMetadataSent = true; // we always send metadata at the very beginning of the call.
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
index a93dc34620..39c9f7c616 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
@@ -40,8 +40,8 @@ namespace Grpc.Core.Internal
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<AsyncCallBase<TWrite, TRead>>();
protected static readonly Status DeserializeResponseFailureStatus = new Status(StatusCode.Internal, "Failed to deserialize response message.");
- readonly Func<TWrite, byte[]> serializer;
- readonly Func<byte[], TRead> deserializer;
+ readonly Action<TWrite, SerializationContext> serializer;
+ readonly Func<DeserializationContext, TRead> deserializer;
protected readonly object myLock = new object();
@@ -63,7 +63,7 @@ namespace Grpc.Core.Internal
protected bool initialMetadataSent;
protected long streamingWritesCounter; // Number of streaming send operations started so far.
- public AsyncCallBase(Func<TWrite, byte[]> serializer, Func<byte[], TRead> deserializer)
+ public AsyncCallBase(Action<TWrite, SerializationContext> serializer, Func<DeserializationContext, TRead> deserializer)
{
this.serializer = GrpcPreconditions.CheckNotNull(serializer);
this.deserializer = GrpcPreconditions.CheckNotNull(deserializer);
@@ -215,14 +215,26 @@ namespace Grpc.Core.Internal
protected byte[] UnsafeSerialize(TWrite msg)
{
- return serializer(msg);
+ DefaultSerializationContext context = null;
+ try
+ {
+ context = DefaultSerializationContext.GetInitializedThreadLocal();
+ serializer(msg, context);
+ return context.GetPayload();
+ }
+ finally
+ {
+ context?.Reset();
+ }
}
protected Exception TryDeserialize(byte[] payload, out TRead msg)
{
+ DefaultDeserializationContext context = null;
try
{
- msg = deserializer(payload);
+ context = DefaultDeserializationContext.GetInitializedThreadLocal(payload);
+ msg = deserializer(context);
return null;
}
catch (Exception e)
@@ -230,6 +242,11 @@ namespace Grpc.Core.Internal
msg = default(TRead);
return e;
}
+ finally
+ {
+ context?.Reset();
+
+ }
}
/// <summary>
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
index 0ceca4abb8..0bf1fb3b7d 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
@@ -37,7 +37,7 @@ namespace Grpc.Core.Internal
readonly CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();
readonly Server server;
- public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer, Server server) : base(serializer, deserializer)
+ public AsyncCallServer(Action<TResponse, SerializationContext> serializer, Func<DeserializationContext, TRequest> deserializer, Server server) : base(serializer, deserializer)
{
this.server = GrpcPreconditions.CheckNotNull(server);
}
diff --git a/src/csharp/Grpc.Core/Internal/DefaultDeserializationContext.cs b/src/csharp/Grpc.Core/Internal/DefaultDeserializationContext.cs
new file mode 100644
index 0000000000..7ace80e8d5
--- /dev/null
+++ b/src/csharp/Grpc.Core/Internal/DefaultDeserializationContext.cs
@@ -0,0 +1,66 @@
+#region Copyright notice and license
+
+// Copyright 2018 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using Grpc.Core.Utils;
+using System;
+using System.Threading;
+
+namespace Grpc.Core.Internal
+{
+ internal class DefaultDeserializationContext : DeserializationContext
+ {
+ static readonly ThreadLocal<DefaultDeserializationContext> threadLocalInstance =
+ new ThreadLocal<DefaultDeserializationContext>(() => new DefaultDeserializationContext(), false);
+
+ byte[] payload;
+ bool alreadyCalledPayloadAsNewBuffer;
+
+ public DefaultDeserializationContext()
+ {
+ Reset();
+ }
+
+ public override int PayloadLength => payload.Length;
+
+ public override byte[] PayloadAsNewBuffer()
+ {
+ GrpcPreconditions.CheckState(!alreadyCalledPayloadAsNewBuffer);
+ alreadyCalledPayloadAsNewBuffer = true;
+ return payload;
+ }
+
+ public void Initialize(byte[] payload)
+ {
+ this.payload = GrpcPreconditions.CheckNotNull(payload);
+ this.alreadyCalledPayloadAsNewBuffer = false;
+ }
+
+ public void Reset()
+ {
+ this.payload = null;
+ this.alreadyCalledPayloadAsNewBuffer = true; // mark payload as read
+ }
+
+ public static DefaultDeserializationContext GetInitializedThreadLocal(byte[] payload)
+ {
+ var instance = threadLocalInstance.Value;
+ instance.Initialize(payload);
+ return instance;
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core/Internal/DefaultSerializationContext.cs b/src/csharp/Grpc.Core/Internal/DefaultSerializationContext.cs
new file mode 100644
index 0000000000..cceb194879
--- /dev/null
+++ b/src/csharp/Grpc.Core/Internal/DefaultSerializationContext.cs
@@ -0,0 +1,62 @@
+#region Copyright notice and license
+
+// Copyright 2018 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using Grpc.Core.Utils;
+using System.Threading;
+
+namespace Grpc.Core.Internal
+{
+ internal class DefaultSerializationContext : SerializationContext
+ {
+ static readonly ThreadLocal<DefaultSerializationContext> threadLocalInstance =
+ new ThreadLocal<DefaultSerializationContext>(() => new DefaultSerializationContext(), false);
+
+ bool isComplete;
+ byte[] payload;
+
+ public DefaultSerializationContext()
+ {
+ Reset();
+ }
+
+ public override void Complete(byte[] payload)
+ {
+ GrpcPreconditions.CheckState(!isComplete);
+ this.isComplete = true;
+ this.payload = payload;
+ }
+
+ internal byte[] GetPayload()
+ {
+ return this.payload;
+ }
+
+ public void Reset()
+ {
+ this.isComplete = false;
+ this.payload = null;
+ }
+
+ public static DefaultSerializationContext GetInitializedThreadLocal()
+ {
+ var instance = threadLocalInstance.Value;
+ instance.Reset();
+ return instance;
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core/Internal/NativeExtension.cs b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
index f526b913af..5177b69fd9 100644
--- a/src/csharp/Grpc.Core/Internal/NativeExtension.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeExtension.cs
@@ -83,13 +83,13 @@ namespace Grpc.Core.Internal
// See https://github.com/grpc/grpc/pull/7303 for one option.
var assemblyDirectory = Path.GetDirectoryName(GetAssemblyPath());
- // With old-style VS projects, the native libraries get copied using a .targets rule to the build output folder
+ // With "classic" VS projects, the native libraries get copied using a .targets rule to the build output folder
// alongside the compiled assembly.
// With dotnet cli projects targeting net45 framework, the native libraries (just the required ones)
// are similarly copied to the built output folder, through the magic of Microsoft.NETCore.Platforms.
var classicPath = Path.Combine(assemblyDirectory, GetNativeLibraryFilename());
- // With dotnet cli project targeting netcoreapp1.0, projects will use Grpc.Core assembly directly in the location where it got restored
+ // With dotnet cli project targeting netcoreappX.Y, projects will use Grpc.Core assembly directly in the location where it got restored
// by nuget. We locate the native libraries based on known structure of Grpc.Core nuget package.
// When "dotnet publish" is used, the runtimes directory is copied next to the published assemblies.
string runtimesDirectory = string.Format("runtimes/{0}/native", GetPlatformString());
diff --git a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
index 81522cf8fe..ec732e8c7f 100644
--- a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
@@ -52,8 +52,8 @@ namespace Grpc.Core.Internal
public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
- method.ResponseMarshaller.Serializer,
- method.RequestMarshaller.Deserializer,
+ method.ResponseMarshaller.ContextualSerializer,
+ method.RequestMarshaller.ContextualDeserializer,
newRpc.Server);
asyncCall.Initialize(newRpc.Call, cq);
@@ -116,8 +116,8 @@ namespace Grpc.Core.Internal
public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
- method.ResponseMarshaller.Serializer,
- method.RequestMarshaller.Deserializer,
+ method.ResponseMarshaller.ContextualSerializer,
+ method.RequestMarshaller.ContextualDeserializer,
newRpc.Server);
asyncCall.Initialize(newRpc.Call, cq);
@@ -179,8 +179,8 @@ namespace Grpc.Core.Internal
public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
- method.ResponseMarshaller.Serializer,
- method.RequestMarshaller.Deserializer,
+ method.ResponseMarshaller.ContextualSerializer,
+ method.RequestMarshaller.ContextualDeserializer,
newRpc.Server);
asyncCall.Initialize(newRpc.Call, cq);
@@ -242,8 +242,8 @@ namespace Grpc.Core.Internal
public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
- method.ResponseMarshaller.Serializer,
- method.RequestMarshaller.Deserializer,
+ method.ResponseMarshaller.ContextualSerializer,
+ method.RequestMarshaller.ContextualDeserializer,
newRpc.Server);
asyncCall.Initialize(newRpc.Call, cq);
diff --git a/src/csharp/Grpc.Core/Marshaller.cs b/src/csharp/Grpc.Core/Marshaller.cs
index 0af9aa586b..34a1849cd7 100644
--- a/src/csharp/Grpc.Core/Marshaller.cs
+++ b/src/csharp/Grpc.Core/Marshaller.cs
@@ -41,6 +41,8 @@ namespace Grpc.Core
{
this.serializer = GrpcPreconditions.CheckNotNull(serializer, nameof(serializer));
this.deserializer = GrpcPreconditions.CheckNotNull(deserializer, nameof(deserializer));
+ // contextual serialization/deserialization is emulated to make the marshaller
+ // usable with the grpc library (required for backward compatibility).
this.contextualSerializer = EmulateContextualSerializer;
this.contextualDeserializer = EmulateContextualDeserializer;
}
@@ -57,10 +59,10 @@ namespace Grpc.Core
{
this.contextualSerializer = GrpcPreconditions.CheckNotNull(serializer, nameof(serializer));
this.contextualDeserializer = GrpcPreconditions.CheckNotNull(deserializer, nameof(deserializer));
- // TODO(jtattermusch): once gRPC C# library switches to using contextual (de)serializer,
- // emulating the simple (de)serializer will become unnecessary.
- this.serializer = EmulateSimpleSerializer;
- this.deserializer = EmulateSimpleDeserializer;
+ // gRPC only uses contextual serializer/deserializer internally, so emulating the legacy
+ // (de)serializer is not necessary.
+ this.serializer = (msg) => { throw new NotImplementedException(); };
+ this.deserializer = (payload) => { throw new NotImplementedException(); };
}
/// <summary>
@@ -85,25 +87,6 @@ namespace Grpc.Core
/// </summary>
public Func<DeserializationContext, T> ContextualDeserializer => this.contextualDeserializer;
- // for backward compatibility, emulate the simple serializer using the contextual one
- private byte[] EmulateSimpleSerializer(T msg)
- {
- // TODO(jtattermusch): avoid the allocation by passing a thread-local instance
- // This code will become unnecessary once gRPC C# library switches to using contextual (de)serializer.
- var context = new EmulatedSerializationContext();
- this.contextualSerializer(msg, context);
- return context.GetPayload();
- }
-
- // for backward compatibility, emulate the simple deserializer using the contextual one
- private T EmulateSimpleDeserializer(byte[] payload)
- {
- // TODO(jtattermusch): avoid the allocation by passing a thread-local instance
- // This code will become unnecessary once gRPC C# library switches to using contextual (de)serializer.
- var context = new EmulatedDeserializationContext(payload);
- return this.contextualDeserializer(context);
- }
-
// for backward compatibility, emulate the contextual serializer using the simple one
private void EmulateContextualSerializer(T message, SerializationContext context)
{
@@ -116,44 +99,6 @@ namespace Grpc.Core
{
return this.deserializer(context.PayloadAsNewBuffer());
}
-
- internal class EmulatedSerializationContext : SerializationContext
- {
- bool isComplete;
- byte[] payload;
-
- public override void Complete(byte[] payload)
- {
- GrpcPreconditions.CheckState(!isComplete);
- this.isComplete = true;
- this.payload = payload;
- }
-
- internal byte[] GetPayload()
- {
- return this.payload;
- }
- }
-
- internal class EmulatedDeserializationContext : DeserializationContext
- {
- readonly byte[] payload;
- bool alreadyCalledPayloadAsNewBuffer;
-
- public EmulatedDeserializationContext(byte[] payload)
- {
- this.payload = GrpcPreconditions.CheckNotNull(payload);
- }
-
- public override int PayloadLength => payload.Length;
-
- public override byte[] PayloadAsNewBuffer()
- {
- GrpcPreconditions.CheckState(!alreadyCalledPayloadAsNewBuffer);
- alreadyCalledPayloadAsNewBuffer = true;
- return payload;
- }
- }
}
/// <summary>
diff --git a/src/csharp/Grpc.Core/SerializationContext.cs b/src/csharp/Grpc.Core/SerializationContext.cs
index cf4d1595da..9aef2adbcd 100644
--- a/src/csharp/Grpc.Core/SerializationContext.cs
+++ b/src/csharp/Grpc.Core/SerializationContext.cs
@@ -16,6 +16,8 @@
#endregion
+using System;
+
namespace Grpc.Core
{
/// <summary>
@@ -29,6 +31,9 @@ namespace Grpc.Core
/// payload which must not be accessed afterwards.
/// </summary>
/// <param name="payload">the serialized form of current message</param>
- public abstract void Complete(byte[] payload);
+ public virtual void Complete(byte[] payload)
+ {
+ throw new NotImplementedException();
+ }
}
}
diff --git a/src/csharp/Grpc.Core/ServerServiceDefinition.cs b/src/csharp/Grpc.Core/ServerServiceDefinition.cs
index 07c6aa1796..b040ab379c 100644
--- a/src/csharp/Grpc.Core/ServerServiceDefinition.cs
+++ b/src/csharp/Grpc.Core/ServerServiceDefinition.cs
@@ -72,7 +72,7 @@ namespace Grpc.Core
}
/// <summary>
- /// Adds a definitions for a single request - single response method.
+ /// Adds a definition for a single request - single response method.
/// </summary>
/// <typeparam name="TRequest">The request message class.</typeparam>
/// <typeparam name="TResponse">The response message class.</typeparam>
@@ -90,7 +90,7 @@ namespace Grpc.Core
}
/// <summary>
- /// Adds a definitions for a client streaming method.
+ /// Adds a definition for a client streaming method.
/// </summary>
/// <typeparam name="TRequest">The request message class.</typeparam>
/// <typeparam name="TResponse">The response message class.</typeparam>
@@ -108,7 +108,7 @@ namespace Grpc.Core
}
/// <summary>
- /// Adds a definitions for a server streaming method.
+ /// Adds a definition for a server streaming method.
/// </summary>
/// <typeparam name="TRequest">The request message class.</typeparam>
/// <typeparam name="TResponse">The response message class.</typeparam>
@@ -126,7 +126,7 @@ namespace Grpc.Core
}
/// <summary>
- /// Adds a definitions for a bidirectional streaming method.
+ /// Adds a definition for a bidirectional streaming method.
/// </summary>
/// <typeparam name="TRequest">The request message class.</typeparam>
/// <typeparam name="TResponse">The response message class.</typeparam>
diff --git a/src/csharp/Grpc.Core/ServiceBinderBase.cs b/src/csharp/Grpc.Core/ServiceBinderBase.cs
new file mode 100644
index 0000000000..d4909f4a26
--- /dev/null
+++ b/src/csharp/Grpc.Core/ServiceBinderBase.cs
@@ -0,0 +1,101 @@
+#region Copyright notice and license
+
+// Copyright 2018 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Collections.ObjectModel;
+using System.Linq;
+using Grpc.Core.Interceptors;
+using Grpc.Core.Internal;
+using Grpc.Core.Utils;
+
+namespace Grpc.Core
+{
+ /// <summary>
+ /// Allows binding server-side method implementations in alternative serving stacks.
+ /// Instances of this class are usually populated by the <c>BindService</c> method
+ /// that is part of the autogenerated code for a protocol buffers service definition.
+ /// <seealso cref="ServerServiceDefinition"/>
+ /// </summary>
+ public class ServiceBinderBase
+ {
+ /// <summary>
+ /// Adds a definition for a single request - single response method.
+ /// </summary>
+ /// <typeparam name="TRequest">The request message class.</typeparam>
+ /// <typeparam name="TResponse">The response message class.</typeparam>
+ /// <param name="method">The method.</param>
+ /// <param name="handler">The method handler.</param>
+ public virtual void AddMethod<TRequest, TResponse>(
+ Method<TRequest, TResponse> method,
+ UnaryServerMethod<TRequest, TResponse> handler)
+ where TRequest : class
+ where TResponse : class
+ {
+ throw new NotImplementedException();
+ }
+
+ /// <summary>
+ /// Adds a definition for a client streaming method.
+ /// </summary>
+ /// <typeparam name="TRequest">The request message class.</typeparam>
+ /// <typeparam name="TResponse">The response message class.</typeparam>
+ /// <param name="method">The method.</param>
+ /// <param name="handler">The method handler.</param>
+ public virtual void AddMethod<TRequest, TResponse>(
+ Method<TRequest, TResponse> method,
+ ClientStreamingServerMethod<TRequest, TResponse> handler)
+ where TRequest : class
+ where TResponse : class
+ {
+ throw new NotImplementedException();
+ }
+
+ /// <summary>
+ /// Adds a definition for a server streaming method.
+ /// </summary>
+ /// <typeparam name="TRequest">The request message class.</typeparam>
+ /// <typeparam name="TResponse">The response message class.</typeparam>
+ /// <param name="method">The method.</param>
+ /// <param name="handler">The method handler.</param>
+ public virtual void AddMethod<TRequest, TResponse>(
+ Method<TRequest, TResponse> method,
+ ServerStreamingServerMethod<TRequest, TResponse> handler)
+ where TRequest : class
+ where TResponse : class
+ {
+ throw new NotImplementedException();
+ }
+
+ /// <summary>
+ /// Adds a definition for a bidirectional streaming method.
+ /// </summary>
+ /// <typeparam name="TRequest">The request message class.</typeparam>
+ /// <typeparam name="TResponse">The response message class.</typeparam>
+ /// <param name="method">The method.</param>
+ /// <param name="handler">The method handler.</param>
+ public virtual void AddMethod<TRequest, TResponse>(
+ Method<TRequest, TResponse> method,
+ DuplexStreamingServerMethod<TRequest, TResponse> handler)
+ where TRequest : class
+ where TResponse : class
+ {
+ throw new NotImplementedException();
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core/Version.csproj.include b/src/csharp/Grpc.Core/Version.csproj.include
index ed0d884365..4fffe4f644 100755
--- a/src/csharp/Grpc.Core/Version.csproj.include
+++ b/src/csharp/Grpc.Core/Version.csproj.include
@@ -1,7 +1,7 @@
<!-- This file is generated -->
<Project>
<PropertyGroup>
- <GrpcCsharpVersion>1.17.0-dev</GrpcCsharpVersion>
+ <GrpcCsharpVersion>1.18.0-dev</GrpcCsharpVersion>
<GoogleProtobufVersion>3.6.1</GoogleProtobufVersion>
</PropertyGroup>
</Project>
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index 14714c8c4a..633880189c 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -33,11 +33,11 @@ namespace Grpc.Core
/// <summary>
/// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
/// </summary>
- public const string CurrentAssemblyFileVersion = "1.17.0.0";
+ public const string CurrentAssemblyFileVersion = "1.18.0.0";
/// <summary>
/// Current version of gRPC C#
/// </summary>
- public const string CurrentVersion = "1.17.0-dev";
+ public const string CurrentVersion = "1.18.0-dev";
}
}
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
index db4e3ef4e3..1afcd9fba0 100755
--- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Examples.MathClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathClient</PackageId>
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
index b12b418d01..75ef6d1008 100755
--- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Examples.MathServer</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathServer</PackageId>
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
index 7493eb8051..93d112a0c5 100755
--- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Examples.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.Tests</PackageId>
diff --git a/src/csharp/Grpc.Examples.Tests/NUnitMain.cs b/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
index bcb8b46b64..107df64809 100644
--- a/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Examples.Tests/NUnitMain.cs
@@ -34,11 +34,7 @@ namespace Grpc.Examples.Tests
{
// Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
GrpcEnvironment.SetLogger(new ConsoleLogger());
-#if NETCOREAPP1_0
- return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
-#else
- return new AutoRun().Execute(args);
-#endif
+ return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
}
}
}
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
index baa3b4ce6c..9ce2b59d03 100755
--- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Examples</AssemblyName>
<PackageId>Grpc.Examples</PackageId>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index 9578bb4d81..e5be387e67 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -287,6 +287,18 @@ namespace Math {
.AddMethod(__Method_Sum, serviceImpl.Sum).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, MathBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_Div, serviceImpl.Div);
+ serviceBinder.AddMethod(__Method_DivMany, serviceImpl.DivMany);
+ serviceBinder.AddMethod(__Method_Fib, serviceImpl.Fib);
+ serviceBinder.AddMethod(__Method_Sum, serviceImpl.Sum);
+ }
+
}
}
#endregion
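
Hypothetical wiring of the generated Math overload above together with the MethodNameCollector sketched earlier; the helper method and its caller are assumptions, not part of this change:

// Lists the methods the Math service would bind, without starting a server.
static void DescribeMathService(global::Math.Math.MathBase impl)
{
    var binder = new MethodNameCollector();
    global::Math.Math.BindService(binder, impl);
    foreach (var name in binder.MethodNames)
    {
        System.Console.WriteLine(name);   // e.g. "/math.Math/Div", "/math.Math/Fib", ...
    }
}
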
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
index 616e56df10..2a037a72e5 100755
--- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.HealthCheck.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.HealthCheck.Tests</PackageId>
diff --git a/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs b/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
index 365551e895..db6d32a5b2 100644
--- a/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.HealthCheck.Tests/NUnitMain.cs
@@ -34,11 +34,7 @@ namespace Grpc.HealthCheck.Tests
{
// Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
GrpcEnvironment.SetLogger(new ConsoleLogger());
-#if NETCOREAPP1_0
- return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
-#else
- return new AutoRun().Execute(args);
-#endif
+ return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
}
}
}
diff --git a/src/csharp/Grpc.HealthCheck/Health.cs b/src/csharp/Grpc.HealthCheck/Health.cs
index a90f261d28..2c3bb45c3c 100644
--- a/src/csharp/Grpc.HealthCheck/Health.cs
+++ b/src/csharp/Grpc.HealthCheck/Health.cs
@@ -25,15 +25,17 @@ namespace Grpc.Health.V1 {
byte[] descriptorData = global::System.Convert.FromBase64String(
string.Concat(
"ChtncnBjL2hlYWx0aC92MS9oZWFsdGgucHJvdG8SDmdycGMuaGVhbHRoLnYx",
- "IiUKEkhlYWx0aENoZWNrUmVxdWVzdBIPCgdzZXJ2aWNlGAEgASgJIpQBChNI",
+ "IiUKEkhlYWx0aENoZWNrUmVxdWVzdBIPCgdzZXJ2aWNlGAEgASgJIqkBChNI",
"ZWFsdGhDaGVja1Jlc3BvbnNlEkEKBnN0YXR1cxgBIAEoDjIxLmdycGMuaGVh",
- "bHRoLnYxLkhlYWx0aENoZWNrUmVzcG9uc2UuU2VydmluZ1N0YXR1cyI6Cg1T",
+ "bHRoLnYxLkhlYWx0aENoZWNrUmVzcG9uc2UuU2VydmluZ1N0YXR1cyJPCg1T",
"ZXJ2aW5nU3RhdHVzEgsKB1VOS05PV04QABILCgdTRVJWSU5HEAESDwoLTk9U",
- "X1NFUlZJTkcQAjJaCgZIZWFsdGgSUAoFQ2hlY2sSIi5ncnBjLmhlYWx0aC52",
- "MS5IZWFsdGhDaGVja1JlcXVlc3QaIy5ncnBjLmhlYWx0aC52MS5IZWFsdGhD",
- "aGVja1Jlc3BvbnNlQmEKEWlvLmdycGMuaGVhbHRoLnYxQgtIZWFsdGhQcm90",
- "b1ABWixnb29nbGUuZ29sYW5nLm9yZy9ncnBjL2hlYWx0aC9ncnBjX2hlYWx0",
- "aF92MaoCDkdycGMuSGVhbHRoLlYxYgZwcm90bzM="));
+ "X1NFUlZJTkcQAhITCg9TRVJWSUNFX1VOS05PV04QAzKuAQoGSGVhbHRoElAK",
+ "BUNoZWNrEiIuZ3JwYy5oZWFsdGgudjEuSGVhbHRoQ2hlY2tSZXF1ZXN0GiMu",
+ "Z3JwYy5oZWFsdGgudjEuSGVhbHRoQ2hlY2tSZXNwb25zZRJSCgVXYXRjaBIi",
+ "LmdycGMuaGVhbHRoLnYxLkhlYWx0aENoZWNrUmVxdWVzdBojLmdycGMuaGVh",
+ "bHRoLnYxLkhlYWx0aENoZWNrUmVzcG9uc2UwAUJhChFpby5ncnBjLmhlYWx0",
+ "aC52MUILSGVhbHRoUHJvdG9QAVosZ29vZ2xlLmdvbGFuZy5vcmcvZ3JwYy9o",
+ "ZWFsdGgvZ3JwY19oZWFsdGhfdjGqAg5HcnBjLkhlYWx0aC5WMWIGcHJvdG8z"));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { },
new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
@@ -309,6 +311,10 @@ namespace Grpc.Health.V1 {
[pbr::OriginalName("UNKNOWN")] Unknown = 0,
[pbr::OriginalName("SERVING")] Serving = 1,
[pbr::OriginalName("NOT_SERVING")] NotServing = 2,
+ /// <summary>
+ /// Used only by the Watch method.
+ /// </summary>
+ [pbr::OriginalName("SERVICE_UNKNOWN")] ServiceUnknown = 3,
}
}
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 5e79c04d2a..51956f2f23 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -40,6 +40,13 @@ namespace Grpc.Health.V1 {
__Marshaller_grpc_health_v1_HealthCheckRequest,
__Marshaller_grpc_health_v1_HealthCheckResponse);
+ static readonly grpc::Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse> __Method_Watch = new grpc::Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse>(
+ grpc::MethodType.ServerStreaming,
+ __ServiceName,
+ "Watch",
+ __Marshaller_grpc_health_v1_HealthCheckRequest,
+ __Marshaller_grpc_health_v1_HealthCheckResponse);
+
/// <summary>Service descriptor</summary>
public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor
{
@@ -49,11 +56,44 @@ namespace Grpc.Health.V1 {
/// <summary>Base class for server-side implementations of Health</summary>
public abstract partial class HealthBase
{
+ /// <summary>
+ /// If the requested service is unknown, the call will fail with status
+ /// NOT_FOUND.
+ /// </summary>
+ /// <param name="request">The request received from the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>The response to send back to the client (wrapped by a task).</returns>
public virtual global::System.Threading.Tasks.Task<global::Grpc.Health.V1.HealthCheckResponse> Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::ServerCallContext context)
{
throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
+ /// <summary>
+ /// Performs a watch for the serving status of the requested service.
+ /// The server will immediately send back a message indicating the current
+ /// serving status. It will then subsequently send a new message whenever
+ /// the service's serving status changes.
+ ///
+ /// If the requested service is unknown when the call is received, the
+ /// server will send a message setting the serving status to
+ /// SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ /// future point, the serving status of the service becomes known, the
+ /// server will send a new message with the service's serving status.
+ ///
+ /// If the call terminates with status UNIMPLEMENTED, then clients
+ /// should assume this method is not supported and should not retry the
+ /// call. If the call terminates with any other status (including OK),
+ /// clients should retry the call with appropriate exponential backoff.
+ /// </summary>
+ /// <param name="request">The request received from the client.</param>
+ /// <param name="responseStream">Used for sending responses back to the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>A task indicating completion of the handler.</returns>
+ public virtual global::System.Threading.Tasks.Task Watch(global::Grpc.Health.V1.HealthCheckRequest request, grpc::IServerStreamWriter<global::Grpc.Health.V1.HealthCheckResponse> responseStream, grpc::ServerCallContext context)
+ {
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
+ }
+
}
/// <summary>Client for Health</summary>
@@ -79,22 +119,104 @@ namespace Grpc.Health.V1 {
{
}
+ /// <summary>
+ /// If the requested service is unknown, the call will fail with status
+ /// NOT_FOUND.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The response received from the server.</returns>
public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return Check(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
+ /// <summary>
+ /// If the requested service is unknown, the call will fail with status
+ /// NOT_FOUND.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The response received from the server.</returns>
public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Check, null, options, request);
}
+ /// <summary>
+ /// If the requested service is unknown, the call will fail with status
+ /// NOT_FOUND.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
{
return CheckAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
+ /// <summary>
+ /// If the requested service is unknown, the call will fail with status
+ /// NOT_FOUND.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_Check, null, options, request);
}
+ /// <summary>
+ /// Performs a watch for the serving status of the requested service.
+ /// The server will immediately send back a message indicating the current
+ /// serving status. It will then subsequently send a new message whenever
+ /// the service's serving status changes.
+ ///
+ /// If the requested service is unknown when the call is received, the
+ /// server will send a message setting the serving status to
+ /// SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ /// future point, the serving status of the service becomes known, the
+ /// server will send a new message with the service's serving status.
+ ///
+ /// If the call terminates with status UNIMPLEMENTED, then clients
+ /// should assume this method is not supported and should not retry the
+ /// call. If the call terminates with any other status (including OK),
+ /// clients should retry the call with appropriate exponential backoff.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Health.V1.HealthCheckResponse> Watch(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, global::System.DateTime? deadline = null, global::System.Threading.CancellationToken cancellationToken = default(global::System.Threading.CancellationToken))
+ {
+ return Watch(request, new grpc::CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// Performs a watch for the serving status of the requested service.
+ /// The server will immediately send back a message indicating the current
+ /// serving status. It will then subsequently send a new message whenever
+ /// the service's serving status changes.
+ ///
+ /// If the requested service is unknown when the call is received, the
+ /// server will send a message setting the serving status to
+ /// SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ /// future point, the serving status of the service becomes known, the
+ /// server will send a new message with the service's serving status.
+ ///
+ /// If the call terminates with status UNIMPLEMENTED, then clients
+ /// should assume this method is not supported and should not retry the
+ /// call. If the call terminates with any other status (including OK),
+ /// clients should retry the call with appropriate exponential backoff.
+ /// </summary>
+ /// <param name="request">The request to send to the server.</param>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Health.V1.HealthCheckResponse> Watch(global::Grpc.Health.V1.HealthCheckRequest request, grpc::CallOptions options)
+ {
+ return CallInvoker.AsyncServerStreamingCall(__Method_Watch, null, options, request);
+ }
/// <summary>Creates a new instance of client from given <c>ClientBaseConfiguration</c>.</summary>
protected override HealthClient NewInstance(ClientBaseConfiguration configuration)
{
@@ -107,7 +229,18 @@ namespace Grpc.Health.V1 {
public static grpc::ServerServiceDefinition BindService(HealthBase serviceImpl)
{
return grpc::ServerServiceDefinition.CreateBuilder()
- .AddMethod(__Method_Check, serviceImpl.Check).Build();
+ .AddMethod(__Method_Check, serviceImpl.Check)
+ .AddMethod(__Method_Watch, serviceImpl.Watch).Build();
+ }
+
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, HealthBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_Check, serviceImpl.Check);
+ serviceBinder.AddMethod(__Method_Watch, serviceImpl.Watch);
}
}
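
For orientation only, a hedged sketch of how a client might consume the new server-streaming Watch RPC documented above; the target address, service name, and surrounding program structure are placeholders, not part of this change:

using System;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Health.V1;

class HealthWatchExample
{
    // Watches the health status of one service until the server ends the stream.
    static async Task WatchAsync(string target, string service)
    {
        var channel = new Channel(target, ChannelCredentials.Insecure);
        var client = new Health.HealthClient(channel);
        using (var call = client.Watch(new HealthCheckRequest { Service = service }))
        {
            // The first message reports the current status; later messages report
            // changes, including SERVICE_UNKNOWN if the service is not registered.
            while (await call.ResponseStream.MoveNext(CancellationToken.None))
            {
                Console.WriteLine($"{service}: {call.ResponseStream.Current.Status}");
            }
        }
        await channel.ShutdownAsync();
    }
}
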
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
index 35713156ea..1cd4b83e1e 100755
--- a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.IntegrationTesting.Client</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Client</PackageId>
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
index 3ecefe3bc4..2890a7df58 100755
--- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.IntegrationTesting.QpsWorker</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.QpsWorker</PackageId>
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
index 1092b2c21e..ee718958bc 100755
--- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.IntegrationTesting.Server</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Server</PackageId>
@@ -19,7 +19,7 @@
<Reference Include="System" />
<Reference Include="Microsoft.CSharp" />
</ItemGroup>
-
+
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
index 22272547f6..99926497e4 100755
--- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.IntegrationTesting.StressClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.StressClient</PackageId>
diff --git a/src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs b/src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs
index b5738593f2..3431b5fa18 100644
--- a/src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs
@@ -324,6 +324,19 @@ namespace Grpc.Testing {
.AddMethod(__Method_StreamingBothWays, serviceImpl.StreamingBothWays).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, BenchmarkServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall);
+ serviceBinder.AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall);
+ serviceBinder.AddMethod(__Method_StreamingFromClient, serviceImpl.StreamingFromClient);
+ serviceBinder.AddMethod(__Method_StreamingFromServer, serviceImpl.StreamingFromServer);
+ serviceBinder.AddMethod(__Method_StreamingBothWays, serviceImpl.StreamingBothWays);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/Control.cs b/src/csharp/Grpc.IntegrationTesting/Control.cs
index 6e00348451..368b86659a 100644
--- a/src/csharp/Grpc.IntegrationTesting/Control.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Control.cs
@@ -34,7 +34,7 @@ namespace Grpc.Testing {
"U2VjdXJpdHlQYXJhbXMSEwoLdXNlX3Rlc3RfY2EYASABKAgSHAoUc2VydmVy",
"X2hvc3Rfb3ZlcnJpZGUYAiABKAkSEQoJY3JlZF90eXBlGAMgASgJIk0KCkNo",
"YW5uZWxBcmcSDAoEbmFtZRgBIAEoCRITCglzdHJfdmFsdWUYAiABKAlIABIT",
- "CglpbnRfdmFsdWUYAyABKAVIAEIHCgV2YWx1ZSLvBAoMQ2xpZW50Q29uZmln",
+ "CglpbnRfdmFsdWUYAyABKAVIAEIHCgV2YWx1ZSKiBQoMQ2xpZW50Q29uZmln",
"EhYKDnNlcnZlcl90YXJnZXRzGAEgAygJEi0KC2NsaWVudF90eXBlGAIgASgO",
"MhguZ3JwYy50ZXN0aW5nLkNsaWVudFR5cGUSNQoPc2VjdXJpdHlfcGFyYW1z",
"GAMgASgLMhwuZ3JwYy50ZXN0aW5nLlNlY3VyaXR5UGFyYW1zEiQKHG91dHN0",
@@ -48,59 +48,60 @@ namespace Grpc.Testing {
"GA4gASgFEhgKEG90aGVyX2NsaWVudF9hcGkYDyABKAkSLgoMY2hhbm5lbF9h",
"cmdzGBAgAygLMhguZ3JwYy50ZXN0aW5nLkNoYW5uZWxBcmcSFgoOdGhyZWFk",
"c19wZXJfY3EYESABKAUSGwoTbWVzc2FnZXNfcGVyX3N0cmVhbRgSIAEoBRIY",
- "ChB1c2VfY29hbGVzY2VfYXBpGBMgASgIIjgKDENsaWVudFN0YXR1cxIoCgVz",
- "dGF0cxgBIAEoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cyIVCgRNYXJr",
- "Eg0KBXJlc2V0GAEgASgIImgKCkNsaWVudEFyZ3MSKwoFc2V0dXAYASABKAsy",
- "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50Q29uZmlnSAASIgoEbWFyaxgCIAEoCzIS",
- "LmdycGMudGVzdGluZy5NYXJrSABCCQoHYXJndHlwZSL9AgoMU2VydmVyQ29u",
- "ZmlnEi0KC3NlcnZlcl90eXBlGAEgASgOMhguZ3JwYy50ZXN0aW5nLlNlcnZl",
- "clR5cGUSNQoPc2VjdXJpdHlfcGFyYW1zGAIgASgLMhwuZ3JwYy50ZXN0aW5n",
- "LlNlY3VyaXR5UGFyYW1zEgwKBHBvcnQYBCABKAUSHAoUYXN5bmNfc2VydmVy",
- "X3RocmVhZHMYByABKAUSEgoKY29yZV9saW1pdBgIIAEoBRIzCg5wYXlsb2Fk",
- "X2NvbmZpZxgJIAEoCzIbLmdycGMudGVzdGluZy5QYXlsb2FkQ29uZmlnEhEK",
- "CWNvcmVfbGlzdBgKIAMoBRIYChBvdGhlcl9zZXJ2ZXJfYXBpGAsgASgJEhYK",
- "DnRocmVhZHNfcGVyX2NxGAwgASgFEhwKE3Jlc291cmNlX3F1b3RhX3NpemUY",
- "6QcgASgFEi8KDGNoYW5uZWxfYXJncxjqByADKAsyGC5ncnBjLnRlc3Rpbmcu",
- "Q2hhbm5lbEFyZyJoCgpTZXJ2ZXJBcmdzEisKBXNldHVwGAEgASgLMhouZ3Jw",
- "Yy50ZXN0aW5nLlNlcnZlckNvbmZpZ0gAEiIKBG1hcmsYAiABKAsyEi5ncnBj",
- "LnRlc3RpbmcuTWFya0gAQgkKB2FyZ3R5cGUiVQoMU2VydmVyU3RhdHVzEigK",
- "BXN0YXRzGAEgASgLMhkuZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXRzEgwKBHBv",
- "cnQYAiABKAUSDQoFY29yZXMYAyABKAUiDQoLQ29yZVJlcXVlc3QiHQoMQ29y",
- "ZVJlc3BvbnNlEg0KBWNvcmVzGAEgASgFIgYKBFZvaWQi/QEKCFNjZW5hcmlv",
- "EgwKBG5hbWUYASABKAkSMQoNY2xpZW50X2NvbmZpZxgCIAEoCzIaLmdycGMu",
- "dGVzdGluZy5DbGllbnRDb25maWcSEwoLbnVtX2NsaWVudHMYAyABKAUSMQoN",
- "c2VydmVyX2NvbmZpZxgEIAEoCzIaLmdycGMudGVzdGluZy5TZXJ2ZXJDb25m",
- "aWcSEwoLbnVtX3NlcnZlcnMYBSABKAUSFgoOd2FybXVwX3NlY29uZHMYBiAB",
- "KAUSGQoRYmVuY2htYXJrX3NlY29uZHMYByABKAUSIAoYc3Bhd25fbG9jYWxf",
- "d29ya2VyX2NvdW50GAggASgFIjYKCVNjZW5hcmlvcxIpCglzY2VuYXJpb3MY",
- "ASADKAsyFi5ncnBjLnRlc3RpbmcuU2NlbmFyaW8ihAQKFVNjZW5hcmlvUmVz",
- "dWx0U3VtbWFyeRILCgNxcHMYASABKAESGwoTcXBzX3Blcl9zZXJ2ZXJfY29y",
- "ZRgCIAEoARIaChJzZXJ2ZXJfc3lzdGVtX3RpbWUYAyABKAESGAoQc2VydmVy",
- "X3VzZXJfdGltZRgEIAEoARIaChJjbGllbnRfc3lzdGVtX3RpbWUYBSABKAES",
- "GAoQY2xpZW50X3VzZXJfdGltZRgGIAEoARISCgpsYXRlbmN5XzUwGAcgASgB",
- "EhIKCmxhdGVuY3lfOTAYCCABKAESEgoKbGF0ZW5jeV85NRgJIAEoARISCgps",
- "YXRlbmN5Xzk5GAogASgBEhMKC2xhdGVuY3lfOTk5GAsgASgBEhgKEHNlcnZl",
- "cl9jcHVfdXNhZ2UYDCABKAESJgoec3VjY2Vzc2Z1bF9yZXF1ZXN0c19wZXJf",
- "c2Vjb25kGA0gASgBEiIKGmZhaWxlZF9yZXF1ZXN0c19wZXJfc2Vjb25kGA4g",
- "ASgBEiAKGGNsaWVudF9wb2xsc19wZXJfcmVxdWVzdBgPIAEoARIgChhzZXJ2",
- "ZXJfcG9sbHNfcGVyX3JlcXVlc3QYECABKAESIgoac2VydmVyX3F1ZXJpZXNf",
- "cGVyX2NwdV9zZWMYESABKAESIgoaY2xpZW50X3F1ZXJpZXNfcGVyX2NwdV9z",
- "ZWMYEiABKAEigwMKDlNjZW5hcmlvUmVzdWx0EigKCHNjZW5hcmlvGAEgASgL",
- "MhYuZ3JwYy50ZXN0aW5nLlNjZW5hcmlvEi4KCWxhdGVuY2llcxgCIAEoCzIb",
- "LmdycGMudGVzdGluZy5IaXN0b2dyYW1EYXRhEi8KDGNsaWVudF9zdGF0cxgD",
- "IAMoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cxIvCgxzZXJ2ZXJfc3Rh",
- "dHMYBCADKAsyGS5ncnBjLnRlc3RpbmcuU2VydmVyU3RhdHMSFAoMc2VydmVy",
- "X2NvcmVzGAUgAygFEjQKB3N1bW1hcnkYBiABKAsyIy5ncnBjLnRlc3Rpbmcu",
- "U2NlbmFyaW9SZXN1bHRTdW1tYXJ5EhYKDmNsaWVudF9zdWNjZXNzGAcgAygI",
- "EhYKDnNlcnZlcl9zdWNjZXNzGAggAygIEjkKD3JlcXVlc3RfcmVzdWx0cxgJ",
- "IAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQqQQoKQ2xp",
- "ZW50VHlwZRIPCgtTWU5DX0NMSUVOVBAAEhAKDEFTWU5DX0NMSUVOVBABEhAK",
- "DE9USEVSX0NMSUVOVBACKlsKClNlcnZlclR5cGUSDwoLU1lOQ19TRVJWRVIQ",
- "ABIQCgxBU1lOQ19TRVJWRVIQARIYChRBU1lOQ19HRU5FUklDX1NFUlZFUhAC",
- "EhAKDE9USEVSX1NFUlZFUhADKnIKB1JwY1R5cGUSCQoFVU5BUlkQABINCglT",
- "VFJFQU1JTkcQARIZChVTVFJFQU1JTkdfRlJPTV9DTElFTlQQAhIZChVTVFJF",
- "QU1JTkdfRlJPTV9TRVJWRVIQAxIXChNTVFJFQU1JTkdfQk9USF9XQVlTEARi",
- "BnByb3RvMw=="));
+ "ChB1c2VfY29hbGVzY2VfYXBpGBMgASgIEjEKKW1lZGlhbl9sYXRlbmN5X2Nv",
+ "bGxlY3Rpb25faW50ZXJ2YWxfbWlsbGlzGBQgASgFIjgKDENsaWVudFN0YXR1",
+ "cxIoCgVzdGF0cxgBIAEoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cyIV",
+ "CgRNYXJrEg0KBXJlc2V0GAEgASgIImgKCkNsaWVudEFyZ3MSKwoFc2V0dXAY",
+ "ASABKAsyGi5ncnBjLnRlc3RpbmcuQ2xpZW50Q29uZmlnSAASIgoEbWFyaxgC",
+ "IAEoCzISLmdycGMudGVzdGluZy5NYXJrSABCCQoHYXJndHlwZSL9AgoMU2Vy",
+ "dmVyQ29uZmlnEi0KC3NlcnZlcl90eXBlGAEgASgOMhguZ3JwYy50ZXN0aW5n",
+ "LlNlcnZlclR5cGUSNQoPc2VjdXJpdHlfcGFyYW1zGAIgASgLMhwuZ3JwYy50",
+ "ZXN0aW5nLlNlY3VyaXR5UGFyYW1zEgwKBHBvcnQYBCABKAUSHAoUYXN5bmNf",
+ "c2VydmVyX3RocmVhZHMYByABKAUSEgoKY29yZV9saW1pdBgIIAEoBRIzCg5w",
+ "YXlsb2FkX2NvbmZpZxgJIAEoCzIbLmdycGMudGVzdGluZy5QYXlsb2FkQ29u",
+ "ZmlnEhEKCWNvcmVfbGlzdBgKIAMoBRIYChBvdGhlcl9zZXJ2ZXJfYXBpGAsg",
+ "ASgJEhYKDnRocmVhZHNfcGVyX2NxGAwgASgFEhwKE3Jlc291cmNlX3F1b3Rh",
+ "X3NpemUY6QcgASgFEi8KDGNoYW5uZWxfYXJncxjqByADKAsyGC5ncnBjLnRl",
+ "c3RpbmcuQ2hhbm5lbEFyZyJoCgpTZXJ2ZXJBcmdzEisKBXNldHVwGAEgASgL",
+ "MhouZ3JwYy50ZXN0aW5nLlNlcnZlckNvbmZpZ0gAEiIKBG1hcmsYAiABKAsy",
+ "Ei5ncnBjLnRlc3RpbmcuTWFya0gAQgkKB2FyZ3R5cGUiVQoMU2VydmVyU3Rh",
+ "dHVzEigKBXN0YXRzGAEgASgLMhkuZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXRz",
+ "EgwKBHBvcnQYAiABKAUSDQoFY29yZXMYAyABKAUiDQoLQ29yZVJlcXVlc3Qi",
+ "HQoMQ29yZVJlc3BvbnNlEg0KBWNvcmVzGAEgASgFIgYKBFZvaWQi/QEKCFNj",
+ "ZW5hcmlvEgwKBG5hbWUYASABKAkSMQoNY2xpZW50X2NvbmZpZxgCIAEoCzIa",
+ "LmdycGMudGVzdGluZy5DbGllbnRDb25maWcSEwoLbnVtX2NsaWVudHMYAyAB",
+ "KAUSMQoNc2VydmVyX2NvbmZpZxgEIAEoCzIaLmdycGMudGVzdGluZy5TZXJ2",
+ "ZXJDb25maWcSEwoLbnVtX3NlcnZlcnMYBSABKAUSFgoOd2FybXVwX3NlY29u",
+ "ZHMYBiABKAUSGQoRYmVuY2htYXJrX3NlY29uZHMYByABKAUSIAoYc3Bhd25f",
+ "bG9jYWxfd29ya2VyX2NvdW50GAggASgFIjYKCVNjZW5hcmlvcxIpCglzY2Vu",
+ "YXJpb3MYASADKAsyFi5ncnBjLnRlc3RpbmcuU2NlbmFyaW8ihAQKFVNjZW5h",
+ "cmlvUmVzdWx0U3VtbWFyeRILCgNxcHMYASABKAESGwoTcXBzX3Blcl9zZXJ2",
+ "ZXJfY29yZRgCIAEoARIaChJzZXJ2ZXJfc3lzdGVtX3RpbWUYAyABKAESGAoQ",
+ "c2VydmVyX3VzZXJfdGltZRgEIAEoARIaChJjbGllbnRfc3lzdGVtX3RpbWUY",
+ "BSABKAESGAoQY2xpZW50X3VzZXJfdGltZRgGIAEoARISCgpsYXRlbmN5XzUw",
+ "GAcgASgBEhIKCmxhdGVuY3lfOTAYCCABKAESEgoKbGF0ZW5jeV85NRgJIAEo",
+ "ARISCgpsYXRlbmN5Xzk5GAogASgBEhMKC2xhdGVuY3lfOTk5GAsgASgBEhgK",
+ "EHNlcnZlcl9jcHVfdXNhZ2UYDCABKAESJgoec3VjY2Vzc2Z1bF9yZXF1ZXN0",
+ "c19wZXJfc2Vjb25kGA0gASgBEiIKGmZhaWxlZF9yZXF1ZXN0c19wZXJfc2Vj",
+ "b25kGA4gASgBEiAKGGNsaWVudF9wb2xsc19wZXJfcmVxdWVzdBgPIAEoARIg",
+ "ChhzZXJ2ZXJfcG9sbHNfcGVyX3JlcXVlc3QYECABKAESIgoac2VydmVyX3F1",
+ "ZXJpZXNfcGVyX2NwdV9zZWMYESABKAESIgoaY2xpZW50X3F1ZXJpZXNfcGVy",
+ "X2NwdV9zZWMYEiABKAEigwMKDlNjZW5hcmlvUmVzdWx0EigKCHNjZW5hcmlv",
+ "GAEgASgLMhYuZ3JwYy50ZXN0aW5nLlNjZW5hcmlvEi4KCWxhdGVuY2llcxgC",
+ "IAEoCzIbLmdycGMudGVzdGluZy5IaXN0b2dyYW1EYXRhEi8KDGNsaWVudF9z",
+ "dGF0cxgDIAMoCzIZLmdycGMudGVzdGluZy5DbGllbnRTdGF0cxIvCgxzZXJ2",
+ "ZXJfc3RhdHMYBCADKAsyGS5ncnBjLnRlc3RpbmcuU2VydmVyU3RhdHMSFAoM",
+ "c2VydmVyX2NvcmVzGAUgAygFEjQKB3N1bW1hcnkYBiABKAsyIy5ncnBjLnRl",
+ "c3RpbmcuU2NlbmFyaW9SZXN1bHRTdW1tYXJ5EhYKDmNsaWVudF9zdWNjZXNz",
+ "GAcgAygIEhYKDnNlcnZlcl9zdWNjZXNzGAggAygIEjkKD3JlcXVlc3RfcmVz",
+ "dWx0cxgJIAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQq",
+ "VgoKQ2xpZW50VHlwZRIPCgtTWU5DX0NMSUVOVBAAEhAKDEFTWU5DX0NMSUVO",
+ "VBABEhAKDE9USEVSX0NMSUVOVBACEhMKD0NBTExCQUNLX0NMSUVOVBADKlsK",
+ "ClNlcnZlclR5cGUSDwoLU1lOQ19TRVJWRVIQABIQCgxBU1lOQ19TRVJWRVIQ",
+ "ARIYChRBU1lOQ19HRU5FUklDX1NFUlZFUhACEhAKDE9USEVSX1NFUlZFUhAD",
+ "KnIKB1JwY1R5cGUSCQoFVU5BUlkQABINCglTVFJFQU1JTkcQARIZChVTVFJF",
+ "QU1JTkdfRlJPTV9DTElFTlQQAhIZChVTVFJFQU1JTkdfRlJPTV9TRVJWRVIQ",
+ "AxIXChNTVFJFQU1JTkdfQk9USF9XQVlTEARiBnByb3RvMw=="));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { global::Grpc.Testing.PayloadsReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Grpc.Testing.ClientType), typeof(global::Grpc.Testing.ServerType), typeof(global::Grpc.Testing.RpcType), }, new pbr::GeneratedClrTypeInfo[] {
@@ -109,7 +110,7 @@ namespace Grpc.Testing {
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.LoadParams), global::Grpc.Testing.LoadParams.Parser, new[]{ "ClosedLoop", "Poisson" }, new[]{ "Load" }, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.SecurityParams), global::Grpc.Testing.SecurityParams.Parser, new[]{ "UseTestCa", "ServerHostOverride", "CredType" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ChannelArg), global::Grpc.Testing.ChannelArg.Parser, new[]{ "Name", "StrValue", "IntValue" }, new[]{ "Value" }, null, null),
- new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs", "ThreadsPerCq", "MessagesPerStream", "UseCoalesceApi" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientConfig), global::Grpc.Testing.ClientConfig.Parser, new[]{ "ServerTargets", "ClientType", "SecurityParams", "OutstandingRpcsPerChannel", "ClientChannels", "AsyncClientThreads", "RpcType", "LoadParams", "PayloadConfig", "HistogramParams", "CoreList", "CoreLimit", "OtherClientApi", "ChannelArgs", "ThreadsPerCq", "MessagesPerStream", "UseCoalesceApi", "MedianLatencyCollectionIntervalMillis" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStatus), global::Grpc.Testing.ClientStatus.Parser, new[]{ "Stats" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.Mark), global::Grpc.Testing.Mark.Parser, new[]{ "Reset" }, null, null, null),
new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientArgs), global::Grpc.Testing.ClientArgs.Parser, new[]{ "Setup", "Mark" }, new[]{ "Argtype" }, null, null),
@@ -140,6 +141,7 @@ namespace Grpc.Testing {
/// used for some language-specific variants
/// </summary>
[pbr::OriginalName("OTHER_CLIENT")] OtherClient = 2,
+ [pbr::OriginalName("CALLBACK_CLIENT")] CallbackClient = 3,
}
public enum ServerType {
@@ -1054,6 +1056,7 @@ namespace Grpc.Testing {
threadsPerCq_ = other.threadsPerCq_;
messagesPerStream_ = other.messagesPerStream_;
useCoalesceApi_ = other.useCoalesceApi_;
+ medianLatencyCollectionIntervalMillis_ = other.medianLatencyCollectionIntervalMillis_;
_unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
}
@@ -1278,6 +1281,21 @@ namespace Grpc.Testing {
}
}
+ /// <summary>Field number for the "median_latency_collection_interval_millis" field.</summary>
+ public const int MedianLatencyCollectionIntervalMillisFieldNumber = 20;
+ private int medianLatencyCollectionIntervalMillis_;
+ /// <summary>
+ /// If 0, disabled. Else, specifies the period between gathering latency
+ /// medians in milliseconds.
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int MedianLatencyCollectionIntervalMillis {
+ get { return medianLatencyCollectionIntervalMillis_; }
+ set {
+ medianLatencyCollectionIntervalMillis_ = value;
+ }
+ }
+
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as ClientConfig);
@@ -1308,6 +1326,7 @@ namespace Grpc.Testing {
if (ThreadsPerCq != other.ThreadsPerCq) return false;
if (MessagesPerStream != other.MessagesPerStream) return false;
if (UseCoalesceApi != other.UseCoalesceApi) return false;
+ if (MedianLatencyCollectionIntervalMillis != other.MedianLatencyCollectionIntervalMillis) return false;
return Equals(_unknownFields, other._unknownFields);
}
@@ -1331,6 +1350,7 @@ namespace Grpc.Testing {
if (ThreadsPerCq != 0) hash ^= ThreadsPerCq.GetHashCode();
if (MessagesPerStream != 0) hash ^= MessagesPerStream.GetHashCode();
if (UseCoalesceApi != false) hash ^= UseCoalesceApi.GetHashCode();
+ if (MedianLatencyCollectionIntervalMillis != 0) hash ^= MedianLatencyCollectionIntervalMillis.GetHashCode();
if (_unknownFields != null) {
hash ^= _unknownFields.GetHashCode();
}
@@ -1403,6 +1423,10 @@ namespace Grpc.Testing {
output.WriteRawTag(152, 1);
output.WriteBool(UseCoalesceApi);
}
+ if (MedianLatencyCollectionIntervalMillis != 0) {
+ output.WriteRawTag(160, 1);
+ output.WriteInt32(MedianLatencyCollectionIntervalMillis);
+ }
if (_unknownFields != null) {
_unknownFields.WriteTo(output);
}
@@ -1456,6 +1480,9 @@ namespace Grpc.Testing {
if (UseCoalesceApi != false) {
size += 2 + 1;
}
+ if (MedianLatencyCollectionIntervalMillis != 0) {
+ size += 2 + pb::CodedOutputStream.ComputeInt32Size(MedianLatencyCollectionIntervalMillis);
+ }
if (_unknownFields != null) {
size += _unknownFields.CalculateSize();
}
@@ -1524,6 +1551,9 @@ namespace Grpc.Testing {
if (other.UseCoalesceApi != false) {
UseCoalesceApi = other.UseCoalesceApi;
}
+ if (other.MedianLatencyCollectionIntervalMillis != 0) {
+ MedianLatencyCollectionIntervalMillis = other.MedianLatencyCollectionIntervalMillis;
+ }
_unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
}
@@ -1616,6 +1646,10 @@ namespace Grpc.Testing {
UseCoalesceApi = input.ReadBool();
break;
}
+ case 160: {
+ MedianLatencyCollectionIntervalMillis = input.ReadInt32();
+ break;
+ }
}
}
}
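
As a small sketch under stated assumptions (the interval value and helper name are arbitrary), the new ClientConfig field added above behaves like any other proto3 int32 on the generated C# message:

using Google.Protobuf;
using Grpc.Testing;

static class ClientConfigSketch
{
    // Returns the serialized config; 1000 is an arbitrary example value.
    static byte[] Build()
    {
        var config = new ClientConfig
        {
            // 0 (the default) leaves median latency collection disabled;
            // any other value is the collection period in milliseconds.
            MedianLatencyCollectionIntervalMillis = 1000
        };
        return config.ToByteArray();   // field 20 is emitted only when non-zero
    }
}
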
diff --git a/src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs b/src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs
index 2d233fbdc0..7e77f8d114 100644
--- a/src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs
@@ -80,6 +80,14 @@ namespace Grpc.Testing {
return grpc::ServerServiceDefinition.CreateBuilder().Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, EmptyServiceBase serviceImpl)
+ {
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
index 8daf3fa98b..c342f8a107 100755
--- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.IntegrationTesting</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting</PackageId>
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 9f16f41ac1..c66a9a9161 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -193,6 +193,16 @@ namespace Grpc.Testing {
.AddMethod(__Method_GetGauge, serviceImpl.GetGauge).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, MetricsServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_GetAllGauges, serviceImpl.GetAllGauges);
+ serviceBinder.AddMethod(__Method_GetGauge, serviceImpl.GetGauge);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs b/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
index 9d24762e0a..4135186275 100644
--- a/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
+++ b/src/csharp/Grpc.IntegrationTesting/NUnitMain.cs
@@ -34,11 +34,7 @@ namespace Grpc.IntegrationTesting
{
// Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
GrpcEnvironment.SetLogger(new ConsoleLogger());
-#if NETCOREAPP1_0
- return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
-#else
- return new AutoRun().Execute(args);
-#endif
+ return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
}
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs
index 1da0548cb4..954c172272 100644
--- a/src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs
@@ -143,6 +143,15 @@ namespace Grpc.Testing {
.AddMethod(__Method_ReportScenario, serviceImpl.ReportScenario).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, ReportQpsScenarioServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_ReportScenario, serviceImpl.ReportScenario);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index 2176916b43..d125fd5627 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -539,6 +539,22 @@ namespace Grpc.Testing {
.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, TestServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_EmptyCall, serviceImpl.EmptyCall);
+ serviceBinder.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall);
+ serviceBinder.AddMethod(__Method_CacheableUnaryCall, serviceImpl.CacheableUnaryCall);
+ serviceBinder.AddMethod(__Method_StreamingOutputCall, serviceImpl.StreamingOutputCall);
+ serviceBinder.AddMethod(__Method_StreamingInputCall, serviceImpl.StreamingInputCall);
+ serviceBinder.AddMethod(__Method_FullDuplexCall, serviceImpl.FullDuplexCall);
+ serviceBinder.AddMethod(__Method_HalfDuplexCall, serviceImpl.HalfDuplexCall);
+ serviceBinder.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall);
+ }
+
}
/// <summary>
/// A simple service NOT implemented at servers so clients can test for
@@ -661,6 +677,15 @@ namespace Grpc.Testing {
.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, UnimplementedServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall);
+ }
+
}
/// <summary>
/// A service used to control reconnect server.
@@ -779,6 +804,16 @@ namespace Grpc.Testing {
.AddMethod(__Method_Stop, serviceImpl.Stop).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, ReconnectServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_Start, serviceImpl.Start);
+ serviceBinder.AddMethod(__Method_Stop, serviceImpl.Stop);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs b/src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs
index b9e8f91231..5b22337d53 100644
--- a/src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs
@@ -321,6 +321,18 @@ namespace Grpc.Testing {
.AddMethod(__Method_QuitWorker, serviceImpl.QuitWorker).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, WorkerServiceBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_RunServer, serviceImpl.RunServer);
+ serviceBinder.AddMethod(__Method_RunClient, serviceImpl.RunClient);
+ serviceBinder.AddMethod(__Method_CoreCount, serviceImpl.CoreCount);
+ serviceBinder.AddMethod(__Method_QuitWorker, serviceImpl.QuitWorker);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
index d39d46cf1b..5b1656080a 100644
--- a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
+++ b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Microbenchmarks</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Microbenchmarks</PackageId>
diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
index 0c12f38f25..8b586c6ecb 100755
--- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
+++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
@@ -4,7 +4,7 @@
<Import Project="..\Grpc.Core\Common.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<AssemblyName>Grpc.Reflection.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Reflection.Tests</PackageId>
diff --git a/src/csharp/Grpc.Reflection.Tests/NUnitMain.cs b/src/csharp/Grpc.Reflection.Tests/NUnitMain.cs
index 49ed1cc8d4..de4b4af6cf 100644
--- a/src/csharp/Grpc.Reflection.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Reflection.Tests/NUnitMain.cs
@@ -34,11 +34,7 @@ namespace Grpc.Reflection.Tests
{
// Make logger immune to NUnit capturing stdout and stderr to workaround https://github.com/nunit/nunit/issues/1406.
GrpcEnvironment.SetLogger(new ConsoleLogger());
-#if NETCOREAPP1_0
- return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args, new ExtendedTextWrapper(Console.Out), Console.In);
-#else
- return new AutoRun().Execute(args);
-#endif
+ return new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
}
}
}
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index c00075b7c6..ed55c2f584 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -123,6 +123,15 @@ namespace Grpc.Reflection.V1Alpha {
.AddMethod(__Method_ServerReflectionInfo, serviceImpl.ServerReflectionInfo).Build();
}
+ /// <summary>Register service method implementations with a service binder. Useful when customizing the service binding logic.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.</summary>
+ /// <param name="serviceBinder">Service methods will be bound by calling <c>AddMethod</c> on this object.</param>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
+ public static void BindService(grpc::ServiceBinderBase serviceBinder, ServerReflectionBase serviceImpl)
+ {
+ serviceBinder.AddMethod(__Method_ServerReflectionInfo, serviceImpl.ServerReflectionInfo);
+ }
+
}
}
#endregion
diff --git a/src/csharp/Grpc.Tools.Tests/Grpc.Tools.Tests.csproj b/src/csharp/Grpc.Tools.Tests/Grpc.Tools.Tests.csproj
index a2d4874eec..cfb40f44ae 100644
--- a/src/csharp/Grpc.Tools.Tests/Grpc.Tools.Tests.csproj
+++ b/src/csharp/Grpc.Tools.Tests/Grpc.Tools.Tests.csproj
@@ -3,7 +3,7 @@
<Import Project="..\Grpc.Core\Version.csproj.include" />
<PropertyGroup>
- <TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
+ <TargetFrameworks>net45;netcoreapp1.1</TargetFrameworks>
<OutputType>Exe</OutputType>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Tools.Tests/NUnitMain.cs b/src/csharp/Grpc.Tools.Tests/NUnitMain.cs
index 418c33820e..d30d608aa3 100644
--- a/src/csharp/Grpc.Tools.Tests/NUnitMain.cs
+++ b/src/csharp/Grpc.Tools.Tests/NUnitMain.cs
@@ -24,10 +24,6 @@ namespace Grpc.Tools.Tests
static class NUnitMain
{
public static int Main(string[] args) =>
-#if NETCOREAPP1_0 || NETCOREAPP1_1
new AutoRun(typeof(NUnitMain).GetTypeInfo().Assembly).Execute(args);
-#else
- new AutoRun().Execute(args);
-#endif
};
}
diff --git a/src/csharp/README.md b/src/csharp/README.md
index d8aed94988..9a91035d06 100644
--- a/src/csharp/README.md
+++ b/src/csharp/README.md
@@ -87,6 +87,7 @@ $ python tools/run_tests/run_tests.py -l csharp -c dbg
DOCUMENTATION
-------------
+- [.NET Build Integration](BUILD-INTEGRATION.md)
- [API Reference][]
- [Helloworld Example][]
- [RouteGuide Tutorial][]
diff --git a/src/csharp/build_packages_dotnetcli.bat b/src/csharp/build_packages_dotnetcli.bat
index 27688360e9..76d4f14390 100755
--- a/src/csharp/build_packages_dotnetcli.bat
+++ b/src/csharp/build_packages_dotnetcli.bat
@@ -13,7 +13,7 @@
@rem limitations under the License.
@rem Current package versions
-set VERSION=1.17.0-dev
+set VERSION=1.18.0-dev
@rem Adjust the location of nuget.exe
set NUGET=C:\nuget\nuget.exe
diff --git a/src/csharp/build_unitypackage.bat b/src/csharp/build_unitypackage.bat
index dd74de0491..3334d24c11 100644
--- a/src/csharp/build_unitypackage.bat
+++ b/src/csharp/build_unitypackage.bat
@@ -13,7 +13,7 @@
@rem limitations under the License.
@rem Current package versions
-set VERSION=1.17.0-dev
+set VERSION=1.18.0-dev
@rem Adjust the location of nuget.exe
set NUGET=C:\nuget\nuget.exe
diff --git a/src/csharp/doc/integration.md-fig.1-classic.png b/src/csharp/doc/integration.md-fig.1-classic.png
new file mode 100644
index 0000000000..80c57261ad
--- /dev/null
+++ b/src/csharp/doc/integration.md-fig.1-classic.png
Binary files differ
diff --git a/src/csharp/doc/integration.md-fig.2-sdk.png b/src/csharp/doc/integration.md-fig.2-sdk.png
new file mode 100644
index 0000000000..6d653e4a58
--- /dev/null
+++ b/src/csharp/doc/integration.md-fig.2-sdk.png
Binary files differ
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
index a95a120d21..55ca6048bc 100644
--- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -42,7 +42,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler-gRPCPlugin'
- v = '1.17.0-dev'
+ v = '1.18.0-dev'
s.version = v
s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
s.description = <<-DESC
diff --git a/src/objective-c/GRPCClient/private/version.h b/src/objective-c/GRPCClient/private/version.h
index d5463c0b4c..0be0e3c9a0 100644
--- a/src/objective-c/GRPCClient/private/version.h
+++ b/src/objective-c/GRPCClient/private/version.h
@@ -22,4 +22,4 @@
// instead. This file can be regenerated from the template by running
// `tools/buildgen/generate_projects.sh`.
-#define GRPC_OBJC_VERSION_STRING @"1.17.0-dev"
+#define GRPC_OBJC_VERSION_STRING @"1.18.0-dev"
diff --git a/src/objective-c/tests/version.h b/src/objective-c/tests/version.h
index ca27c03b3c..f2fd692070 100644
--- a/src/objective-c/tests/version.h
+++ b/src/objective-c/tests/version.h
@@ -22,5 +22,5 @@
// instead. This file can be regenerated from the template by running
// `tools/buildgen/generate_projects.sh`.
-#define GRPC_OBJC_VERSION_STRING @"1.17.0-dev"
+#define GRPC_OBJC_VERSION_STRING @"1.18.0-dev"
#define GRPC_C_VERSION_STRING @"7.0.0-dev"
diff --git a/src/php/composer.json b/src/php/composer.json
index d54db91b5f..9c298c0e85 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -2,7 +2,7 @@
"name": "grpc/grpc-dev",
"description": "gRPC library for PHP - for Developement use only",
"license": "Apache-2.0",
- "version": "1.17.0",
+ "version": "1.18.0",
"require": {
"php": ">=5.5.0",
"google/protobuf": "^v3.3.0"
diff --git a/src/php/ext/grpc/channel.c b/src/php/ext/grpc/channel.c
index b17f3d9a61..c06bdea7fe 100644
--- a/src/php/ext/grpc/channel.c
+++ b/src/php/ext/grpc/channel.c
@@ -393,6 +393,8 @@ PHP_METHOD(Channel, __construct) {
channel->wrapper->target = strdup(target);
channel->wrapper->args_hashstr = strdup(sha1str);
channel->wrapper->creds_hashstr = NULL;
+ channel->wrapper->creds = creds;
+ channel->wrapper->args = args;
if (creds != NULL && creds->hashstr != NULL) {
php_grpc_int creds_hashstr_len = strlen(creds->hashstr);
char *channel_creds_hashstr = malloc(creds_hashstr_len + 1);
diff --git a/src/php/ext/grpc/channel.h b/src/php/ext/grpc/channel.h
index 27752c9a3f..ce17c4a58a 100644
--- a/src/php/ext/grpc/channel.h
+++ b/src/php/ext/grpc/channel.h
@@ -19,6 +19,7 @@
#ifndef NET_GRPC_PHP_GRPC_CHANNEL_H_
#define NET_GRPC_PHP_GRPC_CHANNEL_H_
+#include "channel_credentials.h"
#include "php_grpc.h"
/* Class entry for the PHP Channel class */
@@ -32,6 +33,8 @@ typedef struct _grpc_channel_wrapper {
char *creds_hashstr;
size_t ref_count;
gpr_mu mu;
+ grpc_channel_args args;
+ wrapped_grpc_channel_credentials *creds;
} grpc_channel_wrapper;
/* Wrapper struct for grpc_channel that can be associated with a PHP object */
diff --git a/src/php/ext/grpc/config.m4 b/src/php/ext/grpc/config.m4
index fa54ebd920..9ec2c7cf04 100755
--- a/src/php/ext/grpc/config.m4
+++ b/src/php/ext/grpc/config.m4
@@ -103,7 +103,7 @@ if test "$PHP_COVERAGE" = "yes"; then
AC_MSG_ERROR([ccache must be disabled when --enable-coverage option is used. You can disable ccache by setting environment variable CCACHE_DISABLE=1.])
fi
- lcov_version_list="1.5 1.6 1.7 1.9 1.10 1.11"
+ lcov_version_list="1.5 1.6 1.7 1.9 1.10 1.11 1.12 1.13"
AC_CHECK_PROG(LCOV, lcov, lcov)
AC_CHECK_PROG(GENHTML, genhtml, genhtml)
diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c
index fabd98975d..111c6f4867 100644
--- a/src/php/ext/grpc/php_grpc.c
+++ b/src/php/ext/grpc/php_grpc.c
@@ -26,6 +26,8 @@
#include "call_credentials.h"
#include "server_credentials.h"
#include "completion_queue.h"
+#include <ext/spl/spl_exceptions.h>
+#include <zend_exceptions.h>
ZEND_DECLARE_MODULE_GLOBALS(grpc)
static PHP_GINIT_FUNCTION(grpc);
@@ -86,6 +88,125 @@ ZEND_GET_MODULE(grpc)
}
*/
/* }}} */
+void create_new_channel(
+ wrapped_grpc_channel *channel,
+ char *target,
+ grpc_channel_args args,
+ wrapped_grpc_channel_credentials *creds) {
+ if (creds == NULL) {
+ channel->wrapper->wrapped = grpc_insecure_channel_create(target, &args,
+ NULL);
+ } else {
+ channel->wrapper->wrapped =
+ grpc_secure_channel_create(creds->wrapped, target, &args, NULL);
+ }
+}
+
+void acquire_persistent_locks() {
+ zval *data;
+ PHP_GRPC_HASH_FOREACH_VAL_START(&grpc_persistent_list, data)
+ php_grpc_zend_resource *rsrc =
+ (php_grpc_zend_resource*) PHP_GRPC_HASH_VALPTR_TO_VAL(data)
+ if (rsrc == NULL) {
+ break;
+ }
+ channel_persistent_le_t* le = rsrc->ptr;
+
+ gpr_mu_lock(&le->channel->mu);
+ PHP_GRPC_HASH_FOREACH_END()
+}
+
+void release_persistent_locks() {
+ zval *data;
+ PHP_GRPC_HASH_FOREACH_VAL_START(&grpc_persistent_list, data)
+ php_grpc_zend_resource *rsrc =
+ (php_grpc_zend_resource*) PHP_GRPC_HASH_VALPTR_TO_VAL(data)
+ if (rsrc == NULL) {
+ break;
+ }
+ channel_persistent_le_t* le = rsrc->ptr;
+
+ gpr_mu_unlock(&le->channel->mu);
+ PHP_GRPC_HASH_FOREACH_END()
+}
+
+void destroy_grpc_channels() {
+ zval *data;
+ PHP_GRPC_HASH_FOREACH_VAL_START(&grpc_persistent_list, data)
+ php_grpc_zend_resource *rsrc =
+ (php_grpc_zend_resource*) PHP_GRPC_HASH_VALPTR_TO_VAL(data)
+ if (rsrc == NULL) {
+ break;
+ }
+ channel_persistent_le_t* le = rsrc->ptr;
+
+ wrapped_grpc_channel wrapped_channel;
+ wrapped_channel.wrapper = le->channel;
+ grpc_channel_wrapper *channel = wrapped_channel.wrapper;
+ grpc_channel_destroy(channel->wrapped);
+ PHP_GRPC_HASH_FOREACH_END()
+}
+
+void restart_channels() {
+ zval *data;
+ PHP_GRPC_HASH_FOREACH_VAL_START(&grpc_persistent_list, data)
+ php_grpc_zend_resource *rsrc =
+ (php_grpc_zend_resource*) PHP_GRPC_HASH_VALPTR_TO_VAL(data)
+ if (rsrc == NULL) {
+ break;
+ }
+ channel_persistent_le_t* le = rsrc->ptr;
+
+ wrapped_grpc_channel wrapped_channel;
+ wrapped_channel.wrapper = le->channel;
+ grpc_channel_wrapper *channel = wrapped_channel.wrapper;
+ create_new_channel(&wrapped_channel, channel->target, channel->args,
+ channel->creds);
+ gpr_mu_unlock(&channel->mu);
+ PHP_GRPC_HASH_FOREACH_END()
+}
+
+void prefork() {
+ acquire_persistent_locks();
+}
+
+void postfork_child() {
+ TSRMLS_FETCH();
+
+ // loop through the persistent list and destroy all underlying grpc_channel objs
+ destroy_grpc_channels();
+
+ // clear completion queue
+ grpc_php_shutdown_completion_queue(TSRMLS_C);
+
+ // clean-up grpc_core
+ grpc_shutdown();
+ if (grpc_is_initialized() > 0) {
+ zend_throw_exception(spl_ce_UnexpectedValueException,
+ "Oops, failed to shutdown gRPC Core after fork()",
+ 1 TSRMLS_CC);
+ }
+
+ // restart grpc_core
+ grpc_init();
+ grpc_php_init_completion_queue(TSRMLS_C);
+
+ // re-create grpc_channel and point wrapped to it
+ // unlock wrapped grpc channel mutex
+ restart_channels();
+}
+
+void postfork_parent() {
+ release_persistent_locks();
+}
+
+void register_fork_handlers() {
+ if (getenv("GRPC_ENABLE_FORK_SUPPORT")) {
+#ifdef GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK
+ pthread_atfork(&prefork, &postfork_parent, &postfork_child);
+#endif // GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK
+ }
+}
/* {{{ PHP_MINIT_FUNCTION
*/
@@ -265,6 +386,7 @@ PHP_MINFO_FUNCTION(grpc) {
PHP_RINIT_FUNCTION(grpc) {
if (!GRPC_G(initialized)) {
grpc_init();
+ register_fork_handlers();
grpc_php_init_completion_queue(TSRMLS_C);
GRPC_G(initialized) = 1;
}
diff --git a/src/php/ext/grpc/version.h b/src/php/ext/grpc/version.h
index 70f8bbbf40..1ddf90a667 100644
--- a/src/php/ext/grpc/version.h
+++ b/src/php/ext/grpc/version.h
@@ -20,6 +20,6 @@
#ifndef VERSION_H
#define VERSION_H
-#define PHP_GRPC_VERSION "1.17.0dev"
+#define PHP_GRPC_VERSION "1.18.0dev"
#endif /* VERSION_H */
diff --git a/src/proto/grpc/channelz/BUILD b/src/proto/grpc/channelz/BUILD
index bdb03d5e2d..b6b485e3e8 100644
--- a/src/proto/grpc/channelz/BUILD
+++ b/src/proto/grpc/channelz/BUILD
@@ -24,3 +24,10 @@ grpc_proto_library(
has_services = True,
well_known_protos = True,
)
+
+filegroup(
+ name = "channelz_proto_file",
+ srcs = [
+ "channelz.proto",
+ ],
+)
diff --git a/src/proto/grpc/channelz/channelz.proto b/src/proto/grpc/channelz/channelz.proto
index d930dfcfb4..6202e5e817 100644
--- a/src/proto/grpc/channelz/channelz.proto
+++ b/src/proto/grpc/channelz/channelz.proto
@@ -424,6 +424,8 @@ service Channelz {
rpc GetTopChannels(GetTopChannelsRequest) returns (GetTopChannelsResponse);
// Gets all servers that exist in the process.
rpc GetServers(GetServersRequest) returns (GetServersResponse);
+ // Returns a single Server, or else a NOT_FOUND code.
+ rpc GetServer(GetServerRequest) returns (GetServerResponse);
// Gets all server sockets that exist in the process.
rpc GetServerSockets(GetServerSocketsRequest) returns (GetServerSocketsResponse);
// Returns a single Channel, or else a NOT_FOUND code.
@@ -466,6 +468,17 @@ message GetServersResponse {
bool end = 2;
}
+message GetServerRequest {
+ // server_id is the identifier of the specific server to get.
+ int64 server_id = 1;
+}
+
+message GetServerResponse {
+ // The Server that corresponds to the requested server_id. This field
+ // should be set.
+ Server server = 1;
+}
+
message GetServerSocketsRequest {
int64 server_id = 1;
// start_socket_id indicates that only sockets at or above this id should be
diff --git a/src/proto/grpc/health/v1/BUILD b/src/proto/grpc/health/v1/BUILD
index 97642985c9..38a7d992e0 100644
--- a/src/proto/grpc/health/v1/BUILD
+++ b/src/proto/grpc/health/v1/BUILD
@@ -29,4 +29,3 @@ filegroup(
"health.proto",
],
)
-
diff --git a/src/proto/grpc/reflection/v1alpha/BUILD b/src/proto/grpc/reflection/v1alpha/BUILD
index 4605418447..4d919d029e 100644
--- a/src/proto/grpc/reflection/v1alpha/BUILD
+++ b/src/proto/grpc/reflection/v1alpha/BUILD
@@ -22,3 +22,11 @@ grpc_proto_library(
name = "reflection_proto",
srcs = ["reflection.proto"],
)
+
+filegroup(
+ name = "reflection_proto_file",
+ srcs = [
+ "reflection.proto",
+ ],
+)
+
diff --git a/src/proto/grpc/testing/BUILD b/src/proto/grpc/testing/BUILD
index 16721ff2ed..9876d5160a 100644
--- a/src/proto/grpc/testing/BUILD
+++ b/src/proto/grpc/testing/BUILD
@@ -15,6 +15,8 @@
licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+load("@org_pubref_rules_protobuf//python:rules.bzl", "py_proto_library")
grpc_package(name = "testing", visibility = "public")
@@ -48,7 +50,8 @@ grpc_proto_library(
grpc_proto_library(
name = "echo_proto",
srcs = ["echo.proto"],
- deps = ["echo_messages_proto"],
+ deps = ["echo_messages_proto",
+ "simple_messages_proto"],
generate_mocks = True,
)
@@ -58,12 +61,30 @@ grpc_proto_library(
has_services = False,
)
+py_proto_library(
+ name = "py_empty_proto",
+ protos = ["empty.proto",],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
grpc_proto_library(
name = "messages_proto",
srcs = ["messages.proto"],
has_services = False,
)
+py_proto_library(
+ name = "py_messages_proto",
+ protos = ["messages.proto",],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
grpc_proto_library(
name = "metrics_proto",
srcs = ["metrics.proto"],
@@ -100,6 +121,12 @@ grpc_proto_library(
)
grpc_proto_library(
+ name = "simple_messages_proto",
+ srcs = ["simple_messages.proto"],
+ has_services = False,
+)
+
+grpc_proto_library(
name = "stats_proto",
srcs = ["stats.proto"],
has_services = False,
@@ -116,3 +143,17 @@ grpc_proto_library(
"messages_proto",
],
)
+
+py_proto_library(
+ name = "py_test_proto",
+ protos = ["test.proto",],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+ proto_deps = [
+ ":py_empty_proto",
+ ":py_messages_proto",
+ ]
+)
+
diff --git a/src/proto/grpc/testing/compiler_test.proto b/src/proto/grpc/testing/compiler_test.proto
index db5ca48331..9fa5590a59 100644
--- a/src/proto/grpc/testing/compiler_test.proto
+++ b/src/proto/grpc/testing/compiler_test.proto
@@ -20,6 +20,9 @@
syntax = "proto3";
// Ignored detached comment
+// The comments in this file are not meant for readability
+// but rather to test to make sure that the code generator
+// properly preserves comments on files, services, and RPCs
// Ignored package leading comment
package grpc.testing;
diff --git a/src/proto/grpc/testing/control.proto b/src/proto/grpc/testing/control.proto
index 4cfdc2cafb..564d8234b7 100644
--- a/src/proto/grpc/testing/control.proto
+++ b/src/proto/grpc/testing/control.proto
@@ -33,6 +33,7 @@ enum ServerType {
ASYNC_SERVER = 1;
ASYNC_GENERIC_SERVER = 2;
OTHER_SERVER = 3; // used for some language-specific variants
+ CALLBACK_SERVER = 4;
}
enum RpcType {
diff --git a/src/proto/grpc/testing/echo.proto b/src/proto/grpc/testing/echo.proto
index 13e28320fc..977858f6bc 100644
--- a/src/proto/grpc/testing/echo.proto
+++ b/src/proto/grpc/testing/echo.proto
@@ -16,11 +16,15 @@
syntax = "proto3";
import "src/proto/grpc/testing/echo_messages.proto";
+import "src/proto/grpc/testing/simple_messages.proto";
package grpc.testing;
service EchoTestService {
rpc Echo(EchoRequest) returns (EchoResponse);
+ // An RPC which checks that the initial metadata sent over contains some
+ // expected key-value pair
+ rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse);
rpc RequestStream(stream EchoRequest) returns (EchoResponse);
rpc ResponseStream(EchoRequest) returns (stream EchoResponse);
rpc BidiStream(stream EchoRequest) returns (stream EchoResponse);
diff --git a/src/proto/grpc/testing/proto2/BUILD.bazel b/src/proto/grpc/testing/proto2/BUILD.bazel
new file mode 100644
index 0000000000..c4c4f004ef
--- /dev/null
+++ b/src/proto/grpc/testing/proto2/BUILD.bazel
@@ -0,0 +1,30 @@
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+load("@org_pubref_rules_protobuf//python:rules.bzl", "py_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+py_proto_library(
+ name = "empty2_proto",
+ protos = [
+ "empty2.proto",
+ ],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
+py_proto_library(
+ name = "empty2_extensions_proto",
+ protos = [
+ "empty2_extensions.proto",
+ ],
+ proto_deps = [
+ ":empty2_proto",
+ ],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
diff --git a/src/proto/grpc/testing/simple_messages.proto b/src/proto/grpc/testing/simple_messages.proto
new file mode 100644
index 0000000000..4b65d18909
--- /dev/null
+++ b/src/proto/grpc/testing/simple_messages.proto
@@ -0,0 +1,24 @@
+
+// Copyright 2018 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.testing;
+
+message SimpleRequest {
+}
+
+message SimpleResponse {
+}
diff --git a/src/python/.gitignore b/src/python/.gitignore
index 7b520579a0..41813129bd 100644
--- a/src/python/.gitignore
+++ b/src/python/.gitignore
@@ -1,3 +1,4 @@
gens/
*_pb2.py
*_pb2_grpc.py
+*.egg-info/
diff --git a/src/python/grpcio/_parallel_compile_patch.py b/src/python/grpcio/_parallel_compile_patch.py
new file mode 100644
index 0000000000..4d03ef49ba
--- /dev/null
+++ b/src/python/grpcio/_parallel_compile_patch.py
@@ -0,0 +1,63 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Patches the compile() method to enable parallel compilation of C/C++.
+
+build_ext has lots of C/C++ files and normally compiles them one by one.
+Enabling parallel build helps a lot.
+"""
+
+import distutils.ccompiler
+import os
+
+try:
+ BUILD_EXT_COMPILER_JOBS = int(
+ os.environ.get('GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS', '1'))
+except ValueError:
+ BUILD_EXT_COMPILER_JOBS = 1
+
+
+# monkey-patch for parallel compilation
+def _parallel_compile(self,
+ sources,
+ output_dir=None,
+ macros=None,
+ include_dirs=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ depends=None):
+ # setup the same way as distutils.ccompiler.CCompiler
+ # https://github.com/python/cpython/blob/31368a4f0e531c19affe2a1becd25fc316bc7501/Lib/distutils/ccompiler.py#L564
+ macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
+ output_dir, macros, include_dirs, sources, depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+
+ def _compile_single_file(obj):
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ return
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+ # run compilation of individual files in parallel
+ import multiprocessing.pool
+ multiprocessing.pool.ThreadPool(BUILD_EXT_COMPILER_JOBS).map(
+ _compile_single_file, objects)
+ return objects
+
+
+def monkeypatch_compile_maybe():
+ """Monkeypatching is dumb, but the build speed gain is worth it."""
+ if BUILD_EXT_COMPILER_JOBS > 1:
+ distutils.ccompiler.CCompiler.compile = _parallel_compile
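The patch above only takes effect when GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS is set to a value greater than 1 and monkeypatch_compile_maybe() is called before the extensions are built. A minimal usage sketch, assuming a build script in the grpcio source tree; the job count and the surrounding setup flow are illustrative, not part of this diff:

    # Illustrative only: how a build script might opt into the parallel
    # compile patch introduced above.
    import os
    os.environ.setdefault('GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS', '8')  # hypothetical job count

    import _parallel_compile_patch  # the new module added in this diff
    _parallel_compile_patch.monkeypatch_compile_maybe()  # replaces CCompiler.compile when jobs > 1
    # ... then proceed with setuptools.setup(...) so build_ext uses the patched compiler.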
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 0a3097111f..b805f4277b 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -39,36 +39,6 @@ PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_GEN_STEM = os.path.join(GRPC_STEM, 'src', 'python', 'gens')
CYTHON_STEM = os.path.join(PYTHON_STEM, 'grpc', '_cython')
-CONF_PY_ADDENDUM = """
-extensions.append('sphinx.ext.napoleon')
-napoleon_google_docstring = True
-napoleon_numpy_docstring = True
-napoleon_include_special_with_doc = True
-
-html_theme = 'sphinx_rtd_theme'
-copyright = "2016, The gRPC Authors"
-"""
-
-API_GLOSSARY = """
-
-Glossary
-================
-
-.. glossary::
-
- metadatum
- A key-value pair included in the HTTP header. It is a
- 2-tuple where the first entry is the key and the
- second is the value, i.e. (key, value). The metadata key is an ASCII str,
- and must be a valid HTTP header name. The metadata value can be
- either a valid HTTP ASCII str, or bytes. If bytes are provided,
- the key must end with '-bin', i.e.
- ``('binary-metadata-bin', b'\\x00\\xFF')``
-
- metadata
- A sequence of metadatum.
-"""
-
class CommandError(Exception):
"""Simple exception class for GRPC custom commands."""
@@ -124,25 +94,14 @@ class SphinxDocumentation(setuptools.Command):
def run(self):
# We import here to ensure that setup.py has had a chance to install the
# relevant package eggs first.
- import sphinx
- import sphinx.apidoc
- metadata = self.distribution.metadata
- src_dir = os.path.join(PYTHON_STEM, 'grpc')
- sys.path.append(src_dir)
- sphinx.apidoc.main([
- '', '--force', '--full', '-H', metadata.name, '-A', metadata.author,
- '-V', metadata.version, '-R', metadata.version, '-o',
- os.path.join('doc', 'src'), src_dir
- ])
- conf_filepath = os.path.join('doc', 'src', 'conf.py')
- with open(conf_filepath, 'a') as conf_file:
- conf_file.write(CONF_PY_ADDENDUM)
- glossary_filepath = os.path.join('doc', 'src', 'grpc.rst')
- with open(glossary_filepath, 'a') as glossary_filepath:
- glossary_filepath.write(API_GLOSSARY)
- sphinx.main(
- ['', os.path.join('doc', 'src'),
- os.path.join('doc', 'build')])
+ import sphinx.cmd.build
+ source_dir = os.path.join(GRPC_STEM, 'doc', 'python', 'sphinx')
+ target_dir = os.path.join(GRPC_STEM, 'doc', 'build')
+ exit_code = sphinx.cmd.build.build_main(
+ ['-b', 'html', '-W', '--keep-going', source_dir, target_dir])
+ if exit_code != 0:
+ raise CommandError(
+ "Documentation generation has warnings or errors")
class BuildProjectMetadata(setuptools.Command):
@@ -253,6 +212,12 @@ class BuildExt(build_ext.build_ext):
LINK_OPTIONS = {}
def build_extensions(self):
+ # This special case exists because gcc and clang behave differently:
+ # clang does not accept the --stdc++11 flags that gcc does. Since
+ # Python's setuptools only supports all-C or all-C++ compilation,
+ # mixing C and C++ sources will crash the build. By default macOS
+ # uses clang and Linux uses gcc, which is why the condition below
+ # checks the platform.
if "darwin" in sys.platform:
config = os.environ.get('CONFIG', 'opt')
target_path = os.path.abspath(
@@ -274,8 +239,14 @@ class BuildExt(build_ext.build_ext):
extra_defines = [
'EXTRA_DEFINES="GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK=1"'
]
+ # Ensure that BoringSSL is built instead of using system-provided
+ # libraries. This prevents dependency issues when distributing to
+ # Mac users who use MacPorts to manage their libraries. #17002
+ mod_env = dict(os.environ)
+ mod_env['REQUIRE_CUSTOM_LIBRARIES_opt'] = '1'
make_process = subprocess.Popen(
['make'] + extra_defines + targets,
+ env=mod_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
make_out, make_err = make_process.communicate()
diff --git a/src/python/grpcio/grpc/BUILD.bazel b/src/python/grpcio/grpc/BUILD.bazel
index 2e6839ef2d..6958ccdfb6 100644
--- a/src/python/grpcio/grpc/BUILD.bazel
+++ b/src/python/grpcio/grpc/BUILD.bazel
@@ -13,7 +13,6 @@ py_library(
":interceptor",
":server",
"//src/python/grpcio/grpc/_cython:cygrpc",
- "//src/python/grpcio/grpc/beta",
"//src/python/grpcio/grpc/experimental",
"//src/python/grpcio/grpc/framework",
requirement('enum34'),
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index 863696d236..6022fc3ef2 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -15,12 +15,14 @@
import abc
import enum
+import logging
import sys
-
import six
from grpc._cython import cygrpc as _cygrpc
+logging.getLogger(__name__).addHandler(logging.NullHandler())
+
############################## Future Interface ###############################
@@ -48,11 +50,13 @@ class Future(six.with_metaclass(abc.ABCMeta)):
Returns:
bool:
Returns True if the computation was canceled.
+
Returns False under all other circumstances, for example:
+
1. computation has begun and could not be canceled.
2. computation has finished
3. computation is scheduled for execution and it is impossible
- to determine its state without blocking.
+ to determine its state without blocking.
"""
raise NotImplementedError()
@@ -66,7 +70,9 @@ class Future(six.with_metaclass(abc.ABCMeta)):
bool:
Returns True if the computation was cancelled before its result became
available.
- False under all other circumstances, for example:
+
+ Returns False under all other circumstances, for example:
+
1. computation was not cancelled.
2. computation's result is available.
"""
@@ -79,9 +85,9 @@ class Future(six.with_metaclass(abc.ABCMeta)):
This method does not block.
Returns:
- bool:
Returns True if the computation is scheduled for execution or
currently executing.
+
Returns False if the computation already executed or was cancelled.
"""
raise NotImplementedError()
@@ -210,7 +216,33 @@ class ChannelConnectivity(enum.Enum):
@enum.unique
class StatusCode(enum.Enum):
- """Mirrors grpc_status_code in the gRPC Core."""
+ """Mirrors grpc_status_code in the gRPC Core.
+
+ Attributes:
+ OK: Not an error; returned on success
+ CANCELLED: The operation was cancelled (typically by the caller).
+ UNKNOWN: Unknown error.
+ INVALID_ARGUMENT: Client specified an invalid argument.
+ DEADLINE_EXCEEDED: Deadline expired before operation could complete.
+ NOT_FOUND: Some requested entity (e.g., file or directory) was not found.
+ ALREADY_EXISTS: Some entity that we attempted to create (e.g., file or directory)
+ already exists.
+ PERMISSION_DENIED: The caller does not have permission to execute the specified
+ operation.
+ UNAUTHENTICATED: The request does not have valid authentication credentials for the
+ operation.
+ RESOURCE_EXHAUSTED: Some resource has been exhausted, perhaps a per-user quota, or
+ perhaps the entire file system is out of space.
+ FAILED_PRECONDITION: Operation was rejected because the system is not in a state
+ required for the operation's execution.
+ ABORTED: The operation was aborted, typically due to a concurrency issue
+ like sequencer check failures, transaction aborts, etc.
+ UNIMPLEMENTED: Operation is not implemented or not supported/enabled in this service.
+ INTERNAL: Internal errors. Means some invariants expected by the underlying
+ system have been broken.
+ UNAVAILABLE: The service is currently unavailable.
+ DATA_LOSS: Unrecoverable data loss or corruption.
+ """
OK = (_cygrpc.StatusCode.ok, 'ok')
CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
@@ -357,6 +389,8 @@ class ClientCallDetails(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to
the service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional flag to
+ enable the wait-for-ready mechanism.
"""
@@ -450,8 +484,7 @@ class StreamUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
- `response_future = continuation(client_call_details,
- request_iterator)`
+ `response_future = continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns an object that is
both a Call for the RPC and a Future. In the event of RPC completion,
the return Call-Future's result value will be the response message
@@ -462,11 +495,11 @@ class StreamUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
request_iterator: An iterator that yields request values for the RPC.
Returns:
- An object that is both a Call for the RPC and a Future.
- In the event of RPC completion, the return Call-Future's
- result value will be the response message of the RPC.
- Should the event terminate with non-OK status, the returned
- Call-Future's exception value will be an RpcError.
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's
+ result value will be the response message of the RPC.
+ Should the event terminate with non-OK status, the returned
+ Call-Future's exception value will be an RpcError.
"""
raise NotImplementedError()
@@ -482,13 +515,13 @@ class StreamStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
request_iterator):
"""Intercepts a stream-stream invocation.
+ Args:
continuation: A function that proceeds with the invocation by
executing the next interceptor in chain or invoking the
actual RPC on the underlying Channel. It is the interceptor's
responsibility to call it if it decides to move the RPC forward.
The interceptor can use
- `response_iterator = continuation(client_call_details,
- request_iterator)`
+ `response_iterator = continuation(client_call_details, request_iterator)`
to continue with the RPC. `continuation` returns an object that is
both a Call for the RPC and an iterator for response values.
Drawing response values from the returned Call-iterator may
@@ -499,10 +532,10 @@ class StreamStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
request_iterator: An iterator that yields request values for the RPC.
Returns:
- An object that is both a Call for the RPC and an iterator of
- response values. Drawing response values from the returned
- Call-iterator may raise RpcError indicating termination of
- the RPC with non-OK status.
+ An object that is both a Call for the RPC and an iterator of
+ response values. Drawing response values from the returned
+ Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status.
"""
raise NotImplementedError()
@@ -609,7 +642,12 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-unary RPC from client-side."""
@abc.abstractmethod
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
"""Synchronously invokes the underlying RPC.
Args:
@@ -619,6 +657,8 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
The response value for the RPC.
@@ -631,7 +671,12 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
raise NotImplementedError()
@abc.abstractmethod
- def with_call(self, request, timeout=None, metadata=None, credentials=None):
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
"""Synchronously invokes the underlying RPC.
Args:
@@ -641,6 +686,8 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
The response value for the RPC and a Call value for the RPC.
@@ -653,7 +700,12 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
raise NotImplementedError()
@abc.abstractmethod
- def future(self, request, timeout=None, metadata=None, credentials=None):
+ def future(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
"""Asynchronously invokes the underlying RPC.
Args:
@@ -663,6 +715,8 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
An object that is both a Call for the RPC and a Future.
@@ -678,7 +732,12 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-stream RPC from client-side."""
@abc.abstractmethod
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
"""Invokes the underlying RPC.
Args:
@@ -688,6 +747,8 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: An optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
An object that is both a Call for the RPC and an iterator of
@@ -706,7 +767,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
"""Synchronously invokes the underlying RPC.
Args:
@@ -717,6 +779,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
The response value for the RPC.
@@ -733,7 +797,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
"""Synchronously invokes the underlying RPC on the client.
Args:
@@ -744,6 +809,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
The response value for the RPC and a Call object for the RPC.
@@ -760,7 +827,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
"""Asynchronously invokes the underlying RPC on the client.
Args:
@@ -770,6 +838,8 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
An object that is both a Call for the RPC and a Future.
@@ -789,7 +859,8 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
"""Invokes the underlying RPC on the client.
Args:
@@ -799,6 +870,8 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable the wait-for-ready mechanism.
Returns:
An object that is both a Call for the RPC and an iterator of
@@ -972,8 +1045,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
"""Gets one or more peer identity(s).
Equivalent to
- servicer_context.auth_context().get(
- servicer_context.peer_identity_key())
+ servicer_context.auth_context().get(servicer_context.peer_identity_key())
Returns:
An iterable of the identities, or None if the call is not
@@ -1651,7 +1723,7 @@ def server(thread_pool,
handlers. The interceptors are given control in the order they are
specified. This is an EXPERIMENTAL API.
options: An optional list of key-value pairs (channel args in gRPC runtime)
- to configure the channel.
+ to configure the channel.
maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
will service before returning RESOURCE_EXHAUSTED status, or None to
indicate no limit.
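For context, a hedged sketch of how the new wait_for_ready flag is used from application code; the target address and method path below are placeholders, and only the wait_for_ready keyword itself comes from this diff:

    import grpc

    channel = grpc.insecure_channel('localhost:50051')           # placeholder address
    echo = channel.unary_unary('/example.EchoService/Echo')      # placeholder method path
    # With wait_for_ready=True the call queues until the channel is READY
    # instead of failing fast with UNAVAILABLE while it is still connecting.
    response = echo(b'ping', timeout=5.0, wait_for_ready=True)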
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index eeeb4ddb33..35fa82d56b 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -24,7 +24,6 @@ from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
@@ -176,6 +175,7 @@ def _event_handler(state, response_deserializer):
return handle_event
+#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
event_handler):
if cygrpc.is_fork_support_enabled():
@@ -467,10 +467,11 @@ def _end_unary_response_blocking(state, call, with_call, deadline):
raise _Rendezvous(state, None, None, deadline)
-def _stream_unary_invocation_operationses(metadata):
+def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
return (
(
- cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
+ cygrpc.SendInitialMetadataOperation(metadata,
+ initial_metadata_flags),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
@@ -478,15 +479,19 @@ def _stream_unary_invocation_operationses(metadata):
)
-def _stream_unary_invocation_operationses_and_tags(metadata):
+def _stream_unary_invocation_operationses_and_tags(metadata,
+ initial_metadata_flags):
return tuple((
operations,
None,
- ) for operations in _stream_unary_invocation_operationses(metadata))
+ )
+ for operations in _stream_unary_invocation_operationses(
+ metadata, initial_metadata_flags))
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
+ # pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
@@ -495,15 +500,18 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
- def _prepare(self, request, timeout, metadata):
+ def _prepare(self, request, timeout, metadata, wait_for_ready):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
if serialized_request is None:
return None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
- cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
+ cygrpc.SendInitialMetadataOperation(metadata,
+ initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
@@ -512,9 +520,10 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
)
return state, operations, deadline, None
- def _blocking(self, request, timeout, metadata, credentials):
+ def _blocking(self, request, timeout, metadata, credentials,
+ wait_for_ready):
state, operations, deadline, rendezvous = self._prepare(
- request, timeout, metadata)
+ request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous
else:
@@ -528,17 +537,34 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
_handle_event(event, state, self._response_deserializer)
return state, call,
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- state, call, = self._blocking(request, timeout, metadata, credentials)
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
+ state, call, = self._blocking(request, timeout, metadata, credentials,
+ wait_for_ready)
return _end_unary_response_blocking(state, call, False, None)
- def with_call(self, request, timeout=None, metadata=None, credentials=None):
- state, call, = self._blocking(request, timeout, metadata, credentials)
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
+ state, call, = self._blocking(request, timeout, metadata, credentials,
+ wait_for_ready)
return _end_unary_response_blocking(state, call, True, None)
- def future(self, request, timeout=None, metadata=None, credentials=None):
+ def future(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
state, operations, deadline, rendezvous = self._prepare(
- request, timeout, metadata)
+ request, timeout, metadata, wait_for_ready)
if state is None:
raise rendezvous
else:
@@ -553,6 +579,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
+ # pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
@@ -561,16 +588,24 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
if serialized_request is None:
raise rendezvous
else:
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
- cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
+ cygrpc.SendInitialMetadataOperation(metadata,
+ initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
@@ -589,6 +624,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
+ # pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
@@ -597,13 +633,17 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
- def _blocking(self, request_iterator, timeout, metadata, credentials):
+ def _blocking(self, request_iterator, timeout, metadata, credentials,
+ wait_for_ready):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
call = self._channel.segregated_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
- _stream_unary_invocation_operationses_and_tags(metadata))
+ _stream_unary_invocation_operationses_and_tags(
+ metadata, initial_metadata_flags))
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, None)
while True:
@@ -619,32 +659,38 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
- credentials)
+ credentials, wait_for_ready)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
- credentials)
+ credentials, wait_for_ready)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
event_handler = _event_handler(state, self._response_deserializer)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
call = self._managed_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
- _stream_unary_invocation_operationses(metadata), event_handler)
+ _stream_unary_invocation_operationses(
+ metadata, initial_metadata_flags), event_handler)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
@@ -652,6 +698,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
+ # pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
@@ -664,12 +711,16 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
operationses = (
(
- cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
+ cygrpc.SendInitialMetadataOperation(metadata,
+ initial_metadata_flags),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
@@ -684,6 +735,24 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
return _Rendezvous(state, call, self._response_deserializer, deadline)
+class _InitialMetadataFlags(int):
+ """Stores immutable initial metadata flags"""
+
+ def __new__(cls, value=_EMPTY_FLAGS):
+ value &= cygrpc.InitialMetadataFlags.used_mask
+ return super(_InitialMetadataFlags, cls).__new__(cls, value)
+
+ def with_wait_for_ready(self, wait_for_ready):
+ if wait_for_ready is not None:
+ if wait_for_ready:
+ self = self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
+ cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
+ elif not wait_for_ready:
+ self = self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
+ cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
+ return self
+
+
class _ChannelCallState(object):
def __init__(self, channel):
@@ -980,8 +1049,9 @@ class Channel(grpc.Channel):
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.
- cygrpc.fork_unregister_channel(self)
+ if cygrpc is not None: # Globals may have already been collected.
+ cygrpc.fork_unregister_channel(self)
# This prevent the failed-at-initializing object removal from failing.
# Though the __init__ failed, the removal will still trigger __del__.
- if hasattr(self, "_connectivity_state"):
+ if _moot is not None and hasattr(self, "_connectivity_state"):
_moot(self._connectivity_state)
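A plain-Python sketch of the bit arithmetic performed by _InitialMetadataFlags.with_wait_for_ready above; the numeric values are illustrative assumptions, the real constants come from cygrpc.InitialMetadataFlags:

    WAIT_FOR_READY = 0x10                  # illustrative value
    WAIT_FOR_READY_EXPLICITLY_SET = 0x20   # illustrative value

    def with_wait_for_ready(flags, wait_for_ready):
        # None means "leave the core default in place"; True/False both mark
        # the flag as explicitly set so the core does not apply its default.
        if wait_for_ready is None:
            return flags
        if wait_for_ready:
            return flags | WAIT_FOR_READY | WAIT_FOR_READY_EXPLICITLY_SET
        return (flags & ~WAIT_FOR_READY) | WAIT_FOR_READY_EXPLICITLY_SET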
diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py
index 3805c7e82a..f69127e38e 100644
--- a/src/python/grpcio/grpc/_common.py
+++ b/src/python/grpcio/grpc/_common.py
@@ -20,7 +20,6 @@ import six
import grpc
from grpc._cython import cygrpc
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
@@ -66,18 +65,13 @@ def encode(s):
if isinstance(s, bytes):
return s
else:
- return s.encode('ascii')
+ return s.encode('utf8')
def decode(b):
- if isinstance(b, str):
- return b
- else:
- try:
- return b.decode('utf8')
- except UnicodeDecodeError:
- _LOGGER.exception('Invalid encoding on %s', b)
- return b.decode('latin1')
+ if isinstance(b, bytes):
+ return b.decode('utf-8', 'replace')
+ return b
def _transform(message, transformer, exception_message):
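A quick behavioural sketch of the updated helpers (grpc._common is a private module, so this is for illustration only): encode() now emits UTF-8 rather than ASCII, and decode() substitutes U+FFFD for undecodable bytes instead of retrying with latin-1.

    from grpc import _common

    assert _common.encode('héllo') == b'h\xc3\xa9llo'        # UTF-8, not ASCII
    assert _common.decode(b'abc\xff') == 'abc\ufffd'          # invalid byte replaced
    assert _common.decode('already text') == 'already text'  # str passes through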
diff --git a/src/python/grpcio/grpc/_cython/BUILD.bazel b/src/python/grpcio/grpc/_cython/BUILD.bazel
index cfd3a51d9b..e318298d0a 100644
--- a/src/python/grpcio/grpc/_cython/BUILD.bazel
+++ b/src/python/grpcio/grpc/_cython/BUILD.bazel
@@ -12,6 +12,7 @@ pyx_library(
"_cygrpc/grpc_string.pyx.pxi",
"_cygrpc/arguments.pyx.pxi",
"_cygrpc/call.pyx.pxi",
+ "_cygrpc/channelz.pyx.pxi",
"_cygrpc/channel.pyx.pxi",
"_cygrpc/credentials.pyx.pxi",
"_cygrpc/completion_queue.pyx.pxi",
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channelz.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channelz.pyx.pxi
new file mode 100644
index 0000000000..113f7976dd
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channelz.pyx.pxi
@@ -0,0 +1,69 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def channelz_get_top_channels(start_channel_id):
+ cdef char *c_returned_str = grpc_channelz_get_top_channels(
+ start_channel_id,
+ )
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get top channels, please ensure your' \
+ ' start_channel_id==%s is valid' % start_channel_id)
+ return c_returned_str
+
+def channelz_get_servers(start_server_id):
+ cdef char *c_returned_str = grpc_channelz_get_servers(start_server_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get servers, please ensure your' \
+ ' start_server_id==%s is valid' % start_server_id)
+ return c_returned_str
+
+def channelz_get_server(server_id):
+ cdef char *c_returned_str = grpc_channelz_get_server(server_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the server, please ensure your' \
+ ' server_id==%s is valid' % server_id)
+ return c_returned_str
+
+def channelz_get_server_sockets(server_id, start_socket_id):
+ cdef char *c_returned_str = grpc_channelz_get_server_sockets(
+ server_id,
+ start_socket_id,
+ )
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get server sockets, please ensure your' \
+ ' server_id==%s and start_socket_id==%s is valid' %
+ (server_id, start_socket_id))
+ return c_returned_str
+
+def channelz_get_channel(channel_id):
+ cdef char *c_returned_str = grpc_channelz_get_channel(channel_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the channel, please ensure your' \
+ ' channel_id==%s is valid' % (channel_id))
+ return c_returned_str
+
+def channelz_get_subchannel(subchannel_id):
+ cdef char *c_returned_str = grpc_channelz_get_subchannel(subchannel_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the subchannel, please ensure your' \
+ ' subchannel_id==%s is valid' % (subchannel_id))
+ return c_returned_str
+
+def channelz_get_socket(socket_id):
+ cdef char *c_returned_str = grpc_channelz_get_socket(socket_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the socket, please ensure your' \
+ ' socket_id==%s is valid' % (socket_id))
+ return c_returned_str
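Once this .pxi is compiled into the cygrpc extension, the helpers should be reachable from Python. A hedged sketch of querying them; the attribute path and the JSON handling are assumptions, not part of this diff:

    import json
    from grpc._cython import cygrpc

    # Each helper is expected to return the channelz state rendered as a JSON string.
    top_channels = json.loads(cygrpc.channelz_get_top_channels(0))
    servers = json.loads(cygrpc.channelz_get_servers(0))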
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
index 8d73215247..1cef726970 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
@@ -15,7 +15,7 @@
cdef class CallCredentials:
- cdef grpc_call_credentials *c(self)
+ cdef grpc_call_credentials *c(self) except *
# TODO(https://github.com/grpc/grpc/issues/12531): remove.
cdef grpc_call_credentials *c_credentials
@@ -36,7 +36,7 @@ cdef class MetadataPluginCallCredentials(CallCredentials):
cdef readonly object _metadata_plugin
cdef readonly bytes _name
- cdef grpc_call_credentials *c(self)
+ cdef grpc_call_credentials *c(self) except *
cdef grpc_call_credentials *_composition(call_credentialses)
@@ -46,12 +46,12 @@ cdef class CompositeCallCredentials(CallCredentials):
cdef readonly tuple _call_credentialses
- cdef grpc_call_credentials *c(self)
+ cdef grpc_call_credentials *c(self) except *
cdef class ChannelCredentials:
- cdef grpc_channel_credentials *c(self)
+ cdef grpc_channel_credentials *c(self) except *
# TODO(https://github.com/grpc/grpc/issues/12531): remove.
cdef grpc_channel_credentials *c_credentials
@@ -68,7 +68,7 @@ cdef class SSLChannelCredentials(ChannelCredentials):
cdef readonly object _private_key
cdef readonly object _certificate_chain
- cdef grpc_channel_credentials *c(self)
+ cdef grpc_channel_credentials *c(self) except *
cdef class CompositeChannelCredentials(ChannelCredentials):
@@ -76,7 +76,7 @@ cdef class CompositeChannelCredentials(ChannelCredentials):
cdef readonly tuple _call_credentialses
cdef readonly ChannelCredentials _channel_credentials
- cdef grpc_channel_credentials *c(self)
+ cdef grpc_channel_credentials *c(self) except *
cdef class ServerCertificateConfig:
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 63048e8da0..2f51be40ce 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -35,7 +35,7 @@ def _spawn_callback_async(callback, args):
cdef class CallCredentials:
- cdef grpc_call_credentials *c(self):
+ cdef grpc_call_credentials *c(self) except *:
raise NotImplementedError()
@@ -61,6 +61,7 @@ cdef int _get_metadata(
cdef void _destroy(void *state) with gil:
cpython.Py_DECREF(<object>state)
+ grpc_shutdown()
cdef class MetadataPluginCallCredentials(CallCredentials):
@@ -69,13 +70,14 @@ cdef class MetadataPluginCallCredentials(CallCredentials):
self._metadata_plugin = metadata_plugin
self._name = name
- cdef grpc_call_credentials *c(self):
+ cdef grpc_call_credentials *c(self) except *:
cdef grpc_metadata_credentials_plugin c_metadata_plugin
c_metadata_plugin.get_metadata = _get_metadata
c_metadata_plugin.destroy = _destroy
c_metadata_plugin.state = <void *>self._metadata_plugin
c_metadata_plugin.type = self._name
cpython.Py_INCREF(self._metadata_plugin)
+ fork_handlers_and_grpc_init()
return grpc_metadata_credentials_create_from_plugin(c_metadata_plugin, NULL)
@@ -101,13 +103,13 @@ cdef class CompositeCallCredentials(CallCredentials):
def __cinit__(self, call_credentialses):
self._call_credentialses = call_credentialses
- cdef grpc_call_credentials *c(self):
+ cdef grpc_call_credentials *c(self) except *:
return _composition(self._call_credentialses)
cdef class ChannelCredentials:
- cdef grpc_channel_credentials *c(self):
+ cdef grpc_channel_credentials *c(self) except *:
raise NotImplementedError()
@@ -129,11 +131,13 @@ cdef class SSLSessionCacheLRU:
cdef class SSLChannelCredentials(ChannelCredentials):
def __cinit__(self, pem_root_certificates, private_key, certificate_chain):
+ if pem_root_certificates is not None and not isinstance(pem_root_certificates, bytes):
+ raise TypeError('expected certificate to be bytes, got %s' % (type(pem_root_certificates)))
self._pem_root_certificates = pem_root_certificates
self._private_key = private_key
self._certificate_chain = certificate_chain
- cdef grpc_channel_credentials *c(self):
+ cdef grpc_channel_credentials *c(self) except *:
cdef const char *c_pem_root_certificates
cdef grpc_ssl_pem_key_cert_pair c_pem_key_certificate_pair
if self._pem_root_certificates is None:
@@ -162,7 +166,7 @@ cdef class CompositeChannelCredentials(ChannelCredentials):
self._call_credentialses = call_credentialses
self._channel_credentials = channel_credentials
- cdef grpc_channel_credentials *c(self):
+ cdef grpc_channel_credentials *c(self) except *:
cdef grpc_channel_credentials *c_channel_credentials
c_channel_credentials = self._channel_credentials.c()
cdef grpc_call_credentials *c_call_credentials_composition = _composition(
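A usage sketch for the new argument validation (the certificate path is a placeholder): grpc.ssl_channel_credentials() ultimately constructs the SSLChannelCredentials above, so passing text where bytes are expected now fails fast with a TypeError instead of failing later inside the core.

    import grpc

    with open('roots.pem', 'rb') as f:            # placeholder path
        creds = grpc.ssl_channel_credentials(root_certificates=f.read())  # bytes: OK

    # grpc.ssl_channel_credentials(root_certificates='not bytes')  # now raises TypeError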
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 4781219319..5bbc10af25 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -13,6 +13,7 @@
# limitations under the License.
cimport libc.time
+from libc.stdint cimport intptr_t
# Typedef types with approximately the same semantics to provide their names to
@@ -121,7 +122,6 @@ cdef extern from "grpc/grpc.h":
GRPC_STATUS_DATA_LOSS
GRPC_STATUS__DO_NOT_USE
- const char *GRPC_ARG_PRIMARY_USER_AGENT_STRING
const char *GRPC_ARG_ENABLE_CENSUS
const char *GRPC_ARG_MAX_CONCURRENT_STREAMS
const char *GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH
@@ -140,6 +140,10 @@ cdef extern from "grpc/grpc.h":
const int GRPC_WRITE_NO_COMPRESS
const int GRPC_WRITE_USED_MASK
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+ const int GRPC_INITIAL_METADATA_USED_MASK
+
const int GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
ctypedef struct grpc_completion_queue:
@@ -186,12 +190,6 @@ cdef extern from "grpc/grpc.h":
size_t arguments_length "num_args"
grpc_arg *arguments "args"
- ctypedef enum grpc_compression_level:
- GRPC_COMPRESS_LEVEL_NONE
- GRPC_COMPRESS_LEVEL_LOW
- GRPC_COMPRESS_LEVEL_MED
- GRPC_COMPRESS_LEVEL_HIGH
-
ctypedef enum grpc_stream_compression_level:
GRPC_STREAM_COMPRESS_LEVEL_NONE
GRPC_STREAM_COMPRESS_LEVEL_LOW
@@ -387,6 +385,15 @@ cdef extern from "grpc/grpc.h":
void grpc_server_cancel_all_calls(grpc_server *server) nogil
void grpc_server_destroy(grpc_server *server) nogil
+ char* grpc_channelz_get_top_channels(intptr_t start_channel_id)
+ char* grpc_channelz_get_servers(intptr_t start_server_id)
+ char* grpc_channelz_get_server(intptr_t server_id)
+ char* grpc_channelz_get_server_sockets(intptr_t server_id,
+ intptr_t start_socket_id)
+ char* grpc_channelz_get_channel(intptr_t channel_id)
+ char* grpc_channelz_get_subchannel(intptr_t subchannel_id)
+ char* grpc_channelz_get_socket(intptr_t socket_id)
+
cdef extern from "grpc/grpc_security.h":
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
index f9a1b2856d..a1618d04d0 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
@@ -266,7 +266,7 @@ cdef grpc_error* socket_listen(grpc_custom_socket* socket) with gil:
(<SocketWrapper>socket.impl).socket.listen(50)
return grpc_error_none()
-cdef void accept_callback_cython(SocketWrapper s):
+cdef void accept_callback_cython(SocketWrapper s) except *:
try:
conn, address = s.socket.accept()
sw = SocketWrapper()
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
index 334e561baa..00a1b23a67 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
@@ -14,7 +14,6 @@
import logging
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
# This function will ascii encode unicode string inputs if necessary.
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi
index a18c365807..fc72ac1576 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pxd.pxi
@@ -14,10 +14,10 @@
cdef void _store_c_metadata(
- metadata, grpc_metadata **c_metadata, size_t *c_count)
+ metadata, grpc_metadata **c_metadata, size_t *c_count) except *
-cdef void _release_c_metadata(grpc_metadata *c_metadata, int count)
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *
cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi
index c39fef08fa..caf867b569 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/metadata.pyx.pxi
@@ -15,11 +15,17 @@
import collections
+class InitialMetadataFlags:
+ used_mask = GRPC_INITIAL_METADATA_USED_MASK
+ wait_for_ready = GRPC_INITIAL_METADATA_WAIT_FOR_READY
+ wait_for_ready_explicitly_set = GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+
+
_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
cdef void _store_c_metadata(
- metadata, grpc_metadata **c_metadata, size_t *c_count):
+ metadata, grpc_metadata **c_metadata, size_t *c_count) except *:
if metadata is None:
c_count[0] = 0
c_metadata[0] = NULL
@@ -39,7 +45,7 @@ cdef void _store_c_metadata(
c_metadata[0][index].value = _slice_from_bytes(encoded_value)
-cdef void _release_c_metadata(grpc_metadata *c_metadata, int count):
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *:
if 0 < count:
for index in range(count):
grpc_slice_unref(c_metadata[index].key)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi
index 69a2a4989e..c9df32dadf 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/operation.pxd.pxi
@@ -15,8 +15,8 @@
cdef class Operation:
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this!
cdef grpc_op c_op
@@ -29,8 +29,8 @@ cdef class SendInitialMetadataOperation(Operation):
cdef grpc_metadata *_c_initial_metadata
cdef size_t _c_initial_metadata_count
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class SendMessageOperation(Operation):
@@ -39,16 +39,16 @@ cdef class SendMessageOperation(Operation):
cdef readonly int _flags
cdef grpc_byte_buffer *_c_message_byte_buffer
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class SendCloseFromClientOperation(Operation):
cdef readonly int _flags
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class SendStatusFromServerOperation(Operation):
@@ -61,8 +61,8 @@ cdef class SendStatusFromServerOperation(Operation):
cdef size_t _c_trailing_metadata_count
cdef grpc_slice _c_details
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class ReceiveInitialMetadataOperation(Operation):
@@ -71,8 +71,8 @@ cdef class ReceiveInitialMetadataOperation(Operation):
cdef tuple _initial_metadata
cdef grpc_metadata_array _c_initial_metadata
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class ReceiveMessageOperation(Operation):
@@ -81,8 +81,8 @@ cdef class ReceiveMessageOperation(Operation):
cdef grpc_byte_buffer *_c_message_byte_buffer
cdef bytes _message
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class ReceiveStatusOnClientOperation(Operation):
@@ -97,8 +97,8 @@ cdef class ReceiveStatusOnClientOperation(Operation):
cdef str _details
cdef str _error_string
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
cdef class ReceiveCloseOnServerOperation(Operation):
@@ -107,5 +107,5 @@ cdef class ReceiveCloseOnServerOperation(Operation):
cdef object _cancelled
cdef int _c_cancelled
- cdef void c(self)
- cdef void un_c(self)
+ cdef void c(self) except *
+ cdef void un_c(self) except *
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi
index 454627f570..c8a390106a 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/operation.pyx.pxi
@@ -15,10 +15,10 @@
cdef class Operation:
- cdef void c(self):
+ cdef void c(self) except *:
raise NotImplementedError()
- cdef void un_c(self):
+ cdef void un_c(self) except *:
raise NotImplementedError()
@@ -31,7 +31,7 @@ cdef class SendInitialMetadataOperation(Operation):
def type(self):
return GRPC_OP_SEND_INITIAL_METADATA
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
self.c_op.flags = self._flags
_store_c_metadata(
@@ -41,7 +41,7 @@ cdef class SendInitialMetadataOperation(Operation):
self.c_op.data.send_initial_metadata.count = self._c_initial_metadata_count
self.c_op.data.send_initial_metadata.maybe_compression_level.is_set = 0
- cdef void un_c(self):
+ cdef void un_c(self) except *:
_release_c_metadata(
self._c_initial_metadata, self._c_initial_metadata_count)
@@ -55,7 +55,7 @@ cdef class SendMessageOperation(Operation):
def type(self):
return GRPC_OP_SEND_MESSAGE
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_SEND_MESSAGE
self.c_op.flags = self._flags
cdef grpc_slice message_slice = grpc_slice_from_copied_buffer(
@@ -65,7 +65,7 @@ cdef class SendMessageOperation(Operation):
grpc_slice_unref(message_slice)
self.c_op.data.send_message.send_message = self._c_message_byte_buffer
- cdef void un_c(self):
+ cdef void un_c(self) except *:
grpc_byte_buffer_destroy(self._c_message_byte_buffer)
@@ -77,11 +77,11 @@ cdef class SendCloseFromClientOperation(Operation):
def type(self):
return GRPC_OP_SEND_CLOSE_FROM_CLIENT
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_SEND_CLOSE_FROM_CLIENT
self.c_op.flags = self._flags
- cdef void un_c(self):
+ cdef void un_c(self) except *:
pass
@@ -96,7 +96,7 @@ cdef class SendStatusFromServerOperation(Operation):
def type(self):
return GRPC_OP_SEND_STATUS_FROM_SERVER
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
self.c_op.flags = self._flags
_store_c_metadata(
@@ -110,7 +110,7 @@ cdef class SendStatusFromServerOperation(Operation):
self._c_details = _slice_from_bytes(_encode(self._details))
self.c_op.data.send_status_from_server.status_details = &self._c_details
- cdef void un_c(self):
+ cdef void un_c(self) except *:
grpc_slice_unref(self._c_details)
_release_c_metadata(
self._c_trailing_metadata, self._c_trailing_metadata_count)
@@ -124,14 +124,14 @@ cdef class ReceiveInitialMetadataOperation(Operation):
def type(self):
return GRPC_OP_RECV_INITIAL_METADATA
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
self.c_op.flags = self._flags
grpc_metadata_array_init(&self._c_initial_metadata)
self.c_op.data.receive_initial_metadata.receive_initial_metadata = (
&self._c_initial_metadata)
- cdef void un_c(self):
+ cdef void un_c(self) except *:
self._initial_metadata = _metadata(&self._c_initial_metadata)
grpc_metadata_array_destroy(&self._c_initial_metadata)
@@ -147,13 +147,13 @@ cdef class ReceiveMessageOperation(Operation):
def type(self):
return GRPC_OP_RECV_MESSAGE
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_RECV_MESSAGE
self.c_op.flags = self._flags
self.c_op.data.receive_message.receive_message = (
&self._c_message_byte_buffer)
- cdef void un_c(self):
+ cdef void un_c(self) except *:
cdef grpc_byte_buffer_reader message_reader
cdef bint message_reader_status
cdef grpc_slice message_slice
@@ -189,7 +189,7 @@ cdef class ReceiveStatusOnClientOperation(Operation):
def type(self):
return GRPC_OP_RECV_STATUS_ON_CLIENT
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
self.c_op.flags = self._flags
grpc_metadata_array_init(&self._c_trailing_metadata)
@@ -202,7 +202,7 @@ cdef class ReceiveStatusOnClientOperation(Operation):
self.c_op.data.receive_status_on_client.error_string = (
&self._c_error_string)
- cdef void un_c(self):
+ cdef void un_c(self) except *:
self._trailing_metadata = _metadata(&self._c_trailing_metadata)
grpc_metadata_array_destroy(&self._c_trailing_metadata)
self._code = self._c_code
@@ -235,12 +235,12 @@ cdef class ReceiveCloseOnServerOperation(Operation):
def type(self):
return GRPC_OP_RECV_CLOSE_ON_SERVER
- cdef void c(self):
+ cdef void c(self) except *:
self.c_op.type = GRPC_OP_RECV_CLOSE_ON_SERVER
self.c_op.flags = self._flags
self.c_op.data.receive_close_on_server.cancelled = &self._c_cancelled
- cdef void un_c(self):
+ cdef void un_c(self) except *:
self._cancelled = bool(self._c_cancelled)
def cancelled(self):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
index 7decae95bb..e17ca6d335 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
@@ -14,14 +14,16 @@
from libc.string cimport memcpy
-import pkg_resources
+import pkgutil
cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
char **pem_root_certs) nogil:
with gil:
- temporary_pem_root_certs = pkg_resources.resource_string(
- __name__.rstrip('.cygrpc'), '_credentials/roots.pem')
+ pkg = __name__
+ if pkg.endswith('.cygrpc'):
+ pkg = pkg[:-len('.cygrpc')]
+ temporary_pem_root_certs = pkgutil.get_data(pkg, '_credentials/roots.pem')
pem_root_certs[0] = <char *>gpr_malloc(len(temporary_pem_root_certs) + 1)
memcpy(
pem_root_certs[0], <char *>temporary_pem_root_certs,
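The hunk above replaces pkg_resources.resource_string with the standard-library pkgutil.get_data when loading the bundled root certificates, and swaps the old rstrip('.cygrpc') call (which strips a character set rather than a suffix) for an explicit endswith()/slice check. A minimal pure-Python sketch of the same lookup pattern follows; the helper name load_bundled_roots is illustrative, and only the 'grpc._cython' package and '_credentials/roots.pem' resource path come from the diff.

import pkgutil


def load_bundled_roots(package, resource='_credentials/roots.pem'):
    # pkgutil.get_data returns bytes on success and None when the package
    # or resource cannot be located, unlike pkg_resources.resource_string,
    # which raises; callers therefore need an explicit check.
    data = pkgutil.get_data(package, resource)
    if data is None:
        raise IOError('%s not found in package %s' % (resource, package))
    return data


# Usage against an installed grpcio wheel (path mirrors the diff above):
# pem = load_bundled_roots('grpc._cython')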
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index 5779437b92..ce701724fd 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -18,7 +18,6 @@ import logging
import time
import grpc
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
cdef class Server:
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi
index f9a3b5e8f4..d8ba1ea9bd 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi
@@ -32,7 +32,7 @@ cdef class _RequestCallTag(_Tag):
cdef CallDetails call_details
cdef grpc_metadata_array c_invocation_metadata
- cdef void prepare(self)
+ cdef void prepare(self) except *
cdef RequestCallEvent event(self, grpc_event c_event)
@@ -44,7 +44,7 @@ cdef class _BatchOperationTag(_Tag):
cdef grpc_op *c_ops
cdef size_t c_nops
- cdef void prepare(self)
+ cdef void prepare(self) except *
cdef BatchOperationEvent event(self, grpc_event c_event)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi
index aaca458442..be5013c8f7 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi
@@ -35,7 +35,7 @@ cdef class _RequestCallTag(_Tag):
self.call = None
self.call_details = None
- cdef void prepare(self):
+ cdef void prepare(self) except *:
self.call = Call()
self.call_details = CallDetails()
grpc_metadata_array_init(&self.c_invocation_metadata)
@@ -55,7 +55,7 @@ cdef class _BatchOperationTag:
self._operations = operations
self._retained_call = call
- cdef void prepare(self):
+ cdef void prepare(self) except *:
self.c_nops = 0 if self._operations is None else len(self._operations)
if 0 < self.c_nops:
self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/time.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/time.pxd.pxi
index ce67c61eaf..1319ac0481 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/time.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/time.pxd.pxi
@@ -16,4 +16,4 @@
cdef gpr_timespec _timespec_from_time(object time)
-cdef double _time_from_timespec(gpr_timespec timespec)
+cdef double _time_from_timespec(gpr_timespec timespec) except *
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/time.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/time.pyx.pxi
index 7a668680b8..c452dd54f8 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/time.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/time.pyx.pxi
@@ -24,7 +24,7 @@ cdef gpr_timespec _timespec_from_time(object time):
return timespec
-cdef double _time_from_timespec(gpr_timespec timespec):
+cdef double _time_from_timespec(gpr_timespec timespec) except *:
cdef gpr_timespec real_timespec = gpr_convert_clock_type(
timespec, GPR_CLOCK_REALTIME)
return <double>real_timespec.seconds + <double>real_timespec.nanoseconds / 1e9
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index 026f7ba2e3..9ab919375c 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -15,7 +15,6 @@
cimport cpython
-import pkg_resources
import os.path
import sys
@@ -36,6 +35,7 @@ include "_cygrpc/server.pyx.pxi"
include "_cygrpc/tag.pyx.pxi"
include "_cygrpc/time.pyx.pxi"
include "_cygrpc/_hooks.pyx.pxi"
+include "_cygrpc/channelz.pyx.pxi"
include "_cygrpc/grpc_gevent.pyx.pxi"
diff --git a/src/python/grpcio/grpc/_grpcio_metadata.py b/src/python/grpcio/grpc/_grpcio_metadata.py
index 42b3a1ad49..7a9f173947 100644
--- a/src/python/grpcio/grpc/_grpcio_metadata.py
+++ b/src/python/grpcio/grpc/_grpcio_metadata.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!!
-__version__ = """1.17.0.dev0"""
+__version__ = """1.18.0.dev0"""
diff --git a/src/python/grpcio/grpc/_interceptor.py b/src/python/grpcio/grpc/_interceptor.py
index 1d2d374ad1..fc0ad77eb9 100644
--- a/src/python/grpcio/grpc/_interceptor.py
+++ b/src/python/grpcio/grpc/_interceptor.py
@@ -46,7 +46,7 @@ def service_pipeline(interceptors):
class _ClientCallDetails(
collections.namedtuple(
'_ClientCallDetails',
- ('method', 'timeout', 'metadata', 'credentials')),
+ ('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')),
grpc.ClientCallDetails):
pass
@@ -72,7 +72,12 @@ def _unwrap_client_call_details(call_details, default_details):
except AttributeError:
credentials = default_details.credentials
- return method, timeout, metadata, credentials
+ try:
+ wait_for_ready = call_details.wait_for_ready
+ except AttributeError:
+ wait_for_ready = default_details.wait_for_ready
+
+ return method, timeout, metadata, credentials, wait_for_ready
class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call):
@@ -130,9 +135,12 @@ class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call):
def __iter__(self):
return self
- def next(self):
+ def __next__(self):
raise self._exception
+ def next(self):
+ return self.__next__()
+
class _UnaryOutcome(grpc.Call, grpc.Future):
@@ -193,31 +201,42 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
self._method = method
self._interceptor = interceptor
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
response, ignored_call = self._with_call(
request,
timeout=timeout,
metadata=metadata,
- credentials=credentials)
+ credentials=credentials,
+ wait_for_ready=wait_for_ready)
return response
- def _with_call(self, request, timeout=None, metadata=None,
- credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ def _with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
return _UnaryOutcome(response, call)
- except grpc.RpcError:
- raise
+ except grpc.RpcError as rpc_error:
+ return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
@@ -225,25 +244,37 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
continuation, client_call_details, request)
return call.result(), call
- def with_call(self, request, timeout=None, metadata=None, credentials=None):
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
return self._with_call(
request,
timeout=timeout,
metadata=metadata,
- credentials=credentials)
+ credentials=credentials,
+ wait_for_ready=wait_for_ready)
- def future(self, request, timeout=None, metadata=None, credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ def future(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
return self._thunk(new_method).future(
request,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
try:
return self._interceptor.intercept_unary_unary(
@@ -259,18 +290,24 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
self._method = method
self._interceptor = interceptor
- def __call__(self, request, timeout=None, metadata=None, credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
return self._thunk(new_method)(
request,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
try:
return self._interceptor.intercept_unary_stream(
@@ -290,34 +327,38 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
response, ignored_call = self._with_call(
request_iterator,
timeout=timeout,
metadata=metadata,
- credentials=credentials)
+ credentials=credentials,
+ wait_for_ready=wait_for_ready)
return response
def _with_call(self,
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request_iterator):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
return _UnaryOutcome(response, call)
- except grpc.RpcError:
- raise
+ except grpc.RpcError as rpc_error:
+ return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
@@ -329,29 +370,33 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
+ credentials=None,
+ wait_for_ready=None):
return self._with_call(
request_iterator,
timeout=timeout,
metadata=metadata,
- credentials=credentials)
+ credentials=credentials,
+ wait_for_ready=wait_for_ready)
def future(self,
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request_iterator):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
return self._thunk(new_method).future(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
try:
return self._interceptor.intercept_stream_unary(
@@ -371,18 +416,20 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
request_iterator,
timeout=None,
metadata=None,
- credentials=None):
- client_call_details = _ClientCallDetails(self._method, timeout,
- metadata, credentials)
+ credentials=None,
+ wait_for_ready=None):
+ client_call_details = _ClientCallDetails(
+ self._method, timeout, metadata, credentials, wait_for_ready)
def continuation(new_details, request_iterator):
- new_method, new_timeout, new_metadata, new_credentials = (
+ new_method, new_timeout, new_metadata, new_credentials, new_wait_for_ready = (
_unwrap_client_call_details(new_details, client_call_details))
return self._thunk(new_method)(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
- credentials=new_credentials)
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready)
try:
return self._interceptor.intercept_stream_stream(
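The _interceptor.py changes widen _ClientCallDetails to carry wait_for_ready and thread the new argument through every multi-callable and continuation. Below is a hedged sketch of a client interceptor that makes use of the new field; the names _WaitForReadyDetails and ForceWaitForReady are illustrative and not part of this patch.

import collections

import grpc


class _WaitForReadyDetails(
        collections.namedtuple('_WaitForReadyDetails',
                               ('method', 'timeout', 'metadata', 'credentials',
                                'wait_for_ready')), grpc.ClientCallDetails):
    pass


class ForceWaitForReady(grpc.UnaryUnaryClientInterceptor):
    # Rewrites every outgoing unary-unary call so that it waits for the
    # channel to become ready instead of failing fast on transient failures.

    def intercept_unary_unary(self, continuation, client_call_details, request):
        details = _WaitForReadyDetails(
            client_call_details.method, client_call_details.timeout,
            client_call_details.metadata, client_call_details.credentials,
            True)
        return continuation(details, request)


# Usage (target address is illustrative):
# channel = grpc.intercept_channel(
#     grpc.insecure_channel('localhost:50051'), ForceWaitForReady())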
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
index 88ab4d8371..916ee080b6 100644
--- a/src/python/grpcio/grpc/_plugin_wrapping.py
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -20,7 +20,6 @@ import grpc
from grpc import _common
from grpc._cython import cygrpc
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index daa000a6e1..7276a7fd90 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -27,7 +27,6 @@ from grpc import _interceptor
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_SHUTDOWN_TAG = 'shutdown'
diff --git a/src/python/grpcio/grpc/beta/BUILD.bazel b/src/python/grpcio/grpc/beta/BUILD.bazel
deleted file mode 100644
index 731be5cb25..0000000000
--- a/src/python/grpcio/grpc/beta/BUILD.bazel
+++ /dev/null
@@ -1,58 +0,0 @@
-load("@grpc_python_dependencies//:requirements.bzl", "requirement")
-package(default_visibility = ["//visibility:public"])
-
-py_library(
- name = "beta",
- srcs = ["__init__.py",],
- deps = [
- ":client_adaptations",
- ":metadata",
- ":server_adaptations",
- ":implementations",
- ":interfaces",
- ":utilities",
- ],
-)
-
-py_library(
- name = "client_adaptations",
- srcs = ["_client_adaptations.py"],
- imports=["../../",]
-)
-
-py_library(
- name = "metadata",
- srcs = ["_metadata.py"],
-)
-
-py_library(
- name = "server_adaptations",
- srcs = ["_server_adaptations.py"],
- imports=["../../",],
-)
-
-py_library(
- name = "implementations",
- srcs = ["implementations.py"],
- imports=["../../",],
-)
-
-py_library(
- name = "interfaces",
- srcs = ["interfaces.py"],
- deps = [
- requirement("six"),
- ],
- imports=["../../",],
-)
-
-py_library(
- name = "utilities",
- srcs = ["utilities.py"],
- deps = [
- ":implementations",
- ":interfaces",
- "//src/python/grpcio/grpc/framework/foundation",
- ],
-)
-
diff --git a/src/python/grpcio/grpc/framework/foundation/callable_util.py b/src/python/grpcio/grpc/framework/foundation/callable_util.py
index fb8d5f7c1e..24daf3406f 100644
--- a/src/python/grpcio/grpc/framework/foundation/callable_util.py
+++ b/src/python/grpcio/grpc/framework/foundation/callable_util.py
@@ -21,7 +21,6 @@ import logging
import six
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
diff --git a/src/python/grpcio/grpc/framework/foundation/logging_pool.py b/src/python/grpcio/grpc/framework/foundation/logging_pool.py
index 7702d1785f..216e3990db 100644
--- a/src/python/grpcio/grpc/framework/foundation/logging_pool.py
+++ b/src/python/grpcio/grpc/framework/foundation/logging_pool.py
@@ -17,7 +17,6 @@ import logging
from concurrent import futures
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
diff --git a/src/python/grpcio/grpc/framework/foundation/stream_util.py b/src/python/grpcio/grpc/framework/foundation/stream_util.py
index 9184f95873..1faaf29bd7 100644
--- a/src/python/grpcio/grpc/framework/foundation/stream_util.py
+++ b/src/python/grpcio/grpc/framework/foundation/stream_util.py
@@ -19,7 +19,6 @@ import threading
from grpc.framework.foundation import stream
_NO_VALUE = object()
-logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index cb48b9f62c..ce65c594fe 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -208,12 +208,14 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
+ 'src/core/lib/uri/uri_parser.cc',
'src/core/lib/debug/trace.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
+ 'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
@@ -280,7 +282,7 @@ CORE_SOURCE_FILES = [
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
- 'src/core/tsi/alts/handshaker/alts_tsi_event.cc',
+ 'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
@@ -322,19 +324,17 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy_factory.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
- 'src/core/ext/filters/client_channel/method_params.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
+ 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_index.cc',
- 'src/core/ext/filters/client_channel/uri_parser.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/client_channel/health/health.pb.c',
- 'src/core/tsi/alts_transport_security.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index 71113e68d9..2e91818d2c 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
-VERSION = '1.17.0.dev0'
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_channelz/.gitignore b/src/python/grpcio_channelz/.gitignore
new file mode 100644
index 0000000000..0c5da6b5af
--- /dev/null
+++ b/src/python/grpcio_channelz/.gitignore
@@ -0,0 +1,6 @@
+*.proto
+*_pb2.py
+*_pb2_grpc.py
+build/
+grpcio_channelz.egg-info/
+dist/
diff --git a/src/python/grpcio_channelz/MANIFEST.in b/src/python/grpcio_channelz/MANIFEST.in
new file mode 100644
index 0000000000..ee93e21a69
--- /dev/null
+++ b/src/python/grpcio_channelz/MANIFEST.in
@@ -0,0 +1,4 @@
+include grpc_version.py
+recursive-include grpc_channelz *.py
+global-exclude *.pyc
+include LICENSE
diff --git a/src/python/grpcio_channelz/README.rst b/src/python/grpcio_channelz/README.rst
new file mode 100644
index 0000000000..efeaa56064
--- /dev/null
+++ b/src/python/grpcio_channelz/README.rst
@@ -0,0 +1,9 @@
+gRPC Python Channelz package
+==============================
+
+Channelz is a live debugging tool for gRPC Python.
+
+Dependencies
+------------
+
+Depends on the `grpcio` package, available from PyPI via `pip install grpcio`.
diff --git a/src/python/grpcio_channelz/channelz_commands.py b/src/python/grpcio_channelz/channelz_commands.py
new file mode 100644
index 0000000000..7f158c2a4b
--- /dev/null
+++ b/src/python/grpcio_channelz/channelz_commands.py
@@ -0,0 +1,67 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Provides distutils command classes for the GRPC Python setup process."""
+
+import os
+import shutil
+
+import setuptools
+
+ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
+CHANNELZ_PROTO = os.path.join(ROOT_DIR,
+ '../../proto/grpc/channelz/channelz.proto')
+LICENSE = os.path.join(ROOT_DIR, '../../../LICENSE')
+
+
+class Preprocess(setuptools.Command):
+ """Command to copy proto modules from grpc/src/proto and LICENSE from
+ the root directory"""
+
+ description = ''
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ if os.path.isfile(CHANNELZ_PROTO):
+ shutil.copyfile(CHANNELZ_PROTO,
+ os.path.join(ROOT_DIR,
+ 'grpc_channelz/v1/channelz.proto'))
+ if os.path.isfile(LICENSE):
+ shutil.copyfile(LICENSE, os.path.join(ROOT_DIR, 'LICENSE'))
+
+
+class BuildPackageProtos(setuptools.Command):
+ """Command to generate project *_pb2.py modules from proto files."""
+
+ description = 'build grpc protobuf modules'
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ # due to limitations of the proto generator, we require that only *one*
+ # directory is provided as an 'include' directory. We assume it's the '' key
+ # to `self.distribution.package_dir` (and get a key error if it's not
+ # there).
+ from grpc_tools import command
+ command.build_package_protos(self.distribution.package_dir[''])
diff --git a/src/python/grpcio_channelz/grpc_channelz/__init__.py b/src/python/grpcio_channelz/grpc_channelz/__init__.py
new file mode 100644
index 0000000000..38fdfc9c5c
--- /dev/null
+++ b/src/python/grpcio_channelz/grpc_channelz/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/python/grpcio_channelz/grpc_channelz/v1/BUILD.bazel b/src/python/grpcio_channelz/grpc_channelz/v1/BUILD.bazel
new file mode 100644
index 0000000000..aae8cedb76
--- /dev/null
+++ b/src/python/grpcio_channelz/grpc_channelz/v1/BUILD.bazel
@@ -0,0 +1,38 @@
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+load("@org_pubref_rules_protobuf//python:rules.bzl", "py_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+genrule(
+ name = "mv_channelz_proto",
+ srcs = [
+ "//src/proto/grpc/channelz:channelz_proto_file",
+ ],
+ outs = ["channelz.proto",],
+ cmd = "cp $< $@",
+)
+
+py_proto_library(
+ name = "py_channelz_proto",
+ protos = ["mv_channelz_proto",],
+ imports = [
+ "external/com_google_protobuf/src/",
+ ],
+ inputs = [
+ "@com_google_protobuf//:well_known_protos",
+ ],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
+py_library(
+ name = "grpc_channelz",
+ srcs = ["channelz.py",],
+ deps = [
+ ":py_channelz_proto",
+ "//src/python/grpcio/grpc:grpcio",
+ ],
+ imports=["../../",],
+)
diff --git a/src/python/grpcio_channelz/grpc_channelz/v1/__init__.py b/src/python/grpcio_channelz/grpc_channelz/v1/__init__.py
new file mode 100644
index 0000000000..38fdfc9c5c
--- /dev/null
+++ b/src/python/grpcio_channelz/grpc_channelz/v1/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/python/grpcio_channelz/grpc_channelz/v1/channelz.py b/src/python/grpcio_channelz/grpc_channelz/v1/channelz.py
new file mode 100644
index 0000000000..573b9d0d5a
--- /dev/null
+++ b/src/python/grpcio_channelz/grpc_channelz/v1/channelz.py
@@ -0,0 +1,141 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Channelz debug service implementation in gRPC Python."""
+
+import grpc
+from grpc._cython import cygrpc
+
+import grpc_channelz.v1.channelz_pb2 as _channelz_pb2
+import grpc_channelz.v1.channelz_pb2_grpc as _channelz_pb2_grpc
+
+from google.protobuf import json_format
+
+
+class ChannelzServicer(_channelz_pb2_grpc.ChannelzServicer):
+ """Servicer handling RPCs for service statuses."""
+
+ @staticmethod
+ def GetTopChannels(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_top_channels(request.start_channel_id),
+ _channelz_pb2.GetTopChannelsResponse(),
+ )
+ except (ValueError, json_format.ParseError) as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetServers(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_servers(request.start_server_id),
+ _channelz_pb2.GetServersResponse(),
+ )
+ except (ValueError, json_format.ParseError) as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetServer(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_server(request.server_id),
+ _channelz_pb2.GetServerResponse(),
+ )
+ except ValueError as e:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ context.set_details(str(e))
+ except json_format.ParseError as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetServerSockets(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_server_sockets(request.server_id,
+ request.start_socket_id),
+ _channelz_pb2.GetServerSocketsResponse(),
+ )
+ except ValueError as e:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ context.set_details(str(e))
+ except json_format.ParseError as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetChannel(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_channel(request.channel_id),
+ _channelz_pb2.GetChannelResponse(),
+ )
+ except ValueError as e:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ context.set_details(str(e))
+ except json_format.ParseError as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetSubchannel(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_subchannel(request.subchannel_id),
+ _channelz_pb2.GetSubchannelResponse(),
+ )
+ except ValueError as e:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ context.set_details(str(e))
+ except json_format.ParseError as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+ @staticmethod
+ def GetSocket(request, context):
+ try:
+ return json_format.Parse(
+ cygrpc.channelz_get_socket(request.socket_id),
+ _channelz_pb2.GetSocketResponse(),
+ )
+ except ValueError as e:
+ context.set_code(grpc.StatusCode.NOT_FOUND)
+ context.set_details(str(e))
+ except json_format.ParseError as e:
+ context.set_code(grpc.StatusCode.INTERNAL)
+ context.set_details(str(e))
+
+
+def add_channelz_servicer(server):
+    """Add the Channelz servicer to a server. The Channelz servicer is in
+    charge of pulling information from C-Core for the entire process and
+    allows the server to respond to Channelz queries.
+
+    Channelz statistics are enabled by default inside C-Core. Whether the
+    statistics are enabled is independent of adding the Channelz servicer:
+    you can query Channelz info over a Channelz-disabled channel, and you
+    can add the Channelz servicer to a Channelz-disabled server.
+
+    Channelz statistics can be enabled or disabled with the channel option
+    'grpc.enable_channelz'. Set it to 1 to enable and to 0 to disable.
+
+ This is an EXPERIMENTAL API.
+
+ Args:
+ server: grpc.Server to which Channelz service will be added.
+ """
+ _channelz_pb2_grpc.add_ChannelzServicer_to_server(ChannelzServicer(),
+ server)
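The new grpc_channelz.v1.channelz module above exposes add_channelz_servicer. A minimal sketch of wiring it into a server follows, using the 'grpc.enable_channelz' option named in the docstring; the port and worker count are illustrative.

from concurrent import futures

import grpc
from grpc_channelz.v1 import channelz

# Channelz statistics are controlled by the 'grpc.enable_channelz' channel
# option; the servicer can be added whether or not statistics are enabled.
server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=4),
    options=(('grpc.enable_channelz', 1),))
channelz.add_channelz_servicer(server)
server.add_insecure_port('[::]:50051')
server.start()
# At this grpcio version the server is kept alive with e.g. a sleep loop;
# the channelz tests below stop it explicitly with server.stop(None).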
diff --git a/src/python/grpcio_channelz/grpc_version.py b/src/python/grpcio_channelz/grpc_version.py
new file mode 100644
index 0000000000..16356ea402
--- /dev/null
+++ b/src/python/grpcio_channelz/grpc_version.py
@@ -0,0 +1,17 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_channelz/grpc_version.py.template`!!!
+
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_channelz/setup.py b/src/python/grpcio_channelz/setup.py
new file mode 100644
index 0000000000..f8c0e93913
--- /dev/null
+++ b/src/python/grpcio_channelz/setup.py
@@ -0,0 +1,96 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Setup module for the GRPC Python package's Channelz."""
+
+import os
+import sys
+
+import setuptools
+
+# Ensure we're in the proper directory whether or not we're being used by pip.
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+# Break import-style to ensure we can actually find our local modules.
+import grpc_version
+
+
+class _NoOpCommand(setuptools.Command):
+ """No-op command."""
+
+ description = ''
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ pass
+
+
+CLASSIFIERS = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'License :: OSI Approved :: Apache Software License',
+]
+
+PACKAGE_DIRECTORIES = {
+ '': '.',
+}
+
+INSTALL_REQUIRES = (
+ 'protobuf>=3.6.0',
+ 'grpcio>={version}'.format(version=grpc_version.VERSION),
+)
+
+try:
+ import channelz_commands as _channelz_commands
+ # we are in the build environment, otherwise the above import fails
+ SETUP_REQUIRES = (
+ 'grpcio-tools=={version}'.format(version=grpc_version.VERSION),)
+ COMMAND_CLASS = {
+ # Run preprocess from the repository *before* doing any packaging!
+ 'preprocess': _channelz_commands.Preprocess,
+ 'build_package_protos': _channelz_commands.BuildPackageProtos,
+ }
+except ImportError:
+ SETUP_REQUIRES = ()
+ COMMAND_CLASS = {
+        # wire up commands to no-ops so as not to break external dependencies
+ 'preprocess': _NoOpCommand,
+ 'build_package_protos': _NoOpCommand,
+ }
+
+setuptools.setup(
+ name='grpcio-channelz',
+ version=grpc_version.VERSION,
+ license='Apache License 2.0',
+ description='Channel Level Live Debug Information Service for gRPC',
+ author='The gRPC Authors',
+ author_email='grpc-io@googlegroups.com',
+ classifiers=CLASSIFIERS,
+ url='https://grpc.io',
+ package_dir=PACKAGE_DIRECTORIES,
+ packages=setuptools.find_packages('.'),
+ install_requires=INSTALL_REQUIRES,
+ setup_requires=SETUP_REQUIRES,
+ cmdclass=COMMAND_CLASS)
diff --git a/src/python/grpcio_health_checking/MANIFEST.in b/src/python/grpcio_health_checking/MANIFEST.in
index 996c74a9d4..3a22311b8e 100644
--- a/src/python/grpcio_health_checking/MANIFEST.in
+++ b/src/python/grpcio_health_checking/MANIFEST.in
@@ -1,3 +1,4 @@
include grpc_version.py
recursive-include grpc_health *.py
global-exclude *.pyc
+include LICENSE
diff --git a/src/python/grpcio_health_checking/grpc_version.py b/src/python/grpcio_health_checking/grpc_version.py
index a30aac2e0b..85fa762f7e 100644
--- a/src/python/grpcio_health_checking/grpc_version.py
+++ b/src/python/grpcio_health_checking/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!
-VERSION = '1.17.0.dev0'
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py
index 933f965aa2..3820ef0bba 100644
--- a/src/python/grpcio_health_checking/health_commands.py
+++ b/src/python/grpcio_health_checking/health_commands.py
@@ -20,10 +20,12 @@ import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto')
+LICENSE = os.path.join(ROOT_DIR, '../../../LICENSE')
-class CopyProtoModules(setuptools.Command):
- """Command to copy proto modules from grpc/src/proto."""
+class Preprocess(setuptools.Command):
+ """Command to copy proto modules from grpc/src/proto and LICENSE from
+ the root directory"""
description = ''
user_options = []
@@ -39,6 +41,8 @@ class CopyProtoModules(setuptools.Command):
shutil.copyfile(HEALTH_PROTO,
os.path.join(ROOT_DIR,
'grpc_health/v1/health.proto'))
+ if os.path.isfile(LICENSE):
+ shutil.copyfile(LICENSE, os.path.join(ROOT_DIR, 'LICENSE'))
class BuildPackageProtos(setuptools.Command):
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index db2edae2ce..5a09a80f6a 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -68,7 +68,7 @@ try:
'grpcio-tools=={version}'.format(version=grpc_version.VERSION),)
COMMAND_CLASS = {
# Run preprocess from the repository *before* doing any packaging!
- 'preprocess': _health_commands.CopyProtoModules,
+ 'preprocess': _health_commands.Preprocess,
'build_package_protos': _health_commands.BuildPackageProtos,
}
except ImportError:
diff --git a/src/python/grpcio_reflection/MANIFEST.in b/src/python/grpcio_reflection/MANIFEST.in
index d6fb6ce73a..10b01fa41d 100644
--- a/src/python/grpcio_reflection/MANIFEST.in
+++ b/src/python/grpcio_reflection/MANIFEST.in
@@ -1,3 +1,4 @@
include grpc_version.py
recursive-include grpc_reflection *.py
global-exclude *.pyc
+include LICENSE
diff --git a/src/python/grpcio_reflection/grpc_reflection/v1alpha/BUILD.bazel b/src/python/grpcio_reflection/grpc_reflection/v1alpha/BUILD.bazel
new file mode 100644
index 0000000000..3a2ba26371
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+load("@org_pubref_rules_protobuf//python:rules.bzl", "py_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+genrule(
+ name = "mv_reflection_proto",
+ srcs = [
+ "//src/proto/grpc/reflection/v1alpha:reflection_proto_file",
+ ],
+ outs = ["reflection.proto",],
+ cmd = "cp $< $@",
+)
+
+py_proto_library(
+ name = "py_reflection_proto",
+ protos = [":mv_reflection_proto",],
+ with_grpc = True,
+ deps = [
+ requirement('protobuf'),
+ ],
+)
+
+py_library(
+ name = "grpc_reflection",
+ srcs = ["reflection.py",],
+ deps = [
+ ":py_reflection_proto",
+ "//src/python/grpcio/grpc:grpcio",
+ requirement('protobuf'),
+ ],
+ imports=["../../",],
+)
+
diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py
index aafea9fe76..e62ab169a2 100644
--- a/src/python/grpcio_reflection/grpc_version.py
+++ b/src/python/grpcio_reflection/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
-VERSION = '1.17.0.dev0'
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py
index 6f91f6b875..311ca4c4db 100644
--- a/src/python/grpcio_reflection/reflection_commands.py
+++ b/src/python/grpcio_reflection/reflection_commands.py
@@ -21,10 +21,12 @@ import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
REFLECTION_PROTO = os.path.join(
ROOT_DIR, '../../proto/grpc/reflection/v1alpha/reflection.proto')
+LICENSE = os.path.join(ROOT_DIR, '../../../LICENSE')
-class CopyProtoModules(setuptools.Command):
- """Command to copy proto modules from grpc/src/proto."""
+class Preprocess(setuptools.Command):
+ """Command to copy proto modules from grpc/src/proto and LICENSE from
+ the root directory"""
description = ''
user_options = []
@@ -41,6 +43,8 @@ class CopyProtoModules(setuptools.Command):
REFLECTION_PROTO,
os.path.join(ROOT_DIR,
'grpc_reflection/v1alpha/reflection.proto'))
+ if os.path.isfile(LICENSE):
+ shutil.copyfile(LICENSE, os.path.join(ROOT_DIR, 'LICENSE'))
class BuildPackageProtos(setuptools.Command):
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index b4087d87b4..f205069acd 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -69,7 +69,7 @@ try:
'grpcio-tools=={version}'.format(version=grpc_version.VERSION),)
COMMAND_CLASS = {
# Run preprocess from the repository *before* doing any packaging!
- 'preprocess': _reflection_commands.CopyProtoModules,
+ 'preprocess': _reflection_commands.Preprocess,
'build_package_protos': _reflection_commands.BuildPackageProtos,
}
except ImportError:
diff --git a/src/python/grpcio_testing/MANIFEST.in b/src/python/grpcio_testing/MANIFEST.in
index 39b3565217..559dfaf786 100644
--- a/src/python/grpcio_testing/MANIFEST.in
+++ b/src/python/grpcio_testing/MANIFEST.in
@@ -1,3 +1,4 @@
include grpc_version.py
recursive-include grpc_testing *.py
global-exclude *.pyc
+include LICENSE
diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py
index 876acd3142..7b4c1695fa 100644
--- a/src/python/grpcio_testing/grpc_version.py
+++ b/src/python/grpcio_testing/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
-VERSION = '1.17.0.dev0'
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_testing/setup.py b/src/python/grpcio_testing/setup.py
index 6ceb1fc5c9..18db71e0f0 100644
--- a/src/python/grpcio_testing/setup.py
+++ b/src/python/grpcio_testing/setup.py
@@ -24,6 +24,23 @@ os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import style to ensure that we can find same-directory modules.
import grpc_version
+
+class _NoOpCommand(setuptools.Command):
+ """No-op command."""
+
+ description = ''
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ pass
+
+
PACKAGE_DIRECTORIES = {
'': '.',
}
@@ -33,6 +50,19 @@ INSTALL_REQUIRES = (
'grpcio>={version}'.format(version=grpc_version.VERSION),
)
+try:
+ import testing_commands as _testing_commands
+ # we are in the build environment, otherwise the above import fails
+ COMMAND_CLASS = {
+ # Run preprocess from the repository *before* doing any packaging!
+ 'preprocess': _testing_commands.Preprocess,
+ }
+except ImportError:
+ COMMAND_CLASS = {
+        # wire up commands to no-ops so as not to break external dependencies
+ 'preprocess': _NoOpCommand,
+ }
+
setuptools.setup(
name='grpcio-testing',
version=grpc_version.VERSION,
@@ -43,4 +73,5 @@ setuptools.setup(
url='https://grpc.io',
package_dir=PACKAGE_DIRECTORIES,
packages=setuptools.find_packages('.'),
- install_requires=INSTALL_REQUIRES)
+ install_requires=INSTALL_REQUIRES,
+ cmdclass=COMMAND_CLASS)
diff --git a/src/python/grpcio_testing/testing_commands.py b/src/python/grpcio_testing/testing_commands.py
new file mode 100644
index 0000000000..fb40d37efb
--- /dev/null
+++ b/src/python/grpcio_testing/testing_commands.py
@@ -0,0 +1,39 @@
+# Copyright 2018 gRPC Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Provides distutils command classes for the GRPC Python setup process."""
+
+import os
+import shutil
+
+import setuptools
+
+ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
+LICENSE = os.path.join(ROOT_DIR, '../../../LICENSE')
+
+
+class Preprocess(setuptools.Command):
+ """Command to copy LICENSE from root directory."""
+
+ description = ''
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ if os.path.isfile(LICENSE):
+ shutil.copyfile(LICENSE, os.path.join(ROOT_DIR, 'LICENSE'))
diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py
index 6931d93ef0..65e9a99950 100644
--- a/src/python/grpcio_tests/commands.py
+++ b/src/python/grpcio_tests/commands.py
@@ -130,6 +130,13 @@ class TestGevent(setuptools.Command):
# Beta API is unsupported for gevent
'protoc_plugin.beta_python_plugin_test',
'unit.beta._beta_features_test',
+ # TODO(https://github.com/grpc/grpc/issues/15411) unpin gevent version
+        # This test gets stuck when running a higher version of gevent
+ 'unit._auth_context_test.AuthContextTest.testSessionResumption',
+ # TODO(https://github.com/grpc/grpc/issues/17330) enable these three tests
+ 'channelz._channelz_servicer_test.ChannelzServicerTest.test_many_subchannels',
+ 'channelz._channelz_servicer_test.ChannelzServicerTest.test_many_subchannels_and_sockets',
+ 'channelz._channelz_servicer_test.ChannelzServicerTest.test_streaming_rpc'
)
description = 'run tests with gevent. Assumes grpc/gevent are installed'
user_options = []
diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py
index cc9b41587c..2fcd1ad617 100644
--- a/src/python/grpcio_tests/grpc_version.py
+++ b/src/python/grpcio_tests/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
-VERSION = '1.17.0.dev0'
+VERSION = '1.18.0.dev0'
diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py
index 61c98fa038..f56425ac6d 100644
--- a/src/python/grpcio_tests/setup.py
+++ b/src/python/grpcio_tests/setup.py
@@ -39,6 +39,7 @@ PACKAGE_DIRECTORIES = {
INSTALL_REQUIRES = (
'coverage>=4.0', 'enum34>=1.0.4',
'grpcio>={version}'.format(version=grpc_version.VERSION),
+ 'grpcio-channelz>={version}'.format(version=grpc_version.VERSION),
'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
'grpcio-health-checking>={version}'.format(version=grpc_version.VERSION),
'oauth2client>=1.4.7', 'protobuf>=3.6.0', 'six>=1.10', 'google-auth>=1.0.0',
diff --git a/src/python/grpcio_tests/tests/_sanity/_sanity_test.py b/src/python/grpcio_tests/tests/_sanity/_sanity_test.py
index b4079850ff..7da6e7b34c 100644
--- a/src/python/grpcio_tests/tests/_sanity/_sanity_test.py
+++ b/src/python/grpcio_tests/tests/_sanity/_sanity_test.py
@@ -13,9 +13,9 @@
# limitations under the License.
import json
+import pkgutil
import unittest
-import pkg_resources
import six
import tests
@@ -35,7 +35,7 @@ class SanityTest(unittest.TestCase):
loader.suite)
})
- tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
+ tests_json_string = pkgutil.get_data('tests', 'tests.json')
tests_json = json.loads(tests_json_string.decode()
if six.PY3 else tests_json_string)
diff --git a/src/python/grpcio_tests/tests/channelz/BUILD.bazel b/src/python/grpcio_tests/tests/channelz/BUILD.bazel
new file mode 100644
index 0000000000..63513616e7
--- /dev/null
+++ b/src/python/grpcio_tests/tests/channelz/BUILD.bazel
@@ -0,0 +1,15 @@
+package(default_visibility = ["//visibility:public"])
+
+py_test(
+ name = "channelz_servicer_test",
+ srcs = ["_channelz_servicer_test.py"],
+ main = "_channelz_servicer_test.py",
+ size = "small",
+ deps = [
+ "//src/python/grpcio/grpc:grpcio",
+ "//src/python/grpcio_channelz/grpc_channelz/v1:grpc_channelz",
+ "//src/python/grpcio_tests/tests/unit:test_common",
+ "//src/python/grpcio_tests/tests/unit/framework/common:common",
+ ],
+ imports = ["../../",],
+)
diff --git a/src/python/grpcio_tests/tests/channelz/__init__.py b/src/python/grpcio_tests/tests/channelz/__init__.py
new file mode 100644
index 0000000000..38fdfc9c5c
--- /dev/null
+++ b/src/python/grpcio_tests/tests/channelz/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py b/src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py
new file mode 100644
index 0000000000..8ca5189522
--- /dev/null
+++ b/src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py
@@ -0,0 +1,470 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests of grpc_channelz.v1.channelz."""
+
+import unittest
+
+from concurrent import futures
+
+import grpc
+from grpc_channelz.v1 import channelz
+from grpc_channelz.v1 import channelz_pb2
+from grpc_channelz.v1 import channelz_pb2_grpc
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_SUCCESSFUL_UNARY_UNARY = '/test/SuccessfulUnaryUnary'
+_FAILED_UNARY_UNARY = '/test/FailedUnaryUnary'
+_SUCCESSFUL_STREAM_STREAM = '/test/SuccessfulStreamStream'
+
+_REQUEST = b'\x00\x00\x00'
+_RESPONSE = b'\x01\x01\x01'
+
+_DISABLE_REUSE_PORT = (('grpc.so_reuseport', 0),)
+_ENABLE_CHANNELZ = (('grpc.enable_channelz', 1),)
+_DISABLE_CHANNELZ = (('grpc.enable_channelz', 0),)
+
+
+def _successful_unary_unary(request, servicer_context):
+ return _RESPONSE
+
+
+def _failed_unary_unary(request, servicer_context):
+ servicer_context.set_code(grpc.StatusCode.INTERNAL)
+ servicer_context.set_details("Channelz Test Intended Failure")
+
+
+def _successful_stream_stream(request_iterator, servicer_context):
+ for _ in request_iterator:
+ yield _RESPONSE
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _SUCCESSFUL_UNARY_UNARY:
+ return grpc.unary_unary_rpc_method_handler(_successful_unary_unary)
+ elif handler_call_details.method == _FAILED_UNARY_UNARY:
+ return grpc.unary_unary_rpc_method_handler(_failed_unary_unary)
+ elif handler_call_details.method == _SUCCESSFUL_STREAM_STREAM:
+ return grpc.stream_stream_rpc_method_handler(
+ _successful_stream_stream)
+ else:
+ return None
+
+
+class _ChannelServerPair(object):
+
+ def __init__(self):
+ # Server will enable channelz service
+ self.server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=3),
+ options=_DISABLE_REUSE_PORT + _ENABLE_CHANNELZ)
+ port = self.server.add_insecure_port('[::]:0')
+ self.server.add_generic_rpc_handlers((_GenericHandler(),))
+ self.server.start()
+
+ # The channel will also enable the channelz service
+ self.channel = grpc.insecure_channel('localhost:%d' % port,
+ _ENABLE_CHANNELZ)
+
+
+def _generate_channel_server_pairs(n):
+ return [_ChannelServerPair() for i in range(n)]
+
+
+def _close_channel_server_pairs(pairs):
+ for pair in pairs:
+ pair.server.stop(None)
+ # TODO(ericgribkoff) This del should not be required
+ del pair.server
+ pair.channel.close()
+
+
+class ChannelzServicerTest(unittest.TestCase):
+
+ def _send_successful_unary_unary(self, idx):
+ _, r = self._pairs[idx].channel.unary_unary(
+ _SUCCESSFUL_UNARY_UNARY).with_call(_REQUEST)
+ self.assertEqual(r.code(), grpc.StatusCode.OK)
+
+ def _send_failed_unary_unary(self, idx):
+ try:
+ self._pairs[idx].channel.unary_unary(_FAILED_UNARY_UNARY).with_call(
+ _REQUEST)
+ except grpc.RpcError:
+ return
+ else:
+ self.fail("This call is supposed to fail")
+
+ def _send_successful_stream_stream(self, idx):
+ response_iterator = self._pairs[idx].channel.stream_stream(
+ _SUCCESSFUL_STREAM_STREAM).__call__(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH))
+ cnt = 0
+ for _ in response_iterator:
+ cnt += 1
+ self.assertEqual(cnt, test_constants.STREAM_LENGTH)
+
+ def _get_channel_id(self, idx):
+ """Channel id may not be consecutive"""
+ resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
+ self.assertGreater(len(resp.channel), idx)
+ return resp.channel[idx].ref.channel_id
+
+ def setUp(self):
+ self._pairs = []
+ # This server is used only for fetching Channelz info;
+ # it should not itself enable Channelz
+ self._server = grpc.server(
+ futures.ThreadPoolExecutor(max_workers=3),
+ options=_DISABLE_REUSE_PORT + _DISABLE_CHANNELZ)
+ port = self._server.add_insecure_port('[::]:0')
+ channelz.add_channelz_servicer(self._server)
+ self._server.start()
+
+ # This channel is used only to fetch Channelz info;
+ # Channelz should not be enabled on it
+ self._channel = grpc.insecure_channel('localhost:%d' % port,
+ _DISABLE_CHANNELZ)
+ self._channelz_stub = channelz_pb2_grpc.ChannelzStub(self._channel)
+
+ def tearDown(self):
+ self._server.stop(None)
+ self._channel.close()
+ _close_channel_server_pairs(self._pairs)
+
+ def test_get_top_channels_basic(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
+ self.assertEqual(len(resp.channel), 1)
+ self.assertEqual(resp.end, True)
+
+ def test_get_top_channels_high_start_id(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=10000))
+ self.assertEqual(len(resp.channel), 0)
+ self.assertEqual(resp.end, True)
+
+ def test_successful_request(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ self._send_successful_unary_unary(0)
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
+ self.assertEqual(resp.channel.data.calls_started, 1)
+ self.assertEqual(resp.channel.data.calls_succeeded, 1)
+ self.assertEqual(resp.channel.data.calls_failed, 0)
+
+ def test_failed_request(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ self._send_failed_unary_unary(0)
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
+ self.assertEqual(resp.channel.data.calls_started, 1)
+ self.assertEqual(resp.channel.data.calls_succeeded, 0)
+ self.assertEqual(resp.channel.data.calls_failed, 1)
+
+ def test_many_requests(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ k_success = 7
+ k_failed = 9
+ for i in range(k_success):
+ self._send_successful_unary_unary(0)
+ for i in range(k_failed):
+ self._send_failed_unary_unary(0)
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
+ self.assertEqual(resp.channel.data.calls_started, k_success + k_failed)
+ self.assertEqual(resp.channel.data.calls_succeeded, k_success)
+ self.assertEqual(resp.channel.data.calls_failed, k_failed)
+
+ def test_many_channel(self):
+ k_channels = 4
+ self._pairs = _generate_channel_server_pairs(k_channels)
+ resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
+ self.assertEqual(len(resp.channel), k_channels)
+
+ def test_many_requests_many_channel(self):
+ k_channels = 4
+ self._pairs = _generate_channel_server_pairs(k_channels)
+ k_success = 11
+ k_failed = 13
+ for i in range(k_success):
+ self._send_successful_unary_unary(0)
+ self._send_successful_unary_unary(2)
+ for i in range(k_failed):
+ self._send_failed_unary_unary(1)
+ self._send_failed_unary_unary(2)
+
+ # The first channel saw only successes
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
+ self.assertEqual(resp.channel.data.calls_started, k_success)
+ self.assertEqual(resp.channel.data.calls_succeeded, k_success)
+ self.assertEqual(resp.channel.data.calls_failed, 0)
+
+ # The second channel saw only failures
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(1)))
+ self.assertEqual(resp.channel.data.calls_started, k_failed)
+ self.assertEqual(resp.channel.data.calls_succeeded, 0)
+ self.assertEqual(resp.channel.data.calls_failed, k_failed)
+
+ # The third channel saw both successes and failures
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(2)))
+ self.assertEqual(resp.channel.data.calls_started, k_success + k_failed)
+ self.assertEqual(resp.channel.data.calls_succeeded, k_success)
+ self.assertEqual(resp.channel.data.calls_failed, k_failed)
+
+ # The fourth channel saw nothing
+ resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(3)))
+ self.assertEqual(resp.channel.data.calls_started, 0)
+ self.assertEqual(resp.channel.data.calls_succeeded, 0)
+ self.assertEqual(resp.channel.data.calls_failed, 0)
+
+ def test_many_subchannels(self):
+ k_channels = 4
+ self._pairs = _generate_channel_server_pairs(k_channels)
+ k_success = 17
+ k_failed = 19
+ for i in range(k_success):
+ self._send_successful_unary_unary(0)
+ self._send_successful_unary_unary(2)
+ for i in range(k_failed):
+ self._send_failed_unary_unary(1)
+ self._send_failed_unary_unary(2)
+
+ gtc_resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
+ self.assertEqual(len(gtc_resp.channel), k_channels)
+ for i in range(k_channels):
+ # If no call was performed on the channel, there should not be any subchannel
+ if gtc_resp.channel[i].data.calls_started == 0:
+ self.assertEqual(len(gtc_resp.channel[i].subchannel_ref), 0)
+ continue
+
+ # Otherwise, the subchannel should exist
+ self.assertGreater(len(gtc_resp.channel[i].subchannel_ref), 0)
+ gsc_resp = self._channelz_stub.GetSubchannel(
+ channelz_pb2.GetSubchannelRequest(
+ subchannel_id=gtc_resp.channel[i].subchannel_ref[
+ 0].subchannel_id))
+ self.assertEqual(gtc_resp.channel[i].data.calls_started,
+ gsc_resp.subchannel.data.calls_started)
+ self.assertEqual(gtc_resp.channel[i].data.calls_succeeded,
+ gsc_resp.subchannel.data.calls_succeeded)
+ self.assertEqual(gtc_resp.channel[i].data.calls_failed,
+ gsc_resp.subchannel.data.calls_failed)
+
+ def test_server_basic(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ resp = self._channelz_stub.GetServers(
+ channelz_pb2.GetServersRequest(start_server_id=0))
+ self.assertEqual(len(resp.server), 1)
+
+ def test_get_one_server(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ gss_resp = self._channelz_stub.GetServers(
+ channelz_pb2.GetServersRequest(start_server_id=0))
+ self.assertEqual(len(gss_resp.server), 1)
+ gs_resp = self._channelz_stub.GetServer(
+ channelz_pb2.GetServerRequest(
+ server_id=gss_resp.server[0].ref.server_id))
+ self.assertEqual(gss_resp.server[0].ref.server_id,
+ gs_resp.server.ref.server_id)
+
+ def test_server_call(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ k_success = 23
+ k_failed = 29
+ for i in range(k_success):
+ self._send_successful_unary_unary(0)
+ for i in range(k_failed):
+ self._send_failed_unary_unary(0)
+
+ resp = self._channelz_stub.GetServers(
+ channelz_pb2.GetServersRequest(start_server_id=0))
+ self.assertEqual(len(resp.server), 1)
+ self.assertEqual(resp.server[0].data.calls_started,
+ k_success + k_failed)
+ self.assertEqual(resp.server[0].data.calls_succeeded, k_success)
+ self.assertEqual(resp.server[0].data.calls_failed, k_failed)
+
+ def test_many_subchannels_and_sockets(self):
+ k_channels = 4
+ self._pairs = _generate_channel_server_pairs(k_channels)
+ k_success = 3
+ k_failed = 5
+ for i in range(k_success):
+ self._send_successful_unary_unary(0)
+ self._send_successful_unary_unary(2)
+ for i in range(k_failed):
+ self._send_failed_unary_unary(1)
+ self._send_failed_unary_unary(2)
+
+ gtc_resp = self._channelz_stub.GetTopChannels(
+ channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
+ self.assertEqual(len(gtc_resp.channel), k_channels)
+ for i in range(k_channels):
+ # If no call was performed on the channel, there should not be any subchannel
+ if gtc_resp.channel[i].data.calls_started == 0:
+ self.assertEqual(len(gtc_resp.channel[i].subchannel_ref), 0)
+ continue
+
+ # Otherwise, the subchannel should exist
+ self.assertGreater(len(gtc_resp.channel[i].subchannel_ref), 0)
+ gsc_resp = self._channelz_stub.GetSubchannel(
+ channelz_pb2.GetSubchannelRequest(
+ subchannel_id=gtc_resp.channel[i].subchannel_ref[
+ 0].subchannel_id))
+ self.assertEqual(len(gsc_resp.subchannel.socket_ref), 1)
+
+ gs_resp = self._channelz_stub.GetSocket(
+ channelz_pb2.GetSocketRequest(
+ socket_id=gsc_resp.subchannel.socket_ref[0].socket_id))
+ self.assertEqual(gsc_resp.subchannel.data.calls_started,
+ gs_resp.socket.data.streams_started)
+ self.assertEqual(gsc_resp.subchannel.data.calls_started,
+ gs_resp.socket.data.streams_succeeded)
+ # Calls started == messages sent, only valid for unary calls
+ self.assertEqual(gsc_resp.subchannel.data.calls_started,
+ gs_resp.socket.data.messages_sent)
+ # Responses are received only when the RPC succeeds
+ self.assertEqual(gsc_resp.subchannel.data.calls_succeeded,
+ gs_resp.socket.data.messages_received)
+
+ def test_streaming_rpc(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ # In C++, the argument to _send_successful_stream_stream is the message length.
+ # Here the argument is still the channel index, to be consistent with the other helpers.
+ self._send_successful_stream_stream(0)
+
+ gc_resp = self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=self._get_channel_id(0)))
+ self.assertEqual(gc_resp.channel.data.calls_started, 1)
+ self.assertEqual(gc_resp.channel.data.calls_succeeded, 1)
+ self.assertEqual(gc_resp.channel.data.calls_failed, 0)
+ # Subchannel exists
+ self.assertGreater(len(gc_resp.channel.subchannel_ref), 0)
+
+ gsc_resp = self._channelz_stub.GetSubchannel(
+ channelz_pb2.GetSubchannelRequest(
+ subchannel_id=gc_resp.channel.subchannel_ref[0].subchannel_id))
+ self.assertEqual(gsc_resp.subchannel.data.calls_started, 1)
+ self.assertEqual(gsc_resp.subchannel.data.calls_succeeded, 1)
+ self.assertEqual(gsc_resp.subchannel.data.calls_failed, 0)
+ # Socket exists
+ self.assertEqual(len(gsc_resp.subchannel.socket_ref), 1)
+
+ gs_resp = self._channelz_stub.GetSocket(
+ channelz_pb2.GetSocketRequest(
+ socket_id=gsc_resp.subchannel.socket_ref[0].socket_id))
+ self.assertEqual(gs_resp.socket.data.streams_started, 1)
+ self.assertEqual(gs_resp.socket.data.streams_succeeded, 1)
+ self.assertEqual(gs_resp.socket.data.streams_failed, 0)
+ self.assertEqual(gs_resp.socket.data.messages_sent,
+ test_constants.STREAM_LENGTH)
+ self.assertEqual(gs_resp.socket.data.messages_received,
+ test_constants.STREAM_LENGTH)
+
+ def test_server_sockets(self):
+ self._pairs = _generate_channel_server_pairs(1)
+ self._send_successful_unary_unary(0)
+ self._send_failed_unary_unary(0)
+
+ gs_resp = self._channelz_stub.GetServers(
+ channelz_pb2.GetServersRequest(start_server_id=0))
+ self.assertEqual(len(gs_resp.server), 1)
+ self.assertEqual(gs_resp.server[0].data.calls_started, 2)
+ self.assertEqual(gs_resp.server[0].data.calls_succeeded, 1)
+ self.assertEqual(gs_resp.server[0].data.calls_failed, 1)
+
+ gss_resp = self._channelz_stub.GetServerSockets(
+ channelz_pb2.GetServerSocketsRequest(
+ server_id=gs_resp.server[0].ref.server_id, start_socket_id=0))
+ # If the RPC fails, it raises a grpc.RpcError,
+ # so the test is considered passed if no exception is raised
+
+ def test_server_listen_sockets(self):
+ self._pairs = _generate_channel_server_pairs(1)
+
+ gss_resp = self._channelz_stub.GetServers(
+ channelz_pb2.GetServersRequest(start_server_id=0))
+ self.assertEqual(len(gss_resp.server), 1)
+ self.assertEqual(len(gss_resp.server[0].listen_socket), 1)
+
+ gs_resp = self._channelz_stub.GetSocket(
+ channelz_pb2.GetSocketRequest(
+ socket_id=gss_resp.server[0].listen_socket[0].socket_id))
+ # If the RPC fails, it raises a grpc.RpcError,
+ # so the test is considered passed if no exception is raised
+
+ def test_invalid_query_get_server(self):
+ try:
+ self._channelz_stub.GetServer(
+ channelz_pb2.GetServerRequest(server_id=10000))
+ except BaseException as e:
+ self.assertIn('StatusCode.NOT_FOUND', str(e))
+ else:
+ self.fail('Invalid query not detected')
+
+ def test_invalid_query_get_channel(self):
+ try:
+ self._channelz_stub.GetChannel(
+ channelz_pb2.GetChannelRequest(channel_id=10000))
+ except BaseException as e:
+ self.assertIn('StatusCode.NOT_FOUND', str(e))
+ else:
+ self.fail('Invalid query not detected')
+
+ def test_invalid_query_get_subchannel(self):
+ try:
+ self._channelz_stub.GetSubchannel(
+ channelz_pb2.GetSubchannelRequest(subchannel_id=10000))
+ except BaseException as e:
+ self.assertIn('StatusCode.NOT_FOUND', str(e))
+ else:
+ self.fail('Invalid query not detected')
+
+ def test_invalid_query_get_socket(self):
+ try:
+ self._channelz_stub.GetSocket(
+ channelz_pb2.GetSocketRequest(socket_id=10000))
+ except BaseException as e:
+ self.assertIn('StatusCode.NOT_FOUND', str(e))
+ else:
+ self.fail('Invalid query not detected')
+
+ def test_invalid_query_get_server_sockets(self):
+ try:
+ self._channelz_stub.GetServerSockets(
+ channelz_pb2.GetServerSocketsRequest(
+ server_id=10000,
+ start_socket_id=0,
+ ))
+ except BaseException as e:
+ self.assertIn('StatusCode.NOT_FOUND', str(e))
+ else:
+ self.fail('Invalid query not detected')
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
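
A minimal sketch (not part of the diff) of how the channelz servicer exercised above can be queried from an application, using only APIs that appear in this file: add_channelz_servicer, ChannelzStub and GetTopChannelsRequest. The worker count and the print statement are illustrative only.

    from concurrent import futures

    import grpc
    from grpc_channelz.v1 import channelz
    from grpc_channelz.v1 import channelz_pb2
    from grpc_channelz.v1 import channelz_pb2_grpc

    def dump_top_channels():
        # The admin server only serves channelz data; it does not itself
        # need channelz enabled.
        admin_server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=1),
            options=(('grpc.enable_channelz', 0),))
        channelz.add_channelz_servicer(admin_server)
        admin_port = admin_server.add_insecure_port('[::]:0')
        admin_server.start()
        with grpc.insecure_channel('localhost:%d' % admin_port) as admin_channel:
            stub = channelz_pb2_grpc.ChannelzStub(admin_channel)
            resp = stub.GetTopChannels(
                channelz_pb2.GetTopChannelsRequest(start_channel_id=0))
            for channel in resp.channel:
                # Each entry reports per-channel call counters.
                print(channel.ref.channel_id, channel.data.calls_started,
                      channel.data.calls_failed)
        admin_server.stop(None)
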
diff --git a/src/python/grpcio_tests/tests/interop/BUILD.bazel b/src/python/grpcio_tests/tests/interop/BUILD.bazel
new file mode 100644
index 0000000000..aebdbf67eb
--- /dev/null
+++ b/src/python/grpcio_tests/tests/interop/BUILD.bazel
@@ -0,0 +1,101 @@
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+
+package(default_visibility = ["//visibility:public"])
+
+py_library(
+ name = "_intraop_test_case",
+ srcs = ["_intraop_test_case.py"],
+ deps = [
+ ":methods",
+ ],
+ imports=["../../",],
+)
+
+py_library(
+ name = "client",
+ srcs = ["client.py"],
+ deps = [
+ "//src/python/grpcio/grpc:grpcio",
+ ":methods",
+ ":resources",
+ "//src/proto/grpc/testing:py_test_proto",
+ requirement('google-auth'),
+ ],
+ imports=["../../",],
+)
+
+py_library(
+ name = "methods",
+ srcs = ["methods.py"],
+ deps = [
+ "//src/python/grpcio/grpc:grpcio",
+ "//src/proto/grpc/testing:py_empty_proto",
+ "//src/proto/grpc/testing:py_messages_proto",
+ "//src/proto/grpc/testing:py_test_proto",
+ requirement('google-auth'),
+ requirement('requests'),
+ requirement('enum34'),
+ requirement('urllib3'),
+ requirement('chardet'),
+ requirement('certifi'),
+ requirement('idna'),
+ ],
+ imports=["../../",],
+)
+
+py_library(
+ name = "resources",
+ srcs = ["resources.py"],
+ data = [
+ "//src/python/grpcio_tests/tests/interop/credentials",
+ ],
+)
+
+py_library(
+ name = "server",
+ srcs = ["server.py"],
+ deps = [
+ "//src/python/grpcio/grpc:grpcio",
+ ":methods",
+ ":resources",
+ "//src/python/grpcio_tests/tests/unit:test_common",
+ "//src/proto/grpc/testing:py_test_proto",
+ ],
+ imports=["../../",],
+)
+
+py_test(
+ name="_insecure_intraop_test",
+ size="small",
+ srcs=["_insecure_intraop_test.py",],
+ main="_insecure_intraop_test.py",
+ deps=[
+ "//src/python/grpcio/grpc:grpcio",
+ ":_intraop_test_case",
+ ":methods",
+ ":server",
+ "//src/python/grpcio_tests/tests/unit:test_common",
+ "//src/proto/grpc/testing:py_test_proto",
+ ],
+ imports=["../../",],
+ data=[
+ "//src/python/grpcio_tests/tests/unit/credentials",
+ ],
+)
+
+py_test(
+ name="_secure_intraop_test",
+ size="small",
+ srcs=["_secure_intraop_test.py",],
+ main="_secure_intraop_test.py",
+ deps=[
+ "//src/python/grpcio/grpc:grpcio",
+ ":_intraop_test_case",
+ ":methods",
+ ":server",
+ "//src/python/grpcio_tests/tests/unit:test_common",
+ "//src/proto/grpc/testing:py_test_proto",
+ ],
+ imports=["../../",],
+)
+
diff --git a/src/python/grpcio_tests/tests/interop/credentials/BUILD.bazel b/src/python/grpcio_tests/tests/interop/credentials/BUILD.bazel
new file mode 100644
index 0000000000..bc2b997292
--- /dev/null
+++ b/src/python/grpcio_tests/tests/interop/credentials/BUILD.bazel
@@ -0,0 +1,9 @@
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+ name="credentials",
+ srcs=glob([
+ "**",
+ ]),
+)
+
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index cda15a68a3..c11f6c8fad 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -23,7 +23,6 @@ from google.auth import environment_vars as google_auth_environment_vars
from google.auth.transport import grpc as google_auth_transport_grpc
from google.auth.transport import requests as google_auth_transport_requests
import grpc
-from grpc.beta import implementations
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
@@ -377,7 +376,7 @@ def _unimplemented_service(unimplemented_service_stub):
def _custom_metadata(stub):
initial_metadata_value = "test_initial_metadata_value"
- trailing_metadata_value = "\x0a\x0b\x0a\x0b\x0a\x0b"
+ trailing_metadata_value = b"\x0a\x0b\x0a\x0b\x0a\x0b"
metadata = ((_INITIAL_METADATA_KEY, initial_metadata_value),
(_TRAILING_METADATA_KEY, trailing_metadata_value))
@@ -391,7 +390,7 @@ def _custom_metadata(stub):
if trailing_metadata[_TRAILING_METADATA_KEY] != trailing_metadata_value:
raise ValueError('expected trailing metadata %s, got %s' %
(trailing_metadata_value,
- initial_metadata[_TRAILING_METADATA_KEY]))
+ trailing_metadata[_TRAILING_METADATA_KEY]))
# Testing with UnaryCall
request = messages_pb2.SimpleRequest(
@@ -422,7 +421,7 @@ def _compute_engine_creds(stub, args):
def _oauth2_auth_token(stub, args):
json_key_filename = os.environ[google_auth_environment_vars.CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ wanted_email = json.load(open(json_key_filename, 'r'))['client_email']
response = _large_unary_common_behavior(stub, True, True, None)
if wanted_email != response.username:
raise ValueError('expected username %s, got %s' % (wanted_email,
@@ -435,7 +434,7 @@ def _oauth2_auth_token(stub, args):
def _jwt_token_creds(stub, args):
json_key_filename = os.environ[google_auth_environment_vars.CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ wanted_email = json.load(open(json_key_filename, 'r'))['client_email']
response = _large_unary_common_behavior(stub, True, False, None)
if wanted_email != response.username:
raise ValueError('expected username %s, got %s' % (wanted_email,
@@ -444,7 +443,7 @@ def _jwt_token_creds(stub, args):
def _per_rpc_creds(stub, args):
json_key_filename = os.environ[google_auth_environment_vars.CREDENTIALS]
- wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ wanted_email = json.load(open(json_key_filename, 'r'))['client_email']
google_credentials, unused_project_id = google_auth.default(
scopes=[args.oauth_scope])
call_credentials = grpc.metadata_call_credentials(
@@ -457,6 +456,22 @@ def _per_rpc_creds(stub, args):
response.username))
+def _special_status_message(stub, args):
+ details = b'\t\ntest with whitespace\r\nand Unicode BMP \xe2\x98\xba and non-BMP \xf0\x9f\x98\x88\t\n'.decode(
+ 'utf-8')
+ code = 2
+ status = grpc.StatusCode.UNKNOWN # code = 2
+
+ # Test with a UnaryCall
+ request = messages_pb2.SimpleRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_size=1,
+ payload=messages_pb2.Payload(body=b'\x00'),
+ response_status=messages_pb2.EchoStatus(code=code, message=details))
+ response_future = stub.UnaryCall.future(request)
+ _validate_status_code_and_details(response_future, status, details)
+
+
@enum.unique
class TestCase(enum.Enum):
EMPTY_UNARY = 'empty_unary'
@@ -476,6 +491,7 @@ class TestCase(enum.Enum):
JWT_TOKEN_CREDS = 'jwt_token_creds'
PER_RPC_CREDS = 'per_rpc_creds'
TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server'
+ SPECIAL_STATUS_MESSAGE = 'special_status_message'
def test_interoperability(self, stub, args):
if self is TestCase.EMPTY_UNARY:
@@ -512,6 +528,8 @@ class TestCase(enum.Enum):
_jwt_token_creds(stub, args)
elif self is TestCase.PER_RPC_CREDS:
_per_rpc_creds(stub, args)
+ elif self is TestCase.SPECIAL_STATUS_MESSAGE:
+ _special_status_message(stub, args)
else:
raise NotImplementedError(
'Test case "%s" not implemented!' % self.name)
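
A minimal sketch of driving the new SPECIAL_STATUS_MESSAGE case programmatically. The stub module name and target address are assumptions (they follow the interop client, which is not shown in this hunk); args is unused by this particular case.

    import grpc
    from src.proto.grpc.testing import test_pb2_grpc  # assumed stub module
    from tests.interop import methods

    with grpc.insecure_channel('localhost:8080') as channel:  # placeholder target
        stub = test_pb2_grpc.TestServiceStub(channel)
        methods.TestCase.SPECIAL_STATUS_MESSAGE.test_interoperability(stub, args=None)
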
diff --git a/src/python/grpcio_tests/tests/interop/resources.py b/src/python/grpcio_tests/tests/interop/resources.py
index 2f76cf5db6..a55919a60a 100644
--- a/src/python/grpcio_tests/tests/interop/resources.py
+++ b/src/python/grpcio_tests/tests/interop/resources.py
@@ -14,27 +14,24 @@
"""Constants and functions for data used in interoperability testing."""
import argparse
+import pkgutil
import os
-import pkg_resources
-
_ROOT_CERTIFICATES_RESOURCE_PATH = 'credentials/ca.pem'
_PRIVATE_KEY_RESOURCE_PATH = 'credentials/server1.key'
_CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem'
def test_root_certificates():
- return pkg_resources.resource_string(__name__,
- _ROOT_CERTIFICATES_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _ROOT_CERTIFICATES_RESOURCE_PATH)
def private_key():
- return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH)
def certificate_chain():
- return pkg_resources.resource_string(__name__,
- _CERTIFICATE_CHAIN_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
def parse_bool(value):
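
The resources.py change above swaps pkg_resources.resource_string for the standard-library pkgutil.get_data; a minimal sketch of the replacement, with 'my_pkg' and 'data/cert.pem' as illustrative placeholders:

    import pkgutil

    # pkgutil.get_data returns the resource contents as bytes, or None when
    # the resource (or the package loader) cannot be found.
    pem_bytes = pkgutil.get_data('my_pkg', 'data/cert.pem')
    if pem_bytes is None:
        raise IOError('resource not found: data/cert.pem')
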
diff --git a/src/python/grpcio_tests/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py
index 768cdaf468..72f88a1c98 100644
--- a/src/python/grpcio_tests/tests/interop/server.py
+++ b/src/python/grpcio_tests/tests/interop/server.py
@@ -25,8 +25,8 @@ from tests.interop import methods
from tests.interop import resources
from tests.unit import test_common
-logging.basicConfig()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
index 740bdcf1eb..337a94b546 100644
--- a/src/python/grpcio_tests/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -39,7 +39,7 @@ class WorkerServer(worker_service_pb2_grpc.WorkerServiceServicer):
self._quit_event = threading.Event()
def RunServer(self, request_iterator, context):
- config = next(request_iterator).setup
+ config = next(request_iterator).setup #pylint: disable=stop-iteration-return
server, port = self._create_server(config)
cores = multiprocessing.cpu_count()
server.start()
@@ -102,7 +102,7 @@ class WorkerServer(worker_service_pb2_grpc.WorkerServiceServicer):
return (server, port)
def RunClient(self, request_iterator, context):
- config = next(request_iterator).setup
+ config = next(request_iterator).setup #pylint: disable=stop-iteration-return
client_runners = []
qps_data = histogram.Histogram(config.histogram_params.resolution,
config.histogram_params.max_possible)
diff --git a/src/python/grpcio_tests/tests/reflection/BUILD.bazel b/src/python/grpcio_tests/tests/reflection/BUILD.bazel
new file mode 100644
index 0000000000..c0efb0b7ce
--- /dev/null
+++ b/src/python/grpcio_tests/tests/reflection/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@grpc_python_dependencies//:requirements.bzl", "requirement")
+
+package(default_visibility = ["//visibility:public"])
+
+py_test(
+ name="_reflection_servicer_test",
+ size="small",
+ timeout="moderate",
+ srcs=["_reflection_servicer_test.py",],
+ main="_reflection_servicer_test.py",
+ deps=[
+ "//src/python/grpcio/grpc:grpcio",
+ "//src/python/grpcio_reflection/grpc_reflection/v1alpha:grpc_reflection",
+ "//src/python/grpcio_tests/tests/unit:test_common",
+ "//src/proto/grpc/testing:py_empty_proto",
+ "//src/proto/grpc/testing/proto2:empty2_extensions_proto",
+ requirement('protobuf'),
+ ],
+ imports=["../../",],
+)
+
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index 76d5d22d57..9cffd3df19 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -1,5 +1,6 @@
[
"_sanity._sanity_test.SanityTest",
+ "channelz._channelz_servicer_test.ChannelzServicerTest",
"health_check._health_servicer_test.HealthServicerTest",
"interop._insecure_intraop_test.InsecureIntraopTest",
"interop._secure_intraop_test.SecureIntraopTest",
@@ -42,11 +43,14 @@
"unit._cython.cygrpc_test.SecureServerSecureClient",
"unit._cython.cygrpc_test.TypeSmokeTest",
"unit._empty_message_test.EmptyMessageTest",
+ "unit._error_message_encoding_test.ErrorMessageEncodingTest",
"unit._exit_test.ExitTest",
"unit._interceptor_test.InterceptorTest",
"unit._invalid_metadata_test.InvalidMetadataTest",
"unit._invocation_defects_test.InvocationDefectsTest",
+ "unit._logging_test.LoggingTest",
"unit._metadata_code_details_test.MetadataCodeDetailsTest",
+ "unit._metadata_flags_test.MetadataFlagsTest",
"unit._metadata_test.MetadataTest",
"unit._reconnect_test.ReconnectTest",
"unit._resource_exhausted_test.ResourceExhaustedTest",
diff --git a/src/python/grpcio_tests/tests/unit/BUILD.bazel b/src/python/grpcio_tests/tests/unit/BUILD.bazel
index dcd6d9fbb2..de33b81e32 100644
--- a/src/python/grpcio_tests/tests/unit/BUILD.bazel
+++ b/src/python/grpcio_tests/tests/unit/BUILD.bazel
@@ -17,6 +17,7 @@ GRPCIO_TESTS_UNIT = [
"_interceptor_test.py",
"_invalid_metadata_test.py",
"_invocation_defects_test.py",
+ "_logging_test.py",
"_metadata_code_details_test.py",
"_metadata_test.py",
# TODO: Issue 16336
diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py
index f6245be77d..38072861a4 100644
--- a/src/python/grpcio_tests/tests/unit/_api_test.py
+++ b/src/python/grpcio_tests/tests/unit/_api_test.py
@@ -14,6 +14,7 @@
"""Test of gRPC Python's application-layer API."""
import unittest
+import logging
import six
@@ -102,4 +103,5 @@ class ChannelTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_auth_context_test.py b/src/python/grpcio_tests/tests/unit/_auth_context_test.py
index d174051070..b1b5bbdcab 100644
--- a/src/python/grpcio_tests/tests/unit/_auth_context_test.py
+++ b/src/python/grpcio_tests/tests/unit/_auth_context_test.py
@@ -15,6 +15,7 @@
import pickle
import unittest
+import logging
import grpc
from grpc import _channel
@@ -187,4 +188,5 @@ class AuthContextTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_auth_test.py b/src/python/grpcio_tests/tests/unit/_auth_test.py
index e2cb938936..d9df2add4f 100644
--- a/src/python/grpcio_tests/tests/unit/_auth_test.py
+++ b/src/python/grpcio_tests/tests/unit/_auth_test.py
@@ -16,6 +16,7 @@
import collections
import threading
import unittest
+import logging
from grpc import _auth
@@ -77,4 +78,5 @@ class AccessTokenAuthMetadataPluginTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_args_test.py b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
index dd1d2969a2..228c0e4c16 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_args_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
@@ -15,6 +15,7 @@
from concurrent import futures
import unittest
+import logging
import grpc
@@ -62,4 +63,5 @@ class ChannelArgsTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_close_test.py b/src/python/grpcio_tests/tests/unit/_channel_close_test.py
index af3a9ee1ee..82fa165710 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_close_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_close_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""Tests server and client side compression."""
+import logging
import threading
import time
import unittest
@@ -182,4 +183,5 @@ class ChannelCloseTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
index f9eb0011dc..727fb7d65f 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""Tests of grpc._channel.Channel connectivity."""
+import logging
import threading
import time
import unittest
@@ -142,4 +143,5 @@ class ChannelConnectivityTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
index 30b486079c..345460ef40 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
@@ -15,6 +15,7 @@
import threading
import unittest
+import logging
import grpc
from tests.unit.framework.common import test_constants
@@ -85,4 +86,5 @@ class ChannelReadyFutureTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py
index 0b11f03adf..876d8e827e 100644
--- a/src/python/grpcio_tests/tests/unit/_compression_test.py
+++ b/src/python/grpcio_tests/tests/unit/_compression_test.py
@@ -15,6 +15,7 @@
import unittest
+import logging
import grpc
from grpc import _grpcio_metadata
@@ -117,4 +118,5 @@ class CompressionTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py
index f487fe66a2..187a6f0388 100644
--- a/src/python/grpcio_tests/tests/unit/_credentials_test.py
+++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py
@@ -14,6 +14,8 @@
"""Tests of credentials."""
import unittest
+import logging
+import six
import grpc
@@ -52,6 +54,17 @@ class CredentialsTest(unittest.TestCase):
self.assertIsInstance(channel_first_second_and_third,
grpc.ChannelCredentials)
+ @unittest.skipIf(six.PY2, 'only invalid in Python3')
+ def test_invalid_string_certificate(self):
+ self.assertRaises(
+ TypeError,
+ grpc.ssl_channel_credentials,
+ root_certificates='A Certificate',
+ private_key=None,
+ certificate_chain=None,
+ )
+
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
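
The new test_invalid_string_certificate case asserts that, on Python 3, grpc.ssl_channel_credentials rejects str certificate material; a minimal sketch of the intended bytes-based usage, with the file path and target as placeholders:

    import grpc

    # Read the PEM data in binary mode so the credentials receive bytes,
    # which is what the new test expects on Python 3.
    with open('credentials/ca.pem', 'rb') as pem_file:
        root_certificates = pem_file.read()
    credentials = grpc.ssl_channel_credentials(root_certificates=root_certificates)
    channel = grpc.secure_channel('example.com:443', credentials)
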
diff --git a/src/python/grpcio_tests/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
index c55ef61c13..3e8393b53c 100644
--- a/src/python/grpcio_tests/tests/unit/_empty_message_test.py
+++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
+import logging
import grpc
@@ -118,4 +119,5 @@ class EmptyMessageTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_error_message_encoding_test.py b/src/python/grpcio_tests/tests/unit/_error_message_encoding_test.py
new file mode 100644
index 0000000000..6c551df3ec
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_error_message_encoding_test.py
@@ -0,0 +1,86 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests 'utf-8' encoded error message."""
+
+import unittest
+import weakref
+
+import grpc
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_UNICODE_ERROR_MESSAGES = [
+ b'\xe2\x80\x9d'.decode('utf-8'),
+ b'abc\x80\xd0\xaf'.decode('latin-1'),
+ b'\xc3\xa9'.decode('utf-8'),
+]
+
+_REQUEST = b'\x00\x00\x00'
+_RESPONSE = b'\x00\x00\x00'
+
+_UNARY_UNARY = '/test/UnaryUnary'
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, request_streaming=None, response_streaming=None):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+
+ def unary_unary(self, request, servicer_context):
+ servicer_context.set_code(grpc.StatusCode.UNKNOWN)
+ servicer_context.set_details(request.decode('utf-8'))
+ return _RESPONSE
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, test):
+ self._test = test
+
+ def service(self, handler_call_details):
+ return _MethodHandler()
+
+
+class ErrorMessageEncodingTest(unittest.TestCase):
+
+ def setUp(self):
+ self._server = test_common.test_server()
+ self._server.add_generic_rpc_handlers((_GenericHandler(
+ weakref.proxy(self)),))
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testMessageEncoding(self):
+ for message in _UNICODE_ERROR_MESSAGES:
+ multi_callable = self._channel.unary_unary(_UNARY_UNARY)
+ with self.assertRaises(grpc.RpcError) as cm:
+ multi_callable(message.encode('utf-8'))
+
+ self.assertEqual(cm.exception.code(), grpc.StatusCode.UNKNOWN)
+ self.assertEqual(cm.exception.details(), message)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
index 0a0239a63d..d1263c2c56 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
@@ -16,6 +16,7 @@
import argparse
import threading
import time
+import logging
import grpc
@@ -86,7 +87,7 @@ def hang_stream_stream(request_iterator, servicer_context):
def hang_partial_stream_stream(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
- yield next(request_iterator)
+ yield next(request_iterator) #pylint: disable=stop-iteration-return
time.sleep(WAIT_TIME)
@@ -161,6 +162,7 @@ def infinite_request_iterator():
if __name__ == '__main__':
+ logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
parser.add_argument(
diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py
index f40f3ae07c..5226537579 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_test.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_test.py
@@ -26,6 +26,7 @@ import sys
import threading
import time
import unittest
+import logging
from tests.unit import _exit_scenarios
@@ -187,4 +188,5 @@ class ExitTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_interceptor_test.py b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
index 3d547b71cd..99db0ac58b 100644
--- a/src/python/grpcio_tests/tests/unit/_interceptor_test.py
+++ b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
@@ -17,6 +17,7 @@ import collections
import itertools
import threading
import unittest
+import logging
from concurrent import futures
import grpc
@@ -598,4 +599,5 @@ class InterceptorTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
index f153089a24..0ff49490d5 100644
--- a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
@@ -14,6 +14,7 @@
"""Test of RPCs made against gRPC Python's application-layer API."""
import unittest
+import logging
import grpc
@@ -129,6 +130,10 @@ class InvalidMetadataTest(unittest.TestCase):
self._stream_stream(request_iterator, metadata=metadata)
self.assertIn(expected_error_details, str(exception_context.exception))
+ def testInvalidMetadata(self):
+ self.assertRaises(TypeError, self._unary_unary, b'', metadata=42)
+
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
index 93a5fdf9ff..00949e2236 100644
--- a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
@@ -15,6 +15,7 @@
import itertools
import threading
import unittest
+import logging
import grpc
@@ -271,4 +272,5 @@ class InvocationDefectsTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_logging_test.py b/src/python/grpcio_tests/tests/unit/_logging_test.py
new file mode 100644
index 0000000000..631b9de9db
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_logging_test.py
@@ -0,0 +1,80 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test of gRPC Python's interaction with the python logging module"""
+
+import unittest
+import six
+from six.moves import reload_module
+import logging
+import grpc
+import functools
+import sys
+
+
+def patch_stderr(f):
+
+ @functools.wraps(f)
+ def _impl(*args, **kwargs):
+ old_stderr = sys.stderr
+ sys.stderr = six.StringIO()
+ try:
+ f(*args, **kwargs)
+ finally:
+ sys.stderr = old_stderr
+
+ return _impl
+
+
+def isolated_logging(f):
+
+ @functools.wraps(f)
+ def _impl(*args, **kwargs):
+ reload_module(logging)
+ reload_module(grpc)
+ try:
+ f(*args, **kwargs)
+ finally:
+ reload_module(logging)
+
+ return _impl
+
+
+class LoggingTest(unittest.TestCase):
+
+ @isolated_logging
+ def test_logger_not_occupied(self):
+ self.assertEqual(0, len(logging.getLogger().handlers))
+
+ @patch_stderr
+ @isolated_logging
+ def test_handler_found(self):
+ self.assertEqual(0, len(sys.stderr.getvalue()))
+
+ @isolated_logging
+ def test_can_configure_logger(self):
+ intended_stream = six.StringIO()
+ logging.basicConfig(stream=intended_stream)
+ self.assertEqual(1, len(logging.getLogger().handlers))
+ self.assertIs(logging.getLogger().handlers[0].stream, intended_stream)
+
+ @isolated_logging
+ def test_grpc_logger(self):
+ self.assertIn("grpc", logging.Logger.manager.loggerDict)
+ root_logger = logging.getLogger("grpc")
+ self.assertEqual(1, len(root_logger.handlers))
+ self.assertIsInstance(root_logger.handlers[0], logging.NullHandler)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
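
The logging test above asserts that importing grpc leaves the root logger alone and installs a single NullHandler on the 'grpc' logger; a minimal sketch of the application-side convention this implies (the chosen levels are illustrative):

    import logging

    import grpc  # only attaches a NullHandler to the 'grpc' logger

    # The application opts in to log output explicitly; until it does,
    # gRPC's library logging stays silent rather than writing to stderr.
    logging.basicConfig(level=logging.INFO)
    logging.getLogger('grpc').setLevel(logging.DEBUG)
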
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index ca10bd4dab..0dafab827a 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -15,6 +15,7 @@
import threading
import unittest
+import logging
import grpc
@@ -656,4 +657,5 @@ class MetadataCodeDetailsTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py
new file mode 100644
index 0000000000..2d352e99d4
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py
@@ -0,0 +1,251 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests metadata flags feature by testing wait-for-ready semantics"""
+
+import time
+import weakref
+import unittest
+import threading
+import socket
+from six.moves import queue
+
+import grpc
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
+
+_REQUEST = b'\x00\x00\x00'
+_RESPONSE = b'\x00\x00\x00'
+
+
+def handle_unary_unary(test, request, servicer_context):
+ return _RESPONSE
+
+
+def handle_unary_stream(test, request, servicer_context):
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
+
+
+def handle_stream_unary(test, request_iterator, servicer_context):
+ for _ in request_iterator:
+ pass
+ return _RESPONSE
+
+
+def handle_stream_stream(test, request_iterator, servicer_context):
+ for _ in request_iterator:
+ yield _RESPONSE
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, test, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = lambda req, ctx: handle_stream_stream(test, req, ctx)
+ elif self.request_streaming:
+ self.stream_unary = lambda req, ctx: handle_stream_unary(test, req, ctx)
+ elif self.response_streaming:
+ self.unary_stream = lambda req, ctx: handle_unary_stream(test, req, ctx)
+ else:
+ self.unary_unary = lambda req, ctx: handle_unary_unary(test, req, ctx)
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, test):
+ self._test = test
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(self._test, False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(self._test, False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(self._test, True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(self._test, True, True)
+ else:
+ return None
+
+
+def get_free_loopback_tcp_port():
+ tcp = socket.socket(socket.AF_INET6)
+ tcp.bind(('', 0))
+ address_tuple = tcp.getsockname()
+ return tcp, "[::1]:%s" % (address_tuple[1])
+
+
+def create_dummy_channel():
+ """Creating dummy channels is a workaround for retries"""
+ _, addr = get_free_loopback_tcp_port()
+ return grpc.insecure_channel(addr)
+
+
+def perform_unary_unary_call(channel, wait_for_ready=None):
+ channel.unary_unary(_UNARY_UNARY).__call__(
+ _REQUEST,
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+
+
+def perform_unary_unary_with_call(channel, wait_for_ready=None):
+ channel.unary_unary(_UNARY_UNARY).with_call(
+ _REQUEST,
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+
+
+def perform_unary_unary_future(channel, wait_for_ready=None):
+ channel.unary_unary(_UNARY_UNARY).future(
+ _REQUEST,
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready).result(
+ timeout=test_constants.LONG_TIMEOUT)
+
+
+def perform_unary_stream_call(channel, wait_for_ready=None):
+ response_iterator = channel.unary_stream(_UNARY_STREAM).__call__(
+ _REQUEST,
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+ for _ in response_iterator:
+ pass
+
+
+def perform_stream_unary_call(channel, wait_for_ready=None):
+ channel.stream_unary(_STREAM_UNARY).__call__(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+
+
+def perform_stream_unary_with_call(channel, wait_for_ready=None):
+ channel.stream_unary(_STREAM_UNARY).with_call(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+
+
+def perform_stream_unary_future(channel, wait_for_ready=None):
+ channel.stream_unary(_STREAM_UNARY).future(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready).result(
+ timeout=test_constants.LONG_TIMEOUT)
+
+
+def perform_stream_stream_call(channel, wait_for_ready=None):
+ response_iterator = channel.stream_stream(_STREAM_STREAM).__call__(
+ iter([_REQUEST] * test_constants.STREAM_LENGTH),
+ timeout=test_constants.LONG_TIMEOUT,
+ wait_for_ready=wait_for_ready)
+ for _ in response_iterator:
+ pass
+
+
+_ALL_CALL_CASES = [
+ perform_unary_unary_call, perform_unary_unary_with_call,
+ perform_unary_unary_future, perform_unary_stream_call,
+ perform_stream_unary_call, perform_stream_unary_with_call,
+ perform_stream_unary_future, perform_stream_stream_call
+]
+
+
+class MetadataFlagsTest(unittest.TestCase):
+
+ def check_connection_does_failfast(self, fn, channel, wait_for_ready=None):
+ try:
+ fn(channel, wait_for_ready)
+ self.fail("The Call should fail")
+ except BaseException as e: # pylint: disable=broad-except
+ self.assertIn('StatusCode.UNAVAILABLE', str(e))
+
+ def test_call_wait_for_ready_default(self):
+ for perform_call in _ALL_CALL_CASES:
+ self.check_connection_does_failfast(perform_call,
+ create_dummy_channel())
+
+ def test_call_wait_for_ready_disabled(self):
+ for perform_call in _ALL_CALL_CASES:
+ self.check_connection_does_failfast(
+ perform_call, create_dummy_channel(), wait_for_ready=False)
+
+ def test_call_wait_for_ready_enabled(self):
+ # To test the wait mechanism, Python threads are required so that
+ # the clients can all be set up first without handling each case
+ # separately. Also, Python threads do not propagate unhandled
+ # exceptions to the main thread, so they are stored in a queue and
+ # re-raised in the main thread.
+ unhandled_exceptions = queue.Queue()
+ tcp, addr = get_free_loopback_tcp_port()
+ wg = test_common.WaitGroup(len(_ALL_CALL_CASES))
+
+ def wait_for_transient_failure(channel_connectivity):
+ if channel_connectivity == grpc.ChannelConnectivity.TRANSIENT_FAILURE:
+ wg.done()
+
+ def test_call(perform_call):
+ try:
+ channel = grpc.insecure_channel(addr)
+ channel.subscribe(wait_for_transient_failure)
+ perform_call(channel, wait_for_ready=True)
+ except BaseException as e: # pylint: disable=broad-except
+ # If the call fails, the thread is destroyed. The channel
+ # object can then be collected before the callback is invoked,
+ # which would result in a deadlock.
+ wg.done()
+ unhandled_exceptions.put(e, True)
+
+ test_threads = []
+ for perform_call in _ALL_CALL_CASES:
+ test_thread = threading.Thread(
+ target=test_call, args=(perform_call,))
+ test_thread.exception = None
+ test_thread.start()
+ test_threads.append(test_thread)
+
+ # Start the server after the connections are waiting
+ wg.wait()
+ tcp.close()
+ server = test_common.test_server()
+ server.add_generic_rpc_handlers((_GenericHandler(weakref.proxy(self)),))
+ server.add_insecure_port(addr)
+ server.start()
+
+ for test_thread in test_threads:
+ test_thread.join()
+
+ # Stop the server so the test ends properly
+ server.stop(0)
+
+ if not unhandled_exceptions.empty():
+ raise unhandled_exceptions.get(True)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
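
A minimal sketch of the wait-for-ready flag exercised by the test above: with wait_for_ready=True the RPC queues until the channel becomes READY (or the deadline expires) instead of failing fast with UNAVAILABLE. The target, method path and timeout are placeholders.

    import grpc

    with grpc.insecure_channel('localhost:50051') as channel:
        multi_callable = channel.unary_unary('/test/UnaryUnary')
        # Queues the RPC until the server is reachable or the deadline passes.
        response = multi_callable(b'\x00\x00\x00', timeout=10,
                                  wait_for_ready=True)
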
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index 5908421011..777ab683e3 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -15,6 +15,7 @@
import unittest
import weakref
+import logging
import grpc
from grpc import _channel
@@ -237,4 +238,5 @@ class MetadataTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_reconnect_test.py b/src/python/grpcio_tests/tests/unit/_reconnect_test.py
index a708d8d862..f6d4fcbd0a 100644
--- a/src/python/grpcio_tests/tests/unit/_reconnect_test.py
+++ b/src/python/grpcio_tests/tests/unit/_reconnect_test.py
@@ -15,6 +15,7 @@
import socket
import time
+import logging
import unittest
import grpc
@@ -100,4 +101,5 @@ class ReconnectTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
index df4b129018..4fead8fcd5 100644
--- a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
+++ b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
@@ -15,6 +15,7 @@
import threading
import unittest
+import logging
import grpc
from grpc import _channel
@@ -253,4 +254,5 @@ class ResourceExhaustedTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py
index 34e7831a98..a768d6c7c1 100644
--- a/src/python/grpcio_tests/tests/unit/_rpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py
@@ -16,6 +16,7 @@
import itertools
import threading
import unittest
+import logging
from concurrent import futures
import grpc
@@ -846,4 +847,5 @@ class RPCTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py b/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
index 0d78034b7b..9e4bd61816 100644
--- a/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
+++ b/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
@@ -35,6 +35,7 @@ import os
import six
import threading
import unittest
+import logging
from concurrent import futures
@@ -69,18 +70,11 @@ SERVER_CERT_CHAIN_2_PEM = (resources.cert_hier_2_server_1_cert() +
Call = collections.namedtuple('Call', ['did_raise', 'returned_cert_config'])
-def _create_client_stub(
- port,
- expect_success,
- root_certificates=None,
- private_key=None,
- certificate_chain=None,
-):
- channel = grpc.secure_channel('localhost:{}'.format(port),
- grpc.ssl_channel_credentials(
- root_certificates=root_certificates,
- private_key=private_key,
- certificate_chain=certificate_chain))
+def _create_channel(port, credentials):
+ return grpc.secure_channel('localhost:{}'.format(port), credentials)
+
+
+def _create_client_stub(channel, expect_success):
if expect_success:
# per Nathaniel: there's some robustness issue if we start
# using a channel without waiting for it to be actually ready
@@ -175,14 +169,13 @@ class _ServerSSLCertReloadTest(
root_certificates=None,
private_key=None,
certificate_chain=None):
- client_stub = _create_client_stub(
- self.port,
- expect_success,
+ credentials = grpc.ssl_channel_credentials(
root_certificates=root_certificates,
private_key=private_key,
certificate_chain=certificate_chain)
- self._perform_rpc(client_stub, expect_success)
- del client_stub
+ with _create_channel(self.port, credentials) as client_channel:
+ client_stub = _create_client_stub(client_channel, expect_success)
+ self._perform_rpc(client_stub, expect_success)
def _test(self):
# things should work...
@@ -258,12 +251,13 @@ class _ServerSSLCertReloadTest(
# now create the "persistent" clients
self.cert_config_fetcher.reset()
self.cert_config_fetcher.configure(False, None)
- persistent_client_stub_A = _create_client_stub(
+ channel_A = _create_channel(
self.port,
- True,
- root_certificates=CA_1_PEM,
- private_key=CLIENT_KEY_2_PEM,
- certificate_chain=CLIENT_CERT_CHAIN_2_PEM)
+ grpc.ssl_channel_credentials(
+ root_certificates=CA_1_PEM,
+ private_key=CLIENT_KEY_2_PEM,
+ certificate_chain=CLIENT_CERT_CHAIN_2_PEM))
+ persistent_client_stub_A = _create_client_stub(channel_A, True)
self._perform_rpc(persistent_client_stub_A, True)
actual_calls = self.cert_config_fetcher.getCalls()
self.assertEqual(len(actual_calls), 1)
@@ -272,12 +266,13 @@ class _ServerSSLCertReloadTest(
self.cert_config_fetcher.reset()
self.cert_config_fetcher.configure(False, None)
- persistent_client_stub_B = _create_client_stub(
+ channel_B = _create_channel(
self.port,
- True,
- root_certificates=CA_1_PEM,
- private_key=CLIENT_KEY_2_PEM,
- certificate_chain=CLIENT_CERT_CHAIN_2_PEM)
+ grpc.ssl_channel_credentials(
+ root_certificates=CA_1_PEM,
+ private_key=CLIENT_KEY_2_PEM,
+ certificate_chain=CLIENT_CERT_CHAIN_2_PEM))
+ persistent_client_stub_B = _create_client_stub(channel_B, True)
self._perform_rpc(persistent_client_stub_B, True)
actual_calls = self.cert_config_fetcher.getCalls()
self.assertEqual(len(actual_calls), 1)
@@ -358,6 +353,9 @@ class _ServerSSLCertReloadTest(
actual_calls = self.cert_config_fetcher.getCalls()
self.assertEqual(len(actual_calls), 0)
+ channel_A.close()
+ channel_B.close()
+
class ServerSSLCertConfigFetcherParamsChecks(unittest.TestCase):
@@ -518,4 +516,5 @@ class ServerSSLCertReloadTestCertConfigReuse(_ServerSSLCertReloadTest):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
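
The SSL cert-reload test now builds the channel separately from the stub and closes it, either via a with block or an explicit close(); a minimal sketch of both patterns, with the target and credentials as placeholders:

    import grpc

    credentials = grpc.ssl_channel_credentials()  # system default roots

    # Preferred: the channel is closed automatically when the block exits.
    with grpc.secure_channel('localhost:443', credentials) as channel:
        pass  # build stubs and perform RPCs here

    # Alternative: keep a long-lived channel and close it explicitly later.
    persistent_channel = grpc.secure_channel('localhost:443', credentials)
    # ... use persistent_channel ...
    persistent_channel.close()
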
diff --git a/src/python/grpcio_tests/tests/unit/_server_test.py b/src/python/grpcio_tests/tests/unit/_server_test.py
index acf4a17921..2c8205f365 100644
--- a/src/python/grpcio_tests/tests/unit/_server_test.py
+++ b/src/python/grpcio_tests/tests/unit/_server_test.py
@@ -14,6 +14,7 @@
from concurrent import futures
import unittest
+import logging
import grpc
@@ -49,4 +50,5 @@ class ServerTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
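
The only change to this module is the logging.basicConfig() call ahead of unittest.main(). gRPC Python emits diagnostics through the standard logging module, and without a handler on the root logger those records are dropped (or produce a "no handlers could be found" warning on Python 2). A sketch of the entry-point pattern these test modules now share:

import logging
import unittest

if __name__ == '__main__':
    # Install a default StreamHandler on the root logger so library log
    # records, including grpc's, show up in the test output.
    logging.basicConfig()
    unittest.main(verbosity=2)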
diff --git a/src/python/grpcio_tests/tests/unit/_session_cache_test.py b/src/python/grpcio_tests/tests/unit/_session_cache_test.py
index b4e4670fa7..9223a6da03 100644
--- a/src/python/grpcio_tests/tests/unit/_session_cache_test.py
+++ b/src/python/grpcio_tests/tests/unit/_session_cache_test.py
@@ -15,6 +15,7 @@
import pickle
import unittest
+import logging
import grpc
from grpc import _channel
@@ -142,4 +143,5 @@ class SSLSessionCacheTest(unittest.TestCase):
if __name__ == '__main__':
+ logging.basicConfig()
unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/beta/BUILD.bazel b/src/python/grpcio_tests/tests/unit/beta/BUILD.bazel
deleted file mode 100644
index d3e0fe20eb..0000000000
--- a/src/python/grpcio_tests/tests/unit/beta/BUILD.bazel
+++ /dev/null
@@ -1,75 +0,0 @@
-load("@grpc_python_dependencies//:requirements.bzl", "requirement")
-
-package(default_visibility = ["//visibility:public"])
-
-py_library(
- name = "test_utilities",
- srcs = ["test_utilities.py"],
- deps = [
- "//src/python/grpcio/grpc:grpcio",
- ],
-)
-
-py_test(
- name = "_beta_features_test",
- srcs = ["_beta_features_test.py"],
- main = "_beta_features_test.py",
- size = "small",
- deps = [
- "//src/python/grpcio/grpc:grpcio",
- "//src/python/grpcio_tests/tests/unit:resources",
- "//src/python/grpcio_tests/tests/unit/framework/common",
- ":test_utilities",
- ],
- imports=["../../../",],
-)
-
-py_test(
- name = "_connectivity_channel_test",
- srcs = ["_connectivity_channel_test.py"],
- main = "_connectivity_channel_test.py",
- size = "small",
- deps = [
- "//src/python/grpcio/grpc:grpcio",
- ],
-)
-
-# TODO(ghostwriternr): To be added later.
-#py_test(
-# name = "_implementations_test",
-# srcs = ["_implementations_test.py"],
-# main = "_implementations_test.py",
-# size = "small",
-# deps = [
-# "//src/python/grpcio/grpc:grpcio",
-# "//src/python/grpcio_tests/tests/unit:resources",
-# requirement('oauth2client'),
-# ],
-# imports=["../../../",],
-#)
-
-py_test(
- name = "_not_found_test",
- srcs = ["_not_found_test.py"],
- main = "_not_found_test.py",
- size = "small",
- deps = [
- "//src/python/grpcio/grpc:grpcio",
- "//src/python/grpcio_tests/tests/unit/framework/common",
- ],
- imports=["../../../",],
-)
-
-py_test(
- name = "_utilities_test",
- srcs = ["_utilities_test.py"],
- main = "_utilities_test.py",
- size = "small",
- deps = [
- "//src/python/grpcio/grpc:grpcio",
- "//src/python/grpcio_tests/tests/unit/framework/common",
- ],
- imports=["../../../",],
-)
-
-
diff --git a/src/python/grpcio_tests/tests/unit/resources.py b/src/python/grpcio_tests/tests/unit/resources.py
index 51a8979f58..6efd870fc8 100644
--- a/src/python/grpcio_tests/tests/unit/resources.py
+++ b/src/python/grpcio_tests/tests/unit/resources.py
@@ -14,8 +14,7 @@
"""Constants and functions for data used in testing."""
import os
-
-import pkg_resources
+import pkgutil
_ROOT_CERTIFICATES_RESOURCE_PATH = 'credentials/ca.pem'
_PRIVATE_KEY_RESOURCE_PATH = 'credentials/server1.key'
@@ -23,94 +22,92 @@ _CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem'
def test_root_certificates():
- return pkg_resources.resource_string(__name__,
- _ROOT_CERTIFICATES_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _ROOT_CERTIFICATES_RESOURCE_PATH)
def private_key():
- return pkg_resources.resource_string(__name__, _PRIVATE_KEY_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH)
def certificate_chain():
- return pkg_resources.resource_string(__name__,
- _CERTIFICATE_CHAIN_RESOURCE_PATH)
+ return pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
def cert_hier_1_root_ca_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__, 'credentials/certificate_hierarchy_1/certs/ca.cert.pem')
def cert_hier_1_intermediate_ca_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_1/intermediate/certs/intermediate.cert.pem'
)
def cert_hier_1_client_1_key():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_1/intermediate/private/client.key.pem'
)
def cert_hier_1_client_1_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_1/intermediate/certs/client.cert.pem'
)
def cert_hier_1_server_1_key():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_1/intermediate/private/localhost-1.key.pem'
)
def cert_hier_1_server_1_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_1/intermediate/certs/localhost-1.cert.pem'
)
def cert_hier_2_root_ca_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__, 'credentials/certificate_hierarchy_2/certs/ca.cert.pem')
def cert_hier_2_intermediate_ca_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_2/intermediate/certs/intermediate.cert.pem'
)
def cert_hier_2_client_1_key():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_2/intermediate/private/client.key.pem'
)
def cert_hier_2_client_1_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_2/intermediate/certs/client.cert.pem'
)
def cert_hier_2_server_1_key():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_2/intermediate/private/localhost-1.key.pem'
)
def cert_hier_2_server_1_cert():
- return pkg_resources.resource_string(
+ return pkgutil.get_data(
__name__,
'credentials/certificate_hierarchy_2/intermediate/certs/localhost-1.cert.pem'
)
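
resources.py drops the setuptools-provided pkg_resources in favor of the standard-library pkgutil; both return the raw bytes of a file bundled inside a package, so each helper keeps its signature and return type. A minimal sketch of the substitution, using an illustrative wrapper that is not part of the diff:

import pkgutil

def read_package_bytes(package, relative_path):
    # pkgutil.get_data() returns the resource as bytes; it can return None
    # when the package's loader does not expose get_data(), so guard for it.
    data = pkgutil.get_data(package, relative_path)
    if data is None:
        raise OSError('cannot load resource: {}'.format(relative_path))
    return data

# Equivalent to the old pkg_resources call:
#   pkg_resources.resource_string(__name__, 'credentials/ca.pem')
# becomes:
#   read_package_bytes(__name__, 'credentials/ca.pem')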
diff --git a/src/python/grpcio_tests/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py
index 61717ae135..bc3b24862d 100644
--- a/src/python/grpcio_tests/tests/unit/test_common.py
+++ b/src/python/grpcio_tests/tests/unit/test_common.py
@@ -14,6 +14,7 @@
"""Common code used throughout tests of gRPC."""
import collections
+import threading
from concurrent import futures
import grpc
@@ -107,3 +108,28 @@ def test_server(max_workers=10):
return grpc.server(
futures.ThreadPoolExecutor(max_workers=max_workers),
options=(('grpc.so_reuseport', 0),))
+
+
+class WaitGroup(object):
+
+ def __init__(self, n=0):
+ self.count = n
+ self.cv = threading.Condition()
+
+ def add(self, n):
+ self.cv.acquire()
+ self.count += n
+ self.cv.release()
+
+ def done(self):
+ self.cv.acquire()
+ self.count -= 1
+ if self.count == 0:
+ self.cv.notify_all()
+ self.cv.release()
+
+ def wait(self):
+ self.cv.acquire()
+ while self.count > 0:
+ self.cv.wait()
+ self.cv.release()
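
test_common.WaitGroup mirrors Go's sync.WaitGroup on top of a threading.Condition: add() raises the outstanding count, done() decrements it and wakes waiters when it reaches zero, and wait() blocks until the count drains. A hedged usage sketch; the worker function is illustrative and assumes the code runs inside the grpcio_tests tree where tests.unit.test_common is importable:

import threading

from tests.unit import test_common

def run_in_parallel(task, args_list):
    wg = test_common.WaitGroup(len(args_list))  # one slot per worker
    results = [None] * len(args_list)

    def worker(index, args):
        try:
            results[index] = task(*args)
        finally:
            wg.done()  # always release the slot, even if task() raises

    for index, args in enumerate(args_list):
        threading.Thread(target=worker, args=(index, args)).start()
    wg.wait()  # returns only after every worker has called done()
    return results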
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 0e192b6201..18245e9107 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -98,6 +98,7 @@ grpc_resource_quota_set_max_threads_type grpc_resource_quota_set_max_threads_imp
grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import;
grpc_channelz_get_servers_type grpc_channelz_get_servers_import;
+grpc_channelz_get_server_type grpc_channelz_get_server_import;
grpc_channelz_get_server_sockets_type grpc_channelz_get_server_sockets_import;
grpc_channelz_get_channel_type grpc_channelz_get_channel_import;
grpc_channelz_get_subchannel_type grpc_channelz_get_subchannel_import;
@@ -355,6 +356,7 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
grpc_channelz_get_top_channels_import = (grpc_channelz_get_top_channels_type) GetProcAddress(library, "grpc_channelz_get_top_channels");
grpc_channelz_get_servers_import = (grpc_channelz_get_servers_type) GetProcAddress(library, "grpc_channelz_get_servers");
+ grpc_channelz_get_server_import = (grpc_channelz_get_server_type) GetProcAddress(library, "grpc_channelz_get_server");
grpc_channelz_get_server_sockets_import = (grpc_channelz_get_server_sockets_type) GetProcAddress(library, "grpc_channelz_get_server_sockets");
grpc_channelz_get_channel_import = (grpc_channelz_get_channel_type) GetProcAddress(library, "grpc_channelz_get_channel");
grpc_channelz_get_subchannel_import = (grpc_channelz_get_subchannel_type) GetProcAddress(library, "grpc_channelz_get_subchannel");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index e075db89e8..56d222c7ec 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -269,6 +269,9 @@ extern grpc_channelz_get_top_channels_type grpc_channelz_get_top_channels_import
typedef char*(*grpc_channelz_get_servers_type)(intptr_t start_server_id);
extern grpc_channelz_get_servers_type grpc_channelz_get_servers_import;
#define grpc_channelz_get_servers grpc_channelz_get_servers_import
+typedef char*(*grpc_channelz_get_server_type)(intptr_t server_id);
+extern grpc_channelz_get_server_type grpc_channelz_get_server_import;
+#define grpc_channelz_get_server grpc_channelz_get_server_import
typedef char*(*grpc_channelz_get_server_sockets_type)(intptr_t server_id, intptr_t start_socket_id);
extern grpc_channelz_get_server_sockets_type grpc_channelz_get_server_sockets_import;
#define grpc_channelz_get_server_sockets grpc_channelz_get_server_sockets_import
diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb
index 4764217406..169a62f11d 100644
--- a/src/ruby/lib/grpc/generic/service.rb
+++ b/src/ruby/lib/grpc/generic/service.rb
@@ -95,7 +95,7 @@ module GRPC
rpc_descs[name] = RpcDesc.new(name, input, output,
marshal_class_method,
unmarshal_class_method)
- define_method(GenericService.underscore(name.to_s).to_sym) do |_, _|
+ define_method(GenericService.underscore(name.to_s).to_sym) do |*|
fail GRPC::BadStatus.new_status_exception(
GRPC::Core::StatusCodes::UNIMPLEMENTED)
end
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 243d566645..a4ed052d85 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -14,5 +14,5 @@
# GRPC contains the General RPC module.
module GRPC
- VERSION = '1.17.0.dev'
+ VERSION = '1.18.0.dev'
end
diff --git a/src/ruby/pb/grpc/health/checker.rb b/src/ruby/pb/grpc/health/checker.rb
index c492455d8f..7ad68409dd 100644
--- a/src/ruby/pb/grpc/health/checker.rb
+++ b/src/ruby/pb/grpc/health/checker.rb
@@ -14,7 +14,6 @@
require 'grpc'
require 'grpc/health/v1/health_services_pb'
-require 'thread'
module Grpc
# Health contains classes and modules that support providing a health check
@@ -37,9 +36,9 @@ module Grpc
@status_mutex.synchronize do
status = @statuses["#{req.service}"]
end
- if status.nil?
+ if status.nil?
fail GRPC::BadStatus.new_status_exception(StatusCodes::NOT_FOUND)
- end
+ end
HealthCheckResponse.new(status: status)
end
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index 44a6134086..924d747a79 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -342,6 +342,28 @@ describe GRPC::RpcServer do
t.join
end
+ it 'should return UNIMPLEMENTED on unimplemented ' \
+ 'methods for client_streamer', server: true do
+ @srv.handle(EchoService)
+ t = Thread.new { @srv.run }
+ @srv.wait_till_running
+ blk = proc do
+ stub = EchoStub.new(@host, :this_channel_is_insecure, **client_opts)
+ requests = [EchoMsg.new, EchoMsg.new]
+ stub.a_client_streaming_rpc_unimplemented(requests)
+ end
+
+ begin
+ expect(&blk).to raise_error do |error|
+ expect(error).to be_a(GRPC::BadStatus)
+ expect(error.code).to eq(GRPC::Core::StatusCodes::UNIMPLEMENTED)
+ end
+ ensure
+ @srv.stop # must be called to avoid a crash
+ t.join
+ end
+ end
+
it 'should handle multiple sequential requests', server: true do
@srv.handle(EchoService)
t = Thread.new { @srv.run }
diff --git a/src/ruby/spec/support/services.rb b/src/ruby/spec/support/services.rb
index 6e693f1cde..438459dfd7 100644
--- a/src/ruby/spec/support/services.rb
+++ b/src/ruby/spec/support/services.rb
@@ -33,6 +33,7 @@ class EchoService
rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg
rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg)
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
+ rpc :a_client_streaming_rpc_unimplemented, stream(EchoMsg), EchoMsg
attr_reader :received_md
def initialize(**kw)
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index 92e85eb882..389fb70684 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -14,6 +14,6 @@
module GRPC
module Tools
- VERSION = '1.17.0.dev'
+ VERSION = '1.18.0.dev'
end
end