author	Craig Tiller <ctiller@google.com>	2016-06-20 08:09:52 -0700
committer	Craig Tiller <ctiller@google.com>	2016-06-20 08:09:52 -0700
commit	a4d2d0c798a182d5cc57ce630d31614173f2d67b (patch)
tree	a1f0f4e324f8bc65ced4e7b49af3af266f6ad900 /src
parent	ccc8ec54b0cf75f52c0a19a2674d6a6bc8e93743 (diff)
parent	4e29480e430b94392105934750adfd85cb7849ce (diff)
Merge github.com:grpc/grpc into reuse_port
Diffstat (limited to 'src')
-rw-r--r--src/compiler/config.h29
-rw-r--r--src/compiler/cpp_generator.cc7
-rw-r--r--src/compiler/cpp_generator_helpers.h7
-rw-r--r--src/compiler/cpp_plugin.cc2
-rw-r--r--src/compiler/csharp_generator.cc119
-rw-r--r--src/compiler/csharp_generator_helpers.h7
-rw-r--r--src/compiler/generator_helpers.h11
-rw-r--r--src/compiler/node_generator.cc103
-rw-r--r--src/compiler/node_generator.h6
-rw-r--r--src/compiler/node_generator_helpers.h7
-rw-r--r--src/compiler/node_plugin.cc8
-rw-r--r--src/compiler/objective_c_generator.cc46
-rw-r--r--src/compiler/python_generator.cc148
-rw-r--r--src/compiler/python_generator.h1
-rw-r--r--src/compiler/python_plugin.cc1
-rw-r--r--src/compiler/ruby_generator.cc18
-rw-r--r--src/compiler/ruby_generator_helpers-inl.h8
-rw-r--r--src/core/ext/census/gen/README.md6
-rw-r--r--src/core/ext/census/gen/census.pb.c179
-rw-r--r--src/core/ext/census/gen/census.pb.h294
-rw-r--r--src/core/ext/census/grpc_filter.c12
-rw-r--r--src/core/ext/client_config/channel_connectivity.c23
-rw-r--r--src/core/ext/client_config/client_channel.c43
-rw-r--r--src/core/ext/client_config/connector.h2
-rw-r--r--src/core/ext/client_config/lb_policy.c9
-rw-r--r--src/core/ext/client_config/lb_policy.h6
-rw-r--r--src/core/ext/client_config/subchannel.c28
-rw-r--r--src/core/ext/client_config/subchannel.h3
-rw-r--r--src/core/ext/client_config/subchannel_call_holder.c9
-rw-r--r--src/core/ext/client_config/subchannel_call_holder.h3
-rw-r--r--src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h4
-rw-r--r--src/core/ext/lb_policy/pick_first/pick_first.c62
-rw-r--r--src/core/ext/lb_policy/round_robin/round_robin.c90
-rw-r--r--src/core/ext/load_reporting/load_reporting.c133
-rw-r--r--src/core/ext/load_reporting/load_reporting.h75
-rw-r--r--src/core/ext/load_reporting/load_reporting_filter.c151
-rw-r--r--src/core/ext/load_reporting/load_reporting_filter.h41
-rw-r--r--src/core/ext/resolver/dns/native/dns_resolver.c15
-rw-r--r--src/core/ext/resolver/sockaddr/sockaddr_resolver.c4
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create.c4
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c95
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.c22
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2.c16
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c82
-rw-r--r--src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c21
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.c232
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.h66
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.c311
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h3
-rw-r--r--src/core/ext/transport/chttp2/transport/parsing.c10
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.c4
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.c42
-rw-r--r--src/core/lib/channel/channel_args.c26
-rw-r--r--src/core/lib/channel/channel_args.h2
-rw-r--r--src/core/lib/channel/channel_stack.c20
-rw-r--r--src/core/lib/channel/channel_stack.h38
-rw-r--r--src/core/lib/channel/channel_stack_builder.c4
-rw-r--r--src/core/lib/channel/compress_filter.c47
-rw-r--r--src/core/lib/channel/compress_filter.h6
-rw-r--r--src/core/lib/channel/connected_channel.c12
-rw-r--r--src/core/lib/channel/http_client_filter.c37
-rw-r--r--src/core/lib/channel/http_client_filter.h2
-rw-r--r--src/core/lib/channel/http_server_filter.c6
-rw-r--r--src/core/lib/compression/compression.c (renamed from src/core/lib/compression/compression_algorithm.c)44
-rw-r--r--src/core/lib/http/httpcli.c28
-rw-r--r--src/core/lib/http/httpcli.h7
-rw-r--r--src/core/lib/http/httpcli_security_connector.c6
-rw-r--r--src/core/lib/iomgr/closure.h11
-rw-r--r--src/core/lib/iomgr/endpoint.h2
-rw-r--r--src/core/lib/iomgr/error.c19
-rw-r--r--src/core/lib/iomgr/error.h77
-rw-r--r--src/core/lib/iomgr/ev_poll_and_epoll_posix.c2043
-rw-r--r--src/core/lib/iomgr/ev_poll_and_epoll_posix.h41
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.c44
-rw-r--r--src/core/lib/iomgr/ev_posix.c7
-rw-r--r--src/core/lib/iomgr/ev_posix.h6
-rw-r--r--src/core/lib/iomgr/exec_ctx.c6
-rw-r--r--src/core/lib/iomgr/exec_ctx.h6
-rw-r--r--src/core/lib/iomgr/iocp_windows.c2
-rw-r--r--src/core/lib/iomgr/iomgr.c9
-rw-r--r--src/core/lib/iomgr/iomgr_windows.c2
-rw-r--r--src/core/lib/iomgr/polling_entity.c104
-rw-r--r--src/core/lib/iomgr/polling_entity.h81
-rw-r--r--src/core/lib/iomgr/pollset_set_windows.c5
-rw-r--r--src/core/lib/iomgr/pollset_windows.c6
-rw-r--r--src/core/lib/iomgr/resolve_address_posix.c2
-rw-r--r--src/core/lib/iomgr/resolve_address_windows.c4
-rw-r--r--src/core/lib/iomgr/sockaddr.h4
-rw-r--r--src/core/lib/iomgr/sockaddr_windows.h (renamed from src/core/lib/iomgr/sockaddr_win32.h)6
-rw-r--r--src/core/lib/iomgr/socket_utils_common_posix.c32
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.h9
-rw-r--r--src/core/lib/iomgr/socket_windows.c6
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.c12
-rw-r--r--src/core/lib/iomgr/tcp_client_windows.c8
-rw-r--r--src/core/lib/iomgr/tcp_posix.c15
-rw-r--r--src/core/lib/iomgr/tcp_server_posix.c14
-rw-r--r--src/core/lib/iomgr/tcp_server_windows.c4
-rw-r--r--src/core/lib/iomgr/tcp_windows.c28
-rw-r--r--src/core/lib/iomgr/timer.c12
-rw-r--r--src/core/lib/iomgr/udp_server.c14
-rw-r--r--src/core/lib/iomgr/workqueue.h6
-rw-r--r--src/core/lib/iomgr/workqueue_posix.c4
-rw-r--r--src/core/lib/iomgr/workqueue_windows.c4
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.c13
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.h3
-rw-r--r--src/core/lib/security/credentials/credentials.c4
-rw-r--r--src/core/lib/security/credentials/credentials.h6
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.c2
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.h2
-rw-r--r--src/core/lib/security/credentials/google_default/credentials_windows.c (renamed from src/core/lib/security/credentials/google_default/credentials_win32.c)4
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.c28
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.h3
-rw-r--r--src/core/lib/security/credentials/iam/iam_credentials.c2
-rw-r--r--src/core/lib/security/credentials/iam/iam_credentials.h2
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.c2
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.h2
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.c34
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.c16
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.h4
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.c2
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.h2
-rw-r--r--src/core/lib/security/transport/client_auth_filter.c39
-rw-r--r--src/core/lib/security/transport/handshake.c65
-rw-r--r--src/core/lib/security/transport/handshake.h12
-rw-r--r--src/core/lib/security/transport/secure_endpoint.c12
-rw-r--r--src/core/lib/security/transport/security_connector.c25
-rw-r--r--src/core/lib/security/transport/security_connector.h10
-rw-r--r--src/core/lib/security/transport/server_auth_filter.c34
-rw-r--r--src/core/lib/security/transport/tsi_error.c2
-rw-r--r--src/core/lib/security/transport/tsi_error.h2
-rw-r--r--src/core/lib/security/util/json_util.h2
-rw-r--r--src/core/lib/support/cpu_windows.c4
-rw-r--r--src/core/lib/support/env_windows.c (renamed from src/core/lib/support/env_win32.c)6
-rw-r--r--src/core/lib/support/log.c4
-rw-r--r--src/core/lib/support/log_linux.c4
-rw-r--r--src/core/lib/support/log_windows.c (renamed from src/core/lib/support/log_win32.c)8
-rw-r--r--src/core/lib/support/string.c10
-rw-r--r--src/core/lib/support/string.h4
-rw-r--r--src/core/lib/support/string_util_windows.c (renamed from src/core/lib/support/string_util_win32.c)4
-rw-r--r--src/core/lib/support/string_windows.c (renamed from src/core/lib/support/string_win32.c)4
-rw-r--r--src/core/lib/support/string_windows.h (renamed from src/core/lib/support/string_win32.h)10
-rw-r--r--src/core/lib/support/subprocess_windows.c2
-rw-r--r--src/core/lib/support/sync_windows.c (renamed from src/core/lib/support/sync_win32.c)4
-rw-r--r--src/core/lib/support/thd_windows.c (renamed from src/core/lib/support/thd_win32.c)4
-rw-r--r--src/core/lib/support/time_windows.c (renamed from src/core/lib/support/time_win32.c)4
-rw-r--r--src/core/lib/support/tmpfile_msys.c2
-rw-r--r--src/core/lib/support/tmpfile_windows.c (renamed from src/core/lib/support/tmpfile_win32.c)6
-rw-r--r--src/core/lib/surface/call.c398
-rw-r--r--src/core/lib/surface/call.h2
-rw-r--r--src/core/lib/surface/call_log_batch.c2
-rw-r--r--src/core/lib/surface/channel.c62
-rw-r--r--src/core/lib/surface/channel.h9
-rw-r--r--src/core/lib/surface/completion_queue.c8
-rw-r--r--src/core/lib/surface/completion_queue.h2
-rw-r--r--src/core/lib/surface/init.c1
-rw-r--r--src/core/lib/surface/lame_client.c17
-rw-r--r--src/core/lib/surface/server.c34
-rw-r--r--src/core/lib/transport/connectivity_state.c26
-rw-r--r--src/core/lib/transport/metadata.c43
-rw-r--r--src/core/lib/transport/static_metadata.c11
-rw-r--r--src/core/lib/transport/static_metadata.h124
-rw-r--r--src/core/lib/transport/transport.c29
-rw-r--r--src/core/lib/transport/transport.h6
-rw-r--r--src/core/lib/transport/transport_impl.h4
-rw-r--r--src/core/plugin_registry/grpc_cronet_plugin_registry.c46
-rw-r--r--src/core/plugin_registry/grpc_plugin_registry.c4
-rw-r--r--src/core/plugin_registry/grpc_unsecure_plugin_registry.c4
-rw-r--r--src/cpp/client/client_context.cc10
-rw-r--r--src/cpp/client/create_channel_posix.cc56
-rw-r--r--src/cpp/codegen/codegen_init.cc6
-rw-r--r--src/cpp/common/core_codegen.cc2
-rw-r--r--src/cpp/common/core_codegen.h83
-rw-r--r--src/cpp/ext/proto_server_reflection.cc227
-rw-r--r--src/cpp/ext/proto_server_reflection.h94
-rw-r--r--src/cpp/ext/proto_server_reflection_plugin.cc97
-rw-r--r--src/cpp/ext/reflection.grpc.pb.cc97
-rw-r--r--src/cpp/ext/reflection.pb.cc3946
-rw-r--r--src/cpp/server/server.cc10
-rw-r--r--src/cpp/server/server_builder.cc87
-rw-r--r--src/cpp/server/server_context.cc6
-rw-r--r--src/cpp/server/server_posix.cc49
-rw-r--r--src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs90
-rw-r--r--src/csharp/Grpc.Core.Tests/ChannelTest.cs6
-rw-r--r--src/csharp/Grpc.Core.Tests/ClientServerTest.cs24
-rw-r--r--src/csharp/Grpc.Core.Tests/ContextPropagationTest.cs10
-rw-r--r--src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj4
-rw-r--r--src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs20
-rw-r--r--src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs20
-rw-r--r--src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs37
-rw-r--r--src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs4
-rw-r--r--src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs10
-rw-r--r--src/csharp/Grpc.Core.Tests/MockServiceHelper.cs2
-rw-r--r--src/csharp/Grpc.Core.Tests/PInvokeTest.cs2
-rw-r--r--src/csharp/Grpc.Core.Tests/ServerTest.cs17
-rw-r--r--src/csharp/Grpc.Core.Tests/ShutdownHookClientTest.cs (renamed from src/proto/grpc/testing/perf_db.proto)60
-rw-r--r--src/csharp/Grpc.Core.Tests/ShutdownHookPendingCallTest.cs (renamed from src/csharp/Grpc.Core/Internal/AsyncCompletion.cs)69
-rw-r--r--src/csharp/Grpc.Core.Tests/ShutdownHookServerTest.cs58
-rw-r--r--src/csharp/Grpc.Core/AsyncClientStreamingCall.cs4
-rw-r--r--src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs4
-rw-r--r--src/csharp/Grpc.Core/AsyncServerStreamingCall.cs4
-rw-r--r--src/csharp/Grpc.Core/AsyncUnaryCall.cs4
-rw-r--r--src/csharp/Grpc.Core/CallOptions.cs8
-rw-r--r--src/csharp/Grpc.Core/Channel.cs68
-rw-r--r--src/csharp/Grpc.Core/ChannelState.cs2
-rw-r--r--src/csharp/Grpc.Core/Grpc.Core.csproj3
-rw-r--r--src/csharp/Grpc.Core/GrpcEnvironment.cs173
-rw-r--r--src/csharp/Grpc.Core/IAsyncStreamReader.cs16
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCall.cs102
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCallBase.cs96
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCallServer.cs36
-rw-r--r--src/csharp/Grpc.Core/Internal/CallSafeHandle.cs26
-rw-r--r--src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs9
-rw-r--r--src/csharp/Grpc.Core/Internal/ClientRequestStream.cs8
-rw-r--r--src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs16
-rw-r--r--src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs94
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeMethods.cs11
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerCallHandler.cs32
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerResponseStream.cs8
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs27
-rw-r--r--src/csharp/Grpc.Core/Logging/LogLevel.cs59
-rw-r--r--src/csharp/Grpc.Core/Logging/LogLevelFilterLogger.cs160
-rw-r--r--src/csharp/Grpc.Core/Metadata.cs5
-rw-r--r--src/csharp/Grpc.Core/Server.cs129
-rw-r--r--src/csharp/Grpc.Core/ServerServiceDefinition.cs10
-rw-r--r--src/csharp/Grpc.Core/WriteOptions.cs4
-rw-r--r--src/csharp/Grpc.Examples/MathGrpc.cs137
-rw-r--r--src/csharp/Grpc.HealthCheck/HealthGrpc.cs34
-rw-r--r--src/csharp/Grpc.IntegrationTesting/GenericService.cs2
-rw-r--r--src/csharp/Grpc.IntegrationTesting/InteropClient.cs39
-rw-r--r--src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs65
-rw-r--r--src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs196
-rw-r--r--src/csharp/Grpc.IntegrationTesting/TestGrpc.cs232
-rw-r--r--src/csharp/ext/grpc_csharp_ext.c24
-rw-r--r--src/csharp/tests.json4
-rw-r--r--src/node/ext/byte_buffer.cc14
-rw-r--r--src/node/ext/node_grpc.cc2
-rw-r--r--src/node/ext/server.cc38
-rw-r--r--src/node/performance/benchmark_client.js50
-rw-r--r--src/node/test/math/math_grpc_pb.js75
-rw-r--r--src/node/test/math/math_pb.js10
-rw-r--r--src/objective-c/CronetFramework.podspec43
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+Cronet.h57
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+Cronet.m56
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.h2
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.m1
-rw-r--r--src/objective-c/GRPCClient/private/GRPCChannel.h7
-rw-r--r--src/objective-c/GRPCClient/private/GRPCChannel.m38
-rw-r--r--src/objective-c/GRPCClient/private/GRPCHost.m20
-rw-r--r--src/objective-c/GRPCClient/private/GRPCWrappedCall.m2
-rw-r--r--src/objective-c/ProtoRPC/ProtoMethod.h9
-rw-r--r--src/objective-c/ProtoRPC/ProtoMethod.m4
-rw-r--r--src/objective-c/ProtoRPC/ProtoRPC.h15
-rw-r--r--src/objective-c/ProtoRPC/ProtoRPC.m6
-rw-r--r--src/objective-c/ProtoRPC/ProtoService.h15
-rw-r--r--src/objective-c/ProtoRPC/ProtoService.m4
-rw-r--r--src/objective-c/tests/InteropTests.m16
-rw-r--r--src/objective-c/tests/Podfile25
-rw-r--r--src/objective-c/tests/RemoteTestClient/RemoteTest.podspec4
-rw-r--r--src/objective-c/tests/Tests.xcodeproj/project.pbxproj252
-rwxr-xr-xsrc/objective-c/tests/build_tests.sh3
-rwxr-xr-x[-rw-r--r--]src/php/bin/stress_client.sh (renamed from src/python/grpcio/grpc/_adapter/_implementations.py)25
-rw-r--r--src/php/ext/grpc/call.c1
-rw-r--r--src/php/ext/grpc/php_grpc.c2
-rw-r--r--src/php/lib/Grpc/AbstractCall.php10
-rwxr-xr-xsrc/php/lib/Grpc/BaseStub.php13
-rw-r--r--src/php/lib/Grpc/BidiStreamingCall.php1
-rw-r--r--src/php/lib/Grpc/ClientStreamingCall.php4
-rw-r--r--src/php/lib/Grpc/ServerStreamingCall.php1
-rw-r--r--src/php/lib/Grpc/UnaryCall.php4
-rwxr-xr-xsrc/php/tests/interop/interop_client.php115
-rw-r--r--src/php/tests/interop/messages.proto38
-rw-r--r--src/php/tests/interop/metrics_client.php6
-rw-r--r--src/php/tests/interop/stress_client.php103
-rw-r--r--src/php/tests/interop/test.proto9
-rw-r--r--src/proto/census/census.options3
-rw-r--r--src/proto/census/census.proto313
-rw-r--r--src/proto/grpc/testing/messages.proto19
-rw-r--r--src/proto/math/math.proto6
-rw-r--r--src/python/grpcio/commands.py2
-rw-r--r--src/python/grpcio/grpc/__init__.py1210
-rw-r--r--src/python/grpcio/grpc/_adapter/_low.py76
-rw-r--r--src/python/grpcio/grpc/_adapter/_types.py2
-rw-r--r--src/python/grpcio/grpc/_auth.py86
-rw-r--r--src/python/grpcio/grpc/_channel.py852
-rw-r--r--src/python/grpcio/grpc/_common.py154
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi7
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi24
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi3
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi59
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi4
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi55
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi10
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi39
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi71
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi21
-rw-r--r--src/python/grpcio/grpc/_cython/cygrpc.pyx1
-rw-r--r--src/python/grpcio/grpc/_cython/imports.generated.c10
-rw-r--r--src/python/grpcio/grpc/_cython/imports.generated.h23
-rw-r--r--src/python/grpcio/grpc/_cython/loader.c4
-rw-r--r--src/python/grpcio/grpc/_links/service.py6
-rw-r--r--src/python/grpcio/grpc/_plugin_wrapping.py123
-rw-r--r--src/python/grpcio/grpc/_server.py755
-rw-r--r--src/python/grpcio/grpc/_utilities.py171
-rw-r--r--src/python/grpcio/grpc/beta/_client_adaptations.py563
-rw-r--r--src/python/grpcio/grpc/beta/_server_adaptations.py367
-rw-r--r--src/python/grpcio/grpc/beta/implementations.py197
-rw-r--r--src/python/grpcio/grpc/beta/interfaces.py90
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py31
-rw-r--r--src/python/grpcio/tests/interop/client.py42
-rw-r--r--src/python/grpcio/tests/interop/methods.py43
-rw-r--r--src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py583
-rw-r--r--src/python/grpcio/tests/qps/benchmark_client.py102
-rw-r--r--src/python/grpcio/tests/qps/client_runner.py14
-rw-r--r--src/python/grpcio/tests/qps/qps_worker.py4
-rw-r--r--src/python/grpcio/tests/qps/worker_server.py5
-rw-r--r--src/python/grpcio/tests/stress/client.py6
-rw-r--r--src/python/grpcio/tests/tests.json15
-rw-r--r--src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py43
-rw-r--r--src/python/grpcio/tests/unit/_adapter/_low_test.py16
-rw-r--r--src/python/grpcio/tests/unit/_api_test.py104
-rw-r--r--src/python/grpcio/tests/unit/_auth_test.py96
-rw-r--r--src/python/grpcio/tests/unit/_channel_connectivity_test.py161
-rw-r--r--src/python/grpcio/tests/unit/_channel_ready_future_test.py103
-rw-r--r--src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py222
-rw-r--r--src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py251
-rw-r--r--src/python/grpcio/tests/unit/_cython/cygrpc_test.py286
-rw-r--r--src/python/grpcio/tests/unit/_cython/test_utilities.py34
-rw-r--r--src/python/grpcio/tests/unit/_empty_message_test.py137
-rw-r--r--src/python/grpcio/tests/unit/_from_grpc_import_star.py38
-rw-r--r--src/python/grpcio/tests/unit/_links/_transmission_test.py2
-rw-r--r--src/python/grpcio/tests/unit/_metadata_test.py216
-rw-r--r--src/python/grpcio/tests/unit/_rpc_test.py768
-rw-r--r--src/python/grpcio/tests/unit/_thread_cleanup_test.py117
-rw-r--r--src/python/grpcio/tests/unit/beta/_beta_features_test.py4
-rw-r--r--src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py10
-rw-r--r--src/python/grpcio/tests/unit/beta/_implementations_test.py17
-rw-r--r--src/python/grpcio/tests/unit/beta/_not_found_test.py2
-rw-r--r--src/python/grpcio/tests/unit/beta/test_utilities.py13
-rw-r--r--src/python/grpcio/tests/unit/framework/common/test_control.py26
-rw-r--r--src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py6
-rw-r--r--src/python/grpcio/tests/unit/test_common.py4
-rwxr-xr-xsrc/ruby/bin/math_services.rb43
-rw-r--r--src/ruby/ext/grpc/rb_call.c82
-rw-r--r--src/ruby/ext/grpc/rb_channel.c2
-rw-r--r--src/ruby/ext/grpc/rb_completion_queue.c13
-rw-r--r--src/ruby/ext/grpc/rb_grpc.c2
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.c10
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.h21
-rw-r--r--src/ruby/ext/grpc/rb_loader.c2
-rw-r--r--src/ruby/ext/grpc/rb_server.c6
-rw-r--r--src/ruby/lib/grpc/generic/active_call.rb4
-rw-r--r--src/ruby/lib/grpc/generic/bidi_call.rb20
-rw-r--r--src/ruby/lib/grpc/generic/rpc_server.rb12
-rw-r--r--src/ruby/pb/grpc/health/v1/health_services.rb32
-rw-r--r--src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb38
-rw-r--r--src/ruby/pb/grpc/testing/metrics_services.rb41
-rw-r--r--src/ruby/pb/src/proto/grpc/testing/empty.rb15
-rw-r--r--src/ruby/pb/src/proto/grpc/testing/messages.rb84
-rw-r--r--src/ruby/pb/src/proto/grpc/testing/test.rb14
-rw-r--r--src/ruby/pb/src/proto/grpc/testing/test_services.rb110
-rw-r--r--src/ruby/qps/src/proto/grpc/testing/services_services.rb54
361 files changed, 20858 insertions, 3562 deletions
diff --git a/src/compiler/config.h b/src/compiler/config.h
index d8b95818db..1cbd842f0a 100644
--- a/src/compiler/config.h
+++ b/src/compiler/config.h
@@ -34,19 +34,7 @@
#ifndef SRC_COMPILER_CONFIG_H
#define SRC_COMPILER_CONFIG_H
-#include <grpc++/support/config.h>
-#include <grpc++/support/config_protobuf.h>
-
-#ifndef GRPC_CUSTOM_DESCRIPTOR
-#include <google/protobuf/descriptor.h>
-#include <google/protobuf/descriptor.pb.h>
-#define GRPC_CUSTOM_DESCRIPTOR ::google::protobuf::Descriptor
-#define GRPC_CUSTOM_FILEDESCRIPTOR ::google::protobuf::FileDescriptor
-#define GRPC_CUSTOM_FILEDESCRIPTORPROTO ::google::protobuf::FileDescriptorProto
-#define GRPC_CUSTOM_METHODDESCRIPTOR ::google::protobuf::MethodDescriptor
-#define GRPC_CUSTOM_SERVICEDESCRIPTOR ::google::protobuf::ServiceDescriptor
-#define GRPC_CUSTOM_SOURCELOCATION ::google::protobuf::SourceLocation
-#endif
+#include <grpc++/impl/codegen/config_protobuf.h>
#ifndef GRPC_CUSTOM_CODEGENERATOR
#include <google/protobuf/compiler/code_generator.h>
@@ -75,14 +63,17 @@
#define GRPC_CUSTOM_PARSEGENERATORPARAMETER ::google::protobuf::compiler::ParseGeneratorParameter
#endif
+#ifndef GRPC_CUSTOM_STRING
+#include <string>
+#define GRPC_CUSTOM_STRING std::string
+#endif
+
namespace grpc {
+
+typedef GRPC_CUSTOM_STRING string;
+
namespace protobuf {
-typedef GRPC_CUSTOM_DESCRIPTOR Descriptor;
-typedef GRPC_CUSTOM_FILEDESCRIPTOR FileDescriptor;
-typedef GRPC_CUSTOM_FILEDESCRIPTORPROTO FileDescriptorProto;
-typedef GRPC_CUSTOM_METHODDESCRIPTOR MethodDescriptor;
-typedef GRPC_CUSTOM_SERVICEDESCRIPTOR ServiceDescriptor;
-typedef GRPC_CUSTOM_SOURCELOCATION SourceLocation;
+
namespace compiler {
typedef GRPC_CUSTOM_CODEGENERATOR CodeGenerator;
typedef GRPC_CUSTOM_GENERATORCONTEXT GeneratorContext;
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index e2f127094a..2288ba4163 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -73,9 +73,10 @@ void PrintIncludes(Printer *printer, const std::vector<grpc::string>& headers, c
vars["l"] = params.use_system_headers ? '<' : '"';
vars["r"] = params.use_system_headers ? '>' : '"';
- if (!params.grpc_search_path.empty()) {
- vars["l"] += params.grpc_search_path;
- if (params.grpc_search_path.back() != '/') {
+ auto& s = params.grpc_search_path;
+ if (!s.empty()) {
+ vars["l"] += s;
+ if (s[s.size()-1] != '/') {
vars["l"] += '/';
}
}
diff --git a/src/compiler/cpp_generator_helpers.h b/src/compiler/cpp_generator_helpers.h
index be68cbe695..87e278f1b9 100644
--- a/src/compiler/cpp_generator_helpers.h
+++ b/src/compiler/cpp_generator_helpers.h
@@ -65,6 +65,13 @@ inline grpc::string ClassName(const grpc::protobuf::Descriptor *descriptor,
}
}
+// Get leading or trailing comments in a string. Comment lines start with "// ".
+// Leading detached comments are put in in front of leading comments.
+template <typename DescriptorType>
+inline grpc::string GetCppComments(const DescriptorType *desc, bool leading) {
+ return grpc_generator::GetPrefixedComments(desc, leading, "//");
+}
+
} // namespace grpc_cpp_generator
#endif // GRPC_INTERNAL_COMPILER_CPP_GENERATOR_HELPERS_H
diff --git a/src/compiler/cpp_plugin.cc b/src/compiler/cpp_plugin.cc
index 0ec183e474..fc0296cd28 100644
--- a/src/compiler/cpp_plugin.cc
+++ b/src/compiler/cpp_plugin.cc
@@ -43,7 +43,7 @@
#include "src/compiler/cpp_generator_helpers.h"
#include "src/compiler/generator_helpers.h"
-using grpc_generator::GetCppComments;
+using grpc_cpp_generator::GetCppComments;
class ProtoBufMethod : public grpc_cpp_generator::Method {
public:
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index 29c359c539..fc8feaf0fc 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -52,7 +52,6 @@ using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc_generator::MethodType;
-using grpc_generator::GetCppComments;
using grpc_generator::GetMethodType;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_CLIENT_STREAMING;
@@ -120,18 +119,10 @@ std::string GetServiceClassName(const ServiceDescriptor* service) {
return service->name();
}
-std::string GetClientInterfaceName(const ServiceDescriptor* service) {
- return "I" + service->name() + "Client";
-}
-
std::string GetClientClassName(const ServiceDescriptor* service) {
return service->name() + "Client";
}
-std::string GetServerInterfaceName(const ServiceDescriptor* service) {
- return "I" + service->name();
-}
-
std::string GetServerClassName(const ServiceDescriptor* service) {
return service->name() + "Base";
}
@@ -303,86 +294,6 @@ void GenerateServiceDescriptorProperty(Printer* out, const ServiceDescriptor *se
out->Print("\n");
}
-void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) {
- out->Print("/// <summary>Client for $servicename$</summary>\n",
- "servicename", GetServiceClassName(service));
- out->Print("[System.Obsolete(\"Client side interfaced will be removed "
- "in the next release. Use client class directly.\")]\n");
- out->Print("public interface $name$\n", "name",
- GetClientInterfaceName(service));
- out->Print("{\n");
- out->Indent();
- for (int i = 0; i < service->method_count(); i++) {
- const MethodDescriptor *method = service->method(i);
- MethodType method_type = GetMethodType(method);
-
- if (method_type == METHODTYPE_NO_STREAMING) {
- // unary calls have an extra synchronous stub method
- GenerateDocCommentBody(out, method);
- out->Print(
- "$response$ $methodname$($request$ request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));\n",
- "methodname", method->name(), "request",
- GetClassName(method->input_type()), "response",
- GetClassName(method->output_type()));
-
- // overload taking CallOptions as a param
- GenerateDocCommentBody(out, method);
- out->Print(
- "$response$ $methodname$($request$ request, CallOptions options);\n",
- "methodname", method->name(), "request",
- GetClassName(method->input_type()), "response",
- GetClassName(method->output_type()));
- }
-
- std::string method_name = method->name();
- if (method_type == METHODTYPE_NO_STREAMING) {
- method_name += "Async"; // prevent name clash with synchronous method.
- }
- GenerateDocCommentBody(out, method);
- out->Print(
- "$returntype$ $methodname$($request_maybe$Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));\n",
- "methodname", method_name, "request_maybe",
- GetMethodRequestParamMaybe(method), "returntype",
- GetMethodReturnTypeClient(method));
-
- // overload taking CallOptions as a param
- GenerateDocCommentBody(out, method);
- out->Print(
- "$returntype$ $methodname$($request_maybe$CallOptions options);\n",
- "methodname", method_name, "request_maybe",
- GetMethodRequestParamMaybe(method), "returntype",
- GetMethodReturnTypeClient(method));
- }
- out->Outdent();
- out->Print("}\n");
- out->Print("\n");
-}
-
-void GenerateServerInterface(Printer* out, const ServiceDescriptor *service) {
- out->Print("/// <summary>Interface of server-side implementations of $servicename$</summary>\n",
- "servicename", GetServiceClassName(service));
- out->Print("[System.Obsolete(\"Service implementations should inherit"
- " from the generated abstract base class instead.\")]\n");
- out->Print("public interface $name$\n", "name",
- GetServerInterfaceName(service));
- out->Print("{\n");
- out->Indent();
- for (int i = 0; i < service->method_count(); i++) {
- const MethodDescriptor *method = service->method(i);
- GenerateDocCommentBody(out, method);
- out->Print(
- "$returntype$ $methodname$($request$$response_stream_maybe$, "
- "ServerCallContext context);\n",
- "methodname", method->name(), "returntype",
- GetMethodReturnTypeServer(method), "request",
- GetMethodRequestParamServer(method), "response_stream_maybe",
- GetMethodResponseStreamMaybe(method));
- }
- out->Outdent();
- out->Print("}\n");
- out->Print("\n");
-}
-
void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
out->Print("/// <summary>Base class for server-side implementations of $servicename$</summary>\n",
"servicename", GetServiceClassName(service));
@@ -415,12 +326,9 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor *service) {
void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Print("/// <summary>Client for $servicename$</summary>\n",
"servicename", GetServiceClassName(service));
- out->Print("#pragma warning disable 0618\n");
out->Print(
- "public class $name$ : ClientBase<$name$>, $interface$\n",
- "name", GetClientClassName(service),
- "interface", GetClientInterfaceName(service));
- out->Print("#pragma warning restore 0618\n");
+ "public class $name$ : ClientBase<$name$>\n",
+ "name", GetClientClassName(service));
out->Print("{\n");
out->Indent();
@@ -547,22 +455,16 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Print("\n");
}
-void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service,
- bool use_server_class) {
+void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
out->Print(
"/// <summary>Creates service definition that can be registered with a server</summary>\n");
- out->Print("#pragma warning disable 0618\n");
out->Print(
- "public static ServerServiceDefinition BindService($interface$ serviceImpl)\n",
- "interface", use_server_class ? GetServerClassName(service) :
- GetServerInterfaceName(service));
- out->Print("#pragma warning restore 0618\n");
+ "public static ServerServiceDefinition BindService($implclass$ serviceImpl)\n",
+ "implclass", GetServerClassName(service));
out->Print("{\n");
out->Indent();
- out->Print(
- "return ServerServiceDefinition.CreateBuilder($servicenamefield$)\n",
- "servicenamefield", GetServiceNameFieldName());
+ out->Print("return ServerServiceDefinition.CreateBuilder()\n");
out->Indent();
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
@@ -617,11 +519,7 @@ void GenerateService(Printer* out, const ServiceDescriptor *service,
}
GenerateServiceDescriptorProperty(out, service);
- if (generate_client) {
- GenerateClientInterface(out, service);
- }
if (generate_server) {
- GenerateServerInterface(out, service);
GenerateServerClass(out, service);
}
if (generate_client) {
@@ -629,8 +527,7 @@ void GenerateService(Printer* out, const ServiceDescriptor *service,
GenerateNewStubMethods(out, service);
}
if (generate_server) {
- GenerateBindServiceMethod(out, service, false);
- GenerateBindServiceMethod(out, service, true);
+ GenerateBindServiceMethod(out, service);
}
out->Outdent();
@@ -659,7 +556,7 @@ grpc::string GetServices(const FileDescriptor *file, bool generate_client,
out.Print("// source: $filename$\n", "filename", file->name());
// use C++ style as there are no file-level XML comments in .NET
- grpc::string leading_comments = GetCppComments(file, true);
+ grpc::string leading_comments = GetCsharpComments(file, true);
if (!leading_comments.empty()) {
out.Print("// Original file comments:\n");
out.Print(leading_comments.c_str());
diff --git a/src/compiler/csharp_generator_helpers.h b/src/compiler/csharp_generator_helpers.h
index 5639ea058b..9bdf6fb535 100644
--- a/src/compiler/csharp_generator_helpers.h
+++ b/src/compiler/csharp_generator_helpers.h
@@ -45,6 +45,13 @@ inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
return true;
}
+// Get leading or trailing comments in a string. Comment lines start with "// ".
+// Leading detached comments are put in in front of leading comments.
+template <typename DescriptorType>
+inline grpc::string GetCsharpComments(const DescriptorType *desc, bool leading) {
+ return grpc_generator::GetPrefixedComments(desc, leading, "//");
+}
+
} // namespace grpc_csharp_generator
#endif // GRPC_INTERNAL_COMPILER_CSHARP_GENERATOR_HELPERS_H
diff --git a/src/compiler/generator_helpers.h b/src/compiler/generator_helpers.h
index bd077cf798..9a88c2bfcc 100644
--- a/src/compiler/generator_helpers.h
+++ b/src/compiler/generator_helpers.h
@@ -253,7 +253,8 @@ inline void GetComment(const grpc::protobuf::FileDescriptor *desc,
inline grpc::string GenerateCommentsWithPrefix(
const std::vector<grpc::string> &in, const grpc::string &prefix) {
std::ostringstream oss;
- for (const grpc::string &elem : in) {
+ for (auto it = in.begin(); it != in.end(); it++) {
+ const grpc::string& elem = *it;
if (elem.empty()) {
oss << prefix << "\n";
} else if (elem[0] == ' ') {
@@ -265,10 +266,10 @@ inline grpc::string GenerateCommentsWithPrefix(
return oss.str();
}
-// Get leading or trailing comments in a string. Comment lines start with "// ".
-// Leading detached comments are put in in front of leading comments.
template <typename DescriptorType>
-inline grpc::string GetCppComments(const DescriptorType *desc, bool leading) {
+inline grpc::string GetPrefixedComments(const DescriptorType *desc,
+ bool leading,
+ const grpc::string &prefix) {
std::vector<grpc::string> out;
if (leading) {
grpc_generator::GetComment(
@@ -281,7 +282,7 @@ inline grpc::string GetCppComments(const DescriptorType *desc, bool leading) {
grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_TRAILING,
&out);
}
- return GenerateCommentsWithPrefix(out, "//");
+ return GenerateCommentsWithPrefix(out, prefix);
}
} // namespace grpc_generator
diff --git a/src/compiler/node_generator.cc b/src/compiler/node_generator.cc
index 822622cccf..986b97c26e 100644
--- a/src/compiler/node_generator.cc
+++ b/src/compiler/node_generator.cc
@@ -181,62 +181,67 @@ void PrintMethod(const MethodDescriptor *method, Printer *out) {
// Prints out the service descriptor object
void PrintService(const ServiceDescriptor *service, Printer *out) {
map<grpc::string, grpc::string> template_vars;
+ out->Print(GetNodeComments(service, true).c_str());
template_vars["name"] = service->name();
out->Print(template_vars, "var $name$Service = exports.$name$Service = {\n");
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
grpc::string method_name = grpc_generator::LowercaseFirstLetter(
service->method(i)->name());
+ out->Print(GetNodeComments(service->method(i), true).c_str());
out->Print("$method_name$: ",
"method_name", method_name);
PrintMethod(service->method(i), out);
out->Print(",\n");
+ out->Print(GetNodeComments(service->method(i), false).c_str());
}
out->Outdent();
out->Print("};\n\n");
out->Print(template_vars, "exports.$name$Client = "
"grpc.makeGenericClientConstructor($name$Service);\n");
+ out->Print(GetNodeComments(service, false).c_str());
}
-}
-
-grpc::string GetImports(const FileDescriptor *file) {
- grpc::string output;
- {
- StringOutputStream output_stream(&output);
- Printer out(&output_stream, '$');
-
- if (file->service_count() == 0) {
- return output;
- }
-
- out.Print("// GENERATED CODE -- DO NOT EDIT!\n\n");
+void PrintImports(const FileDescriptor *file, Printer *out) {
+ out->Print("var grpc = require('grpc');\n");
+ if (file->message_type_count() > 0) {
+ grpc::string file_path = GetRelativePath(file->name(),
+ GetJSMessageFilename(
+ file->name()));
+ out->Print("var $module_alias$ = require('$file_path$');\n",
+ "module_alias", ModuleAlias(file->name()),
+ "file_path", file_path);
+ }
- out.Print("'use strict';\n");
+ for (int i = 0; i < file->dependency_count(); i++) {
+ grpc::string file_path = GetRelativePath(
+ file->name(), GetJSMessageFilename(file->dependency(i)->name()));
+ out->Print("var $module_alias$ = require('$file_path$');\n",
+ "module_alias", ModuleAlias(file->dependency(i)->name()),
+ "file_path", file_path);
+ }
+ out->Print("\n");
+}
- out.Print("var grpc = require('grpc');\n");
- if (file->message_type_count() > 0) {
- grpc::string file_path = GetRelativePath(file->name(),
- GetJSMessageFilename(
- file->name()));
- out.Print("var $module_alias$ = require('$file_path$');\n",
- "module_alias", ModuleAlias(file->name()),
- "file_path", file_path);
- }
+void PrintTransformers(const FileDescriptor *file, Printer *out) {
+ map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
+ for (std::map<grpc::string, const Descriptor*>::iterator it =
+ messages.begin();
+ it != messages.end(); it++) {
+ PrintMessageTransformer(it->second, out);
+ }
+ out->Print("\n");
+}
- for (int i = 0; i < file->dependency_count(); i++) {
- grpc::string file_path = GetRelativePath(
- file->name(), GetJSMessageFilename(file->dependency(i)->name()));
- out.Print("var $module_alias$ = require('$file_path$');\n",
- "module_alias", ModuleAlias(file->dependency(i)->name()),
- "file_path", file_path);
- }
- out.Print("\n");
+void PrintServices(const FileDescriptor *file, Printer *out) {
+ for (int i = 0; i < file->service_count(); i++) {
+ PrintService(file->service(i), out);
}
- return output;
}
-grpc::string GetTransformers(const FileDescriptor *file) {
+}
+
+grpc::string GenerateFile(const FileDescriptor *file) {
grpc::string output;
{
StringOutputStream output_stream(&output);
@@ -245,31 +250,23 @@ grpc::string GetTransformers(const FileDescriptor *file) {
if (file->service_count() == 0) {
return output;
}
+ out.Print("// GENERATED CODE -- DO NOT EDIT!\n\n");
- map<grpc::string, const Descriptor*> messages = GetAllMessages(file);
- for (std::map<grpc::string, const Descriptor*>::iterator it =
- messages.begin();
- it != messages.end(); it++) {
- PrintMessageTransformer(it->second, &out);
+ grpc::string leading_comments = GetNodeComments(file, true);
+ if (!leading_comments.empty()) {
+ out.Print("// Original file comments:\n");
+ out.Print(leading_comments.c_str());
}
- out.Print("\n");
- }
- return output;
-}
-grpc::string GetServices(const FileDescriptor *file) {
- grpc::string output;
- {
- StringOutputStream output_stream(&output);
- Printer out(&output_stream, '$');
+ out.Print("'use strict';\n");
- if (file->service_count() == 0) {
- return output;
- }
+ PrintImports(file, &out);
- for (int i = 0; i < file->service_count(); i++) {
- PrintService(file->service(i), &out);
- }
+ PrintTransformers(file, &out);
+
+ PrintServices(file, &out);
+
+ out.Print(GetNodeComments(file, false).c_str());
}
return output;
}
diff --git a/src/compiler/node_generator.h b/src/compiler/node_generator.h
index 249a0d011f..d7765e2d28 100644
--- a/src/compiler/node_generator.h
+++ b/src/compiler/node_generator.h
@@ -38,11 +38,7 @@
namespace grpc_node_generator {
-grpc::string GetImports(const grpc::protobuf::FileDescriptor *file);
-
-grpc::string GetTransformers(const grpc::protobuf::FileDescriptor *file);
-
-grpc::string GetServices(const grpc::protobuf::FileDescriptor *file);
+grpc::string GenerateFile(const grpc::protobuf::FileDescriptor *file);
} // namespace grpc_node_generator
diff --git a/src/compiler/node_generator_helpers.h b/src/compiler/node_generator_helpers.h
index f41a2bcf59..5862772841 100644
--- a/src/compiler/node_generator_helpers.h
+++ b/src/compiler/node_generator_helpers.h
@@ -45,6 +45,13 @@ inline grpc::string GetJSServiceFilename(const grpc::string& filename) {
return grpc_generator::StripProto(filename) + "_grpc_pb.js";
}
+// Get leading or trailing comments in a string. Comment lines start with "// ".
+// Leading detached comments are put in in front of leading comments.
+template <typename DescriptorType>
+inline grpc::string GetNodeComments(const DescriptorType *desc, bool leading) {
+ return grpc_generator::GetPrefixedComments(desc, leading, "//");
+}
+
} // namespace grpc_node_generator
#endif // GRPC_INTERNAL_COMPILER_NODE_GENERATOR_HELPERS_H
diff --git a/src/compiler/node_plugin.cc b/src/compiler/node_plugin.cc
index ac5ced3558..39dfa77b8d 100644
--- a/src/compiler/node_plugin.cc
+++ b/src/compiler/node_plugin.cc
@@ -39,10 +39,8 @@
#include "src/compiler/node_generator.h"
#include "src/compiler/node_generator_helpers.h"
-using grpc_node_generator::GetImports;
+using grpc_node_generator::GenerateFile;
using grpc_node_generator::GetJSServiceFilename;
-using grpc_node_generator::GetServices;
-using grpc_node_generator::GetTransformers;
class NodeGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
@@ -53,9 +51,7 @@ class NodeGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
const grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
- grpc::string code = GetImports(file) +
- GetTransformers(file) +
- GetServices(file);
+ grpc::string code = GenerateFile(file);
if (code.size() == 0) {
return true;
}
diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index 465491e385..4be8cb4187 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -60,9 +60,34 @@ void PrintProtoRpcDeclarationAsPragma(Printer *printer,
" returns ($server_stream$$response_type$)\n\n");
}
+template <typename DescriptorType>
+static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
+ std::vector<grpc::string> comments;
+ grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED,
+ &comments);
+ grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING,
+ &comments);
+ grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_TRAILING,
+ &comments);
+ if (comments.empty()) {
+ return;
+ }
+ printer->Print("/**\n");
+ for (auto it = comments.begin(); it != comments.end(); ++it) {
+ printer->Print(" * ");
+ size_t start_pos = it->find_first_not_of(' ');
+ if (start_pos != grpc::string::npos) {
+ printer->Print(it->c_str() + start_pos);
+ }
+ printer->Print("\n");
+ }
+ printer->Print(" */\n");
+}
+
void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
const map< ::grpc::string, ::grpc::string> &vars) {
- // TODO(jcanizales): Print method comments.
+ // Print comment
+ PrintAllComments(method, printer);
printer->Print(vars, "- ($return_type$)$method_name$With");
if (method->client_streaming()) {
@@ -94,7 +119,7 @@ void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method,
void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
map< ::grpc::string, ::grpc::string> vars) {
vars["method_name"] = "RPCTo" + vars["method_name"];
- vars["return_type"] = "ProtoRPC *";
+ vars["return_type"] = "GRPCProtoCall *";
PrintMethodSignature(printer, method, vars);
}
@@ -195,11 +220,13 @@ void PrintMethodImplementations(Printer *printer,
printer.Print("@end\n\n");
printer.Print(
- "// Basic service implementation, over gRPC, that only does"
- " marshalling and parsing.\n");
+ "/**\n"
+ " * Basic service implementation, over gRPC, that only does\n"
+ " * marshalling and parsing.\n"
+ " */\n");
printer.Print(vars,
"@interface $service_class$ :"
- " ProtoService<$service_class$>\n");
+ " GRPCProtoService<$service_class$>\n");
printer.Print(
"- (instancetype)initWithHost:(NSString *)host"
" NS_DESIGNATED_INITIALIZER;\n");
@@ -220,18 +247,13 @@ void PrintMethodImplementations(Printer *printer,
{"service_class", ServiceClassName(service)},
{"package", service->file()->package()}};
- printer.Print(vars,
- "static NSString *const kPackageName = @\"$package$\";\n");
- printer.Print(
- vars, "static NSString *const kServiceName = @\"$service_name$\";\n\n");
-
printer.Print(vars, "@implementation $service_class$\n\n");
printer.Print("// Designated initializer\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
- printer.Print(
+ printer.Print(vars,
" return (self = [super initWithHost:host"
- " packageName:kPackageName serviceName:kServiceName]);\n");
+ " packageName:@\"$package$\" serviceName:@\"$service_name$\"]);\n");
printer.Print("}\n\n");
printer.Print(
"// Override superclass initializer to disallow different"
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index cd5ddd8832..15cda474cc 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -342,7 +342,7 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name,
out->Print("}\n");
out->Print("method_implementations = {\n");
for (auto name_and_implementation_constructor =
- method_implementation_constructors.begin();
+ method_implementation_constructors.begin();
name_and_implementation_constructor !=
method_implementation_constructors.end();
name_and_implementation_constructor++) {
@@ -457,8 +457,149 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name,
return true;
}
+bool PrintStub(const grpc::string& package_qualified_service_name,
+ const ServiceDescriptor* service, Printer* out) {
+ out->Print("\n\n");
+ out->Print("class $Service$Stub(object):\n", "Service", service->name());
+ {
+ IndentScope raii_class_indent(out);
+ PrintAllComments(service, out);
+ out->Print("\n");
+ out->Print("def __init__(self, channel):\n");
+ {
+ IndentScope raii_init_indent(out);
+ out->Print("\"\"\"Constructor.\n");
+ out->Print("\n");
+ out->Print("Args:\n");
+ {
+ IndentScope raii_args_indent(out);
+ out->Print("channel: A grpc.Channel.\n");
+ }
+ out->Print("\"\"\"\n");
+ for (int i = 0; i < service->method_count(); ++i) {
+ auto method = service->method(i);
+ auto multi_callable_constructor =
+ grpc::string(method->client_streaming() ? "stream" : "unary") +
+ "_" +
+ grpc::string(method->server_streaming() ? "stream" : "unary");
+ grpc::string request_module_and_class;
+ if (!GetModuleAndMessagePath(method->input_type(), service,
+ &request_module_and_class)) {
+ return false;
+ }
+ grpc::string response_module_and_class;
+ if (!GetModuleAndMessagePath(method->output_type(), service,
+ &response_module_and_class)) {
+ return false;
+ }
+ out->Print("self.$Method$ = channel.$MultiCallableConstructor$(\n",
+ "Method", method->name(),
+ "MultiCallableConstructor", multi_callable_constructor);
+ {
+ IndentScope raii_first_attribute_indent(out);
+ IndentScope raii_second_attribute_indent(out);
+ out->Print(
+ "'/$PackageQualifiedService$/$Method$',\n",
+ "PackageQualifiedService", package_qualified_service_name,
+ "Method", method->name());
+ out->Print(
+ "request_serializer=$RequestModuleAndClass$.SerializeToString,\n",
+ "RequestModuleAndClass", request_module_and_class);
+ out->Print(
+ "response_deserializer=$ResponseModuleAndClass$.FromString,\n",
+ "ResponseModuleAndClass", response_module_and_class);
+ out->Print(")\n");
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool PrintServicer(const ServiceDescriptor* service, Printer* out) {
+ out->Print("\n\n");
+ out->Print("class $Service$Servicer(object):\n", "Service", service->name());
+ {
+ IndentScope raii_class_indent(out);
+ PrintAllComments(service, out);
+ for (int i = 0; i < service->method_count(); ++i) {
+ auto method = service->method(i);
+ grpc::string arg_name = method->client_streaming() ?
+ "request_iterator" : "request";
+ out->Print("\n");
+ out->Print("def $Method$(self, $ArgName$, context):\n",
+ "Method", method->name(), "ArgName", arg_name);
+ {
+ IndentScope raii_method_indent(out);
+ PrintAllComments(method, out);
+ out->Print("context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n");
+ out->Print("context.set_details('Method not implemented!')\n");
+ out->Print("raise NotImplementedError('Method not implemented!')\n");
+ }
+ }
+ }
+ return true;
+}
+
+bool PrintAddServicerToServer(const grpc::string& package_qualified_service_name,
+ const ServiceDescriptor* service, Printer* out) {
+ out->Print("\n\n");
+ out->Print("def add_$Service$Servicer_to_server(servicer, server):\n",
+ "Service", service->name());
+ {
+ IndentScope raii_class_indent(out);
+ out->Print("rpc_method_handlers = {\n");
+ {
+ IndentScope raii_dict_first_indent(out);
+ IndentScope raii_dict_second_indent(out);
+ for (int i = 0; i < service->method_count(); ++i) {
+ auto method = service->method(i);
+ auto method_handler_constructor =
+ grpc::string(method->client_streaming() ? "stream" : "unary") +
+ "_" +
+ grpc::string(method->server_streaming() ? "stream" : "unary") +
+ "_rpc_method_handler";
+ grpc::string request_module_and_class;
+ if (!GetModuleAndMessagePath(method->input_type(), service,
+ &request_module_and_class)) {
+ return false;
+ }
+ grpc::string response_module_and_class;
+ if (!GetModuleAndMessagePath(method->output_type(), service,
+ &response_module_and_class)) {
+ return false;
+ }
+ out->Print("'$Method$': grpc.$MethodHandlerConstructor$(\n",
+ "Method", method->name(),
+ "MethodHandlerConstructor", method_handler_constructor);
+ {
+ IndentScope raii_call_first_indent(out);
+ IndentScope raii_call_second_indent(out);
+ out->Print("servicer.$Method$,\n", "Method", method->name());
+ out->Print("request_deserializer=$RequestModuleAndClass$.FromString,\n",
+ "RequestModuleAndClass", request_module_and_class);
+ out->Print("response_serializer=$ResponseModuleAndClass$.SerializeToString,\n",
+ "ResponseModuleAndClass", response_module_and_class);
+ }
+ out->Print("),\n");
+ }
+ }
+ out->Print("}\n");
+ out->Print("generic_handler = grpc.method_handlers_generic_handler(\n");
+ {
+ IndentScope raii_call_first_indent(out);
+ IndentScope raii_call_second_indent(out);
+ out->Print("'$PackageQualifiedServiceName$', rpc_method_handlers)\n",
+ "PackageQualifiedServiceName", package_qualified_service_name);
+ }
+ out->Print("server.add_generic_rpc_handlers((generic_handler,))\n");
+ }
+ return true;
+}
+
bool PrintPreamble(const FileDescriptor* file,
const GeneratorConfiguration& config, Printer* out) {
+ out->Print("import $Package$\n", "Package", config.grpc_package_root);
out->Print("from $Package$ import implementations as beta_implementations\n",
"Package", config.beta_package_root);
out->Print("from $Package$ import interfaces as beta_interfaces\n",
@@ -487,7 +628,10 @@ pair<bool, grpc::string> GetServices(const FileDescriptor* file,
for (int i = 0; i < file->service_count(); ++i) {
auto service = file->service(i);
auto package_qualified_service_name = package + service->name();
- if (!(PrintBetaServicer(service, &out) &&
+ if (!(PrintStub(package_qualified_service_name, service, &out) &&
+ PrintServicer(service, &out) &&
+ PrintAddServicerToServer(package_qualified_service_name, service, &out) &&
+ PrintBetaServicer(service, &out) &&
PrintBetaStub(service, &out) &&
PrintBetaServerFactory(package_qualified_service_name, service, &out) &&
PrintBetaStubFactory(package_qualified_service_name, service, &out))) {
diff --git a/src/compiler/python_generator.h b/src/compiler/python_generator.h
index e56b6790ef..fc51b48dae 100644
--- a/src/compiler/python_generator.h
+++ b/src/compiler/python_generator.h
@@ -43,6 +43,7 @@ namespace grpc_python_generator {
// Data pertaining to configuration of the generator with respect to anything
// that may be used internally at Google.
struct GeneratorConfiguration {
+ grpc::string grpc_package_root;
grpc::string beta_package_root;
};
diff --git a/src/compiler/python_plugin.cc b/src/compiler/python_plugin.cc
index 92a07b2f80..fc76ee5ec6 100644
--- a/src/compiler/python_plugin.cc
+++ b/src/compiler/python_plugin.cc
@@ -38,6 +38,7 @@
int main(int argc, char* argv[]) {
grpc_python_generator::GeneratorConfiguration config;
+ config.grpc_package_root = "grpc";
config.beta_package_root = "grpc.beta";
grpc_python_generator::PythonGrpcGenerator generator(config);
return grpc::protobuf::compiler::PluginMain(argc, argv, &generator);
diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc
index 936a186beb..1501c3f3e0 100644
--- a/src/compiler/ruby_generator.cc
+++ b/src/compiler/ruby_generator.cc
@@ -66,7 +66,9 @@ void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
std::map<grpc::string, grpc::string> method_vars =
ListToDict({"mth.name", method->name(), "input.type", input_type,
"output.type", output_type, });
+ out->Print(GetRubyComments(method, true).c_str());
out->Print(method_vars, "rpc :$mth.name$, $input.type$, $output.type$\n");
+ out->Print(GetRubyComments(method, false).c_str());
}
// Prints out the service using the ruby gRPC DSL.
@@ -82,12 +84,7 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
out->Print(module_vars, "module $module.name$\n");
out->Indent();
- // TODO(temiola): add documentation
- grpc::string doc = "TODO: add proto service documentation here";
- std::map<grpc::string, grpc::string> template_vars =
- ListToDict({"Documentation", doc, });
- out->Print("\n");
- out->Print(template_vars, "# $Documentation$\n");
+ out->Print(GetRubyComments(service, true).c_str());
out->Print("class Service\n");
// Write the indented class body.
@@ -113,6 +110,7 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
// End the service module
out->Outdent();
out->Print("end\n");
+ out->Print(GetRubyComments(service, false).c_str());
}
} // namespace
@@ -138,6 +136,12 @@ grpc::string GetServices(const FileDescriptor *file) {
out.Print(header_comment_vars,
"# Source: $file.name$ for package '$file.package$'\n");
+ grpc::string leading_comments = GetRubyComments(file, true);
+ if (!leading_comments.empty()) {
+ out.Print("# Original file comments:\n");
+ out.Print(leading_comments.c_str());
+ }
+
out.Print("\n");
out.Print("require 'grpc'\n");
// Write out require statemment to import the separately generated file
@@ -164,6 +168,8 @@ grpc::string GetServices(const FileDescriptor *file) {
out.Outdent();
out.Print("end\n");
}
+
+ out.Print(GetRubyComments(file, false).c_str());
}
return output;
}
diff --git a/src/compiler/ruby_generator_helpers-inl.h b/src/compiler/ruby_generator_helpers-inl.h
index 9da7cab3c7..ff6939ed9f 100644
--- a/src/compiler/ruby_generator_helpers-inl.h
+++ b/src/compiler/ruby_generator_helpers-inl.h
@@ -35,6 +35,7 @@
#define GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_HELPERS_INL_H
#include "src/compiler/config.h"
+#include "src/compiler/generator_helpers.h"
#include "src/compiler/ruby_generator_string-inl.h"
namespace grpc_ruby_generator {
@@ -60,6 +61,13 @@ inline grpc::string MessagesRequireName(
return Replace(file->name(), ".proto", "");
}
+// Get leading or trailing comments in a string. Comment lines start with "# ".
+// Leading detached comments are put in in front of leading comments.
+template <typename DescriptorType>
+inline grpc::string GetRubyComments(const DescriptorType *desc, bool leading) {
+ return grpc_generator::GetPrefixedComments(desc, leading, "#");
+}
+
} // namespace grpc_ruby_generator
#endif // GRPC_INTERNAL_COMPILER_RUBY_GENERATOR_HELPERS_INL_H
diff --git a/src/core/ext/census/gen/README.md b/src/core/ext/census/gen/README.md
new file mode 100644
index 0000000000..72bef6542d
--- /dev/null
+++ b/src/core/ext/census/gen/README.md
@@ -0,0 +1,6 @@
+Files generated for use by Census stats and trace recording subsystem.
+
+#Files
+* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
+ script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
+ $PWD/src/core/ext/census/gen src/core/ext/census/gen`
diff --git a/src/core/ext/census/gen/census.pb.c b/src/core/ext/census/gen/census.pb.c
new file mode 100644
index 0000000000..d614636c90
--- /dev/null
+++ b/src/core/ext/census/gen/census.pb.c
@@ -0,0 +1,179 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.3.5-dev */
+
+#include "src/core/ext/census/gen/census.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+
+
+const pb_field_t google_census_Duration_fields[3] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Duration, seconds, seconds, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Duration, nanos, seconds, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Timestamp_fields[3] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Timestamp, seconds, seconds, 0),
+ PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Timestamp, nanos, seconds, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Metric_fields[5] = {
+ PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Metric, name, name, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Metric, description, name, 0),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, unit, description, &google_census_Metric_MeasurementUnit_fields),
+ PB_FIELD( 4, INT32 , OPTIONAL, STATIC , OTHER, google_census_Metric, id, unit, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Metric_BasicUnit_fields[2] = {
+ PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, google_census_Metric_BasicUnit, type, type, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Metric_MeasurementUnit_fields[4] = {
+ PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, google_census_Metric_MeasurementUnit, prefix, prefix, 0),
+ PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric_MeasurementUnit, numerator, prefix, &google_census_Metric_BasicUnit_fields),
+ PB_FIELD( 3, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric_MeasurementUnit, denominator, numerator, &google_census_Metric_BasicUnit_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_AggregationDescriptor_fields[3] = {
+ PB_ONEOF_FIELD(options, 1, MESSAGE , ONEOF, STATIC , FIRST, google_census_AggregationDescriptor, bucket_boundaries, bucket_boundaries, &google_census_AggregationDescriptor_BucketBoundaries_fields),
+ PB_ONEOF_FIELD(options, 2, MESSAGE , ONEOF, STATIC , FIRST, google_census_AggregationDescriptor, interval_boundaries, interval_boundaries, &google_census_AggregationDescriptor_IntervalBoundaries_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2] = {
+ PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_BucketBoundaries, bounds, bounds, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2] = {
+ PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_IntervalBoundaries, window_size, window_size, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Distribution_fields[5] = {
+ PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Distribution, count, count, 0),
+ PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution, mean, count, 0),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Distribution, range, mean, &google_census_Distribution_Range_fields),
+ PB_FIELD( 4, INT64 , REPEATED, CALLBACK, OTHER, google_census_Distribution, bucket_count, range, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Distribution_Range_fields[3] = {
+ PB_FIELD( 1, DOUBLE , OPTIONAL, STATIC , FIRST, google_census_Distribution_Range, min, min, 0),
+ PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution_Range, max, min, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_IntervalStats_fields[2] = {
+ PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_IntervalStats, window, window, &google_census_IntervalStats_Window_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_IntervalStats_Window_fields[4] = {
+ PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_census_IntervalStats_Window, window_size, window_size, &google_census_Duration_fields),
+ PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, count, window_size, 0),
+ PB_FIELD( 3, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, mean, count, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Tag_fields[3] = {
+ PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, google_census_Tag, key, key, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, google_census_Tag, value, key, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_View_fields[6] = {
+ PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_View, name, name, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, description, name, 0),
+ PB_FIELD( 3, INT32 , OPTIONAL, STATIC , OTHER, google_census_View, metric_id, description, 0),
+ PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_View, aggregation, metric_id, &google_census_AggregationDescriptor_fields),
+ PB_FIELD( 5, STRING , REPEATED, CALLBACK, OTHER, google_census_View, tag_key, aggregation, 0),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_Aggregation_fields[6] = {
+ PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Aggregation, name, name, 0),
+ PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Aggregation, description, name, 0),
+ PB_ONEOF_FIELD(data, 3, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, distribution, description, &google_census_Distribution_fields),
+ PB_ONEOF_FIELD(data, 4, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, interval_stats, description, &google_census_IntervalStats_fields),
+ PB_FIELD( 5, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Aggregation, tag, data.interval_stats, &google_census_Tag_fields),
+ PB_LAST_FIELD
+};
+
+const pb_field_t google_census_ViewAggregations_fields[4] = {
+ PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_ViewAggregations, aggregation, aggregation, &google_census_Aggregation_fields),
+ PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_ViewAggregations, start, aggregation, &google_census_Timestamp_fields),
+ PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_ViewAggregations, end, start, &google_census_Timestamp_fields),
+ PB_LAST_FIELD
+};
+
+
+/* Check that field information fits in pb_field_t */
+#if !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_32BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in 8 or 16 bit
+ * field descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(google_census_Metric, unit) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Metric, unit) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Metric, unit) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Metric, unit) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 65536 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Aggregation, tag) < 65536 && pb_membersize(google_census_ViewAggregations, aggregation) < 65536 && pb_membersize(google_census_ViewAggregations, start) < 65536 && pb_membersize(google_census_ViewAggregations, end) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Metric_google_census_Metric_BasicUnit_google_census_Metric_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_ViewAggregations)
+#endif
+
+#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
+/* If you get an error here, it means that you need to define PB_FIELD_16BIT
+ * compile-time option. You can do that in pb.h or on compiler command line.
+ *
+ * The reason you need to do this is that some of your messages contain tag
+ * numbers or field sizes that are larger than what can fit in the default
+ * 8 bit descriptors.
+ */
+PB_STATIC_ASSERT((pb_membersize(google_census_Metric, unit) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Metric, unit) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Metric, unit) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Metric, unit) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, numerator) < 256 && pb_membersize(google_census_Metric_MeasurementUnit, denominator) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Aggregation, tag) < 256 && pb_membersize(google_census_ViewAggregations, aggregation) < 256 && pb_membersize(google_census_ViewAggregations, start) < 256 && pb_membersize(google_census_ViewAggregations, end) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Metric_google_census_Metric_BasicUnit_google_census_Metric_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_ViewAggregations)
+#endif
+
+
+/* On some platforms (such as AVR), double is really float.
+ * These are not directly supported by nanopb, but see example_avr_double.
+ * To get rid of this error, remove any double fields from your .proto.
+ */
+PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
+
diff --git a/src/core/ext/census/gen/census.pb.h b/src/core/ext/census/gen/census.pb.h
new file mode 100644
index 0000000000..d040fe29e7
--- /dev/null
+++ b/src/core/ext/census/gen/census.pb.h
@@ -0,0 +1,294 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.3.5-dev */
+
+#ifndef GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
+#define GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
+#include "third_party/nanopb/pb.h"
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Enum definitions */
+typedef enum _google_census_Metric_BasicUnit_Measure {
+ google_census_Metric_BasicUnit_Measure_UNKNOWN = 0,
+ google_census_Metric_BasicUnit_Measure_BITS = 1,
+ google_census_Metric_BasicUnit_Measure_BYTES = 2,
+ google_census_Metric_BasicUnit_Measure_SECS = 3,
+ google_census_Metric_BasicUnit_Measure_CORES = 4,
+ google_census_Metric_BasicUnit_Measure_MAX_UNITS = 5
+} google_census_Metric_BasicUnit_Measure;
+
+/* Struct definitions */
+typedef struct _google_census_AggregationDescriptor_BucketBoundaries {
+ pb_callback_t bounds;
+} google_census_AggregationDescriptor_BucketBoundaries;
+
+typedef struct _google_census_AggregationDescriptor_IntervalBoundaries {
+ pb_callback_t window_size;
+} google_census_AggregationDescriptor_IntervalBoundaries;
+
+typedef struct _google_census_IntervalStats {
+ pb_callback_t window;
+} google_census_IntervalStats;
+
+typedef struct _google_census_AggregationDescriptor {
+ pb_size_t which_options;
+ union {
+ google_census_AggregationDescriptor_BucketBoundaries bucket_boundaries;
+ google_census_AggregationDescriptor_IntervalBoundaries interval_boundaries;
+ } options;
+} google_census_AggregationDescriptor;
+
+typedef struct _google_census_Distribution_Range {
+ bool has_min;
+ double min;
+ bool has_max;
+ double max;
+} google_census_Distribution_Range;
+
+typedef struct _google_census_Duration {
+ bool has_seconds;
+ int64_t seconds;
+ bool has_nanos;
+ int32_t nanos;
+} google_census_Duration;
+
+typedef struct _google_census_Metric_BasicUnit {
+ bool has_type;
+ google_census_Metric_BasicUnit_Measure type;
+} google_census_Metric_BasicUnit;
+
+typedef struct _google_census_Metric_MeasurementUnit {
+ bool has_prefix;
+ int32_t prefix;
+ pb_callback_t numerator;
+ pb_callback_t denominator;
+} google_census_Metric_MeasurementUnit;
+
+typedef struct _google_census_Tag {
+ bool has_key;
+ char key[255];
+ bool has_value;
+ char value[255];
+} google_census_Tag;
+
+typedef struct _google_census_Timestamp {
+ bool has_seconds;
+ int64_t seconds;
+ bool has_nanos;
+ int32_t nanos;
+} google_census_Timestamp;
+
+typedef struct _google_census_Distribution {
+ bool has_count;
+ int64_t count;
+ bool has_mean;
+ double mean;
+ bool has_range;
+ google_census_Distribution_Range range;
+ pb_callback_t bucket_count;
+} google_census_Distribution;
+
+typedef struct _google_census_IntervalStats_Window {
+ bool has_window_size;
+ google_census_Duration window_size;
+ bool has_count;
+ int64_t count;
+ bool has_mean;
+ double mean;
+} google_census_IntervalStats_Window;
+
+typedef struct _google_census_Metric {
+ pb_callback_t name;
+ pb_callback_t description;
+ bool has_unit;
+ google_census_Metric_MeasurementUnit unit;
+ bool has_id;
+ int32_t id;
+} google_census_Metric;
+
+typedef struct _google_census_View {
+ pb_callback_t name;
+ pb_callback_t description;
+ bool has_metric_id;
+ int32_t metric_id;
+ bool has_aggregation;
+ google_census_AggregationDescriptor aggregation;
+ pb_callback_t tag_key;
+} google_census_View;
+
+typedef struct _google_census_ViewAggregations {
+ pb_callback_t aggregation;
+ bool has_start;
+ google_census_Timestamp start;
+ bool has_end;
+ google_census_Timestamp end;
+} google_census_ViewAggregations;
+
+typedef struct _google_census_Aggregation {
+ pb_callback_t name;
+ pb_callback_t description;
+ pb_size_t which_data;
+ union {
+ google_census_Distribution distribution;
+ google_census_IntervalStats interval_stats;
+ } data;
+ pb_callback_t tag;
+} google_census_Aggregation;
+
+/* Default values for struct fields */
+
+/* Initializer values for message structs */
+#define google_census_Duration_init_default {false, 0, false, 0}
+#define google_census_Timestamp_init_default {false, 0, false, 0}
+#define google_census_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Metric_MeasurementUnit_init_default, false, 0}
+#define google_census_Metric_BasicUnit_init_default {false, (google_census_Metric_BasicUnit_Measure)0}
+#define google_census_Metric_MeasurementUnit_init_default {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
+#define google_census_AggregationDescriptor_init_default {0, {google_census_AggregationDescriptor_BucketBoundaries_init_default}}
+#define google_census_AggregationDescriptor_BucketBoundaries_init_default {{{NULL}, NULL}}
+#define google_census_AggregationDescriptor_IntervalBoundaries_init_default {{{NULL}, NULL}}
+#define google_census_Distribution_init_default {false, 0, false, 0, false, google_census_Distribution_Range_init_default, {{NULL}, NULL}}
+#define google_census_Distribution_Range_init_default {false, 0, false, 0}
+#define google_census_IntervalStats_init_default {{{NULL}, NULL}}
+#define google_census_IntervalStats_Window_init_default {false, google_census_Duration_init_default, false, 0, false, 0}
+#define google_census_Tag_init_default {false, "", false, ""}
+#define google_census_View_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, 0, false, google_census_AggregationDescriptor_init_default, {{NULL}, NULL}}
+#define google_census_Aggregation_init_default {{{NULL}, NULL}, {{NULL}, NULL}, 0, {google_census_Distribution_init_default}, {{NULL}, NULL}}
+#define google_census_ViewAggregations_init_default {{{NULL}, NULL}, false, google_census_Timestamp_init_default, false, google_census_Timestamp_init_default}
+#define google_census_Duration_init_zero {false, 0, false, 0}
+#define google_census_Timestamp_init_zero {false, 0, false, 0}
+#define google_census_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Metric_MeasurementUnit_init_zero, false, 0}
+#define google_census_Metric_BasicUnit_init_zero {false, (google_census_Metric_BasicUnit_Measure)0}
+#define google_census_Metric_MeasurementUnit_init_zero {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
+#define google_census_AggregationDescriptor_init_zero {0, {google_census_AggregationDescriptor_BucketBoundaries_init_zero}}
+#define google_census_AggregationDescriptor_BucketBoundaries_init_zero {{{NULL}, NULL}}
+#define google_census_AggregationDescriptor_IntervalBoundaries_init_zero {{{NULL}, NULL}}
+#define google_census_Distribution_init_zero {false, 0, false, 0, false, google_census_Distribution_Range_init_zero, {{NULL}, NULL}}
+#define google_census_Distribution_Range_init_zero {false, 0, false, 0}
+#define google_census_IntervalStats_init_zero {{{NULL}, NULL}}
+#define google_census_IntervalStats_Window_init_zero {false, google_census_Duration_init_zero, false, 0, false, 0}
+#define google_census_Tag_init_zero {false, "", false, ""}
+#define google_census_View_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, 0, false, google_census_AggregationDescriptor_init_zero, {{NULL}, NULL}}
+#define google_census_Aggregation_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, 0, {google_census_Distribution_init_zero}, {{NULL}, NULL}}
+#define google_census_ViewAggregations_init_zero {{{NULL}, NULL}, false, google_census_Timestamp_init_zero, false, google_census_Timestamp_init_zero}
+
+/* Field tags (for use in manual encoding/decoding) */
+#define google_census_AggregationDescriptor_BucketBoundaries_bounds_tag 1
+#define google_census_AggregationDescriptor_IntervalBoundaries_window_size_tag 1
+#define google_census_IntervalStats_window_tag 1
+#define google_census_AggregationDescriptor_bucket_boundaries_tag 1
+
+#define google_census_AggregationDescriptor_interval_boundaries_tag 2
+#define google_census_Distribution_Range_min_tag 1
+#define google_census_Distribution_Range_max_tag 2
+#define google_census_Duration_seconds_tag 1
+#define google_census_Duration_nanos_tag 2
+#define google_census_Metric_BasicUnit_type_tag 1
+#define google_census_Metric_MeasurementUnit_prefix_tag 1
+#define google_census_Metric_MeasurementUnit_numerator_tag 2
+#define google_census_Metric_MeasurementUnit_denominator_tag 3
+#define google_census_Tag_key_tag 1
+#define google_census_Tag_value_tag 2
+#define google_census_Timestamp_seconds_tag 1
+#define google_census_Timestamp_nanos_tag 2
+#define google_census_Distribution_count_tag 1
+#define google_census_Distribution_mean_tag 2
+#define google_census_Distribution_range_tag 3
+#define google_census_Distribution_bucket_count_tag 4
+#define google_census_IntervalStats_Window_window_size_tag 1
+#define google_census_IntervalStats_Window_count_tag 2
+#define google_census_IntervalStats_Window_mean_tag 3
+#define google_census_Metric_name_tag 1
+#define google_census_Metric_description_tag 2
+#define google_census_Metric_unit_tag 3
+#define google_census_Metric_id_tag 4
+#define google_census_View_name_tag 1
+#define google_census_View_description_tag 2
+#define google_census_View_metric_id_tag 3
+#define google_census_View_aggregation_tag 4
+#define google_census_View_tag_key_tag 5
+#define google_census_ViewAggregations_aggregation_tag 1
+#define google_census_ViewAggregations_start_tag 2
+#define google_census_ViewAggregations_end_tag 3
+#define google_census_Aggregation_distribution_tag 3
+
+#define google_census_Aggregation_interval_stats_tag 4
+#define google_census_Aggregation_name_tag 1
+#define google_census_Aggregation_description_tag 2
+#define google_census_Aggregation_tag_tag 5
+
+/* Struct field encoding specification for nanopb */
+extern const pb_field_t google_census_Duration_fields[3];
+extern const pb_field_t google_census_Timestamp_fields[3];
+extern const pb_field_t google_census_Metric_fields[5];
+extern const pb_field_t google_census_Metric_BasicUnit_fields[2];
+extern const pb_field_t google_census_Metric_MeasurementUnit_fields[4];
+extern const pb_field_t google_census_AggregationDescriptor_fields[3];
+extern const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2];
+extern const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2];
+extern const pb_field_t google_census_Distribution_fields[5];
+extern const pb_field_t google_census_Distribution_Range_fields[3];
+extern const pb_field_t google_census_IntervalStats_fields[2];
+extern const pb_field_t google_census_IntervalStats_Window_fields[4];
+extern const pb_field_t google_census_Tag_fields[3];
+extern const pb_field_t google_census_View_fields[6];
+extern const pb_field_t google_census_Aggregation_fields[6];
+extern const pb_field_t google_census_ViewAggregations_fields[4];
+
+/* Maximum encoded size of messages (where known) */
+#define google_census_Duration_size 22
+#define google_census_Timestamp_size 22
+#define google_census_Metric_BasicUnit_size 2
+#define google_census_Distribution_Range_size 18
+#define google_census_IntervalStats_Window_size 44
+#define google_census_Tag_size 516
+
+/* Message IDs (where set with "msgid" option) */
+#ifdef PB_MSGID
+
+#define CENSUS_MESSAGES \
+
+
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
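/* A minimal usage sketch for the generated header above (not from this commit;
 * the helper name is illustrative and the include path for the nanopb runtime
 * is assumed to follow the same "third_party/nanopb" convention as census.pb.h).
 * It encodes one of the generated census messages into a caller-supplied
 * buffer with the standard nanopb pb_encode API. */
#include "third_party/nanopb/pb_encode.h"
#include "src/core/ext/census/gen/census.pb.h"

/* Returns true on success and sets *written to the encoded length. */
static bool encode_duration_example(uint8_t *buf, size_t len, size_t *written) {
  google_census_Duration d = google_census_Duration_init_zero;
  d.has_seconds = true;
  d.seconds = 5;
  d.has_nanos = true;
  d.nanos = 250000000;
  pb_ostream_t stream = pb_ostream_from_buffer(buf, len);
  if (!pb_encode(&stream, google_census_Duration_fields, &d)) return false;
  *written = stream.bytes_written;
  return true;
}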
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index df7016efe3..72e4e5427e 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -134,7 +134,9 @@ static void client_init_call_elem(grpc_exec_ctx *exec_ctx,
}
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem, void *ignored) {
+ grpc_call_element *elem,
+ const grpc_call_stats *stats,
+ void *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
@@ -152,7 +154,9 @@ static void server_init_call_elem(grpc_exec_ctx *exec_ctx,
}
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem, void *ignored) {
+ grpc_call_element *elem,
+ const grpc_call_stats *stats,
+ void *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
@@ -176,7 +180,7 @@ const grpc_channel_filter grpc_client_census_filter = {
grpc_channel_next_op,
sizeof(call_data),
client_init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
client_destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
@@ -189,7 +193,7 @@ const grpc_channel_filter grpc_server_census_filter = {
grpc_channel_next_op,
sizeof(call_data),
server_init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
server_destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/ext/client_config/channel_connectivity.c b/src/core/ext/client_config/channel_connectivity.c
index b452cc33ac..20c01a9a7c 100644
--- a/src/core/ext/client_config/channel_connectivity.c
+++ b/src/core/ext/client_config/channel_connectivity.c
@@ -62,7 +62,7 @@ grpc_connectivity_state grpc_channel_check_connectivity_state(
"not a (u)client channel, but '%s'",
client_channel_elem->filter->name);
grpc_exec_ctx_finish(&exec_ctx);
- return GRPC_CHANNEL_FATAL_FAILURE;
+ return GRPC_CHANNEL_SHUTDOWN;
}
typedef enum {
@@ -75,7 +75,6 @@ typedef enum {
typedef struct {
gpr_mu mu;
callback_phase phase;
- grpc_error *error;
grpc_closure on_complete;
grpc_timer alarm;
grpc_connectivity_state state;
@@ -95,7 +94,6 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
abort();
}
gpr_mu_destroy(&w->mu);
- GRPC_ERROR_UNREF(w->error);
gpr_free(w);
}
@@ -131,17 +129,25 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
}
gpr_mu_lock(&w->mu);
- const char *msg = grpc_error_string(error);
- grpc_error_free_string(msg);
if (due_to_completion) {
- GRPC_ERROR_UNREF(w->error);
- w->error = GRPC_ERROR_NONE;
+ if (grpc_trace_operation_failures) {
+ GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
+ }
+ GRPC_ERROR_UNREF(error);
+ error = GRPC_ERROR_NONE;
+ } else {
+ if (error == GRPC_ERROR_NONE) {
+ error =
+ GRPC_ERROR_CREATE("Timed out waiting for connection state change");
+ } else if (error == GRPC_ERROR_CANCELLED) {
+ error = GRPC_ERROR_NONE;
+ }
}
switch (w->phase) {
case WAITING:
w->phase = CALLING_BACK;
- grpc_cq_end_op(exec_ctx, w->cq, w->tag, GRPC_ERROR_REF(w->error),
+ grpc_cq_end_op(exec_ctx, w->cq, w->tag, GRPC_ERROR_REF(error),
finished_completion, w, &w->completion_storage);
break;
case CALLING_BACK:
@@ -194,7 +200,6 @@ void grpc_channel_watch_connectivity_state(
grpc_closure_init(&w->on_complete, watch_complete, w);
w->phase = WAITING;
w->state = last_observed_state;
- w->error = GRPC_ERROR_CREATE("Timeout waiting for channel state");
w->cq = cq;
w->tag = tag;
w->channel = channel;
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_config/client_channel.c
index d00b966812..1d5a7d5224 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_config/client_channel.c
@@ -120,7 +120,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error,
const char *reason) {
if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
- state == GRPC_CHANNEL_FATAL_FAILURE) &&
+ state == GRPC_CHANNEL_SHUTDOWN) &&
chand->lb_policy != NULL) {
/* cancel fail-fast picks */
grpc_lb_policy_cancel_picks(
@@ -139,8 +139,7 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
/* check if the notification is for a stale policy */
if (w->lb_policy != w->chand->lb_policy) return;
- if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
- w->chand->resolver != NULL) {
+ if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
@@ -148,7 +147,7 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
}
set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
GRPC_ERROR_REF(error), "lb_changed");
- if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
+ if (w->state != GRPC_CHANNEL_SHUTDOWN) {
watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
}
}
@@ -246,7 +245,7 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_error *refs[] = {error, state_error};
set_channel_connectivity_state_locked(
- exec_ctx, chand, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
GPR_ARRAY_SIZE(refs)),
"resolver_gone");
@@ -277,7 +276,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
- grpc_exec_ctx_push(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -296,9 +295,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
- grpc_exec_ctx_push(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("Ping with no load balancing"),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE("Ping with no load balancing"),
+ NULL);
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
@@ -309,7 +308,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (chand->resolver != NULL) {
set_channel_connectivity_state_locked(
- exec_ctx, chand, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
grpc_resolver_shutdown(exec_ctx, chand->resolver);
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
@@ -354,11 +353,11 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_push(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->initial_metadata_flags,
cpa->connected_subchannel, cpa->on_ready)) {
- grpc_exec_ctx_push(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
}
gpr_free(cpa);
}
@@ -387,8 +386,8 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
- grpc_exec_ctx_push(exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE("Pick cancelled"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, cpa->on_ready,
+ GRPC_ERROR_CREATE("Pick cancelled"), NULL);
}
}
gpr_mu_unlock(&chand->mu_config);
@@ -399,7 +398,7 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
int r;
GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
gpr_mu_unlock(&chand->mu_config);
- r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollset,
+ r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollent,
initial_metadata, initial_metadata_flags,
connected_subchannel, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
@@ -423,8 +422,8 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
- grpc_exec_ctx_push(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
+ NULL);
}
gpr_mu_unlock(&chand->mu_config);
return 0;
@@ -439,6 +438,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_stats *stats,
void *and_free_memory) {
grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
gpr_free(and_free_memory);
@@ -484,10 +484,11 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
gpr_mu_destroy(&chand->mu_config);
}
-static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_pollset *pollset) {
+static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
- calld->pollset = pollset;
+ calld->pollent = pollent;
}
const grpc_channel_filter grpc_client_channel_filter = {
@@ -495,7 +496,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_op,
sizeof(call_data),
init_call_elem,
- cc_set_pollset,
+ cc_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/ext/client_config/connector.h b/src/core/ext/client_config/connector.h
index dd85dfcb7d..ea9d23706e 100644
--- a/src/core/ext/client_config/connector.h
+++ b/src/core/ext/client_config/connector.h
@@ -64,7 +64,7 @@ typedef struct {
grpc_transport *transport;
/** channel arguments (to be passed to the filters) */
- const grpc_channel_args *channel_args;
+ grpc_channel_args *channel_args;
} grpc_connect_out_args;
struct grpc_connector_vtable {
diff --git a/src/core/ext/client_config/lb_policy.c b/src/core/ext/client_config/lb_policy.c
index fcff0c9a1b..8b980b2cca 100644
--- a/src/core/ext/client_config/lb_policy.c
+++ b/src/core/ext/client_config/lb_policy.c
@@ -60,8 +60,9 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
- old_val + delta, reason);
+ "LB_POLICY: 0x%" PRIxPTR " %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR
+ " [%s]",
+ (intptr_t)c, purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
@@ -99,12 +100,12 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
}
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
- return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
+ return policy->vtable->pick(exec_ctx, policy, pollent, initial_metadata,
initial_metadata_flags, target, on_complete);
}
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_config/lb_policy.h
index 13b9abc474..3cfd041d3a 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_config/lb_policy.h
@@ -35,6 +35,7 @@
#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
/** A load balancing policy: specified by a vtable and a struct (which
@@ -59,7 +60,8 @@ struct grpc_lb_policy_vtable {
/** implement grpc_lb_policy_pick */
int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_polling_entity *pollent,
+ grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **target, grpc_closure *on_complete);
void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
@@ -125,7 +127,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
\a target.
Picking can be asynchronous. Any IO should be done under \a pollset. */
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **target,
diff --git a/src/core/ext/client_config/subchannel.c b/src/core/ext/client_config/subchannel.c
index dd61caea94..468067ea57 100644
--- a/src/core/ext/client_config/subchannel.c
+++ b/src/core/ext/client_config/subchannel.c
@@ -290,8 +290,8 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
- grpc_exec_ctx_push(exec_ctx, grpc_closure_create(subchannel_destroy, c),
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c),
+ GRPC_ERROR_NONE, NULL);
}
}
@@ -481,12 +481,12 @@ static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
/* if we failed just leave this closure */
if (sw->connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* any errors on a subchannel ==> we're done, create a new one */
- sw->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
+ sw->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
}
grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
sw->connectivity_state, GRPC_ERROR_REF(error),
"reflect_child");
- if (sw->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ if (sw->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, GET_CONNECTED_SUBCHANNEL(c, no_barrier), NULL,
&sw->connectivity_state, &sw->closure);
@@ -621,6 +621,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_subchannel *c = arg;
+ grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
gpr_mu_lock(&c->mu);
@@ -638,13 +639,20 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
"connect_failed");
gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
const char *errmsg = grpc_error_string(error);
- gpr_log(GPR_INFO, "Connect failed, retry in %d.%09d seconds: %s",
- time_til_next.tv_sec, time_til_next.tv_nsec, errmsg);
- grpc_error_free_string(errmsg);
+ gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
+ if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
+ 0) {
+ gpr_log(GPR_INFO, "Retry immediately");
+ } else {
+ gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
+ time_til_next.tv_sec, time_til_next.tv_nsec);
+ }
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, on_alarm, c, now);
+ grpc_error_free_string(errmsg);
}
gpr_mu_unlock(&c->mu);
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+ grpc_channel_args_destroy(delete_channel_args);
}
/*
@@ -656,7 +664,7 @@ static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_subchannel_call *c = call;
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
- grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), c);
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, c);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
@@ -694,7 +702,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_subchannel_call *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_pollset *pollset) {
+ grpc_polling_entity *pollent) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
grpc_subchannel_call *call =
gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@@ -703,7 +711,7 @@ grpc_subchannel_call *grpc_connected_subchannel_create_call(
GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, call,
NULL, NULL, callstk);
- grpc_call_stack_set_pollset(exec_ctx, callstk, pollset);
+ grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, pollent);
return call;
}
diff --git a/src/core/ext/client_config/subchannel.h b/src/core/ext/client_config/subchannel.h
index e73f394584..b6d39f5dc5 100644
--- a/src/core/ext/client_config/subchannel.h
+++ b/src/core/ext/client_config/subchannel.h
@@ -36,6 +36,7 @@
#include "src/core/ext/client_config/connector.h"
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
/** A (sub-)channel that knows how to connect to exactly one target
@@ -109,7 +110,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_subchannel_call *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_pollset *pollset);
+ grpc_polling_entity *pollent);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
diff --git a/src/core/ext/client_config/subchannel_call_holder.c b/src/core/ext/client_config/subchannel_call_holder.c
index 07b7813482..e31800edd9 100644
--- a/src/core/ext/client_config/subchannel_call_holder.c
+++ b/src/core/ext/client_config/subchannel_call_holder.c
@@ -68,6 +68,7 @@ void grpc_subchannel_call_holder_init(
holder->waiting_ops_capacity = 0;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
holder->owning_call = owning_call;
+ holder->pollent = NULL;
}
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
@@ -163,7 +164,7 @@ retry:
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
- exec_ctx, holder->connected_subchannel, holder->pollset));
+ exec_ctx, holder->connected_subchannel, holder->pollent));
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
@@ -194,7 +195,7 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
- exec_ctx, holder->connected_subchannel, holder->pollset));
+ exec_ctx, holder->connected_subchannel, holder->pollent));
retry_waiting_locked(exec_ctx, holder);
}
gpr_mu_unlock(&holder->mu);
@@ -222,8 +223,8 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
- grpc_exec_ctx_push(exec_ctx, grpc_closure_create(retry_ops, a),
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
+ GRPC_ERROR_NONE, NULL);
}
static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
diff --git a/src/core/ext/client_config/subchannel_call_holder.h b/src/core/ext/client_config/subchannel_call_holder.h
index 9299908788..8d2deb02f3 100644
--- a/src/core/ext/client_config/subchannel_call_holder.h
+++ b/src/core/ext/client_config/subchannel_call_holder.h
@@ -35,6 +35,7 @@
#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_CALL_HOLDER_H
#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/lib/iomgr/polling_entity.h"
/** Pick a subchannel for grpc_subchannel_call_holder;
Return 1 if subchannel is available immediately (in which case on_ready
@@ -71,7 +72,7 @@ typedef struct grpc_subchannel_call_holder {
grpc_subchannel_call_holder_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
- grpc_pollset *pollset;
+ grpc_polling_entity *pollent;
grpc_transport_stream_op *waiting_ops;
size_t waiting_ops_count;
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index d5dc39ab94..46fe588f72 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -33,8 +33,8 @@
/* Automatically generated nanopb header */
/* Generated by nanopb-0.3.5-dev */
-#ifndef PB_LOAD_BALANCER_PB_H_INCLUDED
-#define PB_LOAD_BALANCER_PB_H_INCLUDED
+#ifndef GRPC_CORE_EXT_LB_POLICY_GRPCLB_PROTO_GRPC_LB_V1_LOAD_BALANCER_PB_H
+#define GRPC_CORE_EXT_LB_POLICY_GRPCLB_PROTO_GRPC_LB_V1_LOAD_BALANCER_PB_H
#include "third_party/nanopb/pb.h"
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index b5c2c426de..9decf70692 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -39,7 +39,7 @@
typedef struct pending_pick {
struct pending_pick *next;
- grpc_pollset *pollset;
+ grpc_polling_entity *pollent;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
@@ -104,7 +104,7 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
/* cancel subscription */
if (selected != NULL) {
@@ -119,9 +119,9 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
pp = next;
}
@@ -137,11 +137,11 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
*target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Pick Cancelled"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE("Pick Cancelled"), NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -164,10 +164,10 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
- grpc_exec_ctx_push(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Pick Cancelled"), NULL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE("Pick Cancelled"), NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -199,7 +199,8 @@ static void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_polling_entity *pollent,
+ grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
@@ -224,10 +225,11 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, pollent,
+ p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollset = pollset;
+ pp->pollent = pollent;
pp->target = target;
pp->initial_metadata_flags = initial_metadata_flags;
pp->on_complete = on_complete;
@@ -279,12 +281,12 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
} else if (selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
- p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
+ p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, GRPC_ERROR_REF(error),
"selected_changed");
- if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
+ if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
@@ -306,16 +308,16 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
- grpc_exec_ctx_push(exec_ctx,
- grpc_closure_create(destroy_subchannels, p),
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx,
+ grpc_closure_create(destroy_subchannels, p),
+ GRPC_ERROR_NONE, NULL);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected;
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
@@ -353,7 +355,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
break;
- case GRPC_CHANNEL_FATAL_FAILURE:
+ case GRPC_CHANNEL_SHUTDOWN:
p->num_subchannels--;
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels]);
@@ -361,15 +363,15 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"pick_first");
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING("Pick first exhausted channels",
&error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
+ NULL);
gpr_free(pp);
}
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -421,8 +423,8 @@ static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (selected) {
grpc_connected_subchannel_ping(exec_ctx, selected, closure);
} else {
- grpc_exec_ctx_push(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
+ NULL);
}
}
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 95305e9cdd..7bcf608ab9 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -31,6 +31,34 @@
*
*/
+/** Round Robin Policy.
+ *
+ * This policy keeps:
+ * - A circular list of ready (connected) subchannels, the *readylist*. An empty
+ * readylist consists solely of its root (dummy) node.
+ * - A pointer to the last element picked from the readylist, the *lastpick*.
+ * Initially set to point to the readylist's root.
+ *
+ * Behavior:
+ * - When a subchannel connects, it's *prepended* to the readylist's root node.
+ * Ie, if readylist = A <-> B <-> ROOT <-> C
+ *                    ^                    ^
+ *                    |____________________|
+ * and subchannel D becomes connected, the addition of D to the readylist
+ * results in readylist = A <-> B <-> D <-> ROOT <-> C
+ *                        ^                          ^
+ *                        |__________________________|
+ * - When a subchannel disconnects, it's removed from the readylist. If the
+ * subchannel being removed was the most recently picked, the *lastpick*
+ * pointer moves to the removed node's previous element. Note that if the
+ *   readylist only had that one element, this is still legal: the lastpick
+ *   then points to the dummy root node of the now-empty readylist.
+ * - Upon picking, *lastpick* is updated to point to the returned (connected)
+ * subchannel. Note that it's possible that the selected subchannel becomes
+ * disconnected in the interim between the selection and the actual usage of
+ * the subchannel by the caller.
+ */
+
#include <string.h>
#include <grpc/support/alloc.h>
@@ -48,7 +76,7 @@ int grpc_lb_round_robin_trace = 0;
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
struct pending_pick *next;
- grpc_pollset *pollset;
+ grpc_polling_entity *pollent;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
@@ -173,9 +201,7 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
return;
}
if (node == p->ready_list_last_pick) {
- /* If removing the lastly picked node, reset the last pick pointer to the
- * dummy root of the list */
- p->ready_list_last_pick = &p->ready_list;
+ p->ready_list_last_pick = p->ready_list_last_pick->prev;
}
/* removing last item */
@@ -239,12 +265,12 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
gpr_free(pp);
}
grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel Shutdown"), "shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
@@ -264,10 +290,11 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
*target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED,
+ NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -290,10 +317,11 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
*pp->target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_CANCELLED,
+ NULL);
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -309,7 +337,7 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
p->started_picking = 1;
if (grpc_lb_round_robin_trace) {
- gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
+ gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%" PRIuPTR, p,
p->num_subchannels);
}
@@ -333,7 +361,8 @@ static void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
+ grpc_polling_entity *pollent,
+ grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
@@ -346,8 +375,8 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
*target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
- "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
- selected->subchannel, selected);
+ "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)", *target,
+ selected);
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
@@ -356,10 +385,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, pollent,
+ p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollset = pollset;
+ pp->pollent = pollent;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = initial_metadata_flags;
@@ -410,9 +440,9 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
- grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
- pp->pollset);
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
+ p->base.interested_parties);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
@@ -443,7 +473,7 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connecting_transient_failure");
break;
- case GRPC_CHANNEL_FATAL_FAILURE:
+ case GRPC_CHANNEL_SHUTDOWN:
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
@@ -459,15 +489,15 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
unref = 1;
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING("Round Robin Channels Exhausted",
&error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_push(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
+ NULL);
gpr_free(pp);
}
} else {
@@ -521,8 +551,8 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
- grpc_exec_ctx_push(exec_ctx, closure,
- GRPC_ERROR_CREATE("Round Robin not connected"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE("Round Robin not connected"), NULL);
}
}
@@ -535,7 +565,7 @@ static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
-static grpc_lb_policy *create_round_robin(grpc_exec_ctx *exec_ctx,
+static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->addresses != NULL);
@@ -591,7 +621,7 @@ static grpc_lb_policy *create_round_robin(grpc_exec_ctx *exec_ctx,
}
static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
- round_robin_factory_ref, round_robin_factory_unref, create_round_robin,
+ round_robin_factory_ref, round_robin_factory_unref, round_robin_create,
"round_robin"};
static grpc_lb_policy_factory round_robin_lb_policy_factory = {
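
The readylist described at the top of round_robin.c is an ordinary circular doubly-linked list with a dummy root. A minimal standalone sketch of the two invariants the policy comment relies on follows; the rr_node type and field names are illustrative, not the actual ready_list_node structure:

#include <stddef.h>

/* Illustrative stand-in for the policy's ready-list node. */
typedef struct rr_node {
  struct rr_node *prev;
  struct rr_node *next;
  void *subchannel; /* NULL for the dummy root */
} rr_node;

/* An empty readylist is just the root pointing at itself. */
static void readylist_init(rr_node *root) {
  root->prev = root->next = root;
  root->subchannel = NULL;
}

/* "Prepended to the readylist's root node": the new node is linked in just
   before the root, i.e. it becomes root->prev. */
static void readylist_add(rr_node *root, rr_node *node) {
  node->next = root;
  node->prev = root->prev;
  root->prev->next = node;
  root->prev = node;
}

/* Mirrors remove_disconnected_sc_locked above: if the node being removed was
   the last pick, lastpick slides back to node->prev, which is the dummy root
   when the list held a single element. */
static void readylist_remove(rr_node *node, rr_node **lastpick) {
  if (*lastpick == node) *lastpick = node->prev;
  node->prev->next = node->next;
  node->next->prev = node->prev;
}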
diff --git a/src/core/ext/load_reporting/load_reporting.c b/src/core/ext/load_reporting/load_reporting.c
new file mode 100644
index 0000000000..9e4d32676f
--- /dev/null
+++ b/src/core/ext/load_reporting/load_reporting.c
@@ -0,0 +1,133 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/ext/load_reporting/load_reporting.h"
+#include "src/core/ext/load_reporting/load_reporting_filter.h"
+#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/surface/channel_init.h"
+
+struct grpc_load_reporting_config {
+ grpc_load_reporting_fn fn;
+ void *user_data;
+};
+
+grpc_load_reporting_config *grpc_load_reporting_config_create(
+ grpc_load_reporting_fn fn, void *user_data) {
+ GPR_ASSERT(fn != NULL);
+ grpc_load_reporting_config *lrc =
+ gpr_malloc(sizeof(grpc_load_reporting_config));
+ lrc->fn = fn;
+ lrc->user_data = user_data;
+ return lrc;
+}
+
+grpc_load_reporting_config *grpc_load_reporting_config_copy(
+ grpc_load_reporting_config *src) {
+ return grpc_load_reporting_config_create(src->fn, src->user_data);
+}
+
+void grpc_load_reporting_config_destroy(grpc_load_reporting_config *lrc) {
+ gpr_free(lrc);
+}
+
+void grpc_load_reporting_config_call(
+ grpc_load_reporting_config *lrc,
+ const grpc_load_reporting_call_data *call_data) {
+ lrc->fn(call_data, lrc->user_data);
+}
+
+static bool is_load_reporting_enabled(const grpc_channel_args *a) {
+ if (a == NULL) return false;
+ for (size_t i = 0; i < a->num_args; i++) {
+ if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_LOAD_REPORTING)) {
+ return a->args[i].type == GRPC_ARG_POINTER &&
+ a->args[i].value.pointer.p != NULL;
+ }
+ }
+ return false;
+}
+
+static bool maybe_add_load_reporting_filter(grpc_channel_stack_builder *builder,
+ void *arg) {
+ const grpc_channel_args *args =
+ grpc_channel_stack_builder_get_channel_arguments(builder);
+ if (is_load_reporting_enabled(args)) {
+ return grpc_channel_stack_builder_prepend_filter(
+ builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ }
+ return true;
+}
+
+static void lrd_arg_destroy(void *p) { grpc_load_reporting_config_destroy(p); }
+
+static void *lrd_arg_copy(void *p) {
+ return grpc_load_reporting_config_copy(p);
+}
+
+static int lrd_arg_cmp(void *a, void *b) {
+ grpc_load_reporting_config *lhs = a;
+ grpc_load_reporting_config *rhs = b;
+ return !(lhs->fn == rhs->fn && lhs->user_data == rhs->user_data);
+}
+
+static const grpc_arg_pointer_vtable lrd_ptr_vtable = {
+ lrd_arg_copy, lrd_arg_destroy, lrd_arg_cmp};
+
+grpc_arg grpc_load_reporting_config_create_arg(
+ grpc_load_reporting_config *lrc) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_ENABLE_LOAD_REPORTING;
+ arg.value.pointer.p = lrc;
+ arg.value.pointer.vtable = &lrd_ptr_vtable;
+ return arg;
+}
+
+/* Plugin registration */
+
+void grpc_load_reporting_plugin_init(void) {
+ grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
+ maybe_add_load_reporting_filter,
+ (void *)&grpc_load_reporting_filter);
+ grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+ maybe_add_load_reporting_filter,
+ (void *)&grpc_load_reporting_filter);
+}
+
+void grpc_load_reporting_plugin_shutdown() {}
diff --git a/src/core/ext/load_reporting/load_reporting.h b/src/core/ext/load_reporting/load_reporting.h
new file mode 100644
index 0000000000..316cd89bd7
--- /dev/null
+++ b/src/core/ext/load_reporting/load_reporting.h
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_H
+#define GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_H
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/surface/call.h"
+
+typedef struct grpc_load_reporting_config grpc_load_reporting_config;
+
+/** Call information to be passed to the provided load reporting function upon
+ * completion of the call */
+typedef struct grpc_load_reporting_call_data {
+ const grpc_call_stats *stats; /**< Stats for the call */
+ const char *trailing_md_string; /**< LR trailing metadata info */
+} grpc_load_reporting_call_data;
+
+/** Custom function to be called by the load reporting filter. */
+typedef void (*grpc_load_reporting_fn)(
+ const grpc_load_reporting_call_data *call_data, void *user_data);
+
+/** Register \a fn as the function to be invoked by the load reporting filter.
+ * \a fn will be invoked at the beginning and at the end of the call.
+ *
+ * For the first invocation, \a fn's first argument
+ * (grpc_load_reporting_call_data*) will be NULL. \a user_data is always passed
+ * as-is. */
+grpc_load_reporting_config *grpc_load_reporting_config_create(
+ grpc_load_reporting_fn fn, void *user_data);
+
+grpc_load_reporting_config *grpc_load_reporting_config_copy(
+ grpc_load_reporting_config *src);
+
+void grpc_load_reporting_config_destroy(grpc_load_reporting_config *lrc);
+
+/** Invoke the function registered via \a grpc_load_reporting_config_create. */
+void grpc_load_reporting_config_call(
+ grpc_load_reporting_config *lrc,
+ const grpc_load_reporting_call_data *call_data);
+
+/** Return a \a grpc_arg enabling load reporting */
+grpc_arg grpc_load_reporting_config_create_arg(grpc_load_reporting_config *lrc);
+
+#endif /* GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_H */
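
Taken together with load_reporting.c above, the header suggests the following wiring on the caller's side. This is a minimal sketch, assuming the public grpc_server and grpc_channel_args APIs; the callback body and the choice to attach the arg to a server are illustrative. The config is copied through the arg's pointer vtable (lrd_arg_copy), so the caller can release its own handle after creating the server.

#include <stdio.h>

#include <grpc/grpc.h>

#include "src/core/ext/load_reporting/load_reporting.h"

/* Matches grpc_load_reporting_fn: call_data is NULL for the initial
   invocation and carries per-call stats afterwards. */
static void sample_load_reporter(
    const grpc_load_reporting_call_data *call_data, void *user_data) {
  if (call_data == NULL) return; /* initial invocation */
  fprintf(stderr, "LR trailing metadata: %s\n",
          call_data->trailing_md_string ? call_data->trailing_md_string : "");
}

static grpc_server *create_load_reporting_server(void) {
  grpc_load_reporting_config *lrc =
      grpc_load_reporting_config_create(sample_load_reporter, NULL);
  grpc_arg lr_arg = grpc_load_reporting_config_create_arg(lrc);
  grpc_channel_args args = {1, &lr_arg};
  /* The presence of GRPC_ARG_ENABLE_LOAD_REPORTING with a non-NULL pointer is
     what makes maybe_add_load_reporting_filter prepend the filter. */
  grpc_server *server = grpc_server_create(&args, NULL);
  grpc_load_reporting_config_destroy(lrc); /* the copied args own their copy */
  return server;
}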
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
new file mode 100644
index 0000000000..f372f88c3a
--- /dev/null
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -0,0 +1,151 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+#include <string.h>
+
+#include "src/core/ext/load_reporting/load_reporting.h"
+#include "src/core/ext/load_reporting/load_reporting_filter.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+typedef struct call_data { const char *trailing_md_string; } call_data;
+typedef struct channel_data {
+ gpr_mu mu;
+ grpc_load_reporting_config *lrc;
+} channel_data;
+
+static void invoke_lr_fn_locked(grpc_load_reporting_config *lrc,
+ grpc_load_reporting_call_data *lr_call_data) {
+ GPR_TIMER_BEGIN("load_reporting_config_fn", 0);
+ grpc_load_reporting_config_call(lrc, lr_call_data);
+ GPR_TIMER_END("load_reporting_config_fn", 0);
+}
+
+/* Constructor for call_data */
+static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_call_element_args *args) {
+ call_data *calld = elem->call_data;
+ memset(calld, 0, sizeof(call_data));
+}
+
+/* Destructor for call_data */
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_stats *stats, void *ignored) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+
+ grpc_load_reporting_call_data lr_call_data = {stats,
+ calld->trailing_md_string};
+
+ gpr_mu_lock(&chand->mu);
+ invoke_lr_fn_locked(chand->lrc, &lr_call_data);
+ gpr_mu_unlock(&chand->mu);
+}
+
+/* Constructor for channel_data */
+static void init_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem,
+ grpc_channel_element_args *args) {
+ GPR_ASSERT(!args->is_last);
+
+ channel_data *chand = elem->channel_data;
+ memset(chand, 0, sizeof(channel_data));
+
+ gpr_mu_init(&chand->mu);
+ for (size_t i = 0; i < args->channel_args->num_args; i++) {
+ if (0 == strcmp(args->channel_args->args[i].key,
+ GRPC_ARG_ENABLE_LOAD_REPORTING)) {
+ grpc_load_reporting_config *arg_lrc =
+ args->channel_args->args[i].value.pointer.p;
+ chand->lrc = grpc_load_reporting_config_copy(arg_lrc);
+ GPR_ASSERT(chand->lrc != NULL);
+ break;
+ }
+ }
+ GPR_ASSERT(chand->lrc != NULL); /* arg actually found */
+
+ gpr_mu_lock(&chand->mu);
+ invoke_lr_fn_locked(chand->lrc, NULL);
+ gpr_mu_unlock(&chand->mu);
+}
+
+/* Destructor for channel data */
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+ grpc_channel_element *elem) {
+ channel_data *chand = elem->channel_data;
+ gpr_mu_destroy(&chand->mu);
+ grpc_load_reporting_config_destroy(chand->lrc);
+}
+
+static grpc_mdelem *lr_trailing_md_filter(void *user_data, grpc_mdelem *md) {
+ grpc_call_element *elem = user_data;
+ call_data *calld = elem->call_data;
+
+ if (md->key == GRPC_MDSTR_LOAD_REPORTING) {
+ calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
+ return NULL;
+ }
+
+ return md;
+}
+
+static void lr_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op *op) {
+ GPR_TIMER_BEGIN("lr_start_transport_stream_op", 0);
+
+ if (op->send_trailing_metadata) {
+ grpc_metadata_batch_filter(op->send_trailing_metadata,
+ lr_trailing_md_filter, elem);
+ }
+ grpc_call_next_op(exec_ctx, elem, op);
+
+ GPR_TIMER_END("lr_start_transport_stream_op", 0);
+}
+
+const grpc_channel_filter grpc_load_reporting_filter = {
+ lr_start_transport_stream_op,
+ grpc_channel_next_op,
+ sizeof(call_data),
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ destroy_call_elem,
+ sizeof(channel_data),
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_call_next_get_peer,
+ "load_reporting"};
diff --git a/src/core/ext/load_reporting/load_reporting_filter.h b/src/core/ext/load_reporting/load_reporting_filter.h
new file mode 100644
index 0000000000..f69cd6fdc6
--- /dev/null
+++ b/src/core/ext/load_reporting/load_reporting_filter.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
+#define GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
+
+#include "src/core/lib/channel/channel_stack.h"
+
+extern const grpc_channel_filter grpc_load_reporting_filter;
+
+#endif /* GRPC_CORE_EXT_LOAD_REPORTING_LOAD_REPORTING_FILTER_H */
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index adb9c3af84..31ac968670 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -111,8 +111,8 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
if (r->next_completion != NULL) {
*r->target_config = NULL;
- grpc_exec_ctx_push(exec_ctx, r->next_completion,
- GRPC_ERROR_CREATE("Resolver Shutdown"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE("Resolver Shutdown"), NULL);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@@ -189,12 +189,17 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
gpr_timespec timeout = gpr_time_sub(next_try, now);
const char *msg = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "dns resolution failed: retrying in %d.%09d seconds: %s",
- timeout.tv_sec, timeout.tv_nsec, msg);
+ gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_error_free_string(msg);
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
+  if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
+ timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "retrying immediately");
+ }
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, dns_on_retry_timer, r,
now);
}
@@ -227,7 +232,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
if (r->resolved_config) {
grpc_client_config_ref(r->resolved_config);
}
- grpc_exec_ctx_push(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 90da6a93a0..1f7cce2f43 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -92,7 +92,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
- grpc_exec_ctx_push(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
@@ -133,7 +133,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "sockaddr");
r->published = 1;
*r->target_config = cfg;
- grpc_exec_ctx_push(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
}
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index 28d1be878f..85f9efb3b6 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -103,13 +103,13 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
0);
GPR_ASSERT(c->result->transport);
- c->result->channel_args = c->args.channel_args;
+ c->result->channel_args = grpc_channel_args_copy(c->args.channel_args);
} else {
memset(c->result, 0, sizeof(*c->result));
}
notify = c->notify;
c->notify = NULL;
- notify->cb(exec_ctx, notify->cb_arg, error);
+ grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
}
static void connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *con) {}
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
new file mode 100644
index 0000000000..ca435c25ce
--- /dev/null
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/grpc_posix.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+#include <fcntl.h>
+
+#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_posix.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/transport/transport.h"
+
+grpc_channel *grpc_insecure_channel_create_from_fd(
+ const char *target, int fd, const grpc_channel_args *args) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3,
+ (target, fd, args));
+
+ grpc_arg default_authority_arg;
+ default_authority_arg.type = GRPC_ARG_STRING;
+ default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
+ default_authority_arg.value.string = "test.authority";
+ grpc_channel_args *final_args =
+ grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
+
+ int flags = fcntl(fd, F_GETFL, 0);
+ GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
+
+ grpc_endpoint *client =
+ grpc_tcp_create(grpc_fd_create(fd, "client"),
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "fd-client");
+
+ grpc_transport *transport =
+ grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
+ GPR_ASSERT(transport);
+ grpc_channel *channel = grpc_channel_create(
+ &exec_ctx, target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
+ grpc_channel_args_destroy(final_args);
+ grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL, 0);
+
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ return channel != NULL ? channel : grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INTERNAL,
+ "Failed to create client channel");
+}
+
+#else // !GPR_SUPPORT_CHANNELS_FROM_FD
+
+grpc_channel *grpc_insecure_channel_create_from_fd(
+ const char *target, int fd, const grpc_channel_args *args) {
+ GPR_ASSERT(0);
+ return NULL;
+}
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
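
For illustration only: a hedged sketch of how the new entry point might be exercised on a platform that defines GPR_SUPPORT_CHANNELS_FROM_FD. The socketpair setup and the "fd-target" name are placeholders; the second fd would be handed to the server-side counterpart shown further below.

#include <sys/socket.h>

#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>

/* Wrap one end of a connected socket pair as an insecure client channel; the
   other end is left for the peer (e.g. a server created from the same fd). */
static grpc_channel *channel_over_socketpair(int fds[2]) {
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return NULL;
  return grpc_insecure_channel_create_from_fd("fd-target", fds[0], NULL);
}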
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index bceef152be..721ba82d8f 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -90,7 +90,6 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_auth_context *auth_context) {
connector *c = arg;
grpc_closure *notify;
- grpc_channel_args *args_copy = NULL;
gpr_mu_lock(&c->mu);
if (c->connecting_endpoint == NULL) {
memset(c->result, 0, sizeof(*c->result));
@@ -109,23 +108,20 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
0);
auth_context_arg = grpc_auth_context_to_arg(auth_context);
- args_copy = grpc_channel_args_copy_and_add(c->args.channel_args,
- &auth_context_arg, 1);
- c->result->channel_args = args_copy;
+ c->result->channel_args = grpc_channel_args_copy_and_add(
+ c->args.channel_args, &auth_context_arg, 1);
}
notify = c->notify;
c->notify = NULL;
- /* look at c->args which are connector args. */
- notify->cb(exec_ctx, notify->cb_arg, GRPC_ERROR_NONE);
- if (args_copy != NULL) grpc_channel_args_destroy(args_copy);
+ grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_NONE, NULL);
}
static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
connector *c = arg;
- grpc_channel_security_connector_do_handshake(exec_ctx, c->security_connector,
- c->connecting_endpoint,
- on_secure_handshake_done, c);
+ grpc_channel_security_connector_do_handshake(
+ exec_ctx, c->security_connector, c->connecting_endpoint, c->args.deadline,
+ on_secure_handshake_done, c);
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -147,13 +143,14 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
&c->initial_string_sent);
} else {
grpc_channel_security_connector_do_handshake(
- exec_ctx, c->security_connector, tcp, on_secure_handshake_done, c);
+ exec_ctx, c->security_connector, tcp, c->args.deadline,
+ on_secure_handshake_done, c);
}
} else {
memset(c->result, 0, sizeof(*c->result));
notify = c->notify;
c->notify = NULL;
- notify->cb(exec_ctx, notify->cb_arg, GRPC_ERROR_NONE);
+ grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
}
}
@@ -175,7 +172,6 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
grpc_closure *notify) {
connector *c = (connector *)con;
GPR_ASSERT(c->notify == NULL);
- GPR_ASSERT(notify->cb);
c->notify = notify;
c->args = *args;
c->result = result;
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index 9efc922a8d..e5c987925c 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -75,14 +75,14 @@ static void destroy(grpc_exec_ctx *exec_ctx, grpc_server *server, void *tcpp,
grpc_closure *destroy_done) {
grpc_tcp_server *tcp = tcpp;
grpc_tcp_server_unref(exec_ctx, tcp);
- grpc_exec_ctx_push(exec_ctx, destroy_done, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, destroy_done, GRPC_ERROR_NONE, NULL);
}
int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
grpc_resolved_addresses *resolved = NULL;
grpc_tcp_server *tcp = NULL;
size_t i;
- unsigned count = 0;
+ size_t count = 0;
int port_num = -1;
int port_temp;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -120,13 +120,15 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
}
if (count == 0) {
char *msg;
- gpr_asprintf(&msg, "No address added out of total %d resolved", naddrs);
+ gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
+ naddrs);
err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
gpr_free(msg);
goto error;
} else if (count != naddrs) {
char *msg;
- gpr_asprintf(&msg, "Only %d addresses added out of total %d resolved",
+ gpr_asprintf(&msg, "Only %" PRIuPTR
+ " addresses added out of total %" PRIuPTR " resolved",
count, naddrs);
err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
gpr_free(msg);
@@ -153,6 +155,11 @@ error:
}
port_num = 0;
+ const char *msg = grpc_error_string(err);
+ gpr_log(GPR_ERROR, "%s", msg);
+ grpc_error_free_string(msg);
+ GRPC_ERROR_UNREF(err);
+
done:
grpc_exec_ctx_finish(&exec_ctx);
if (errors != NULL) {
@@ -160,7 +167,6 @@ done:
GRPC_ERROR_UNREF(errors[i]);
}
}
- GRPC_ERROR_UNREF(err);
gpr_free(errors);
return port_num;
}
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
new file mode 100644
index 0000000000..96bf4d6f30
--- /dev/null
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/grpc_posix.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_posix.h"
+#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/surface/server.h"
+
+void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
+ grpc_completion_queue *cq,
+ int fd) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
+ char *name;
+ gpr_asprintf(&name, "fd:%d", fd);
+
+ grpc_endpoint *server_endpoint = grpc_tcp_create(
+ grpc_fd_create(fd, name), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+
+ gpr_free(name);
+
+ const grpc_channel_args *server_args = grpc_server_get_channel_args(server);
+ grpc_transport *transport = grpc_create_chttp2_transport(
+ &exec_ctx, server_args, server_endpoint, 0 /* is_client */);
+ grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, grpc_cq_pollset(cq));
+ grpc_server_setup_transport(&exec_ctx, server, transport, NULL, server_args);
+ grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL, 0);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+#else // !GPR_SUPPORT_CHANNELS_FROM_FD
+
+void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
+ grpc_completion_queue *cq,
+ int fd) {
+ GPR_ASSERT(0);
+}
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
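
The server-side counterpart, under the same GPR_SUPPORT_CHANNELS_FROM_FD assumption: a minimal sketch using the public server lifecycle calls, with the fd presumed to be the other end of an already-connected socket such as the one in the client-side sketch above.

#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>

/* Attach an already-connected fd to a started server; the new endpoint is
   polled through the supplied completion queue. */
static grpc_server *server_over_fd(int fd, grpc_completion_queue **out_cq) {
  grpc_server *server = grpc_server_create(NULL, NULL);
  *out_cq = grpc_completion_queue_create(NULL);
  grpc_server_register_completion_queue(server, *out_cq, NULL);
  grpc_server_start(server);
  grpc_server_add_insecure_channel_from_fd(server, *out_cq, fd);
  return server;
}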
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index 3286b220d4..c42810e913 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -129,9 +129,11 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *statep, grpc_endpoint *tcp,
state->state = statep;
state_ref(state->state);
state->accepting_pollset = accepting_pollset;
- grpc_server_security_connector_do_handshake(exec_ctx, state->state->sc,
- acceptor, tcp,
- on_secure_handshake_done, state);
+ grpc_server_security_connector_do_handshake(
+ exec_ctx, state->state->sc, acceptor, tcp,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_seconds(120, GPR_TIMESPAN)),
+ on_secure_handshake_done, state);
}
/* Server callback: start listening on our ports */
@@ -173,7 +175,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_tcp_server *tcp = NULL;
server_secure_state *state = NULL;
size_t i;
- unsigned count = 0;
+ size_t count = 0;
int port_num = -1;
int port_temp;
grpc_security_status status = GRPC_SECURITY_ERROR;
@@ -188,7 +190,11 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
3, (server, addr, creds));
/* create security context */
- if (creds == NULL) goto error;
+ if (creds == NULL) {
+ err = GRPC_ERROR_CREATE(
+ "No credentials specified for secure server port (creds==NULL)");
+ goto error;
+ }
status = grpc_server_credentials_create_security_connector(creds, &sc);
if (status != GRPC_SECURITY_OK) {
char *msg;
@@ -240,14 +246,15 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
}
if (count == 0) {
char *msg;
- gpr_asprintf(&msg, "No address added out of total %d resolved",
+ gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
resolved->naddrs);
err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, resolved->naddrs);
gpr_free(msg);
goto error;
} else if (count != resolved->naddrs) {
char *msg;
- gpr_asprintf(&msg, "Only %d addresses added out of total %d resolved",
+ gpr_asprintf(&msg, "Only %" PRIuPTR
+ " addresses added out of total %" PRIuPTR " resolved",
count, resolved->naddrs);
err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, resolved->naddrs);
gpr_free(msg);
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.c b/src/core/ext/transport/chttp2/transport/bin_decoder.c
new file mode 100644
index 0000000000..2d90b01cd8
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.c
@@ -0,0 +1,232 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include "src/core/lib/support/string.h"
+
+static uint8_t decode_table[] = {
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 62, 0x40, 0x40, 0x40, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40};
+
+static const uint8_t tail_xtra[4] = {0, 0, 1, 2};
+
+static bool input_is_valid(uint8_t *input_ptr, size_t length) {
+ size_t i;
+
+ for (i = 0; i < length; ++i) {
+ if ((decode_table[input_ptr[i]] & 0xC0) != 0) {
+      gpr_log(GPR_ERROR,
+              "Base64 decoding failed, invalid character '%c' in base64 "
+              "input.\n",
+              (char)input_ptr[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
+#define COMPOSE_OUTPUT_BYTE_0(input_ptr) \
+ (uint8_t)((decode_table[input_ptr[0]] << 2) | \
+ (decode_table[input_ptr[1]] >> 4))
+
+#define COMPOSE_OUTPUT_BYTE_1(input_ptr) \
+ (uint8_t)((decode_table[input_ptr[1]] << 4) | \
+ (decode_table[input_ptr[2]] >> 2))
+
+#define COMPOSE_OUTPUT_BYTE_2(input_ptr) \
+ (uint8_t)((decode_table[input_ptr[2]] << 6) | decode_table[input_ptr[3]])
+
+bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) {
+ size_t input_tail;
+
+ if (ctx->input_cur > ctx->input_end || ctx->output_cur > ctx->output_end) {
+ return false;
+ }
+
+ // Process a block of 4 input characters and 3 output bytes
+ while (ctx->input_end >= ctx->input_cur + 4 &&
+ ctx->output_end >= ctx->output_cur + 3) {
+ if (!input_is_valid(ctx->input_cur, 4)) return false;
+ ctx->output_cur[0] = COMPOSE_OUTPUT_BYTE_0(ctx->input_cur);
+ ctx->output_cur[1] = COMPOSE_OUTPUT_BYTE_1(ctx->input_cur);
+ ctx->output_cur[2] = COMPOSE_OUTPUT_BYTE_2(ctx->input_cur);
+ ctx->output_cur += 3;
+ ctx->input_cur += 4;
+ }
+
+ // Process the tail of input data
+ input_tail = (size_t)(ctx->input_end - ctx->input_cur);
+ if (input_tail == 4) {
+ // Process the input data with pad chars
+ if (ctx->input_cur[3] == '=') {
+ if (ctx->input_cur[2] == '=' && ctx->output_end >= ctx->output_cur + 1) {
+ if (!input_is_valid(ctx->input_cur, 2)) return false;
+ *(ctx->output_cur++) = COMPOSE_OUTPUT_BYTE_0(ctx->input_cur);
+ ctx->input_cur += 4;
+ } else if (ctx->output_end >= ctx->output_cur + 2) {
+ if (!input_is_valid(ctx->input_cur, 3)) return false;
+ *(ctx->output_cur++) = COMPOSE_OUTPUT_BYTE_0(ctx->input_cur);
+ *(ctx->output_cur++) = COMPOSE_OUTPUT_BYTE_1(ctx->input_cur);
+ ;
+ ctx->input_cur += 4;
+ }
+ }
+
+ } else if (ctx->contains_tail && input_tail > 1) {
+    // Process the input data without pad chars, but contains_tail is set
+ if (ctx->output_end >= ctx->output_cur + tail_xtra[input_tail]) {
+ if (!input_is_valid(ctx->input_cur, input_tail)) return false;
+ switch (input_tail) {
+ case 3:
+ ctx->output_cur[1] = COMPOSE_OUTPUT_BYTE_1(ctx->input_cur);
+ case 2:
+ ctx->output_cur[0] = COMPOSE_OUTPUT_BYTE_0(ctx->input_cur);
+ }
+ ctx->output_cur += tail_xtra[input_tail];
+ ctx->input_cur += input_tail;
+ }
+ }
+
+ return true;
+}
+
+gpr_slice grpc_chttp2_base64_decode(gpr_slice input) {
+ size_t input_length = GPR_SLICE_LENGTH(input);
+ size_t output_length = input_length / 4 * 3;
+ struct grpc_base64_decode_context ctx;
+ gpr_slice output;
+
+ if (input_length % 4 != 0) {
+ gpr_log(GPR_ERROR,
+ "Base64 decoding failed, input of "
+ "grpc_chttp2_base64_decode has a length of %d, which is not a "
+ "multiple of 4.\n",
+ (int)input_length);
+ return gpr_empty_slice();
+ }
+
+ if (input_length > 0) {
+ uint8_t *input_end = GPR_SLICE_END_PTR(input);
+ if (*(--input_end) == '=') {
+ output_length--;
+ if (*(--input_end) == '=') {
+ output_length--;
+ }
+ }
+ }
+ output = gpr_slice_malloc(output_length);
+
+ ctx.input_cur = GPR_SLICE_START_PTR(input);
+ ctx.input_end = GPR_SLICE_END_PTR(input);
+ ctx.output_cur = GPR_SLICE_START_PTR(output);
+ ctx.output_end = GPR_SLICE_END_PTR(output);
+ ctx.contains_tail = false;
+
+ if (!grpc_base64_decode_partial(&ctx)) {
+ char *s = gpr_dump_slice(input, GPR_DUMP_ASCII);
+ gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
+ gpr_free(s);
+ gpr_slice_unref(output);
+ return gpr_empty_slice();
+ }
+ GPR_ASSERT(ctx.output_cur == GPR_SLICE_END_PTR(output));
+ GPR_ASSERT(ctx.input_cur == GPR_SLICE_END_PTR(input));
+ return output;
+}
+
+gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
+ size_t output_length) {
+ size_t input_length = GPR_SLICE_LENGTH(input);
+ gpr_slice output = gpr_slice_malloc(output_length);
+ struct grpc_base64_decode_context ctx;
+
+ // The length of a base64 string cannot be 4 * n + 1
+ if (input_length % 4 == 1) {
+ gpr_log(GPR_ERROR,
+ "Base64 decoding failed, input of "
+ "grpc_chttp2_base64_decode_with_length has a length of %d, which "
+ "has a tail of 1 byte.\n",
+ (int)input_length);
+ gpr_slice_unref(output);
+ return gpr_empty_slice();
+ }
+
+ if (output_length > input_length / 4 * 3 + tail_xtra[input_length % 4]) {
+ gpr_log(GPR_ERROR,
+ "Base64 decoding failed, output_length %d is longer "
+ "than the max possible output length %d.\n",
+ (int)output_length,
+ (int)(input_length / 4 * 3 + tail_xtra[input_length % 4]));
+ gpr_slice_unref(output);
+ return gpr_empty_slice();
+ }
+
+ ctx.input_cur = GPR_SLICE_START_PTR(input);
+ ctx.input_end = GPR_SLICE_END_PTR(input);
+ ctx.output_cur = GPR_SLICE_START_PTR(output);
+ ctx.output_end = GPR_SLICE_END_PTR(output);
+ ctx.contains_tail = true;
+
+ if (!grpc_base64_decode_partial(&ctx)) {
+ char *s = gpr_dump_slice(input, GPR_DUMP_ASCII);
+ gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
+ gpr_free(s);
+ gpr_slice_unref(output);
+ return gpr_empty_slice();
+ }
+ GPR_ASSERT(ctx.output_cur == GPR_SLICE_END_PTR(output));
+ GPR_ASSERT(ctx.input_cur <= GPR_SLICE_END_PTR(input));
+ return output;
+}
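
The slice sizing in grpc_chttp2_base64_decode above is the standard base64 arithmetic: every 4 input characters yield 3 output bytes, and each trailing '=' removes one byte. A small standalone helper (illustrative only, not part of this file) spells the rule out, with a worked example in the comment:

#include <stddef.h>

/* Expected decoded size of a padded base64 string of length n whose final two
   characters are second_to_last and last. Example: "TWFuTQ==" has n = 8, so
   8 / 4 * 3 = 6, minus 2 for the two '=' pads, giving the 4 bytes "ManM". */
static size_t base64_decoded_length(size_t n, char second_to_last, char last) {
  if (n == 0 || n % 4 != 0) return 0; /* padded input must be a multiple of 4 */
  size_t out = n / 4 * 3;
  if (last == '=') {
    out--;
    if (second_to_last == '=') out--;
  }
  return out;
}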
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h
new file mode 100644
index 0000000000..b9d40c9b74
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H
+
+#include <grpc/support/slice.h>
+#include <stdbool.h>
+
+struct grpc_base64_decode_context {
+ /* input/output: */
+ uint8_t *input_cur;
+ uint8_t *input_end;
+ uint8_t *output_cur;
+ uint8_t *output_end;
+  /* Indicates whether the decoder should handle the tail of the input data */
+ bool contains_tail;
+};
+
+/* base64 decode a grpc_base64_decode_context until either input_end or
+   output_end is reached. When input_end is reached, (input_end - input_cur)
+   is less than 4. When output_end is reached, (output_end - output_cur) is
+   less than 3. Returns false if decoding fails. */
+bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx);
+
+/* base64 decode a slice with pad chars. Returns a new slice, does not take
+   ownership of the input. Returns an empty slice if decoding fails. */
+gpr_slice grpc_chttp2_base64_decode(gpr_slice input);
+
+/* base64 decode a slice without pad chars; the decoded data length must be
+   supplied. Returns a new slice, does not take ownership of the input.
+   Returns an empty slice if decoding fails. */
+gpr_slice grpc_chttp2_base64_decode_with_length(gpr_slice input,
+ size_t output_length);
+
+#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */
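
A hedged usage sketch for the two entry points, using the gpr_slice helpers from <grpc/support/slice.h>; the literal inputs are examples only.

#include <grpc/support/slice.h>

#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"

static void decode_examples(void) {
  /* Padded form: the input length must be a multiple of 4. */
  gpr_slice padded = gpr_slice_from_copied_string("Zm9vYmFy"); /* "foobar" */
  gpr_slice out1 = grpc_chttp2_base64_decode(padded);

  /* Unpadded form: the caller supplies the expected decoded length (5). */
  gpr_slice unpadded = gpr_slice_from_copied_string("Zm9vYmE"); /* "fooba" */
  gpr_slice out2 = grpc_chttp2_base64_decode_with_length(unpadded, 5);

  gpr_slice_unref(out2);
  gpr_slice_unref(out1);
  gpr_slice_unref(unpadded);
  gpr_slice_unref(padded);
}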
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 5264b9e1f8..9aa39ba26c 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -47,6 +47,7 @@
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
#include "src/core/ext/transport/chttp2/transport/timeout_encoding.h"
+#include "src/core/lib/http/parser.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -105,7 +106,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
static void cancel_from_api(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
- grpc_status_code status);
+ grpc_status_code status,
+ gpr_slice *optional_message);
static void close_from_api(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
@@ -161,6 +163,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(t->ep == NULL);
+ gpr_slice_unref(t->optional_drop_message);
+
gpr_slice_buffer_destroy(&t->global.qbuf);
gpr_slice_buffer_destroy(&t->writing.outbuf);
@@ -190,8 +194,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
and maybe they hold resources that need to be freed */
while (t->global.pings.next != &t->global.pings) {
grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
- grpc_exec_ctx_push(exec_ctx, ping->on_recv,
- GRPC_ERROR_CREATE("Transport closed"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
+ GRPC_ERROR_CREATE("Transport closed"), NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@@ -261,6 +265,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->parsing.deframe_state =
is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->writing.is_client = is_client;
+ t->optional_drop_message = gpr_empty_slice();
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
@@ -444,7 +449,7 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error) {
if (!t->closed) {
t->closed = 1;
- connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_FATAL_FAILURE,
+ connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
if (t->ep) {
allow_endpoint_shutdown_locked(exec_ctx, t);
@@ -643,7 +648,7 @@ static void finish_global_actions(grpc_exec_ctx *exec_ctx,
t->executor.writing_active = 1;
REF_TRANSPORT(t, "writing");
prevent_endpoint_shutdown(t);
- grpc_exec_ctx_push(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
}
check_read_ops(exec_ctx, &t->global);
@@ -807,8 +812,10 @@ void grpc_chttp2_add_incoming_goaway(
gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
gpr_slice_unref(goaway_text);
transport_global->seen_goaway = 1;
+ /* lie: use transient failure from the transport to indicate goaway has been
+ * received */
connectivity_state_set(
- exec_ctx, transport_global, GRPC_CHANNEL_FATAL_FAILURE,
+ exec_ctx, transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_str(
grpc_error_set_int(GRPC_ERROR_CREATE("GOAWAY received"),
GRPC_ERROR_INT_HTTP2_ERROR,
@@ -869,7 +876,7 @@ static void maybe_start_some_streams(
grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
&stream_global)) {
cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_STATUS_UNAVAILABLE);
+ GRPC_STATUS_UNAVAILABLE, NULL);
}
}
@@ -907,7 +914,7 @@ void grpc_chttp2_complete_closure_step(
stream_global->collecting_stats);
stream_global->collecting_stats = NULL;
}
- grpc_exec_ctx_push(exec_ctx, closure, closure->error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, closure->error, NULL);
}
*pclosure = NULL;
}
@@ -953,7 +960,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
if (op->cancel_with_status != GRPC_STATUS_OK) {
cancel_from_api(exec_ctx, transport_global, stream_global,
- op->cancel_with_status);
+ op->cancel_with_status, op->optional_close_message);
}
if (op->close_with_status != GRPC_STATUS_OK) {
@@ -974,10 +981,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
if (metadata_size > metadata_peer_limit) {
gpr_log(GPR_DEBUG,
"to-be-sent initial metadata size exceeds peer limit "
- "(%lu vs. %lu)",
+ "(%" PRIuPTR " vs. %" PRIuPTR ")",
metadata_size, metadata_peer_limit);
cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_STATUS_RESOURCE_EXHAUSTED);
+ GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
} else {
if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
stream_global->seen_error = true;
@@ -1033,10 +1040,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx,
if (metadata_size > metadata_peer_limit) {
gpr_log(GPR_DEBUG,
"to-be-sent trailing metadata size exceeds peer limit "
- "(%lu vs. %lu)",
+ "(%" PRIuPTR " vs. %" PRIuPTR ")",
metadata_size, metadata_peer_limit);
cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_STATUS_RESOURCE_EXHAUSTED);
+ GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
} else {
if (contains_non_ok_status(transport_global,
op->send_trailing_metadata)) {
@@ -1127,7 +1134,7 @@ static void ack_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
for (ping = transport_global->pings.next; ping != &transport_global->pings;
ping = ping->next) {
if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
- grpc_exec_ctx_push(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
@@ -1160,7 +1167,7 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
return;
}
- grpc_exec_ctx_push(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -1228,14 +1235,14 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
}
if (stream_global->exceeded_metadata_size) {
cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_STATUS_RESOURCE_EXHAUSTED);
+ GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
}
}
grpc_chttp2_incoming_metadata_buffer_publish(
&stream_global->received_initial_metadata,
stream_global->recv_initial_metadata);
- grpc_exec_ctx_push(exec_ctx, stream_global->recv_initial_metadata_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, stream_global->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE, NULL);
stream_global->recv_initial_metadata_ready = NULL;
}
if (stream_global->recv_message_ready != NULL) {
@@ -1248,13 +1255,13 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
*stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
&stream_global->incoming_frames);
GPR_ASSERT(*stream_global->recv_message != NULL);
- grpc_exec_ctx_push(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
+ GRPC_ERROR_NONE, NULL);
stream_global->recv_message_ready = NULL;
} else if (stream_global->published_trailing_metadata) {
*stream_global->recv_message = NULL;
- grpc_exec_ctx_push(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
+ GRPC_ERROR_NONE, NULL);
stream_global->recv_message_ready = NULL;
}
}
@@ -1267,7 +1274,7 @@ static void check_read_ops(grpc_exec_ctx *exec_ctx,
}
if (stream_global->exceeded_metadata_size) {
cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_STATUS_RESOURCE_EXHAUSTED);
+ GRPC_STATUS_RESOURCE_EXHAUSTED, NULL);
}
}
if (stream_global->all_incoming_byte_streams_finished) {
@@ -1334,7 +1341,8 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void cancel_from_api(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global,
- grpc_status_code status) {
+ grpc_status_code status,
+ gpr_slice *optional_message) {
if (!stream_global->read_closed || !stream_global->write_closed) {
if (stream_global->id != 0) {
gpr_slice_buffer_add(
@@ -1344,8 +1352,12 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx,
(uint32_t)grpc_chttp2_grpc_status_to_http2_error(status),
&stream_global->stats.outgoing));
}
+
+ if (optional_message) {
+ gpr_slice_ref(*optional_message);
+ }
grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
- NULL);
+ optional_message);
}
if (status != GRPC_STATUS_OK && !stream_global->seen_error) {
stream_global->seen_error = true;
@@ -1465,93 +1477,95 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(status >= 0 && (int)status < 100);
- GPR_ASSERT(stream_global->id != 0);
-
- /* Hand roll a header block.
- This is unnecessarily ugly - at some point we should find a more elegant
- solution.
- It's complicated by the fact that our send machinery would be dead by the
- time we got around to sending this, so instead we ignore HPACK compression
- and just write the uncompressed bytes onto the wire. */
- status_hdr = gpr_slice_malloc(15 + (status >= 10));
- p = GPR_SLICE_START_PTR(status_hdr);
- *p++ = 0x40; /* literal header */
- *p++ = 11; /* len(grpc-status) */
- *p++ = 'g';
- *p++ = 'r';
- *p++ = 'p';
- *p++ = 'c';
- *p++ = '-';
- *p++ = 's';
- *p++ = 't';
- *p++ = 'a';
- *p++ = 't';
- *p++ = 'u';
- *p++ = 's';
- if (status < 10) {
- *p++ = 1;
- *p++ = (uint8_t)('0' + status);
- } else {
- *p++ = 2;
- *p++ = (uint8_t)('0' + (status / 10));
- *p++ = (uint8_t)('0' + (status % 10));
- }
- GPR_ASSERT(p == GPR_SLICE_END_PTR(status_hdr));
- len += (uint32_t)GPR_SLICE_LENGTH(status_hdr);
-
- if (optional_message) {
- GPR_ASSERT(GPR_SLICE_LENGTH(*optional_message) < 127);
- message_pfx = gpr_slice_malloc(15);
- p = GPR_SLICE_START_PTR(message_pfx);
- *p++ = 0x40;
- *p++ = 12; /* len(grpc-message) */
+ if (stream_global->id != 0 && !transport_global->is_client) {
+ /* Hand roll a header block.
+ This is unnecessarily ugly - at some point we should find a more elegant
+ solution.
+ It's complicated by the fact that our send machinery would be dead by the
+ time we got around to sending this, so instead we ignore HPACK
+ compression
+ and just write the uncompressed bytes onto the wire. */
+ status_hdr = gpr_slice_malloc(15 + (status >= 10));
+ p = GPR_SLICE_START_PTR(status_hdr);
+ *p++ = 0x40; /* literal header */
+ *p++ = 11; /* len(grpc-status) */
*p++ = 'g';
*p++ = 'r';
*p++ = 'p';
*p++ = 'c';
*p++ = '-';
- *p++ = 'm';
- *p++ = 'e';
- *p++ = 's';
*p++ = 's';
+ *p++ = 't';
*p++ = 'a';
- *p++ = 'g';
- *p++ = 'e';
- *p++ = (uint8_t)GPR_SLICE_LENGTH(*optional_message);
- GPR_ASSERT(p == GPR_SLICE_END_PTR(message_pfx));
- len += (uint32_t)GPR_SLICE_LENGTH(message_pfx);
- len += (uint32_t)GPR_SLICE_LENGTH(*optional_message);
- }
-
- hdr = gpr_slice_malloc(9);
- p = GPR_SLICE_START_PTR(hdr);
- *p++ = (uint8_t)(len >> 16);
- *p++ = (uint8_t)(len >> 8);
- *p++ = (uint8_t)(len);
- *p++ = GRPC_CHTTP2_FRAME_HEADER;
- *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
- *p++ = (uint8_t)(stream_global->id >> 24);
- *p++ = (uint8_t)(stream_global->id >> 16);
- *p++ = (uint8_t)(stream_global->id >> 8);
- *p++ = (uint8_t)(stream_global->id);
- GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
-
- gpr_slice_buffer_add(&transport_global->qbuf, hdr);
- gpr_slice_buffer_add(&transport_global->qbuf, status_hdr);
- if (optional_message) {
- gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
- gpr_slice_buffer_add(&transport_global->qbuf,
- gpr_slice_ref(*optional_message));
- }
+ *p++ = 't';
+ *p++ = 'u';
+ *p++ = 's';
+ if (status < 10) {
+ *p++ = 1;
+ *p++ = (uint8_t)('0' + status);
+ } else {
+ *p++ = 2;
+ *p++ = (uint8_t)('0' + (status / 10));
+ *p++ = (uint8_t)('0' + (status % 10));
+ }
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(status_hdr));
+ len += (uint32_t)GPR_SLICE_LENGTH(status_hdr);
+
+ if (optional_message) {
+ GPR_ASSERT(GPR_SLICE_LENGTH(*optional_message) < 127);
+ message_pfx = gpr_slice_malloc(15);
+ p = GPR_SLICE_START_PTR(message_pfx);
+ *p++ = 0x40;
+ *p++ = 12; /* len(grpc-message) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 'm';
+ *p++ = 'e';
+ *p++ = 's';
+ *p++ = 's';
+ *p++ = 'a';
+ *p++ = 'g';
+ *p++ = 'e';
+ *p++ = (uint8_t)GPR_SLICE_LENGTH(*optional_message);
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GPR_SLICE_LENGTH(message_pfx);
+ len += (uint32_t)GPR_SLICE_LENGTH(*optional_message);
+ }
- gpr_slice_buffer_add(
- &transport_global->qbuf,
- grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR,
- &stream_global->stats.outgoing));
+ hdr = gpr_slice_malloc(9);
+ p = GPR_SLICE_START_PTR(hdr);
+ *p++ = (uint8_t)(len >> 16);
+ *p++ = (uint8_t)(len >> 8);
+ *p++ = (uint8_t)(len);
+ *p++ = GRPC_CHTTP2_FRAME_HEADER;
+ *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
+ *p++ = (uint8_t)(stream_global->id >> 24);
+ *p++ = (uint8_t)(stream_global->id >> 16);
+ *p++ = (uint8_t)(stream_global->id >> 8);
+ *p++ = (uint8_t)(stream_global->id);
+ GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
+
+ gpr_slice_buffer_add(&transport_global->qbuf, hdr);
+ gpr_slice_buffer_add(&transport_global->qbuf, status_hdr);
+ if (optional_message) {
+ gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
+ gpr_slice_buffer_add(&transport_global->qbuf,
+ gpr_slice_ref(*optional_message));
+ }
- if (optional_message) {
- gpr_slice_ref(*optional_message);
+ gpr_slice_buffer_add(
+ &transport_global->qbuf,
+ grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR,
+ &stream_global->stats.outgoing));
+
+ if (optional_message) {
+ gpr_slice_ref(*optional_message);
+ }
}
+
grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global, status,
optional_message);
grpc_error *err = GRPC_ERROR_CREATE("Stream closed");
@@ -1569,8 +1583,12 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
void *user_data,
grpc_chttp2_stream_global *stream_global) {
+ grpc_chttp2_transport *transport = TRANSPORT_FROM_GLOBAL(transport_global);
cancel_from_api(user_data, transport_global, stream_global,
- GRPC_STATUS_UNAVAILABLE);
+ GRPC_STATUS_UNAVAILABLE,
+ GPR_SLICE_IS_EMPTY(transport->optional_drop_message)
+ ? NULL
+ : &transport->optional_drop_message);
}
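The optional_drop_message slice added to the transport follows a simple ownership pattern that the hunks above rely on: it starts life as an empty slice, is replaced (after unreffing the old value) once a drop reason is known, and is unreffed in destruct_transport. A sketch of that lifecycle, using only the slice helpers that appear in this diff:

gpr_slice msg = gpr_empty_slice();              /* no reason known yet */

/* replace the message when a reason becomes available */
gpr_slice_unref(msg);
msg = gpr_slice_from_copied_string("Connection dropped: received http1.x response");

/* callers pass NULL when there is nothing to report */
gpr_slice *optional = GPR_SLICE_IS_EMPTY(msg) ? NULL : &msg;
(void)optional;

gpr_slice_unref(msg);                           /* release at destruction time */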
static void end_all_the_calls(grpc_exec_ctx *exec_ctx,
@@ -1643,24 +1661,64 @@ static void reading_action_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_move_into(&t->new_stream_map,
&t->parsing_stream_map);
grpc_chttp2_prepare_to_read(transport_global, transport_parsing);
- grpc_exec_ctx_push(exec_ctx, &t->parsing_action, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &t->parsing_action, error, NULL);
} else {
post_reading_action_locked(exec_ctx, t, s_unused, arg);
}
}
+static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_http_parser parser;
+ size_t i = 0;
+ grpc_error *error = GRPC_ERROR_NONE;
+ grpc_http_response response;
+ memset(&response, 0, sizeof(response));
+
+ grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
+
+ grpc_error *parse_error = GRPC_ERROR_NONE;
+ for (; i < t->read_buffer.count && parse_error == GRPC_ERROR_NONE; i++) {
+ parse_error = grpc_http_parser_parse(&parser, t->read_buffer.slices[i]);
+ }
+ if (parse_error == GRPC_ERROR_NONE &&
+ (parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) {
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE("Trying to connect an http1.x server"),
+ GRPC_ERROR_INT_HTTP_STATUS, response.status);
+ }
+ GRPC_ERROR_UNREF(parse_error);
+
+ grpc_http_parser_destroy(&parser);
+ grpc_http_response_destroy(&response);
+ return error;
+}
+
static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
GPR_TIMER_BEGIN("reading_action.parse", 0);
size_t i = 0;
- grpc_error *errors[2] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE};
+ grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
+ GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
errors[1] = grpc_chttp2_perform_read(exec_ctx, &t->parsing,
t->read_buffer.slices[i]);
};
+ if (i != t->read_buffer.count) {
+ gpr_slice_unref(t->optional_drop_message);
+ errors[2] = try_http_parsing(exec_ctx, t);
+ if (errors[2] != GRPC_ERROR_NONE) {
+ t->optional_drop_message = gpr_slice_from_copied_string(
+ "Connection dropped: received http1.x response");
+ } else {
+ t->optional_drop_message = gpr_slice_from_copied_string(
+ "Connection dropped: received unparseable response");
+ }
+ }
grpc_error *err =
- errors[0] == GRPC_ERROR_NONE && errors[1] == GRPC_ERROR_NONE
+ errors[0] == GRPC_ERROR_NONE && errors[1] == GRPC_ERROR_NONE &&
+ errors[2] == GRPC_ERROR_NONE
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
GPR_ARRAY_SIZE(errors));
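The errors[] pattern above is how this codebase aggregates several grpc_error objects into one: collect each candidate, and if any is set, build a single referencing error over the whole array. A minimal sketch of the same pattern; step_one and step_two are hypothetical helpers returning grpc_error*:

grpc_error *errors[2] = {GRPC_ERROR_NONE, GRPC_ERROR_NONE};
errors[0] = step_one();
errors[1] = step_two();
grpc_error *combined =
    errors[0] == GRPC_ERROR_NONE && errors[1] == GRPC_ERROR_NONE
        ? GRPC_ERROR_NONE
        : GRPC_ERROR_CREATE_REFERENCING("operation failed", errors,
                                        GPR_ARRAY_SIZE(errors));
/* the referencing error holds its own refs; the individual errors still
   need their own GRPC_ERROR_UNREF once combined */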
@@ -1794,6 +1852,13 @@ static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
add_to_pollset_locked, pollset, 0);
}
+static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset_set *pollset_set) {
+ grpc_chttp2_run_with_global_lock(exec_ctx, (grpc_chttp2_transport *)gt,
+ (grpc_chttp2_stream *)gs,
+ add_to_pollset_set_locked, pollset_set, 0);
+}
+
/*******************************************************************************
* BYTE STREAM
*/
@@ -1870,10 +1935,10 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
}
if (bs->slices.count > 0) {
*arg->slice = gpr_slice_buffer_take_first(&bs->slices);
- grpc_exec_ctx_push(exec_ctx, arg->on_complete, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, arg->on_complete, GRPC_ERROR_NONE, NULL);
} else if (bs->error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_push(exec_ctx, arg->on_complete, GRPC_ERROR_REF(bs->error),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, arg->on_complete, GRPC_ERROR_REF(bs->error),
+ NULL);
} else {
bs->on_next = arg->on_complete;
bs->next = arg->slice;
@@ -1930,7 +1995,7 @@ static void incoming_byte_stream_push_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs = arg->byte_stream;
if (bs->on_next != NULL) {
*bs->next = arg->slice;
- grpc_exec_ctx_push(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
bs->on_next = NULL;
} else {
gpr_slice_buffer_add(&bs->slices, arg->slice);
@@ -1968,7 +2033,7 @@ static void incoming_byte_stream_finished_failed_locked(
grpc_chttp2_incoming_byte_stream *bs = a->bs;
grpc_error *error = a->error;
gpr_free(a);
- grpc_exec_ctx_push(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
bs->on_next = NULL;
GRPC_ERROR_UNREF(bs->error);
bs->error = error;
@@ -2045,10 +2110,13 @@ static char *format_flowctl_context_var(const char *context, const char *var,
int64_t val, uint32_t id,
char **scope) {
char *underscore_pos;
+ char *buf;
char *result;
if (context == NULL) {
*scope = NULL;
- gpr_asprintf(&result, "%s(%lld)", var, val);
+ gpr_asprintf(&buf, "%s(%" PRId64 ")", var, val);
+ result = gpr_leftpad(buf, ' ', 40);
+ gpr_free(buf);
return result;
}
underscore_pos = strchr(context, '_');
@@ -2059,7 +2127,9 @@ static char *format_flowctl_context_var(const char *context, const char *var,
gpr_asprintf(scope, "%s[%d]", tmp, id);
gpr_free(tmp);
}
- gpr_asprintf(&result, "%s.%s(%lld)", underscore_pos + 1, var, val);
+ gpr_asprintf(&buf, "%s.%s(%" PRId64 ")", underscore_pos + 1, var, val);
+ result = gpr_leftpad(buf, ' ', 40);
+ gpr_free(buf);
return result;
}
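The flow-control trace formatting now pads columns with gpr_leftpad instead of the non-portable "% 40s"-style width specifiers. As used above, gpr_leftpad returns a freshly allocated, space-padded copy that the caller must gpr_free. A short sketch of the pattern:

char *raw;
gpr_asprintf(&raw, "%s(%" PRId64 ")", "window", (int64_t)65535);
char *padded = gpr_leftpad(raw, ' ', 40); /* left-pad to a 40-character column */
gpr_free(raw);
/* ... embed padded in the log line ... */
gpr_free(padded);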
@@ -2080,6 +2150,8 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
uint32_t stream_id, int64_t val1, int64_t val2) {
char *scope1;
char *scope2;
+ char *tmp_phase;
+ char *tmp_scope1;
char *label1 =
format_flowctl_context_var(context1, var1, val1, stream_id, &scope1);
char *label2 =
@@ -2087,14 +2159,18 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
char *clisvr = is_client ? "client" : "server";
char *prefix;
- gpr_asprintf(&prefix, "FLOW % 8s: %s % 11s ", phase, clisvr, scope1);
+ tmp_phase = gpr_leftpad(phase, ' ', 8);
+ tmp_scope1 = gpr_leftpad(scope1, ' ', 11);
+ gpr_asprintf(&prefix, "FLOW %s: %s %s ", phase, clisvr, scope1);
+ gpr_free(tmp_phase);
+ gpr_free(tmp_scope1);
switch (op) {
case GRPC_CHTTP2_FLOWCTL_MOVE:
GPR_ASSERT(samestr(scope1, scope2));
if (val2 != 0) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "%sMOVE % 40s <- % 40s giving %d", prefix, label1, label2,
+ "%sMOVE %s <- %s giving %" PRId64, prefix, label1, label2,
val1 + val2);
}
break;
@@ -2102,7 +2178,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
GPR_ASSERT(val2 >= 0);
if (val2 != 0) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "%sCREDIT % 40s by % 40s giving %d", prefix, label1, label2,
+ "%sCREDIT %s by %s giving %" PRId64, prefix, label1, label2,
val1 + val2);
}
break;
@@ -2110,7 +2186,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
GPR_ASSERT(val2 >= 0);
if (val2 != 0) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "%sDEBIT % 40s by % 40s giving %d", prefix, label1, label2,
+ "%sDEBIT %s by %s giving %" PRId64, prefix, label1, label2,
val1 - val2);
}
break;
@@ -2135,6 +2211,7 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
"chttp2",
init_stream,
set_pollset,
+ set_pollset_set,
perform_stream_op,
perform_transport_op,
destroy_stream,
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 7e5d58380e..54e72ebd24 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -383,6 +383,9 @@ struct grpc_chttp2_transport {
/** Transport op to be applied post-parsing */
grpc_transport_op *post_parsing_op;
+
+  /** Message explaining why the connection is being dropped */
+ gpr_slice optional_drop_message;
};
typedef struct {
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 72b3131d7b..785134091f 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -559,7 +559,7 @@ static grpc_error *update_incoming_window(
uint32_t incoming_frame_size = transport_parsing->incoming_frame_size;
if (incoming_frame_size > transport_parsing->incoming_window) {
char *msg;
- gpr_asprintf(&msg, "frame of size %d overflows incoming window of %d",
+ gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
transport_parsing->incoming_frame_size,
transport_parsing->incoming_window);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@@ -569,7 +569,7 @@ static grpc_error *update_incoming_window(
if (incoming_frame_size > stream_parsing->incoming_window) {
char *msg;
- gpr_asprintf(&msg, "frame of size %d overflows incoming window of %d",
+ gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
transport_parsing->incoming_frame_size,
stream_parsing->incoming_window);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@@ -678,7 +678,8 @@ static void on_initial_header(void *tp, grpc_mdelem *md) {
if (new_size > metadata_size_limit) {
if (!stream_parsing->exceeded_metadata_size) {
gpr_log(GPR_DEBUG,
- "received initial metadata size exceeds limit (%lu vs. %lu)",
+ "received initial metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
new_size, metadata_size_limit);
stream_parsing->seen_error = true;
stream_parsing->exceeded_metadata_size = true;
@@ -724,7 +725,8 @@ static void on_trailing_header(void *tp, grpc_mdelem *md) {
if (new_size > metadata_size_limit) {
if (!stream_parsing->exceeded_metadata_size) {
gpr_log(GPR_DEBUG,
- "received trailing metadata size exceeds limit (%lu vs. %lu)",
+ "received trailing metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
new_size, metadata_size_limit);
stream_parsing->seen_error = true;
stream_parsing->exceeded_metadata_size = true;
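These logging changes swap %lu / %d for the inttypes.h macros so that size_t and int64_t values format correctly on both 32- and 64-bit platforms (with %lu, a 64-bit value can be truncated or misread where long is 32 bits). A small sketch of the convention used throughout this diff:

#include <inttypes.h>

size_t new_size = 8192;
int64_t window = -1;
gpr_log(GPR_DEBUG, "size=%" PRIuPTR " window=%" PRId64, new_size, window);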
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index ca56debc84..add7678182 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -187,8 +187,8 @@ void grpc_chttp2_perform_writes(
grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
&transport_writing->done_cb);
} else {
- grpc_exec_ctx_push(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
+ NULL);
}
}
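The grpc_exec_ctx_push to grpc_exec_ctx_sched rename appears throughout this merge; the call shape is unchanged (closure, error, and a NULL trailing argument). As a reminder of the surrounding API, a sketch of scheduling a closure on a locally created exec_ctx, using only calls that appear elsewhere in this diff; on_done_cb is a hypothetical callback:

static void on_done_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* runs when the exec_ctx flushes the closure */
}

void schedule_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure done;
  grpc_closure_init(&done, on_done_cb, NULL);
  grpc_exec_ctx_sched(&exec_ctx, &done, GRPC_ERROR_NONE, NULL);
  grpc_exec_ctx_finish(&exec_ctx); /* drains scheduled closures */
}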
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index ea0f2bcba4..25d8aca250 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -152,14 +152,18 @@ static void next_recv_step(stream_obj *s, enum e_caller caller);
static void set_pollset_do_nothing(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_pollset *pollset) {}
+static void set_pollset_set_do_nothing(grpc_exec_ctx *exec_ctx,
+ grpc_transport *gt, grpc_stream *gs,
+ grpc_pollset_set *pollset_set) {}
+
static void enqueue_callbacks(grpc_closure *callback_list[]) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (callback_list[0]) {
- grpc_exec_ctx_push(&exec_ctx, callback_list[0], GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(&exec_ctx, callback_list[0], GRPC_ERROR_NONE, NULL);
callback_list[0] = NULL;
}
if (callback_list[1]) {
- grpc_exec_ctx_push(&exec_ctx, callback_list[1], GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(&exec_ctx, callback_list[1], GRPC_ERROR_NONE, NULL);
callback_list[1] = NULL;
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -218,8 +222,11 @@ static void on_write_completed(cronet_bidirectional_stream *stream,
static void process_recv_message(stream_obj *s, const uint8_t *recv_data) {
gpr_slice read_data_slice = gpr_slice_malloc((uint32_t)s->total_read_bytes);
uint8_t *dst_p = GPR_SLICE_START_PTR(read_data_slice);
- memcpy(dst_p, recv_data, (size_t)s->total_read_bytes);
- gpr_slice_buffer_add(&s->read_slice_buffer, read_data_slice);
+ if (s->total_read_bytes > 0) {
+ // Only copy if there is a non-zero number of bytes
+ memcpy(dst_p, recv_data, (size_t)s->total_read_bytes);
+ gpr_slice_buffer_add(&s->read_slice_buffer, read_data_slice);
+ }
grpc_slice_buffer_stream_init(&s->sbs, &s->read_slice_buffer, 0);
*s->recv_message = (grpc_byte_buffer *)&s->sbs;
}
@@ -347,8 +354,17 @@ static void next_recv_step(stream_obj *s, enum e_caller caller) {
if (grpc_cronet_trace) {
gpr_log(GPR_DEBUG, "R: cronet_bidirectional_stream_read()");
}
- cronet_bidirectional_stream_read(s->cbs, (char *)s->read_buffer,
- s->remaining_read_bytes);
+ if (s->remaining_read_bytes > 0) {
+ cronet_bidirectional_stream_read(s->cbs, (char *)s->read_buffer,
+ s->remaining_read_bytes);
+ } else {
+ // Calling the closing callback directly since this is a 0 byte read
+ // for an empty message.
+ process_recv_message(s, NULL);
+ enqueue_callbacks(s->callback_list[CB_RECV_MESSAGE]);
+ invoke_closing_callback(s);
+ set_recv_state(s, CRONET_RECV_CLOSED);
+ }
}
}
break;
@@ -634,7 +650,13 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
}
}
-const grpc_transport_vtable grpc_cronet_vtable = {
- sizeof(stream_obj), "cronet_http", init_stream,
- set_pollset_do_nothing, perform_stream_op, NULL,
- destroy_stream, destroy_transport, NULL};
+const grpc_transport_vtable grpc_cronet_vtable = {sizeof(stream_obj),
+ "cronet_http",
+ init_stream,
+ set_pollset_do_nothing,
+ set_pollset_set_do_nothing,
+ perform_stream_op,
+ NULL,
+ destroy_stream,
+ destroy_transport,
+ NULL};
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 569be4dc28..79ceeb66b3 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -35,6 +35,7 @@
#include <grpc/grpc.h>
#include "src/core/lib/support/string.h"
+#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -148,6 +149,7 @@ grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
void grpc_channel_args_destroy(grpc_channel_args *a) {
size_t i;
+ if (!a) return;
for (i = 0; i < a->num_args; i++) {
switch (a->args[i].type) {
case GRPC_ARG_STRING:
@@ -181,6 +183,7 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm) {
+ GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
@@ -200,7 +203,8 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
!strcmp(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
a->args[i].key)) {
*states_arg = &a->args[i].value.integer;
- return 1; /* GPR_TRUE */
+ **states_arg |= 0x1; /* forcefully enable support for no compression */
+ return 1;
}
}
}
@@ -209,15 +213,23 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
grpc_channel_args **a, grpc_compression_algorithm algorithm, int state) {
- int *states_arg;
+ int *states_arg = NULL;
grpc_channel_args *result = *a;
const int states_arg_found =
find_compression_algorithm_states_bitset(*a, &states_arg);
- if (states_arg_found) {
+ if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
+ state == 0) {
+ char *algo_name = NULL;
+ GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
+ gpr_log(GPR_ERROR,
+ "Tried to disable default compression algorithm '%s'. The "
+ "operation has been ignored.",
+ algo_name);
+ } else if (states_arg_found) {
if (state != 0) {
GPR_BITSET((unsigned *)states_arg, algorithm);
- } else {
+ } else if (algorithm != GRPC_COMPRESS_NONE) {
GPR_BITCLEAR((unsigned *)states_arg, algorithm);
}
} else {
@@ -229,7 +241,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
- } else {
+ } else if (algorithm != GRPC_COMPRESS_NONE) {
GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
}
result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
@@ -239,11 +251,11 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
return result;
}
-int grpc_channel_args_compression_algorithm_get_states(
+uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a) {
int *states_arg;
if (find_compression_algorithm_states_bitset(a, &states_arg)) {
- return *states_arg;
+ return (uint32_t)*states_arg;
} else {
return (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; /* All algs. enabled */
}
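The enabled-algorithms channel arg is an integer bitset indexed by grpc_compression_algorithm, and the hunks above make two invariants explicit: bit 0 (GRPC_COMPRESS_NONE, i.e. identity) is always forced on, and the channel's default algorithm cannot be disabled. A sketch of the underlying bit operations, using the same GPR_BITSET / GPR_BITCLEAR / GPR_BITGET macros seen above:

/* start with every algorithm enabled */
unsigned states = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;

GPR_BITCLEAR(&states, GRPC_COMPRESS_DEFLATE); /* disable one algorithm */
GPR_BITSET(&states, GRPC_COMPRESS_DEFLATE);   /* re-enable it */
states |= 0x1;                                /* identity stays enabled */

if (GPR_BITGET(states, GRPC_COMPRESS_GZIP)) {
  /* gzip is permitted on this channel */
}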
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 23c7b7b897..653d04f427 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -81,7 +81,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
*
* The i-th bit of the returned bitset corresponds to the i-th entry in the
* grpc_compression_algorithm enum. */
-int grpc_channel_args_compression_algorithm_get_states(
+uint32_t grpc_channel_args_compression_algorithm_get_states(
const grpc_channel_args *a);
int grpc_channel_args_compare(const grpc_channel_args *a,
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index ad182d1f69..bbba85d80b 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -106,6 +106,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
const grpc_channel_filter **filters,
size_t filter_count,
const grpc_channel_args *channel_args,
+ grpc_transport *optional_transport,
const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
@@ -127,6 +128,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
for (i = 0; i < filter_count; i++) {
args.channel_stack = stack;
args.channel_args = channel_args;
+ args.optional_transport = optional_transport;
args.is_first = i == 0;
args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
@@ -189,9 +191,9 @@ void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_call_stack *call_stack,
- grpc_pollset *pollset) {
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_polling_entity *pollent) {
size_t count = call_stack->count;
grpc_call_element *call_elems;
char *user_data;
@@ -203,17 +205,19 @@ void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
/* init per-filter data */
for (i = 0; i < count; i++) {
- call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset);
+ call_elems[i].filter->set_pollset_or_pollset_set(exec_ctx, &call_elems[i],
+ pollent);
user_data +=
ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
}
}
-void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_pollset *pollset) {}
+void grpc_call_stack_ignore_set_pollset_or_pollset_set(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_polling_entity *pollent) {}
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
+ const grpc_call_stats *call_stats,
void *and_free_memory) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
@@ -221,7 +225,7 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- elems[i].filter->destroy_call_elem(exec_ctx, &elems[i],
+ elems[i].filter->destroy_call_elem(exec_ctx, &elems[i], call_stats,
i == count - 1 ? and_free_memory : NULL);
}
}
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 36c17cb467..41dd4a0d8a 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -45,7 +45,10 @@
#include <grpc/grpc.h>
#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/transport.h"
typedef struct grpc_channel_element grpc_channel_element;
@@ -57,6 +60,8 @@ typedef struct grpc_call_stack grpc_call_stack;
typedef struct {
grpc_channel_stack *channel_stack;
const grpc_channel_args *channel_args;
+ /** Transport, iff it is known */
+ grpc_transport *optional_transport;
int is_first;
int is_last;
} grpc_channel_element_args;
@@ -67,6 +72,12 @@ typedef struct {
grpc_call_context_element *context;
} grpc_call_element_args;
+typedef struct {
+ grpc_transport_stream_stats transport_stream_stats;
+  gpr_timespec latency; /* From call creation to enqueuing of the received status */
+ grpc_status_code final_status;
+} grpc_call_stats;
+
/* Channel filters specify:
1. the amount of memory needed in the channel & call (via the sizeof_XXX
members)
@@ -101,14 +112,16 @@ typedef struct {
argument. */
void (*init_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args);
- void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_pollset *pollset);
+ void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_polling_entity *pollent);
/* Destroy per call data.
The filter does not need to do any chaining.
The bottom filter of a stack will be passed a non-NULL pointer to
\a and_free_memory that should be passed to gpr_free when destruction
is complete. */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_stats *stats,
void *and_free_memory);
/* sizeof(per channel data) */
@@ -187,6 +200,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
size_t filter_count, const grpc_channel_args *args,
+ grpc_transport *optional_transport,
const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
@@ -201,10 +215,11 @@ void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
grpc_call_context_element *context,
const void *transport_server_data,
grpc_call_stack *call_stack);
-/* Set a pollset for a call stack: must occur before the first op is started */
-void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_call_stack *call_stack,
- grpc_pollset *pollset);
+/* Set a pollset or a pollset_set for a call stack: must occur before the first
+ * op is started */
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_call_stack *call_stack,
+ grpc_polling_entity *pollent);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define GRPC_CALL_STACK_REF(call_stack, reason) \
@@ -228,13 +243,14 @@ void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
/* Destroy a call stack */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
+ const grpc_call_stats *call_stats,
void *and_free_memory);
-/* Ignore set pollset - used by filters to implement the set_pollset method
- if they don't care about pollsets at all. Does nothing. */
-void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_pollset *pollset);
+/* Ignore set pollset{_set} - used by filters if they don't care about pollsets
+ * at all. Does nothing. */
+void grpc_call_stack_ignore_set_pollset_or_pollset_set(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_polling_entity *pollent);
/* Call the next operation in a call stack */
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op *op);
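For filter authors, the practical impact of this header change is two signature updates: set_pollset becomes set_pollset_or_pollset_set taking a grpc_polling_entity, and destroy_call_elem gains a const grpc_call_stats pointer. A sketch of a filter callback under the new shape (the filter itself is illustrative); filters that do not care about polling can point the corresponding vtable slot at grpc_call_stack_ignore_set_pollset_or_pollset_set, as the filters later in this diff do:

static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                              const grpc_call_stats *stats, void *ignored) {
  /* nothing per-call to clean up in this example filter; stats are available
     here if the filter wants to record latency or final status */
}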
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c
index a8646c9565..eda4968f48 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.c
@@ -257,8 +257,8 @@ void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx,
// and initialize it
grpc_channel_stack_init(exec_ctx, initial_refs, destroy,
destroy_arg == NULL ? result : destroy_arg, filters,
- num_filters, builder->args, builder->name,
- channel_stack);
+ num_filters, builder->args, builder->transport,
+ builder->name, channel_stack);
// run post-initialization functions
i = 0;
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 27a87db0c2..32ebe53ee6 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -73,8 +73,8 @@ typedef struct call_data {
typedef struct channel_data {
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
- /** Compression options for the channel */
- grpc_compression_options compression_options;
+ /** Bitset of enabled algorithms */
+ uint32_t enabled_algorithms_bitset;
/** Supported compression algorithms */
uint32_t supported_compression_algorithms;
} channel_data;
@@ -96,9 +96,8 @@ static grpc_mdelem *compression_md_filter(void *user_data, grpc_mdelem *md) {
md_c_str);
calld->compression_algorithm = GRPC_COMPRESS_NONE;
}
- if (grpc_compression_options_is_algorithm_enabled(
- &channeld->compression_options, calld->compression_algorithm) ==
- 0) {
+ if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
+ calld->compression_algorithm)) {
gpr_log(GPR_ERROR,
"Invalid compression algorithm: '%s' (previously disabled). "
"Ignoring.",
@@ -178,8 +177,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
- gpr_log(GPR_DEBUG,
- "Compressed[%s] %d bytes vs. %d bytes (%.2f%% savings)",
+ gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
+ " bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}
gpr_slice_buffer_swap(&calld->slices, &tmp);
@@ -189,10 +188,10 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
char *algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
- gpr_log(
- GPR_DEBUG,
- "Algorithm '%s' enabled but decided not to compress. Input size: %d",
- algo_name, calld->slices.length);
+ gpr_log(GPR_DEBUG,
+ "Algorithm '%s' enabled but decided not to compress. Input size: "
+ "%" PRIuPTR,
+ algo_name, calld->slices.length);
}
}
@@ -271,7 +270,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {
+ const grpc_call_stats *stats, void *ignored) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
gpr_slice_buffer_destroy(&calld->slices);
@@ -282,32 +281,26 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
- grpc_compression_algorithm algo_idx;
- grpc_compression_options_init(&channeld->compression_options);
- channeld->compression_options.enabled_algorithms_bitset =
- (uint32_t)grpc_channel_args_compression_algorithm_get_states(
- args->channel_args);
+ channeld->enabled_algorithms_bitset =
+ grpc_channel_args_compression_algorithm_get_states(args->channel_args);
channeld->default_compression_algorithm =
grpc_channel_args_get_compression_algorithm(args->channel_args);
/* Make sure the default isn't disabled. */
- if (!grpc_compression_options_is_algorithm_enabled(
- &channeld->compression_options,
- channeld->default_compression_algorithm)) {
+ if (!GPR_BITGET(channeld->enabled_algorithms_bitset,
+ channeld->default_compression_algorithm)) {
gpr_log(GPR_DEBUG,
"compression algorithm %d not enabled: switching to none",
channeld->default_compression_algorithm);
channeld->default_compression_algorithm = GRPC_COMPRESS_NONE;
}
- channeld->compression_options.default_compression_algorithm =
- channeld->default_compression_algorithm;
- channeld->supported_compression_algorithms = 0;
- for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
+ channeld->supported_compression_algorithms = 1; /* always support identity */
+ for (grpc_compression_algorithm algo_idx = 1;
+ algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
/* skip disabled algorithms */
- if (grpc_compression_options_is_algorithm_enabled(
- &channeld->compression_options, algo_idx) == 0) {
+ if (!GPR_BITGET(channeld->enabled_algorithms_bitset, algo_idx)) {
continue;
}
channeld->supported_compression_algorithms |= 1u << algo_idx;
@@ -325,7 +318,7 @@ const grpc_channel_filter grpc_compress_filter = {
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/lib/channel/compress_filter.h b/src/core/lib/channel/compress_filter.h
index 0ce5d08837..e4a2a829d5 100644
--- a/src/core/lib/channel/compress_filter.h
+++ b/src/core/lib/channel/compress_filter.h
@@ -34,9 +34,9 @@
#ifndef GRPC_CORE_LIB_CHANNEL_COMPRESS_FILTER_H
#define GRPC_CORE_LIB_CHANNEL_COMPRESS_FILTER_H
-#include "src/core/lib/channel/channel_stack.h"
+#include <grpc/impl/codegen/compression_types.h>
-#define GRPC_COMPRESS_REQUEST_ALGORITHM_KEY "grpc-internal-encoding-request"
+#include "src/core/lib/channel/channel_stack.h"
extern int grpc_compression_trace;
@@ -48,7 +48,7 @@ extern int grpc_compression_trace;
* - Channel configuration, as established at channel creation time.
* - The metadata accompanying the outgoing data to be compressed. This is
* taken as a request only. We may choose not to honor it. The metadata key
- * is given by \a GRPC_COMPRESS_REQUEST_ALGORITHM_KEY.
+ * is given by \a GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY.
*
* Compression can be disabled for concrete messages (for instance in order to
* prevent CRIME/BEAST type attacks) by having the GRPC_WRITE_NO_COMPRESS set in
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 68a3a7d6fd..0a7d27a1dc 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -93,16 +93,18 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(r == 0);
}
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_pollset *pollset) {
+static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
- grpc_transport_set_pollset(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollset);
+ grpc_transport_set_pops(exec_ctx, chand->transport,
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_stats *stats,
void *and_free_memory) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
@@ -137,7 +139,7 @@ static const grpc_channel_filter connected_channel_filter = {
con_start_transport_op,
sizeof(call_data),
init_call_elem,
- set_pollset,
+ set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index ced3d75cd2..ab6c6c9ef0 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -38,6 +38,10 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/transport_impl.h"
+
+#define EXPECTED_CONTENT_TYPE "application/grpc"
+#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
grpc_linked_mdelem method;
@@ -74,7 +78,24 @@ static grpc_mdelem *client_recv_filter(void *user_data, grpc_mdelem *md) {
} else if (md->key == GRPC_MDSTR_STATUS) {
grpc_call_element_send_cancel(a->exec_ctx, a->elem);
return NULL;
+ } else if (md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
+ return NULL;
} else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
+ const char *value_str = grpc_mdstr_as_c_string(md->value);
+ if (strncmp(value_str, EXPECTED_CONTENT_TYPE,
+ EXPECTED_CONTENT_TYPE_LENGTH) == 0 &&
+ (value_str[EXPECTED_CONTENT_TYPE_LENGTH] == '+' ||
+ value_str[EXPECTED_CONTENT_TYPE_LENGTH] == ';')) {
+ /* Although the C implementation doesn't (currently) generate them,
+ any custom +-suffix is explicitly valid. */
+ /* TODO(klempner): We should consider preallocating common values such
+ as +proto or +json, or at least stashing them if we see them. */
+ /* TODO(klempner): Should we be surfacing this to application code? */
+ } else {
+ /* TODO(klempner): We're currently allowing this, but we shouldn't
+ see it without a proxy so log for now. */
+ gpr_log(GPR_INFO, "Unexpected content-type '%s'", value_str);
+ }
return NULL;
}
return md;
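Together with the exact-match mdelem check above it, this makes the client accept content-types that are exactly "application/grpc" or that prefix followed by '+' (e.g. application/grpc+proto) or ';'; anything else is only logged. A standalone sketch of the predicate; is_valid_grpc_content_type is a hypothetical helper name:

#include <stdbool.h>
#include <string.h>

#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH (sizeof(EXPECTED_CONTENT_TYPE) - 1)

static bool is_valid_grpc_content_type(const char *value_str) {
  if (strncmp(value_str, EXPECTED_CONTENT_TYPE,
              EXPECTED_CONTENT_TYPE_LENGTH) != 0) {
    return false;
  }
  char c = value_str[EXPECTED_CONTENT_TYPE_LENGTH];
  /* exact match, or a '+suffix' / ';parameters' continuation */
  return c == '\0' || c == '+' || c == ';';
}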
@@ -157,7 +178,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {}
+ const grpc_call_stats *stats, void *ignored) {}
static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
unsigned i;
@@ -180,7 +201,8 @@ static grpc_mdelem *scheme_from_args(const grpc_channel_args *args) {
return GRPC_MDELEM_SCHEME_HTTP;
}
-static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args) {
+static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args,
+ const char *transport_name) {
gpr_strvec v;
size_t i;
int is_first = 1;
@@ -202,8 +224,8 @@ static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args) {
}
}
- gpr_asprintf(&tmp, "%sgrpc-c/%s (%s)", is_first ? "" : " ",
- grpc_version_string(), GPR_PLATFORM_STRING);
+ gpr_asprintf(&tmp, "%sgrpc-c/%s (%s; %s)", is_first ? "" : " ",
+ grpc_version_string(), GPR_PLATFORM_STRING, transport_name);
is_first = 0;
gpr_strvec_add(&v, tmp);
@@ -234,9 +256,12 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(!args->is_last);
+ GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
chand->user_agent = grpc_mdelem_from_metadata_strings(
- GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args));
+ GRPC_MDSTR_USER_AGENT,
+ user_agent_from_args(args->channel_args,
+ args->optional_transport->vtable->name));
}
/* Destructor for channel data */
@@ -251,7 +276,7 @@ const grpc_channel_filter grpc_http_client_filter = {
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/lib/channel/http_client_filter.h b/src/core/lib/channel/http_client_filter.h
index a884b36318..47081175ea 100644
--- a/src/core/lib/channel/http_client_filter.h
+++ b/src/core/lib/channel/http_client_filter.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright 2015, Google Inc.
* All rights reserved.
*
@@ -39,6 +38,7 @@
/* Processes metadata on the client side for HTTP2 transports */
extern const grpc_channel_filter grpc_http_client_filter;
+/* Channel arg to override the http2 :scheme header */
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
#endif /* GRPC_CORE_LIB_CHANNEL_HTTP_CLIENT_FILTER_H */
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index 876eaddc1e..d0beebd817 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -108,7 +108,7 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
- gpr_log(GPR_INFO, "Unexpected content-type %s", value_str);
+ gpr_log(GPR_INFO, "Unexpected content-type '%s'", value_str);
}
return NULL;
} else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
@@ -235,7 +235,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {}
+ const grpc_call_stats *stats, void *ignored) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -253,7 +253,7 @@ const grpc_channel_filter grpc_http_server_filter = {
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/lib/compression/compression_algorithm.c b/src/core/lib/compression/compression.c
index 820871d579..54efb5e855 100644
--- a/src/core/lib/compression/compression_algorithm.c
+++ b/src/core/lib/compression/compression.c
@@ -125,6 +125,28 @@ grpc_mdelem *grpc_compression_encoding_mdelem(
return NULL;
}
+void grpc_compression_options_init(grpc_compression_options *opts) {
+ memset(opts, 0, sizeof(*opts));
+ /* all enabled by default */
+ opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+}
+
+void grpc_compression_options_enable_algorithm(
+ grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm);
+}
+
+void grpc_compression_options_disable_algorithm(
+ grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm);
+}
+
+int grpc_compression_options_is_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) {
+ return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
+}
+
/* TODO(dgq): Add the ability to specify parameters to the individual
* compression algorithms */
grpc_compression_algorithm grpc_compression_algorithm_for_level(
@@ -180,25 +202,3 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
abort();
};
}
-
-void grpc_compression_options_init(grpc_compression_options *opts) {
- opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
- opts->default_compression_algorithm = GRPC_COMPRESS_NONE;
-}
-
-void grpc_compression_options_enable_algorithm(
- grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
- GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm);
-}
-
-void grpc_compression_options_disable_algorithm(
- grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
- GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm);
-}
-
-int grpc_compression_options_is_algorithm_enabled(
- const grpc_compression_options *opts,
- grpc_compression_algorithm algorithm) {
- if (algorithm >= GRPC_COMPRESS_ALGORITHMS_COUNT) return 0;
- return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
-}
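The relocated grpc_compression_options helpers are the public counterpart of the channel-args bitset: initialize with everything enabled, then toggle individual algorithms. Note that the moved is_algorithm_enabled no longer bounds-checks its argument the way the removed version did, so callers should pass a valid enum value. A short usage sketch:

grpc_compression_options opts;
grpc_compression_options_init(&opts); /* all algorithms enabled */

grpc_compression_options_disable_algorithm(&opts, GRPC_COMPRESS_GZIP);
if (!grpc_compression_options_is_algorithm_enabled(&opts, GRPC_COMPRESS_GZIP)) {
  /* gzip will not be offered */
}
grpc_compression_options_enable_algorithm(&opts, GRPC_COMPRESS_GZIP);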
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 4bda734bfa..18135bcb58 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -63,7 +63,7 @@ typedef struct {
const grpc_httpcli_handshaker *handshaker;
grpc_closure *on_done;
grpc_httpcli_context *context;
- grpc_pollset *pollset;
+ grpc_polling_entity *pollent;
grpc_iomgr_object iomgr_obj;
gpr_slice_buffer incoming;
gpr_slice_buffer outgoing;
@@ -78,6 +78,7 @@ static grpc_httpcli_post_override g_post_override = NULL;
static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint, const char *host,
+ gpr_timespec deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_endpoint *endpoint)) {
@@ -100,9 +101,9 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
grpc_error *error) {
- grpc_pollset_set_del_pollset(exec_ctx, req->context->pollset_set,
- req->pollset);
- grpc_exec_ctx_push(exec_ctx, req->on_done, error, NULL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
+ req->context->pollset_set);
+ grpc_exec_ctx_sched(exec_ctx, req->on_done, error, NULL);
grpc_http_parser_destroy(&req->parser);
if (req->addresses != NULL) {
grpc_resolved_addresses_destroy(req->addresses);
@@ -206,7 +207,7 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
req->handshaker->handshake(
exec_ctx, req, req->ep,
req->ssl_host_override ? req->ssl_host_override : req->host,
- on_handshake_done);
+ req->deadline, on_handshake_done);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
@@ -240,7 +241,7 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
@@ -254,7 +255,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->handshaker =
request->handshaker ? request->handshaker : &grpc_httpcli_plaintext;
req->context = context;
- req->pollset = pollset;
+ req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
grpc_closure_init(&req->on_read, on_read, req);
grpc_closure_init(&req->done_write, done_write, req);
@@ -264,14 +265,15 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->host = gpr_strdup(request->host);
req->ssl_host_override = gpr_strdup(request->ssl_host_override);
- grpc_pollset_set_add_pollset(exec_ctx, req->context->pollset_set,
- req->pollset);
+ GPR_ASSERT(pollent);
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
+ req->context->pollset_set);
grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
grpc_closure_create(on_resolved, req), &req->addresses);
}
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
@@ -281,14 +283,14 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
return;
}
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
- internal_request_begin(exec_ctx, context, pollset, request, deadline, on_done,
+ internal_request_begin(exec_ctx, context, pollent, request, deadline, on_done,
response, name,
grpc_httpcli_format_get_request(request));
gpr_free(name);
}
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_done,
@@ -301,7 +303,7 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
}
gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
internal_request_begin(
- exec_ctx, context, pollset, request, deadline, on_done, response, name,
+ exec_ctx, context, pollent, request, deadline, on_done, response, name,
grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
}
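Callers of the HTTP client now hand over a grpc_polling_entity rather than a bare pollset; the entity is added to the context's pollset_set for the lifetime of the request and removed again in finish(). A hedged sketch of issuing a GET under the new signature, given an exec_ctx and pollset already in scope; grpc_polling_entity_create_from_pollset and grpc_httpcli_context_init are assumed helpers not shown in this diff, and on_response is a hypothetical closure callback:

grpc_httpcli_context context;
grpc_httpcli_context_init(&context);                  /* assumed initializer */

grpc_polling_entity pollent =
    grpc_polling_entity_create_from_pollset(pollset); /* assumed constructor */

grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = "example.org";
request.http.path = "/status";

grpc_httpcli_response response;
memset(&response, 0, sizeof(response));

grpc_httpcli_get(&exec_ctx, &context, &pollent, &request,
                 gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                              gpr_time_from_seconds(10, GPR_TIMESPAN)),
                 grpc_closure_create(on_response, &response), &response);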
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 2d57864a1f..662e176f4c 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -41,6 +41,7 @@
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
/* User agent this library reports */
@@ -56,7 +57,7 @@ typedef struct grpc_httpcli_context {
typedef struct {
const char *default_port;
void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
- const char *host,
+ const char *host, gpr_timespec deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint));
} grpc_httpcli_handshaker;
@@ -95,7 +96,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'on_response' is a callback to report results to (and 'user_data' is a user
supplied pointer to pass to said call) */
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
@@ -116,7 +117,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
supplied pointer to pass to said call)
Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_complete,
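For orientation, a minimal caller sketch against the updated grpc_httpcli_get
prototype above (illustrative only: grpc_polling_entity_create_from_pollset is
assumed to be provided by polling_entity.h, and the host/path/deadline values
are placeholders):

#include <string.h>
#include <grpc/support/time.h>
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/iomgr/polling_entity.h"

static void on_http_done(grpc_exec_ctx *exec_ctx, void *arg,
                         grpc_error *error) {
  grpc_httpcli_response *response = arg;
  (void)response; /* inspect status/body here; 'error' is not owned */
}

static void start_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *ctx,
                      grpc_pollset *pollset, grpc_httpcli_response *response) {
  /* wrap the pollset in a polling entity (assumed helper from
     polling_entity.h) so it can be added to the httpcli pollset_set */
  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset(pollset);
  grpc_httpcli_request request;
  memset(&request, 0, sizeof(request));
  request.host = (char *)"example.org";      /* placeholder */
  request.http.path = (char *)"/index.html"; /* placeholder */
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(15, GPR_TIMESPAN));
  grpc_httpcli_get(exec_ctx, ctx, &pollent, &request, deadline,
                   grpc_closure_create(on_http_done, response), response);
}

The polling_entity indirection is what allows the same HTTP client code to be
driven by either a pollset or a pollset_set, which is the point of the
pollset -> pollent parameter change in these hunks.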
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 5590928968..a57d93bb7b 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -61,6 +61,7 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) {
static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
+ gpr_timespec deadline,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_httpcli_ssl_channel_security_connector *c =
@@ -79,7 +80,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
- nonsecure_endpoint, cb, user_data);
+ nonsecure_endpoint, deadline, cb, user_data);
}
}
@@ -163,6 +164,7 @@ static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *tcp, const char *host,
+ gpr_timespec deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint)) {
grpc_channel_security_connector *sc = NULL;
@@ -181,7 +183,7 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
pem_root_certs, pem_root_certs_size, host, &sc) ==
GRPC_SECURITY_OK);
grpc_channel_security_connector_do_handshake(
- exec_ctx, sc, tcp, on_secure_transport_setup_done, c);
+ exec_ctx, sc, tcp, deadline, on_secure_transport_setup_done, c);
GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli");
}
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 344b7d3c3e..08e59a168e 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -53,8 +53,8 @@ typedef struct grpc_closure_list {
/** gRPC Callback definition.
*
* \param arg Arbitrary input.
- * \param success An indication on the state of the iomgr. On false, cleanup
- * actions should be taken (eg, shutdown). */
+ * \param error GRPC_ERROR_NONE if no error occurred, otherwise some grpc_error
+ * describing what went wrong */
typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -66,8 +66,11 @@ struct grpc_closure {
/** Arguments to be passed to "cb". */
void *cb_arg;
+ /** Once queued, the result of the closure. Before then: scratch space */
grpc_error *error;
+ /** Once queued, next indicates the next queued closure; before then, scratch
+ * space */
union {
grpc_closure *next;
uintptr_t scratch;
@@ -84,8 +87,8 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
-/** add \a closure to the end of \a list and set \a closure's success to \a
- * success */
+/** add \a closure to the end of \a list
+ and set \a closure's result to \a error */
void grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure,
grpc_error *error);
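A minimal sketch of a closure callback written to the grpc_iomgr_cb_func
contract documented above (illustrative only; per the ownership notes in
error.h further down, the callback does not own a ref to 'error'):

#include <grpc/support/log.h>
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"

static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  if (error != GRPC_ERROR_NONE) {
    /* we do not own 'error'; render it now rather than stashing the pointer */
    const char *msg = grpc_error_string(error);
    gpr_log(GPR_ERROR, "operation failed: %s", msg);
    grpc_error_free_string(msg);
  }
}

static void queue_example(void *arg) {
  grpc_closure_list list = GRPC_CLOSURE_LIST_INIT;
  /* the appended closure eventually runs with this error as its result */
  grpc_closure_list_append(&list, grpc_closure_create(on_done, arg),
                           GRPC_ERROR_CREATE("example failure"));
}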
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 3877ceb1e2..f9808bbda1 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -82,7 +82,7 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep);
void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
gpr_slice_buffer *slices, grpc_closure *cb);
-/* Causes any pending read/write callbacks to run immediately with
+/* Causes any pending and future read/write callbacks to run immediately with
success==0 */
void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 6da0b00582..540fb4fa7e 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,6 +33,7 @@
#include "src/core/lib/iomgr/error.h"
+#include <inttypes.h>
#include <stdbool.h>
#include <string.h>
@@ -42,6 +43,10 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#ifdef GPR_WINDOWS
+#include <grpc/support/log_windows.h>
+#endif
+
static void destroy_integer(void *key) {}
static void *copy_integer(void *key) { return key; }
@@ -84,14 +89,10 @@ static const gpr_avl_vtable avl_vtable_errs = {
static const char *error_int_name(grpc_error_ints key) {
switch (key) {
- case GRPC_ERROR_INT_STATUS_CODE:
- return "status_code";
case GRPC_ERROR_INT_ERRNO:
return "errno";
case GRPC_ERROR_INT_FILE_LINE:
return "file_line";
- case GRPC_ERROR_INT_WARNING:
- return "warning";
case GRPC_ERROR_INT_STREAM_ID:
return "stream_id";
case GRPC_ERROR_INT_GRPC_STATUS:
@@ -112,6 +113,8 @@ static const char *error_int_name(grpc_error_ints key) {
return "fd";
case GRPC_ERROR_INT_WSA_ERROR:
return "wsa_error";
+ case GRPC_ERROR_INT_HTTP_STATUS:
+ return "http_status";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -336,7 +339,7 @@ static char *key_time(void *p) {
static char *fmt_int(void *p) {
char *s;
- gpr_asprintf(&s, "%lld", (intptr_t)p);
+ gpr_asprintf(&s, "%" PRIdPTR, (intptr_t)p);
return s;
}
@@ -418,7 +421,7 @@ static char *fmt_time(void *p) {
pfx = "";
break;
}
- gpr_asprintf(&out, "\"%s%d.%09d\"", pfx, tm.tv_sec, tm.tv_nsec);
+ gpr_asprintf(&out, "\"%s%" PRId64 ".%09d\"", pfx, tm.tv_sec, tm.tv_nsec);
return out;
}
@@ -506,7 +509,7 @@ grpc_error *grpc_os_error(const char *file, int line, int err,
GRPC_ERROR_STR_SYSCALL, call_name);
}
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
grpc_error *grpc_wsa_error(const char *file, int line, int err,
const char *call_name) {
char *utf8_message = gpr_format_message(err);
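As a side note on the format-string changes above, a tiny illustrative sketch
(not part of the patch; the function name is a placeholder) of why
<inttypes.h> is now included: PRIdPTR/PRId64 expand to the correct length
modifiers for intptr_t/int64_t on every platform, unlike a hard-coded
"%lld" or "%d":

#include <inttypes.h>
#include <grpc/support/string_util.h> /* gpr_asprintf */

static char *format_value_and_secs(intptr_t value, int64_t seconds) {
  char *s;
  gpr_asprintf(&s, "value=%" PRIdPTR " elapsed=%" PRId64 "s", value, seconds);
  return s; /* caller releases with gpr_free() */
}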
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index 695724c0be..69cdf3028e 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,49 +39,84 @@
#include <grpc/support/time.h>
-// Opaque representation of an error.
-// Errors are refcounted objects that represent the result of an operation.
-// Ownership laws:
-// if a grpc_error is returned by a function, the caller owns a ref to that
-// instance
-// if a grpc_error is passed to a grpc_closure callback function (functions
-// with the signature:
-// void (*f)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error))
-// then those functions do not automatically own a ref to error
-// if a grpc_error is passed to *ANY OTHER FUNCTION* then that function takes
-// ownership of the error
+/// Opaque representation of an error.
+/// Errors are refcounted objects that represent the result of an operation.
+/// Ownership laws:
+/// if a grpc_error is returned by a function, the caller owns a ref to that
+/// instance
+/// if a grpc_error is passed to a grpc_closure callback function (functions
+/// with the signature:
+/// void (*f)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error))
+/// then those functions do not automatically own a ref to error
+/// if a grpc_error is passed to *ANY OTHER FUNCTION* then that function takes
+/// ownership of the error
+/// Errors have:
+/// a set of ints, strings, and timestamps that describe the error
+/// always present are:
+/// GRPC_ERROR_STR_FILE, GRPC_ERROR_INT_FILE_LINE - source location the error
+/// was generated
+/// GRPC_ERROR_STR_DESCRIPTION - a human readable description of the error
+/// GRPC_ERROR_TIME_CREATED - a timestamp indicating when the error happened
+/// an error can also have children; these are other errors that are believed
+/// to have contributed to this one. By accumulating children, we can begin
+/// to root cause high level failures from low level failures, without having
+/// to derive execution paths from log lines
typedef struct grpc_error grpc_error;
typedef enum {
+ /// 'errno' from the operating system
GRPC_ERROR_INT_ERRNO,
+ /// __LINE__ from the call site creating the error
GRPC_ERROR_INT_FILE_LINE,
- GRPC_ERROR_INT_STATUS_CODE,
- GRPC_ERROR_INT_WARNING,
+ /// stream identifier: for errors that are associated with an individual
+ /// wire stream
GRPC_ERROR_INT_STREAM_ID,
+ /// grpc status code representing this error
GRPC_ERROR_INT_GRPC_STATUS,
+ /// offset into some binary blob (usually represented by
+ /// GRPC_ERROR_STR_RAW_BYTES) where the error occurred
GRPC_ERROR_INT_OFFSET,
+ /// context sensitive index associated with the error
GRPC_ERROR_INT_INDEX,
+ /// context sensitive size associated with the error
GRPC_ERROR_INT_SIZE,
+ /// http2 error code associated with the error (see the HTTP2 RFC)
GRPC_ERROR_INT_HTTP2_ERROR,
+ /// TSI status code associated with the error
GRPC_ERROR_INT_TSI_CODE,
+ /// grpc_security_status associated with the error
GRPC_ERROR_INT_SECURITY_STATUS,
+ /// WSAGetLastError() reported when this error occurred
GRPC_ERROR_INT_WSA_ERROR,
+ /// File descriptor associated with this error
GRPC_ERROR_INT_FD,
+ /// HTTP status (i.e. 404)
+ GRPC_ERROR_INT_HTTP_STATUS,
} grpc_error_ints;
typedef enum {
+ /// top-level textual description of this error
GRPC_ERROR_STR_DESCRIPTION,
+ /// source file in which this error occurred
GRPC_ERROR_STR_FILE,
+ /// operating system description of this error
GRPC_ERROR_STR_OS_ERROR,
+ /// syscall that generated this error
GRPC_ERROR_STR_SYSCALL,
+ /// peer that we were trying to communicate with when this error occurred
GRPC_ERROR_STR_TARGET_ADDRESS,
+ /// grpc status message associated with this error
GRPC_ERROR_STR_GRPC_MESSAGE,
+ /// hex dump (or similar) with the data that generated this error
GRPC_ERROR_STR_RAW_BYTES,
+ /// tsi error string associated with this error
GRPC_ERROR_STR_TSI_ERROR,
+ /// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
} grpc_error_strs;
typedef enum {
+ /// timestamp of error creation
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
@@ -92,8 +127,17 @@ typedef enum {
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);
+/// Create an error - but use GRPC_ERROR_CREATE instead
grpc_error *grpc_error_create(const char *file, int line, const char *desc,
grpc_error **referencing, size_t num_referencing);
+/// Create an error (this is the preferred way of generating an error that is
+/// not due to a system call - for system calls, use GRPC_OS_ERROR or
+/// GRPC_WSA_ERROR as appropriate)
+/// \a referencing is an array of num_referencing elements indicating one or
+/// more errors that are believed to have contributed to this one
+/// err = grpc_error_create(x, y, z, r, nr) is equivalent to:
+/// err = grpc_error_create(x, y, z, NULL, 0);
+/// for (i=0; i<nr; i++) err = grpc_error_add_child(err, r[i]);
#define GRPC_ERROR_CREATE(desc) \
grpc_error_create(__FILE__, __LINE__, desc, NULL, 0)
@@ -125,13 +169,18 @@ grpc_error *grpc_error_set_time(grpc_error *src, grpc_error_times which,
gpr_timespec value);
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
const char *value);
+/// Add a child error: an error that is believed to have contributed to this
+/// error occurring. Allows root causing high level errors from lower level
+/// errors that contributed to them.
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child);
grpc_error *grpc_os_error(const char *file, int line, int err,
const char *call_name);
+/// create an error associated with errno!=0 (an 'operating system' error)
#define GRPC_OS_ERROR(err, call_name) \
grpc_os_error(__FILE__, __LINE__, err, call_name)
grpc_error *grpc_wsa_error(const char *file, int line, int err,
const char *call_name);
+/// windows only: create an error associated with WSAGetLastError()!=0
#define GRPC_WSA_ERROR(err, call_name) \
grpc_wsa_error(__FILE__, __LINE__, err, call_name)
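A short usage sketch following the conventions documented above (illustrative
only; grpc_error_set_int is assumed from the unchanged portion of error.h, and
the description/target strings are placeholders):

#include <errno.h>
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/error.h"

static grpc_error *make_fetch_error(void) {
  /* a low-level failure that contributed to the high-level one */
  grpc_error *child = GRPC_OS_ERROR(ETIMEDOUT, "connect");
  grpc_error *err = GRPC_ERROR_CREATE("HTTP request failed");
  err = grpc_error_set_int(err, GRPC_ERROR_INT_HTTP_STATUS,
                           404); /* assumed setter from error.h */
  err = grpc_error_set_str(err, GRPC_ERROR_STR_TARGET_ADDRESS,
                           "example.org:443");
  /* per the ownership laws, adding the child transfers our ref to it */
  err = grpc_error_add_child(err, child);
  return err; /* the caller owns this ref */
}

static void log_error(grpc_error *err) {
  const char *msg = grpc_error_string(err);
  gpr_log(GPR_INFO, "%s", msg);
  grpc_error_free_string(msg);
}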
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
new file mode 100644
index 0000000000..9e306af5fa
--- /dev/null
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -0,0 +1,2043 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* This file will be removed shortly: it's here to keep refactoring
+ * steps simple and auditable.
+ * It's the combination of the old files:
+ * - fd_posix.{h,c}
+ * - pollset_posix.{h,c}
+ * - pollset_multipoller_with_{poll,epoll}.{h,c}
+ * The new version will be split into:
+ * - ev_poll_posix.{h,c}
+ * - ev_epoll_posix.{h,c}
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
+#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/tls.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/block_annotate.h"
+
+/*******************************************************************************
+ * FD declarations
+ */
+
+typedef struct grpc_fd_watcher {
+ struct grpc_fd_watcher *next;
+ struct grpc_fd_watcher *prev;
+ grpc_pollset *pollset;
+ grpc_pollset_worker *worker;
+ grpc_fd *fd;
+} grpc_fd_watcher;
+
+struct grpc_fd {
+ int fd;
+ /* refst format:
+ bit0: 1=active/0=orphaned
+ bit1-n: refcount
+ meaning that mostly we ref by two to avoid altering the orphaned bit,
+ and just unref by 1 when we're ready to flag the object as orphaned */
+ gpr_atm refst;
+
+ gpr_mu mu;
+ int shutdown;
+ int closed;
+ int released;
+
+ /* The watcher list.
+
+ The following watcher related fields are protected by watcher_mu.
+
+ An fd_watcher is an ephemeral object created when an fd wants to
+ begin polling, and destroyed after the poll.
+
+ It records whether the fd is interested in read polling, write polling,
+ both, or neither on this fd.
+
+ If a watcher is asked to poll for reads or writes, the read_watcher
+ or write_watcher fields are set respectively. A watcher may be asked
+ to poll for both, in which case both fields will be set.
+
+ read_watcher and write_watcher may be NULL if no watcher has been
+ asked to poll for reads or writes.
+
+ If an fd_watcher is not asked to poll for reads or writes, it's added
+ to a linked list of inactive watchers, rooted at inactive_watcher_root.
+ If a poller is later needed, one of the inactive pollers may be
+ kicked out of its poll loop to take that responsibility. */
+ grpc_fd_watcher inactive_watcher_root;
+ grpc_fd_watcher *read_watcher;
+ grpc_fd_watcher *write_watcher;
+
+ grpc_closure *read_closure;
+ grpc_closure *write_closure;
+
+ struct grpc_fd *freelist_next;
+
+ grpc_closure *on_done_closure;
+
+ grpc_iomgr_object iomgr_object;
+
+ /* The pollset that last noticed and notified that the fd is readable */
+ grpc_pollset *read_notifier_pollset;
+};
+
+/* Begin polling on an fd.
+ Registers that the given pollset is interested in this fd - so that if
+ readability or writability interest changes, the pollset can be kicked to
+ pick up that new interest.
+ Return value is:
+ (fd_needs_read? read_mask : 0) | (fd_needs_write? write_mask : 0)
+ i.e. a combination of read_mask and write_mask determined by the fd's current
+ interest in said events.
+ Polling strategies that do not need to alter their behavior depending on the
+ fd's current interest (such as epoll) do not need to call this function.
+ MUST NOT be called with a pollset lock taken */
+static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher *rec);
+/* Complete polling previously started with fd_begin_poll
+ MUST NOT be called with a pollset lock taken
+ if got_read or got_write are 1, also does the become_{readable,writable} as
+ appropriate. */
+static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset);
+
+/* Return 1 if this fd is orphaned, 0 otherwise */
+static bool fd_is_orphaned(grpc_fd *fd);
+
+/* Reference counting for fds */
+/*#define GRPC_FD_REF_COUNT_DEBUG*/
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
+static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+ int line);
+#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
+#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
+#else
+static void fd_ref(grpc_fd *fd);
+static void fd_unref(grpc_fd *fd);
+#define GRPC_FD_REF(fd, reason) fd_ref(fd)
+#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
+#endif
+
+static void fd_global_init(void);
+static void fd_global_shutdown(void);
+
+#define CLOSURE_NOT_READY ((grpc_closure *)0)
+#define CLOSURE_READY ((grpc_closure *)1)
+
+/*******************************************************************************
+ * pollset declarations
+ */
+
+typedef struct grpc_pollset_vtable grpc_pollset_vtable;
+
+typedef struct grpc_cached_wakeup_fd {
+ grpc_wakeup_fd fd;
+ struct grpc_cached_wakeup_fd *next;
+} grpc_cached_wakeup_fd;
+
+struct grpc_pollset_worker {
+ grpc_cached_wakeup_fd *wakeup_fd;
+ int reevaluate_polling_on_wakeup;
+ int kicked_specifically;
+ struct grpc_pollset_worker *next;
+ struct grpc_pollset_worker *prev;
+};
+
+struct grpc_pollset {
+ /* pollsets under posix can mutate representation as fds are added and
+ removed.
+ For example, we may choose a poll() based implementation on linux for
+ few fds, and an epoll() based implementation for many fds */
+ const grpc_pollset_vtable *vtable;
+ gpr_mu mu;
+ grpc_pollset_worker root_worker;
+ int in_flight_cbs;
+ int shutting_down;
+ int called_shutdown;
+ int kicked_without_pollers;
+ grpc_closure *shutdown_done;
+ grpc_closure_list idle_jobs;
+ union {
+ int fd;
+ void *ptr;
+ } data;
+ /* Local cache of eventfds for workers */
+ grpc_cached_wakeup_fd *local_wakeup_cache;
+};
+
+struct grpc_pollset_vtable {
+ void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd, int and_unlock_pollset);
+ grpc_error *(*maybe_work_and_unlock)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now);
+ void (*finish_shutdown)(grpc_pollset *pollset);
+ void (*destroy)(grpc_pollset *pollset);
+};
+
+/* Add an fd to a pollset */
+static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ struct grpc_fd *fd);
+
+static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd);
+
+/* Convert a timespec to milliseconds:
+ - very small or negative poll times are clamped to zero to do a
+ non-blocking poll (which becomes spin polling)
+ - other small values are rounded up to one millisecond
+ - polls longer than a millisecond are rounded up to the next nearest
+ millisecond to avoid spinning
+ - infinite timeouts are converted to -1 */
+static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
+ gpr_timespec now);
+
+/* Allow kick to wakeup the currently polling worker */
+#define GRPC_POLLSET_CAN_KICK_SELF 1
+/* Force the wakee to repoll when awoken */
+#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
+/* As per pollset_kick, with an extended set of flags (defined above)
+ -- mostly for fd_posix's use. */
+static grpc_error *pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags) GRPC_MUST_USE_RESULT;
+
+/* turn a pollset into a multipoller: platform specific */
+typedef void (*platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ struct grpc_fd **fds,
+ size_t fd_count);
+static platform_become_multipoller_type platform_become_multipoller;
+
+/* Return 1 if the pollset has active threads in pollset_work (pollset must
+ * be locked) */
+static int pollset_has_workers(grpc_pollset *pollset);
+
+static void remove_fd_from_all_epoll_sets(int fd);
+
+/*******************************************************************************
+ * pollset_set definitions
+ */
+
+struct grpc_pollset_set {
+ gpr_mu mu;
+
+ size_t pollset_count;
+ size_t pollset_capacity;
+ grpc_pollset **pollsets;
+
+ size_t pollset_set_count;
+ size_t pollset_set_capacity;
+ struct grpc_pollset_set **pollset_sets;
+
+ size_t fd_count;
+ size_t fd_capacity;
+ grpc_fd **fds;
+};
+
+/*******************************************************************************
+ * fd_posix.c
+ */
+
+/* We need to keep a freelist not because of any concerns about malloc performance
+ * but instead so that implementations with multiple threads in (for example)
+ * epoll_wait deal with the race between pollset removal and incoming poll
+ * notifications.
+ *
+ * The problem is that the poller ultimately holds a reference to this
+ * object, so it is very difficult to know when it is safe to free it, at least
+ * without some expensive synchronization.
+ *
+ * If we keep the object freelisted, in the worst case losing this race just
+ * becomes a spurious read notification on a reused fd.
+ */
+/* TODO(klempner): We could use some form of polling generation count to know
+ * when these are safe to free. */
+/* TODO(klempner): Consider disabling freelisting if we don't have multiple
+ * threads in poll on the same fd */
+/* TODO(klempner): Batch these allocations to reduce fragmentation */
+static grpc_fd *fd_freelist = NULL;
+static gpr_mu fd_freelist_mu;
+
+static void freelist_fd(grpc_fd *fd) {
+ gpr_mu_lock(&fd_freelist_mu);
+ fd->freelist_next = fd_freelist;
+ fd_freelist = fd;
+ grpc_iomgr_unregister_object(&fd->iomgr_object);
+ gpr_mu_unlock(&fd_freelist_mu);
+}
+
+static grpc_fd *alloc_fd(int fd) {
+ grpc_fd *r = NULL;
+ gpr_mu_lock(&fd_freelist_mu);
+ if (fd_freelist != NULL) {
+ r = fd_freelist;
+ fd_freelist = fd_freelist->freelist_next;
+ }
+ gpr_mu_unlock(&fd_freelist_mu);
+ if (r == NULL) {
+ r = gpr_malloc(sizeof(grpc_fd));
+ gpr_mu_init(&r->mu);
+ }
+
+ gpr_mu_lock(&r->mu);
+ gpr_atm_rel_store(&r->refst, 1);
+ r->shutdown = 0;
+ r->read_closure = CLOSURE_NOT_READY;
+ r->write_closure = CLOSURE_NOT_READY;
+ r->fd = fd;
+ r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
+ &r->inactive_watcher_root;
+ r->freelist_next = NULL;
+ r->read_watcher = r->write_watcher = NULL;
+ r->on_done_closure = NULL;
+ r->closed = 0;
+ r->released = 0;
+ r->read_notifier_pollset = NULL;
+ gpr_mu_unlock(&r->mu);
+ return r;
+}
+
+static void destroy(grpc_fd *fd) {
+ gpr_mu_destroy(&fd->mu);
+ gpr_free(fd);
+}
+
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
+static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+ int line) {
+ gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
+ gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+#else
+#define REF_BY(fd, n, reason) ref_by(fd, n)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n)
+static void ref_by(grpc_fd *fd, int n) {
+#endif
+ GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
+}
+
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+ int line) {
+ gpr_atm old;
+ gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
+ gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+#else
+static void unref_by(grpc_fd *fd, int n) {
+ gpr_atm old;
+#endif
+ old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ if (old == n) {
+ freelist_fd(fd);
+ } else {
+ GPR_ASSERT(old > n);
+ }
+}
+
+static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
+
+static void fd_global_shutdown(void) {
+ gpr_mu_lock(&fd_freelist_mu);
+ gpr_mu_unlock(&fd_freelist_mu);
+ while (fd_freelist != NULL) {
+ grpc_fd *fd = fd_freelist;
+ fd_freelist = fd_freelist->freelist_next;
+ destroy(fd);
+ }
+ gpr_mu_destroy(&fd_freelist_mu);
+}
+
+static grpc_fd *fd_create(int fd, const char *name) {
+ grpc_fd *r = alloc_fd(fd);
+ char *name2;
+ gpr_asprintf(&name2, "%s fd=%d", name, fd);
+ grpc_iomgr_register_object(&r->iomgr_object, name2);
+ gpr_free(name2);
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
+#endif
+ return r;
+}
+
+static bool fd_is_orphaned(grpc_fd *fd) {
+ return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
+}
+
+static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
+ gpr_mu_lock(&watcher->pollset->mu);
+ GPR_ASSERT(watcher->worker);
+ grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ gpr_mu_unlock(&watcher->pollset->mu);
+ return err;
+}
+
+static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
+ if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
+ pollset_kick_locked(fd->inactive_watcher_root.next);
+ } else if (fd->read_watcher) {
+ pollset_kick_locked(fd->read_watcher);
+ } else if (fd->write_watcher) {
+ pollset_kick_locked(fd->write_watcher);
+ }
+}
+
+static void wake_all_watchers_locked(grpc_fd *fd) {
+ grpc_fd_watcher *watcher;
+ for (watcher = fd->inactive_watcher_root.next;
+ watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
+ pollset_kick_locked(watcher);
+ }
+ if (fd->read_watcher) {
+ pollset_kick_locked(fd->read_watcher);
+ }
+ if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
+ pollset_kick_locked(fd->write_watcher);
+ }
+}
+
+static int has_watchers(grpc_fd *fd) {
+ return fd->read_watcher != NULL || fd->write_watcher != NULL ||
+ fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
+}
+
+static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ fd->closed = 1;
+ if (!fd->released) {
+ close(fd->fd);
+ } else {
+ remove_fd_from_all_epoll_sets(fd->fd);
+ }
+ grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
+}
+
+static int fd_wrapped_fd(grpc_fd *fd) {
+ if (fd->released || fd->closed) {
+ return -1;
+ } else {
+ return fd->fd;
+ }
+}
+
+static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *on_done, int *release_fd,
+ const char *reason) {
+ fd->on_done_closure = on_done;
+ fd->released = release_fd != NULL;
+ if (!fd->released) {
+ shutdown(fd->fd, SHUT_RDWR);
+ } else {
+ *release_fd = fd->fd;
+ }
+ gpr_mu_lock(&fd->mu);
+ REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
+ if (!has_watchers(fd)) {
+ close_fd_locked(exec_ctx, fd);
+ } else {
+ wake_all_watchers_locked(fd);
+ }
+ gpr_mu_unlock(&fd->mu);
+ UNREF_BY(fd, 2, reason); /* drop the reference */
+}
+
+/* increment refcount by two to avoid changing the orphan bit */
+#ifdef GRPC_FD_REF_COUNT_DEBUG
+static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
+ int line) {
+ ref_by(fd, 2, reason, file, line);
+}
+
+static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+ int line) {
+ unref_by(fd, 2, reason, file, line);
+}
+#else
+static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
+
+static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
+#endif
+
+static grpc_error *fd_shutdown_error(bool shutdown) {
+ if (!shutdown) {
+ return GRPC_ERROR_NONE;
+ } else {
+ return GRPC_ERROR_CREATE("FD shutdown");
+ }
+}
+
+static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st, grpc_closure *closure) {
+ if (fd->shutdown) {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
+ NULL);
+ } else if (*st == CLOSURE_NOT_READY) {
+ /* not ready ==> switch to a waiting state by setting the closure */
+ *st = closure;
+ } else if (*st == CLOSURE_READY) {
+ /* already ready ==> queue the closure to run immediately */
+ *st = CLOSURE_NOT_READY;
+ grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
+ NULL);
+ maybe_wake_one_watcher_locked(fd);
+ } else {
+ /* upcallptr was set to a different closure. This is an error! */
+ gpr_log(GPR_ERROR,
+ "User called a notify_on function with a previous callback still "
+ "pending");
+ abort();
+ }
+}
+
+/* returns 1 if state becomes not ready */
+static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure **st) {
+ if (*st == CLOSURE_READY) {
+ /* duplicate ready ==> ignore */
+ return 0;
+ } else if (*st == CLOSURE_NOT_READY) {
+ /* not ready, and not waiting ==> flag ready */
+ *st = CLOSURE_READY;
+ return 0;
+ } else {
+ /* waiting ==> queue closure */
+ grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+ *st = CLOSURE_NOT_READY;
+ return 1;
+ }
+}
+
+static void set_read_notifier_pollset_locked(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) {
+ fd->read_notifier_pollset = read_notifier_pollset;
+}
+
+static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ /* only shutdown once */
+ if (!fd->shutdown) {
+ fd->shutdown = 1;
+ /* signal read/write closed to OS so that future operations fail */
+ shutdown(fd->fd, SHUT_RDWR);
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ }
+ gpr_mu_unlock(&fd->mu);
+}
+
+static bool fd_is_shutdown(grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ bool r = fd->shutdown;
+ gpr_mu_unlock(&fd->mu);
+ return r;
+}
+
+static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
+ gpr_mu_unlock(&fd->mu);
+}
+
+static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ gpr_mu_lock(&fd->mu);
+ notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
+ gpr_mu_unlock(&fd->mu);
+}
+
+/* Return the read-notifier pollset */
+static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
+ grpc_pollset *notifier = NULL;
+
+ gpr_mu_lock(&fd->mu);
+ notifier = fd->read_notifier_pollset;
+ gpr_mu_unlock(&fd->mu);
+
+ return notifier;
+}
+
+static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
+ grpc_pollset_worker *worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher *watcher) {
+ uint32_t mask = 0;
+ grpc_closure *cur;
+ int requested;
+ /* keep track of pollers that have requested our events, in case they change
+ */
+ GRPC_FD_REF(fd, "poll");
+
+ gpr_mu_lock(&fd->mu);
+
+ /* if we are shutdown, then don't add to the watcher set */
+ if (fd->shutdown) {
+ watcher->fd = NULL;
+ watcher->pollset = NULL;
+ watcher->worker = NULL;
+ gpr_mu_unlock(&fd->mu);
+ GRPC_FD_UNREF(fd, "poll");
+ return 0;
+ }
+
+ /* if there is nobody polling for read, but we need to, then start doing so */
+ cur = fd->read_closure;
+ requested = cur != CLOSURE_READY;
+ if (read_mask && fd->read_watcher == NULL && requested) {
+ fd->read_watcher = watcher;
+ mask |= read_mask;
+ }
+ /* if there is nobody polling for write, but we need to, then start doing so
+ */
+ cur = fd->write_closure;
+ requested = cur != CLOSURE_READY;
+ if (write_mask && fd->write_watcher == NULL && requested) {
+ fd->write_watcher = watcher;
+ mask |= write_mask;
+ }
+ /* if not polling, remember this watcher in case we need someone to later */
+ if (mask == 0 && worker != NULL) {
+ watcher->next = &fd->inactive_watcher_root;
+ watcher->prev = watcher->next->prev;
+ watcher->next->prev = watcher->prev->next = watcher;
+ }
+ watcher->pollset = pollset;
+ watcher->worker = worker;
+ watcher->fd = fd;
+ gpr_mu_unlock(&fd->mu);
+
+ return mask;
+}
+
+static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
+ int got_read, int got_write,
+ grpc_pollset *read_notifier_pollset) {
+ int was_polling = 0;
+ int kick = 0;
+ grpc_fd *fd = watcher->fd;
+
+ if (fd == NULL) {
+ return;
+ }
+
+ gpr_mu_lock(&fd->mu);
+
+ if (watcher == fd->read_watcher) {
+ /* remove read watcher, kick if we still need a read */
+ was_polling = 1;
+ if (!got_read) {
+ kick = 1;
+ }
+ fd->read_watcher = NULL;
+ }
+ if (watcher == fd->write_watcher) {
+ /* remove write watcher, kick if we still need a write */
+ was_polling = 1;
+ if (!got_write) {
+ kick = 1;
+ }
+ fd->write_watcher = NULL;
+ }
+ if (!was_polling && watcher->worker != NULL) {
+ /* remove from inactive list */
+ watcher->next->prev = watcher->prev;
+ watcher->prev->next = watcher->next;
+ }
+ if (got_read) {
+ if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
+ kick = 1;
+ }
+
+ if (read_notifier_pollset != NULL) {
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
+ }
+ if (got_write) {
+ if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
+ kick = 1;
+ }
+ }
+ if (kick) {
+ maybe_wake_one_watcher_locked(fd);
+ }
+ if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
+ close_fd_locked(exec_ctx, fd);
+ }
+ gpr_mu_unlock(&fd->mu);
+
+ GRPC_FD_UNREF(fd, "poll");
+}
+
+/*******************************************************************************
+ * pollset_posix.c
+ */
+
+GPR_TLS_DECL(g_current_thread_poller);
+GPR_TLS_DECL(g_current_thread_worker);
+
+/** The alarm system needs to be able to wake up 'some poller' sometimes
+ * (specifically when a new alarm needs to be triggered earlier than the next
+ * alarm 'epoch').
+ * This wakeup_fd gives us something to alert on when such a case occurs. */
+grpc_wakeup_fd grpc_global_wakeup_fd;
+
+static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->prev->next = worker->next;
+ worker->next->prev = worker->prev;
+}
+
+static int pollset_has_workers(grpc_pollset *p) {
+ return p->root_worker.next != &p->root_worker;
+}
+
+static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+ if (pollset_has_workers(p)) {
+ grpc_pollset_worker *w = p->root_worker.next;
+ remove_worker(p, w);
+ return w;
+ } else {
+ return NULL;
+ }
+}
+
+static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->next = &p->root_worker;
+ worker->prev = worker->next->prev;
+ worker->prev->next = worker->next->prev = worker;
+}
+
+static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+ worker->prev = &p->root_worker;
+ worker->next = worker->prev->next;
+ worker->prev->next = worker->next->prev = worker;
+}
+
+static void kick_append_error(grpc_error **composite, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE("Kick Failure");
+ }
+ *composite = grpc_error_add_child(*composite, error);
+}
+
+static grpc_error *pollset_kick_ext(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker,
+ uint32_t flags) {
+ GPR_TIMER_BEGIN("pollset_kick_ext", 0);
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ /* pollset->mu already held */
+ if (specific_worker != NULL) {
+ if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+ GPR_TIMER_BEGIN("pollset_kick_ext.broadcast", 0);
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
+ for (specific_worker = p->root_worker.next;
+ specific_worker != &p->root_worker;
+ specific_worker = specific_worker->next) {
+ kick_append_error(
+ &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
+ }
+ p->kicked_without_pollers = true;
+ GPR_TIMER_END("pollset_kick_ext.broadcast", 0);
+ } else if (gpr_tls_get(&g_current_thread_worker) !=
+ (intptr_t)specific_worker) {
+ GPR_TIMER_MARK("different_thread_worker", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = true;
+ }
+ specific_worker->kicked_specifically = true;
+ kick_append_error(&error,
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
+ } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
+ GPR_TIMER_MARK("kick_yoself", 0);
+ if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
+ specific_worker->reevaluate_polling_on_wakeup = true;
+ }
+ specific_worker->kicked_specifically = true;
+ kick_append_error(&error,
+ grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
+ }
+ } else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
+ GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
+ GPR_TIMER_MARK("kick_anonymous", 0);
+ specific_worker = pop_front_worker(p);
+ if (specific_worker != NULL) {
+ if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
+ GPR_TIMER_MARK("kick_anonymous_not_self", 0);
+ push_back_worker(p, specific_worker);
+ specific_worker = pop_front_worker(p);
+ if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
+ gpr_tls_get(&g_current_thread_worker) ==
+ (intptr_t)specific_worker) {
+ push_back_worker(p, specific_worker);
+ specific_worker = NULL;
+ }
+ }
+ if (specific_worker != NULL) {
+ GPR_TIMER_MARK("finally_kick", 0);
+ push_back_worker(p, specific_worker);
+ kick_append_error(
+ &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
+ }
+ } else {
+ GPR_TIMER_MARK("kicked_no_pollers", 0);
+ p->kicked_without_pollers = true;
+ }
+ }
+
+ GPR_TIMER_END("pollset_kick_ext", 0);
+ return error;
+}
+
+static grpc_error *pollset_kick(grpc_pollset *p,
+ grpc_pollset_worker *specific_worker) {
+ return pollset_kick_ext(p, specific_worker, 0);
+}
+
+/* global state management */
+
+static grpc_error *pollset_global_init(void) {
+ gpr_tls_init(&g_current_thread_poller);
+ gpr_tls_init(&g_current_thread_worker);
+ return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
+}
+
+static void pollset_global_shutdown(void) {
+ grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
+ gpr_tls_destroy(&g_current_thread_poller);
+ gpr_tls_destroy(&g_current_thread_worker);
+}
+
+static grpc_error *kick_poller(void) {
+ return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
+}
+
+/* main interface */
+
+static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
+
+static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+ gpr_mu_init(&pollset->mu);
+ *mu = &pollset->mu;
+ pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
+ pollset->in_flight_cbs = 0;
+ pollset->shutting_down = 0;
+ pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
+ pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
+ pollset->local_wakeup_cache = NULL;
+ pollset->kicked_without_pollers = 0;
+ become_basic_pollset(pollset, NULL);
+}
+
+static void pollset_destroy(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ while (pollset->local_wakeup_cache) {
+ grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
+ grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
+ gpr_free(pollset->local_wakeup_cache);
+ pollset->local_wakeup_cache = next;
+ }
+ gpr_mu_destroy(&pollset->mu);
+}
+
+static void pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ GPR_ASSERT(pollset->in_flight_cbs == 0);
+ GPR_ASSERT(!pollset_has_workers(pollset));
+ GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
+ pollset->vtable->destroy(pollset);
+ pollset->shutting_down = 0;
+ pollset->called_shutdown = 0;
+ pollset->kicked_without_pollers = 0;
+ become_basic_pollset(pollset, NULL);
+}
+
+static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
+ gpr_mu_lock(&pollset->mu);
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
+/* the following (enabled only in debug) will reacquire and then release
+ our lock - meaning that if the unlocking flag passed to add_fd above is
+ not respected, the code will deadlock (in a way that we have a chance of
+ debugging) */
+#ifndef NDEBUG
+ gpr_mu_lock(&pollset->mu);
+ gpr_mu_unlock(&pollset->mu);
+#endif
+}
+
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+ GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
+ pollset->vtable->finish_shutdown(pollset);
+ grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+}
+
+static void work_combine_error(grpc_error **composite, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE("pollset_work");
+ }
+ *composite = grpc_error_add_child(*composite, error);
+}
+
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl,
+ gpr_timespec now, gpr_timespec deadline) {
+ grpc_pollset_worker worker;
+ *worker_hdl = &worker;
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ /* pollset->mu already held */
+ int added_worker = 0;
+ int locked = 1;
+ int queued_work = 0;
+ int keep_polling = 0;
+ GPR_TIMER_BEGIN("pollset_work", 0);
+ /* this must happen before we (potentially) drop pollset->mu */
+ worker.next = worker.prev = NULL;
+ worker.reevaluate_polling_on_wakeup = 0;
+ if (pollset->local_wakeup_cache != NULL) {
+ worker.wakeup_fd = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker.wakeup_fd->next;
+ } else {
+ worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
+ error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
+ if (error != GRPC_ERROR_NONE) {
+ return error;
+ }
+ }
+ worker.kicked_specifically = 0;
+ /* If there's work waiting for the pollset to be idle, and the
+ pollset is idle, then do that work */
+ if (!pollset_has_workers(pollset) &&
+ !grpc_closure_list_empty(pollset->idle_jobs)) {
+ GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ goto done;
+ }
+ /* If we're shutting down then we don't execute any extended work */
+ if (pollset->shutting_down) {
+ GPR_TIMER_MARK("pollset_work.shutting_down", 0);
+ goto done;
+ }
+ /* Give do_promote priority so we don't starve it out */
+ if (pollset->in_flight_cbs) {
+ GPR_TIMER_MARK("pollset_work.in_flight_cbs", 0);
+ gpr_mu_unlock(&pollset->mu);
+ locked = 0;
+ goto done;
+ }
+ /* Start polling, and keep doing so while we're being asked to
+ re-evaluate our pollers (this allows poll() based pollers to
+ ensure they don't miss wakeups) */
+ keep_polling = 1;
+ while (keep_polling) {
+ keep_polling = 0;
+ if (!pollset->kicked_without_pollers) {
+ if (!added_worker) {
+ push_front_worker(pollset, &worker);
+ added_worker = 1;
+ gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
+ }
+ gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
+ GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
+ work_combine_error(&error,
+ pollset->vtable->maybe_work_and_unlock(
+ exec_ctx, pollset, &worker, deadline, now));
+ GPR_TIMER_END("maybe_work_and_unlock", 0);
+ locked = 0;
+ gpr_tls_set(&g_current_thread_poller, 0);
+ } else {
+ GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
+ pollset->kicked_without_pollers = 0;
+ }
+ /* Finished execution - start cleaning up.
+ Note that we may arrive here from outside the enclosing while() loop.
+ In that case we won't loop, though, as we haven't added worker to the
+ worker list (which means nobody could ask us to re-evaluate polling). */
+ done:
+ if (!locked) {
+ queued_work |= grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ locked = 1;
+ }
+ /* If we're forced to re-evaluate polling (via pollset_kick with
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
+ a loop */
+ if (worker.reevaluate_polling_on_wakeup) {
+ worker.reevaluate_polling_on_wakeup = 0;
+ pollset->kicked_without_pollers = 0;
+ if (queued_work || worker.kicked_specifically) {
+ /* If there's queued work on the list, then set the deadline to be
+ immediate so we get back out of the polling loop quickly */
+ deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ }
+ keep_polling = 1;
+ }
+ }
+ if (added_worker) {
+ remove_worker(pollset, &worker);
+ gpr_tls_set(&g_current_thread_worker, 0);
+ }
+ /* release wakeup fd to the local pool */
+ worker.wakeup_fd->next = pollset->local_wakeup_cache;
+ pollset->local_wakeup_cache = worker.wakeup_fd;
+ /* check shutdown conditions */
+ if (pollset->shutting_down) {
+ if (pollset_has_workers(pollset)) {
+ pollset_kick(pollset, NULL);
+ } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
+ pollset->called_shutdown = 1;
+ gpr_mu_unlock(&pollset->mu);
+ finish_shutdown(exec_ctx, pollset);
+ grpc_exec_ctx_flush(exec_ctx);
+ /* Continuing to access pollset here is safe -- it is the caller's
+ * responsibility to not destroy it when it has outstanding calls to
+ * pollset_work.
+ * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
+ gpr_mu_lock(&pollset->mu);
+ } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ gpr_mu_unlock(&pollset->mu);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_mu_lock(&pollset->mu);
+ }
+ }
+ *worker_hdl = NULL;
+ GPR_TIMER_END("pollset_work", 0);
+ return error;
+}
+
+static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
+ GPR_ASSERT(!pollset->shutting_down);
+ pollset->shutting_down = 1;
+ pollset->shutdown_done = closure;
+ pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ if (!pollset_has_workers(pollset)) {
+ grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+ }
+ if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
+ !pollset_has_workers(pollset)) {
+ pollset->called_shutdown = 1;
+ finish_shutdown(exec_ctx, pollset);
+ }
+}
+
+static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
+ gpr_timespec now) {
+ gpr_timespec timeout;
+ static const int64_t max_spin_polling_us = 10;
+ if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
+ return -1;
+ }
+ if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
+ max_spin_polling_us,
+ GPR_TIMESPAN))) <= 0) {
+ return 0;
+ }
+ timeout = gpr_time_sub(deadline, now);
+ return gpr_time_to_millis(gpr_time_add(
+ timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+}
+
+/*
+ * basic_pollset - a vtable that provides polling for zero or one file
+ * descriptor via poll()
+ */
+
+typedef struct grpc_unary_promote_args {
+ const grpc_pollset_vtable *original_vtable;
+ grpc_pollset *pollset;
+ grpc_fd *fd;
+ grpc_closure promotion_closure;
+} grpc_unary_promote_args;
+
+static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
+ grpc_error *error) {
+ grpc_unary_promote_args *up_args = args;
+ const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
+ grpc_pollset *pollset = up_args->pollset;
+ grpc_fd *fd = up_args->fd;
+
+ /*
+ * This is quite tricky. There are a number of cases to keep in mind here:
+ * 1. fd may have been orphaned
+ * 2. The pollset may no longer be a unary poller (and we can't let case #1
+ * leak to other pollset types!)
+ * 3. pollset's fd (which may have changed) may have been orphaned
+ * 4. The pollset may be shutting down.
+ */
+
+ gpr_mu_lock(&pollset->mu);
+ /* First we need to ensure that nobody is polling concurrently */
+ GPR_ASSERT(!pollset_has_workers(pollset));
+
+ gpr_free(up_args);
+ /* At this point the pollset may no longer be a unary poller. In that case
+ * we should just call the right add function and be done. */
+ /* TODO(klempner): If we're not careful this could cause infinite recursion.
+ * That's not a problem for now because empty_pollset has a trivial poller
+ * and we don't have any mechanism to unbecome multipoller. */
+ pollset->in_flight_cbs--;
+ if (pollset->shutting_down) {
+ /* We don't care about this pollset anymore. */
+ if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
+ pollset->called_shutdown = 1;
+ finish_shutdown(exec_ctx, pollset);
+ }
+ } else if (fd_is_orphaned(fd)) {
+ /* Don't try to add it to anything, we'll drop our ref on it below */
+ } else if (pollset->vtable != original_vtable) {
+ pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
+ } else if (fd != pollset->data.ptr) {
+ grpc_fd *fds[2];
+ fds[0] = pollset->data.ptr;
+ fds[1] = fd;
+
+ if (fds[0] && !fd_is_orphaned(fds[0])) {
+ platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds));
+ GRPC_FD_UNREF(fds[0], "basicpoll");
+ } else {
+ /* old fd is orphaned and we haven't cleaned it up until now, so remain a
+ * unary poller */
+ /* Note that it is possible that fds[1] is also orphaned at this point.
+ * That's okay, we'll correct it at the next add or poll. */
+ if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
+ pollset->data.ptr = fd;
+ GRPC_FD_REF(fd, "basicpoll");
+ }
+ }
+
+ gpr_mu_unlock(&pollset->mu);
+
+ /* Matching ref in basic_pollset_add_fd */
+ GRPC_FD_UNREF(fd, "basicpoll_add");
+}
+
+static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd, int and_unlock_pollset) {
+ grpc_unary_promote_args *up_args;
+ GPR_ASSERT(fd);
+ if (fd == pollset->data.ptr) goto exit;
+
+ if (!pollset_has_workers(pollset)) {
+ /* Fast path -- no in flight cbs */
+ /* TODO(klempner): Comment this out and fix any test failures or establish
+ * they are due to timing issues */
+ grpc_fd *fds[2];
+ fds[0] = pollset->data.ptr;
+ fds[1] = fd;
+
+ if (fds[0] == NULL) {
+ pollset->data.ptr = fd;
+ GRPC_FD_REF(fd, "basicpoll");
+ } else if (!fd_is_orphaned(fds[0])) {
+ platform_become_multipoller(exec_ctx, pollset, fds, GPR_ARRAY_SIZE(fds));
+ GRPC_FD_UNREF(fds[0], "basicpoll");
+ } else {
+ /* old fd is orphaned and we haven't cleaned it up until now, so remain a
+ * unary poller */
+ GRPC_FD_UNREF(fds[0], "basicpoll");
+ pollset->data.ptr = fd;
+ GRPC_FD_REF(fd, "basicpoll");
+ }
+ goto exit;
+ }
+
+ /* Now we need to promote. This needs to happen when we're not polling. Since
+ * this may be called from poll, the wait needs to happen asynchronously. */
+ GRPC_FD_REF(fd, "basicpoll_add");
+ pollset->in_flight_cbs++;
+ up_args = gpr_malloc(sizeof(*up_args));
+ up_args->fd = fd;
+ up_args->original_vtable = pollset->vtable;
+ up_args->pollset = pollset;
+ up_args->promotion_closure.cb = basic_do_promote;
+ up_args->promotion_closure.cb_arg = up_args;
+
+ grpc_closure_list_append(&pollset->idle_jobs, &up_args->promotion_closure,
+ GRPC_ERROR_NONE);
+ pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+
+exit:
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
+ }
+}
+
+static grpc_error *basic_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
+
+ struct pollfd pfd[3];
+ grpc_fd *fd;
+ grpc_fd_watcher fd_watcher;
+ int timeout;
+ int r;
+ nfds_t nfds;
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ fd = pollset->data.ptr;
+ if (fd && fd_is_orphaned(fd)) {
+ GRPC_FD_UNREF(fd, "basicpoll");
+ fd = pollset->data.ptr = NULL;
+ }
+ timeout = poll_deadline_to_millis_timeout(deadline, now);
+ pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
+ pfd[0].events = POLLIN;
+ pfd[0].revents = 0;
+ pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfd[1].events = POLLIN;
+ pfd[1].revents = 0;
+ nfds = 2;
+ if (fd) {
+ pfd[2].fd = fd->fd;
+ pfd[2].revents = 0;
+ GRPC_FD_REF(fd, "basicpoll_begin");
+ gpr_mu_unlock(&pollset->mu);
+ pfd[2].events =
+ (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT, &fd_watcher);
+ if (pfd[2].events != 0) {
+ nfds++;
+ }
+ } else {
+ gpr_mu_unlock(&pollset->mu);
+ }
+
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
+ /* poll fd count (argument 2) is shortened by one if we have no events
+ to poll on - such that it only includes the kicker */
+ GPR_TIMER_BEGIN("poll", 0);
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ r = grpc_poll_function(pfd, nfds, timeout);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GPR_TIMER_END("poll", 0);
+
+ if (r < 0) {
+ if (errno != EINTR) {
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
+ }
+ if (fd) {
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
+ }
+ } else if (r == 0) {
+ if (fd) {
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
+ }
+ } else {
+ if (pfd[0].revents & POLLIN_CHECK) {
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
+ }
+ if (pfd[1].revents & POLLIN_CHECK) {
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
+ }
+ if (nfds > 2) {
+ fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
+ pfd[2].revents & POLLOUT_CHECK, pollset);
+ } else if (fd) {
+ fd_end_poll(exec_ctx, &fd_watcher, 0, 0, NULL);
+ }
+ }
+
+ if (fd) {
+ GRPC_FD_UNREF(fd, "basicpoll_begin");
+ }
+
+ return error;
+}
+
+static void basic_pollset_destroy(grpc_pollset *pollset) {
+ if (pollset->data.ptr != NULL) {
+ GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
+ pollset->data.ptr = NULL;
+ }
+}
+
+static const grpc_pollset_vtable basic_pollset = {
+ basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
+ basic_pollset_destroy, basic_pollset_destroy};
+
+static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
+ pollset->vtable = &basic_pollset;
+ pollset->data.ptr = fd_or_null;
+ if (fd_or_null != NULL) {
+ GRPC_FD_REF(fd_or_null, "basicpoll");
+ }
+}
+
+/*******************************************************************************
+ * pollset_multipoller_with_poll_posix.c
+ */
+
+#ifndef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+
+typedef struct {
+ /* all polled fds */
+ size_t fd_count;
+ size_t fd_capacity;
+ grpc_fd **fds;
+ /* fds that have been removed from the pollset explicitly */
+ size_t del_count;
+ size_t del_capacity;
+ grpc_fd **dels;
+} poll_hdr;
+
+static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_fd *fd,
+ int and_unlock_pollset) {
+ size_t i;
+ poll_hdr *h = pollset->data.ptr;
+ /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
+ for (i = 0; i < h->fd_count; i++) {
+ if (h->fds[i] == fd) goto exit;
+ }
+ if (h->fd_count == h->fd_capacity) {
+ h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2);
+ h->fds = gpr_realloc(h->fds, sizeof(grpc_fd *) * h->fd_capacity);
+ }
+ h->fds[h->fd_count++] = fd;
+ GRPC_FD_REF(fd, "multipoller");
+exit:
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
+ }
+}
+
+static grpc_error *multipoll_with_poll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
+#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
+#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
+
+ int timeout;
+ int r;
+ size_t i, j, fd_count;
+ nfds_t pfd_count;
+ poll_hdr *h;
+ /* TODO(ctiller): inline some elements to avoid an allocation */
+ grpc_fd_watcher *watchers;
+ struct pollfd *pfds;
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ h = pollset->data.ptr;
+ timeout = poll_deadline_to_millis_timeout(deadline, now);
+ /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
+ pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
+ watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
+ fd_count = 0;
+ pfd_count = 2;
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
+ pfds[0].events = POLLIN;
+ pfds[0].revents = 0;
+ pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfds[1].events = POLLIN;
+ pfds[1].revents = 0;
+ for (i = 0; i < h->fd_count; i++) {
+ int remove = fd_is_orphaned(h->fds[i]);
+ for (j = 0; !remove && j < h->del_count; j++) {
+ if (h->fds[i] == h->dels[j]) remove = 1;
+ }
+ if (remove) {
+ GRPC_FD_UNREF(h->fds[i], "multipoller");
+ } else {
+ h->fds[fd_count++] = h->fds[i];
+ watchers[pfd_count].fd = h->fds[i];
+ GRPC_FD_REF(watchers[pfd_count].fd, "multipoller_start");
+ pfds[pfd_count].fd = h->fds[i]->fd;
+ pfds[pfd_count].revents = 0;
+ pfd_count++;
+ }
+ }
+ for (j = 0; j < h->del_count; j++) {
+ GRPC_FD_UNREF(h->dels[j], "multipoller_del");
+ }
+ h->del_count = 0;
+ h->fd_count = fd_count;
+ gpr_mu_unlock(&pollset->mu);
+
+ for (i = 2; i < pfd_count; i++) {
+ grpc_fd *fd = watchers[i].fd;
+ pfds[i].events = (short)fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT,
+ &watchers[i]);
+ GRPC_FD_UNREF(fd, "multipoller_start");
+ }
+
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ r = grpc_poll_function(pfds, pfd_count, timeout);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+
+ if (r < 0) {
+ if (errno != EINTR) {
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
+ }
+ for (i = 2; i < pfd_count; i++) {
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ }
+ } else if (r == 0) {
+ for (i = 2; i < pfd_count; i++) {
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ }
+ } else {
+ if (pfds[0].revents & POLLIN_CHECK) {
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd));
+ }
+ if (pfds[1].revents & POLLIN_CHECK) {
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
+ }
+ for (i = 2; i < pfd_count; i++) {
+ if (watchers[i].fd == NULL) {
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ continue;
+ }
+ fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
+ pfds[i].revents & POLLOUT_CHECK, pollset);
+ }
+ }
+
+ gpr_free(pfds);
+ gpr_free(watchers);
+
+ return error;
+}
+
+static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
+ size_t i;
+ poll_hdr *h = pollset->data.ptr;
+ for (i = 0; i < h->fd_count; i++) {
+ GRPC_FD_UNREF(h->fds[i], "multipoller");
+ }
+ for (i = 0; i < h->del_count; i++) {
+ GRPC_FD_UNREF(h->dels[i], "multipoller_del");
+ }
+ h->fd_count = 0;
+ h->del_count = 0;
+}
+
+static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
+ poll_hdr *h = pollset->data.ptr;
+ multipoll_with_poll_pollset_finish_shutdown(pollset);
+ gpr_free(h->fds);
+ gpr_free(h->dels);
+ gpr_free(h);
+}
+
+static const grpc_pollset_vtable multipoll_with_poll_pollset = {
+ multipoll_with_poll_pollset_add_fd,
+ multipoll_with_poll_pollset_maybe_work_and_unlock,
+ multipoll_with_poll_pollset_finish_shutdown,
+ multipoll_with_poll_pollset_destroy};
+
+static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
+ size_t nfds) {
+ size_t i;
+ poll_hdr *h = gpr_malloc(sizeof(poll_hdr));
+ pollset->vtable = &multipoll_with_poll_pollset;
+ pollset->data.ptr = h;
+ h->fd_count = nfds;
+ h->fd_capacity = nfds;
+ h->fds = gpr_malloc(nfds * sizeof(grpc_fd *));
+ h->del_count = 0;
+ h->del_capacity = 0;
+ h->dels = NULL;
+ for (i = 0; i < nfds; i++) {
+ h->fds[i] = fds[i];
+ GRPC_FD_REF(fds[i], "multipoller");
+ }
+}
+
+#endif /* !GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+
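Editor's sketch, illustrative only and not part of this patch: both pollset variants above follow the same poll(2) shape -- wakeup fds in the leading slots, watched fds after them, and readiness checks that also count POLLHUP/POLLERR so errors wake readers and writers. All names below are hypothetical, and the wakeup fd is assumed to be non-blocking.

/* Minimal sketch of the wakeup-fd + watched-fd poll(2) pattern (assumptions
   as noted in the text above). */
#include <poll.h>
#include <unistd.h>

static int wait_for_events(int wakeup_read_fd, const int *fds, int nfds_in,
                           int timeout_ms) {
  struct pollfd pfds[17];
  int i, r;
  pfds[0].fd = wakeup_read_fd; /* the "kicker": another thread writes to wake us */
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  for (i = 0; i < nfds_in && i < 16; i++) {
    pfds[1 + i].fd = fds[i];
    pfds[1 + i].events = POLLIN | POLLOUT;
    pfds[1 + i].revents = 0;
  }
  r = poll(pfds, (nfds_t)(1 + i), timeout_ms);
  if (r <= 0) return r; /* <0: error (EINTR is benign); 0: timed out */
  if (pfds[0].revents & (POLLIN | POLLHUP | POLLERR)) {
    char buf[64];
    (void)read(wakeup_read_fd, buf, sizeof(buf)); /* consume the kick */
  }
  return r;
}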
+/*******************************************************************************
+ * pollset_multipoller_with_epoll_posix.c
+ */
+
+#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+
+#include <errno.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/block_annotate.h"
+
+static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st,
+ grpc_pollset *read_notifier_pollset) {
+ /* only one set_ready can be active at once (but there may be a racing
+ notify_on) */
+ gpr_mu_lock(&fd->mu);
+ set_ready_locked(exec_ctx, fd, st);
+
+ /* A non-NULL read_notifier_pollset means that the fd is readable. */
+ if (read_notifier_pollset != NULL) {
+ /* Note: Since the fd might be a part of multiple pollsets, this might be
+ * called multiple times (for each time the fd becomes readable) and it is
+ * okay to set the fd's read-notifier pollset to any one of these pollsets */
+ set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+ }
+
+ gpr_mu_unlock(&fd->mu);
+}
+
+static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_pollset *notifier_pollset) {
+ set_ready(exec_ctx, fd, &fd->read_closure, notifier_pollset);
+}
+
+static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ set_ready(exec_ctx, fd, &fd->write_closure, NULL);
+}
+
+struct epoll_fd_list {
+ int *epoll_fds;
+ size_t count;
+ size_t capacity;
+};
+
+static struct epoll_fd_list epoll_fd_global_list;
+static gpr_once init_epoll_fd_list_mu = GPR_ONCE_INIT;
+static gpr_mu epoll_fd_list_mu;
+
+static void init_mu(void) { gpr_mu_init(&epoll_fd_list_mu); }
+
+static void add_epoll_fd_to_global_list(int epoll_fd) {
+ gpr_once_init(&init_epoll_fd_list_mu, init_mu);
+
+ gpr_mu_lock(&epoll_fd_list_mu);
+ if (epoll_fd_global_list.count == epoll_fd_global_list.capacity) {
+ epoll_fd_global_list.capacity =
+ GPR_MAX((size_t)8, epoll_fd_global_list.capacity * 2);
+ epoll_fd_global_list.epoll_fds =
+ gpr_realloc(epoll_fd_global_list.epoll_fds,
+ epoll_fd_global_list.capacity * sizeof(int));
+ }
+ epoll_fd_global_list.epoll_fds[epoll_fd_global_list.count++] = epoll_fd;
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
+
+static void remove_epoll_fd_from_global_list(int epoll_fd) {
+ gpr_mu_lock(&epoll_fd_list_mu);
+ GPR_ASSERT(epoll_fd_global_list.count > 0);
+ for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
+ if (epoll_fd == epoll_fd_global_list.epoll_fds[i]) {
+ epoll_fd_global_list.epoll_fds[i] =
+ epoll_fd_global_list.epoll_fds[--(epoll_fd_global_list.count)];
+ break;
+ }
+ }
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
+
+static void remove_fd_from_all_epoll_sets(int fd) {
+ int err;
+ gpr_once_init(&init_epoll_fd_list_mu, init_mu);
+ gpr_mu_lock(&epoll_fd_list_mu);
+ if (epoll_fd_global_list.count == 0) {
+ gpr_mu_unlock(&epoll_fd_list_mu);
+ return;
+ }
+ for (size_t i = 0; i < epoll_fd_global_list.count; i++) {
+ err = epoll_ctl(epoll_fd_global_list.epoll_fds[i], EPOLL_CTL_DEL, fd, NULL);
+ if (err < 0 && errno != ENOENT) {
+ gpr_log(GPR_ERROR, "epoll_ctl del for %d failed: %s", fd,
+ strerror(errno));
+ }
+ }
+ gpr_mu_unlock(&epoll_fd_list_mu);
+}
+
+typedef struct {
+ grpc_pollset *pollset;
+ grpc_fd *fd;
+ grpc_closure closure;
+} delayed_add;
+
+typedef struct { int epoll_fd; } epoll_hdr;
+
+static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
+ epoll_hdr *h = pollset->data.ptr;
+ struct epoll_event ev;
+ int err;
+ grpc_fd_watcher watcher;
+
+ /* We pretend to be polling whilst adding an fd to keep the fd from being
+ closed during the add. This may result in a spurious wakeup being assigned
+ to this pollset whilst adding, but that should be benign. */
+ GPR_ASSERT(fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0);
+ if (watcher.fd != NULL) {
+ ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.data.ptr = fd;
+ err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
+ if (err < 0) {
+ /* FDs may be added to a pollset multiple times, so EEXIST is normal. */
+ if (errno != EEXIST) {
+ gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s", fd->fd,
+ strerror(errno));
+ }
+ }
+ }
+ fd_end_poll(exec_ctx, &watcher, 0, 0, NULL);
+}
+
+static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ delayed_add *da = arg;
+
+ if (!fd_is_orphaned(da->fd)) {
+ finally_add_fd(exec_ctx, da->pollset, da->fd);
+ }
+
+ gpr_mu_lock(&da->pollset->mu);
+ da->pollset->in_flight_cbs--;
+ if (da->pollset->shutting_down) {
+ /* We don't care about this pollset anymore. */
+ if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
+ da->pollset->called_shutdown = 1;
+ grpc_exec_ctx_sched(exec_ctx, da->pollset->shutdown_done, GRPC_ERROR_NONE,
+ NULL);
+ }
+ }
+ gpr_mu_unlock(&da->pollset->mu);
+
+ GRPC_FD_UNREF(da->fd, "delayed_add");
+
+ gpr_free(da);
+}
+
+static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ grpc_fd *fd,
+ int and_unlock_pollset) {
+ if (and_unlock_pollset) {
+ gpr_mu_unlock(&pollset->mu);
+ finally_add_fd(exec_ctx, pollset, fd);
+ } else {
+ delayed_add *da = gpr_malloc(sizeof(*da));
+ da->pollset = pollset;
+ da->fd = fd;
+ GRPC_FD_REF(fd, "delayed_add");
+ grpc_closure_init(&da->closure, perform_delayed_add, da);
+ pollset->in_flight_cbs++;
+ grpc_exec_ctx_sched(exec_ctx, &da->closure, GRPC_ERROR_NONE, NULL);
+ }
+}
+
+/* TODO(klempner): We probably want to turn this down a bit */
+#define GRPC_EPOLL_MAX_EVENTS 1000
+
+static grpc_error *multipoll_with_epoll_pollset_maybe_work_and_unlock(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
+ gpr_timespec deadline, gpr_timespec now) {
+ struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
+ int ep_rv;
+ int poll_rv;
+ epoll_hdr *h = pollset->data.ptr;
+ int timeout_ms;
+ struct pollfd pfds[2];
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ /* If you want to ignore epoll's ability to sanely handle parallel pollers,
+ * for a more apples-to-apples performance comparison with poll, add a
+ * if (pollset->counter != 0) { return 0; }
+ * here.
+ */
+
+ gpr_mu_unlock(&pollset->mu);
+
+ timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
+
+ pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
+ pfds[0].events = POLLIN;
+ pfds[0].revents = 0;
+ pfds[1].fd = h->epoll_fd;
+ pfds[1].events = POLLIN;
+ pfds[1].revents = 0;
+
+ /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
+ even going into the blocking annotation if possible */
+ GPR_TIMER_BEGIN("poll", 0);
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
+ GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GPR_TIMER_END("poll", 0);
+
+ if (poll_rv < 0) {
+ if (errno != EINTR) {
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
+ }
+ } else if (poll_rv == 0) {
+ /* do nothing */
+ } else {
+ if (pfds[0].revents) {
+ work_combine_error(&error,
+ grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd));
+ }
+ if (pfds[1].revents) {
+ do {
+ /* The following epoll_wait never blocks; it has a timeout of 0 */
+ ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
+ if (ep_rv < 0) {
+ if (errno != EINTR) {
+ work_combine_error(&error, GRPC_OS_ERROR(errno, "epoll_wait"));
+ }
+ } else {
+ int i;
+ for (i = 0; i < ep_rv; ++i) {
+ grpc_fd *fd = ep_ev[i].data.ptr;
+ /* TODO(klempner): We might want to consider making err and pri
+ * separate events */
+ int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
+ int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
+ int write_ev = ep_ev[i].events & EPOLLOUT;
+ if (fd == NULL) {
+ work_combine_error(&error, grpc_wakeup_fd_consume_wakeup(
+ &grpc_global_wakeup_fd));
+ } else {
+ if (read_ev || cancel) {
+ fd_become_readable(exec_ctx, fd, pollset);
+ }
+ if (write_ev || cancel) {
+ fd_become_writable(exec_ctx, fd);
+ }
+ }
+ }
+ }
+ } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
+ }
+ }
+ return error;
+}
+
+static void multipoll_with_epoll_pollset_finish_shutdown(
+ grpc_pollset *pollset) {}
+
+static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
+ epoll_hdr *h = pollset->data.ptr;
+ close(h->epoll_fd);
+ remove_epoll_fd_from_global_list(h->epoll_fd);
+ gpr_free(h);
+}
+
+static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
+ multipoll_with_epoll_pollset_add_fd,
+ multipoll_with_epoll_pollset_maybe_work_and_unlock,
+ multipoll_with_epoll_pollset_finish_shutdown,
+ multipoll_with_epoll_pollset_destroy};
+
+static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd **fds,
+ size_t nfds) {
+ size_t i;
+ epoll_hdr *h = gpr_malloc(sizeof(epoll_hdr));
+ struct epoll_event ev;
+ int err;
+
+ pollset->vtable = &multipoll_with_epoll_pollset;
+ pollset->data.ptr = h;
+ h->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+ if (h->epoll_fd < 0) {
+ /* TODO(klempner): Fall back to poll here, especially on ENOSYS */
+ gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno));
+ abort();
+ }
+ add_epoll_fd_to_global_list(h->epoll_fd);
+
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = NULL;
+ err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD,
+ GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), &ev);
+ if (err < 0) {
+ gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s",
+ GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd),
+ strerror(errno));
+ }
+
+ for (i = 0; i < nfds; i++) {
+ multipoll_with_epoll_pollset_add_fd(exec_ctx, pollset, fds[i], 0);
+ }
+}
+
+#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+
+static void remove_fd_from_all_epoll_sets(int fd) {}
+
+#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+
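Editor's sketch (not gRPC code): the edge-triggered drain loop the epoll multipoller above relies on. Because fds are registered with EPOLLET, an event is reported once per edge, so epoll_wait runs with a zero timeout and repeats while it fills the whole array, ensuring nothing queued by the kernel is left unprocessed. The names below are illustrative.

/* Edge-triggered epoll drain loop, as a standalone illustration. */
#include <sys/epoll.h>

#define SKETCH_MAX_EVENTS 64

static void drain_epoll(int epoll_fd) {
  struct epoll_event evs[SKETCH_MAX_EVENTS];
  int n;
  do {
    n = epoll_wait(epoll_fd, evs, SKETCH_MAX_EVENTS, 0); /* never blocks */
    for (int i = 0; i < n; i++) {
      /* evs[i].data.ptr is whatever was registered via EPOLL_CTL_ADD;
         evs[i].events carries the EPOLLIN/EPOLLOUT/EPOLLERR/EPOLLHUP bits. */
      (void)evs[i];
    }
  } while (n == SKETCH_MAX_EVENTS); /* a full array may mean more is pending */
}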
+/*******************************************************************************
+ * pollset_set_posix.c
+ */
+
+static grpc_pollset_set *pollset_set_create(void) {
+ grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
+ memset(pollset_set, 0, sizeof(*pollset_set));
+ gpr_mu_init(&pollset_set->mu);
+ return pollset_set;
+}
+
+static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
+ size_t i;
+ gpr_mu_destroy(&pollset_set->mu);
+ for (i = 0; i < pollset_set->fd_count; i++) {
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+ }
+ gpr_free(pollset_set->pollsets);
+ gpr_free(pollset_set->pollset_sets);
+ gpr_free(pollset_set->fds);
+ gpr_free(pollset_set);
+}
+
+static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
+ grpc_pollset *pollset) {
+ size_t i, j;
+ gpr_mu_lock(&pollset_set->mu);
+ if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
+ pollset_set->pollset_capacity =
+ GPR_MAX(8, 2 * pollset_set->pollset_capacity);
+ pollset_set->pollsets =
+ gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
+ sizeof(*pollset_set->pollsets));
+ }
+ pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
+ for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
+ if (fd_is_orphaned(pollset_set->fds[i])) {
+ GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
+ } else {
+ pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
+ pollset_set->fds[j++] = pollset_set->fds[i];
+ }
+ }
+ pollset_set->fd_count = j;
+ gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set,
+ grpc_pollset *pollset) {
+ size_t i;
+ gpr_mu_lock(&pollset_set->mu);
+ for (i = 0; i < pollset_set->pollset_count; i++) {
+ if (pollset_set->pollsets[i] == pollset) {
+ pollset_set->pollset_count--;
+ GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
+ pollset_set->pollsets[pollset_set->pollset_count]);
+ break;
+ }
+ }
+ gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i, j;
+ gpr_mu_lock(&bag->mu);
+ if (bag->pollset_set_count == bag->pollset_set_capacity) {
+ bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
+ bag->pollset_sets =
+ gpr_realloc(bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ }
+ bag->pollset_sets[bag->pollset_set_count++] = item;
+ for (i = 0, j = 0; i < bag->fd_count; i++) {
+ if (fd_is_orphaned(bag->fds[i])) {
+ GRPC_FD_UNREF(bag->fds[i], "pollset_set");
+ } else {
+ pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
+ bag->fds[j++] = bag->fds[i];
+ }
+ }
+ bag->fd_count = j;
+ gpr_mu_unlock(&bag->mu);
+}
+
+static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {
+ size_t i;
+ gpr_mu_lock(&bag->mu);
+ for (i = 0; i < bag->pollset_set_count; i++) {
+ if (bag->pollset_sets[i] == item) {
+ bag->pollset_set_count--;
+ GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
+ bag->pollset_sets[bag->pollset_set_count]);
+ break;
+ }
+ }
+ gpr_mu_unlock(&bag->mu);
+}
+
+static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
+ size_t i;
+ gpr_mu_lock(&pollset_set->mu);
+ if (pollset_set->fd_count == pollset_set->fd_capacity) {
+ pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
+ pollset_set->fds = gpr_realloc(
+ pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
+ }
+ GRPC_FD_REF(fd, "pollset_set");
+ pollset_set->fds[pollset_set->fd_count++] = fd;
+ for (i = 0; i < pollset_set->pollset_count; i++) {
+ pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
+ }
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+ }
+ gpr_mu_unlock(&pollset_set->mu);
+}
+
+static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pollset_set, grpc_fd *fd) {
+ size_t i;
+ gpr_mu_lock(&pollset_set->mu);
+ for (i = 0; i < pollset_set->fd_count; i++) {
+ if (pollset_set->fds[i] == fd) {
+ pollset_set->fd_count--;
+ GPR_SWAP(grpc_fd *, pollset_set->fds[i],
+ pollset_set->fds[pollset_set->fd_count]);
+ GRPC_FD_UNREF(fd, "pollset_set");
+ break;
+ }
+ }
+ for (i = 0; i < pollset_set->pollset_set_count; i++) {
+ pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+ }
+ gpr_mu_unlock(&pollset_set->mu);
+}
+
+/*******************************************************************************
+ * event engine binding
+ */
+
+static void shutdown_engine(void) {
+ fd_global_shutdown();
+ pollset_global_shutdown();
+}
+
+static const grpc_event_engine_vtable vtable = {
+ .pollset_size = sizeof(grpc_pollset),
+
+ .fd_create = fd_create,
+ .fd_wrapped_fd = fd_wrapped_fd,
+ .fd_orphan = fd_orphan,
+ .fd_shutdown = fd_shutdown,
+ .fd_is_shutdown = fd_is_shutdown,
+ .fd_notify_on_read = fd_notify_on_read,
+ .fd_notify_on_write = fd_notify_on_write,
+ .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
+
+ .pollset_init = pollset_init,
+ .pollset_shutdown = pollset_shutdown,
+ .pollset_reset = pollset_reset,
+ .pollset_destroy = pollset_destroy,
+ .pollset_work = pollset_work,
+ .pollset_kick = pollset_kick,
+ .pollset_add_fd = pollset_add_fd,
+
+ .pollset_set_create = pollset_set_create,
+ .pollset_set_destroy = pollset_set_destroy,
+ .pollset_set_add_pollset = pollset_set_add_pollset,
+ .pollset_set_del_pollset = pollset_set_del_pollset,
+ .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
+ .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
+ .pollset_set_add_fd = pollset_set_add_fd,
+ .pollset_set_del_fd = pollset_set_del_fd,
+
+ .kick_poller = kick_poller,
+
+ .shutdown_engine = shutdown_engine,
+};
+
+const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void) {
+#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+ platform_become_multipoller = epoll_become_multipoller;
+#else
+ platform_become_multipoller = poll_become_multipoller;
+#endif
+ fd_global_init();
+ pollset_global_init();
+ return &vtable;
+}
+
+#endif
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.h b/src/core/lib/iomgr/ev_poll_and_epoll_posix.h
new file mode 100644
index 0000000000..06d6dbf29d
--- /dev/null
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.h
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H
+
+#include "src/core/lib/iomgr/ev_posix.h"
+
+const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void);
+
+#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_AND_EPOLL_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 52efb0d6b2..45c0a5e954 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -59,8 +59,6 @@
* FD declarations
*/
-grpc_wakeup_fd grpc_global_wakeup_fd;
-
typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
@@ -373,7 +371,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
if (!fd->released) {
close(fd->fd);
}
- grpc_exec_ctx_push(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
}
static int fd_wrapped_fd(grpc_fd *fd) {
@@ -432,14 +430,17 @@ static grpc_error *fd_shutdown_error(bool shutdown) {
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
- if (*st == CLOSURE_NOT_READY) {
+ if (fd->shutdown) {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
+ NULL);
+ } else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
- grpc_exec_ctx_push(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
+ NULL);
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
@@ -462,7 +463,7 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
return 0;
} else {
/* waiting ==> queue closure */
- grpc_exec_ctx_push(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+ grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
*st = CLOSURE_NOT_READY;
return 1;
}
@@ -475,11 +476,22 @@ static void set_read_notifier_pollset_locked(
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
- GPR_ASSERT(!fd->shutdown);
- fd->shutdown = 1;
- set_ready_locked(exec_ctx, fd, &fd->read_closure);
- set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ /* only shutdown once */
+ if (!fd->shutdown) {
+ fd->shutdown = 1;
+ /* signal read/write closed to OS so that future operations fail */
+ shutdown(fd->fd, SHUT_RDWR);
+ set_ready_locked(exec_ctx, fd, &fd->read_closure);
+ set_ready_locked(exec_ctx, fd, &fd->write_closure);
+ }
+ gpr_mu_unlock(&fd->mu);
+}
+
+static bool fd_is_shutdown(grpc_fd *fd) {
+ gpr_mu_lock(&fd->mu);
+ bool r = fd->shutdown;
gpr_mu_unlock(&fd->mu);
+ return r;
}
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
@@ -717,6 +729,7 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
}
GPR_TIMER_END("pollset_kick_ext", 0);
+ GRPC_LOG_IF_ERROR("pollset_kick_ext", GRPC_ERROR_REF(error));
return error;
}
@@ -811,7 +824,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_FD_UNREF(pollset->fds[i], "multipoller");
}
pollset->fd_count = 0;
- grpc_exec_ctx_push(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
}
static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -845,6 +858,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
+ GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
return error;
}
}
@@ -1024,6 +1038,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
*worker_hdl = NULL;
GPR_TIMER_END("pollset_work", 0);
+ GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
return error;
}
@@ -1215,6 +1230,7 @@ static const grpc_event_engine_vtable vtable = {
.fd_wrapped_fd = fd_wrapped_fd,
.fd_orphan = fd_orphan,
.fd_shutdown = fd_shutdown,
+ .fd_is_shutdown = fd_is_shutdown,
.fd_notify_on_read = fd_notify_on_read,
.fd_notify_on_write = fd_notify_on_write,
.fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
@@ -1242,7 +1258,9 @@ static const grpc_event_engine_vtable vtable = {
};
const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
- pollset_global_init();
+ if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ return NULL;
+ }
return &vtable;
}
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index ad8eaab454..4c360f13a3 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -44,6 +44,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/support/env.h"
@@ -61,7 +62,7 @@ typedef struct {
} event_engine_factory;
static const event_engine_factory g_factories[] = {
- {"poll", grpc_init_poll_posix},
+ {"poll", grpc_init_poll_posix}, {"legacy", grpc_init_poll_and_epoll_posix},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
@@ -152,6 +153,10 @@ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
g_event_engine->fd_shutdown(exec_ctx, fd);
}
+bool grpc_fd_is_shutdown(grpc_fd *fd) {
+ return g_event_engine->fd_is_shutdown(fd);
+}
+
void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
g_event_engine->fd_notify_on_read(exec_ctx, fd, closure);
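The hunk above registers the restored poll+epoll engine under the name "legacy" next to "poll", and every grpc_fd_* entry point forwards through the selected engine's vtable. A hypothetical name-based lookup over such a factory table (the struct and field names below are assumed for illustration, not taken from this diff) would look roughly like:

/* Hypothetical engine selection over a {name, factory} table. */
#include <string.h>

#include "src/core/lib/iomgr/ev_posix.h"

typedef const grpc_event_engine_vtable *(*engine_factory_fn)(void);

typedef struct {
  const char *name;          /* assumed field name */
  engine_factory_fn factory; /* e.g. grpc_init_poll_and_epoll_posix */
} engine_entry;

static const grpc_event_engine_vtable *pick_engine(const engine_entry *table,
                                                    size_t n,
                                                    const char *want) {
  for (size_t i = 0; i < n; i++) {
    if (strcmp(table[i].name, want) == 0) {
      const grpc_event_engine_vtable *v = table[i].factory();
      if (v != NULL) return v; /* a factory may decline, e.g. on init failure */
    }
  }
  return NULL;
}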
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 3058f22c91..87e9bc4d46 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -55,6 +55,7 @@ typedef struct grpc_event_engine_vtable {
grpc_closure *closure);
void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure);
+ bool (*fd_is_shutdown)(grpc_fd *fd);
grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
grpc_fd *fd);
@@ -116,7 +117,10 @@ int grpc_fd_wrapped_fd(grpc_fd *fd);
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
int *release_fd, const char *reason);
-/* Cause any current callbacks to error out with GRPC_CALLBACK_CANCELLED. */
+/* Has grpc_fd_shutdown been called on an fd? */
+bool grpc_fd_is_shutdown(grpc_fd *fd);
+
+/* Cause any current and future callbacks to fail. */
void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Register read interest, causing read_cb to be called once when fd becomes
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index fcf39ffd54..c44aafcddf 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -82,9 +82,9 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
grpc_exec_ctx_flush(exec_ctx);
}
-void grpc_exec_ctx_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error,
- grpc_workqueue *offload_target_or_null) {
+void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error,
+ grpc_workqueue *offload_target_or_null) {
GPR_ASSERT(offload_target_or_null == NULL);
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 617f48f1b8..38f27d9b13 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -94,9 +94,9 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
* the instance is destroyed, or work may be lost. */
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
/** Add a closure to be executed at the next flush/finish point */
-void grpc_exec_ctx_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error,
- grpc_workqueue *offload_target_or_null);
+void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error,
+ grpc_workqueue *offload_target_or_null);
/** Returns true if we'd like to leave this execution context as soon as
possible: useful for deciding whether to do something more or not depending
on outside context */
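Most of the mechanical churn in this patch is the grpc_exec_ctx_push to grpc_exec_ctx_sched rename; the calling pattern is unchanged: initialize a closure once, schedule it with an error (or GRPC_ERROR_NONE), and it runs at the next flush/finish point. A minimal sketch, assuming the usual GRPC_EXEC_CTX_INIT initializer macro, which is not shown in this diff:

/* Sketch of scheduling a closure on an exec_ctx and flushing it. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_log(GPR_DEBUG, "on_done ran (error=%p)", (void *)error);
}

static void schedule_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; /* assumed initializer macro */
  grpc_closure done;
  grpc_closure_init(&done, on_done, NULL);
  grpc_exec_ctx_sched(&exec_ctx, &done, GRPC_ERROR_NONE, NULL);
  grpc_exec_ctx_finish(&exec_ctx); /* runs the queued closure before returning */
}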
diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c
index 1faa8eeedf..2532e52e48 100644
--- a/src/core/lib/iomgr/iocp_windows.c
+++ b/src/core/lib/iomgr/iocp_windows.c
@@ -39,7 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/thd.h>
#include "src/core/lib/iomgr/iocp_windows.h"
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index 60cef8ba77..89292a153e 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -96,7 +96,8 @@ void grpc_iomgr_shutdown(void) {
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
if (g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
+ gpr_log(GPR_DEBUG,
+ "Waiting for %" PRIuPTR " iomgr objects to be destroyed",
count_objects());
}
last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
@@ -114,9 +115,9 @@ void grpc_iomgr_shutdown(void) {
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
if (g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG,
- "Failed to free %d iomgr objects before shutdown deadline: "
- "memory leaks are likely",
+ gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
count_objects());
dump_objects("LEAKED");
if (grpc_iomgr_abort_on_leaks()) {
diff --git a/src/core/lib/iomgr/iomgr_windows.c b/src/core/lib/iomgr/iomgr_windows.c
index 398517fc75..7653f6e635 100644
--- a/src/core/lib/iomgr/iomgr_windows.c
+++ b/src/core/lib/iomgr/iomgr_windows.c
@@ -35,7 +35,7 @@
#ifdef GPR_WINSOCK_SOCKET
-#include "src/core/lib/iomgr/sockaddr_win32.h"
+#include "src/core/lib/iomgr/sockaddr_windows.h"
#include <grpc/support/log.h>
diff --git a/src/core/lib/iomgr/polling_entity.c b/src/core/lib/iomgr/polling_entity.c
new file mode 100644
index 0000000000..d1686aa12f
--- /dev/null
+++ b/src/core/lib/iomgr/polling_entity.c
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/polling_entity.h"
+
+grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
+ grpc_pollset_set *pollset_set) {
+ grpc_polling_entity pollent;
+ pollent.pollent.pollset_set = pollset_set;
+ pollent.tag = POPS_POLLSET_SET;
+ return pollent;
+}
+
+grpc_polling_entity grpc_polling_entity_create_from_pollset(
+ grpc_pollset *pollset) {
+ grpc_polling_entity pollent;
+ pollent.pollent.pollset = pollset;
+ pollent.tag = POPS_POLLSET;
+ return pollent;
+}
+
+grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
+ if (pollent->tag == POPS_POLLSET) {
+ return pollent->pollent.pollset;
+ }
+ return NULL;
+}
+
+grpc_pollset_set *grpc_polling_entity_pollset_set(
+ grpc_polling_entity *pollent) {
+ if (pollent->tag == POPS_POLLSET_SET) {
+ return pollent->pollent.pollset_set;
+ }
+ return NULL;
+}
+
+bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
+ return pollent->tag == POPS_NONE;
+}
+
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_polling_entity *pollent,
+ grpc_pollset_set *pss_dst) {
+ if (pollent->tag == POPS_POLLSET) {
+ GPR_ASSERT(pollent->pollent.pollset != NULL);
+ grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
+ } else if (pollent->tag == POPS_POLLSET_SET) {
+ GPR_ASSERT(pollent->pollent.pollset_set != NULL);
+ grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
+ pollent->pollent.pollset_set);
+ } else {
+ gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
+ abort();
+ }
+}
+
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_polling_entity *pollent,
+ grpc_pollset_set *pss_dst) {
+ if (pollent->tag == POPS_POLLSET) {
+ GPR_ASSERT(pollent->pollent.pollset != NULL);
+ grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
+ } else if (pollent->tag == POPS_POLLSET_SET) {
+ GPR_ASSERT(pollent->pollent.pollset_set != NULL);
+ grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
+ pollent->pollent.pollset_set);
+ } else {
+ gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
+ abort();
+ }
+}
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
new file mode 100644
index 0000000000..e81531053c
--- /dev/null
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H
+#define GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H
+
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_set.h"
+
+/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows
+ * functions that accept a pollset XOR a pollset_set to do so through an
+ * abstract interface. No ownership is taken. */
+
+typedef struct grpc_polling_entity {
+ union {
+ grpc_pollset *pollset;
+ grpc_pollset_set *pollset_set;
+ } pollent;
+ enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag;
+} grpc_polling_entity;
+
+grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
+ grpc_pollset_set *pollset_set);
+grpc_polling_entity grpc_polling_entity_create_from_pollset(
+ grpc_pollset *pollset);
+
+/** If \a pollent contains a pollset, return it. Otherwise, return NULL */
+grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent);
+
+/** If \a pollent contains a pollset_set, return it. Otherwise, return NULL */
+grpc_pollset_set *grpc_polling_entity_pollset_set(grpc_polling_entity *pollent);
+
+bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent);
+
+/** Add the pollset or pollset_set in \a pollent to the destination
+ * pollset_set \a pss_dst */
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_polling_entity *pollent,
+ grpc_pollset_set *pss_dst);
+
+/** Delete the pollset or pollset_set in \a pollent from the destination
+ * pollset_set \a pss_dst */
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_polling_entity *pollent,
+ grpc_pollset_set *pss_dst);
+/* pollset_set specific */
+
+#endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */
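A short usage sketch of the new abstraction, using only the functions declared above; the pollset, pollset_set, and exec_ctx are assumed to have been created elsewhere, and the function name is illustrative. The caller wraps whichever polling object it has, and downstream code adds it to a destination pollset_set without caring which variant it holds.

/* Wrapping a concrete pollset in a grpc_polling_entity and handing it on. */
static void attach_caller_poller(grpc_exec_ctx *exec_ctx,
                                 grpc_pollset *caller_pollset,
                                 grpc_pollset_set *channel_pss) {
  /* Wrap the pollset; the entity's tag becomes POPS_POLLSET. */
  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset(caller_pollset);

  /* Downstream code only needs the entity, not its concrete type. */
  if (!grpc_polling_entity_is_empty(&pollent)) {
    grpc_polling_entity_add_to_pollset_set(exec_ctx, &pollent, channel_pss);
  }

  /* Accessors return NULL for the variant that isn't held. */
  GPR_ASSERT(grpc_polling_entity_pollset(&pollent) == caller_pollset);
  GPR_ASSERT(grpc_polling_entity_pollset_set(&pollent) == NULL);
}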
diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c
index 89f60b92fb..a35a9766fc 100644
--- a/src/core/lib/iomgr/pollset_set_windows.c
+++ b/src/core/lib/iomgr/pollset_set_windows.c
@@ -32,12 +32,15 @@
*/
#include <grpc/support/port_platform.h>
+#include <stdint.h>
#ifdef GPR_WINSOCK_SOCKET
#include "src/core/lib/iomgr/pollset_set_windows.h"
-grpc_pollset_set* grpc_pollset_set_create(void) { return NULL; }
+grpc_pollset_set* grpc_pollset_set_create(void) {
+ return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+}
void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 5882d8d71d..626dd784b3 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -109,7 +109,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
- grpc_exec_ctx_push(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
} else {
pollset->on_shutdown = closure;
}
@@ -167,8 +167,8 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
- grpc_exec_ctx_push(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE,
+ NULL);
pollset->on_shutdown = NULL;
}
goto done;
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 9a320dc952..4e9f978584 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -163,7 +163,7 @@ typedef struct {
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
request *r = rp;
- grpc_exec_ctx_push(
+ grpc_exec_ctx_sched(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out),
NULL);
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 22a9feb72d..2af8af82dc 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -43,7 +43,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
@@ -154,7 +154,7 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
} else {
GRPC_ERROR_REF(error);
}
- grpc_exec_ctx_push(exec_ctx, r->on_done, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, r->on_done, error, NULL);
gpr_free(r->name);
gpr_free(r->default_port);
gpr_free(r);
diff --git a/src/core/lib/iomgr/sockaddr.h b/src/core/lib/iomgr/sockaddr.h
index 891a2f094f..5563d0b8a6 100644
--- a/src/core/lib/iomgr/sockaddr.h
+++ b/src/core/lib/iomgr/sockaddr.h
@@ -36,8 +36,8 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
-#include "src/core/lib/iomgr/sockaddr_win32.h"
+#ifdef GPR_WINDOWS
+#include "src/core/lib/iomgr/sockaddr_windows.h"
#endif
#ifdef GPR_POSIX_SOCKETADDR
diff --git a/src/core/lib/iomgr/sockaddr_win32.h b/src/core/lib/iomgr/sockaddr_windows.h
index 02aeae7619..971db5b32b 100644
--- a/src/core/lib/iomgr/sockaddr_win32.h
+++ b/src/core/lib/iomgr/sockaddr_windows.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H
-#define GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H
+#define GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H
#include <winsock2.h>
#include <ws2tcpip.h>
@@ -40,4 +40,4 @@
// must be included after the above
#include <mswsock.h>
-#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WIN32_H */
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H */
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index bd8ef00c85..d2f6261e2a 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -49,6 +49,7 @@
#include <sys/types.h>
#include <unistd.h>
+#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
@@ -116,6 +117,20 @@ grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
return GRPC_ERROR_NONE;
}
+grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) {
+ return 0 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffer_size_bytes,
+ sizeof(buffer_size_bytes))
+ ? GRPC_ERROR_NONE
+ : GRPC_OS_ERROR(errno, "setsockopt(SO_SNDBUF)");
+}
+
+grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) {
+ return 0 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buffer_size_bytes,
+ sizeof(buffer_size_bytes))
+ ? GRPC_ERROR_NONE
+ : GRPC_OS_ERROR(errno, "setsockopt(SO_RCVBUF)");
+}
+
/* set a socket to close on exec */
grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) {
int oldflags = fcntl(fd, F_GETFD, 0);
@@ -238,6 +253,16 @@ static int set_socket_dualstack(int fd) {
}
}
+static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) {
+ if (fd >= 0) return GRPC_ERROR_NONE;
+ char *addr_str;
+ grpc_sockaddr_to_string(&addr_str, addr, 0);
+ grpc_error *err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, addr_str);
+ gpr_free(addr_str);
+ return err;
+}
+
grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
int protocol,
grpc_dualstack_mode *dsmode,
@@ -258,7 +283,7 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
/* If this isn't an IPv4 address, then return whatever we've got. */
if (!grpc_sockaddr_is_v4mapped(addr, NULL)) {
*dsmode = GRPC_DSMODE_IPV6;
- return GRPC_ERROR_NONE;
+ return error_for_fd(*newfd, addr);
}
/* Fall back to AF_INET. */
if (*newfd >= 0) {
@@ -268,10 +293,7 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
}
*dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
*newfd = socket(family, type, protocol);
- if (*newfd == -1) {
- return GRPC_OS_ERROR(errno, "socket");
- }
- return GRPC_ERROR_NONE;
+ return error_for_fd(*newfd, addr);
}
#endif
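The new buffer-size setters above are thin wrappers over setsockopt that report failures as a grpc_error rather than a boolean. A hedged usage sketch: the 256 KiB sizes, the helper name, and the early-return error style are illustrative, not taken from this patch.

/* Tuning both socket buffers on an already-created fd (sizes are examples). */
static grpc_error *tune_socket_buffers(int fd) {
  grpc_error *err;
  err = grpc_set_socket_sndbuf(fd, 256 * 1024); /* SO_SNDBUF */
  if (err != GRPC_ERROR_NONE) return err;
  err = grpc_set_socket_rcvbuf(fd, 256 * 1024); /* SO_RCVBUF */
  return err; /* GRPC_ERROR_NONE on success */
}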
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 607fbc7a79..7bcc2219ae 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -69,20 +69,23 @@ grpc_error *grpc_set_socket_reuse_port(int fd, int reuse);
int grpc_ipv6_loopback_available(void);
/* Tries to set SO_NOSIGPIPE if available on this platform.
- Returns 1 on success, 0 on failure.
If SO_NO_SIGPIPE is not available, returns 1. */
grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd);
/* Tries to set IP_PKTINFO if available on this platform.
- Returns 1 on success, 0 on failure.
If IP_PKTINFO is not available, returns 1. */
grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd);
/* Tries to set IPV6_RECVPKTINFO if available on this platform.
- Returns 1 on success, 0 on failure.
If IPV6_RECVPKTINFO is not available, returns 1. */
grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd);
+/* Tries to set the socket's send buffer to the given size. */
+grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes);
+
+/* Tries to set the socket's receive buffer to the given size. */
+grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes);
+
/* An enum to keep track of IPv4/IPv6 socket modes.
Currently, this information is only used when a socket is first created, but
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 8919452751..d7d5f6f157 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -42,7 +42,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/iocp_windows.h"
@@ -123,7 +123,7 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
info->has_pending_iocp = 0;
- grpc_exec_ctx_push(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
} else {
info->closure = closure;
}
@@ -146,7 +146,7 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
if (info->closure) {
- grpc_exec_ctx_push(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL);
info->closure = NULL;
} else {
info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index 9c4ca1617f..80c7a3f128 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -221,7 +221,7 @@ finish:
gpr_free(ac->addr_str);
gpr_free(ac);
}
- grpc_exec_ctx_push(exec_ctx, closure, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
}
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -250,7 +250,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_push(exec_ctx, closure, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
return;
}
if (dsmode == GRPC_DSMODE_IPV4) {
@@ -260,7 +260,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
addr_len = sizeof(addr4_copy);
}
if ((error = prepare_socket(addr, fd)) != GRPC_ERROR_NONE) {
- grpc_exec_ctx_push(exec_ctx, closure, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
return;
}
@@ -276,14 +276,14 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (err >= 0) {
*ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
- grpc_exec_ctx_push(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
goto done;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
- grpc_exec_ctx_push(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"),
+ NULL);
goto done;
}
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index dbf116422f..562cb9c6bf 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -35,11 +35,11 @@
#ifdef GPR_WINSOCK_SOCKET
-#include "src/core/lib/iomgr/sockaddr_win32.h"
+#include "src/core/lib/iomgr/sockaddr_windows.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/useful.h>
@@ -121,7 +121,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect_unlock_and_cleanup(ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
- grpc_exec_ctx_push(exec_ctx, on_done, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
}
/* Tries to issue one async connection, then schedules both an IOCP
@@ -225,7 +225,7 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
- grpc_exec_ctx_push(exec_ctx, on_done, final_error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
}
#endif /* GPR_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 48a19bed84..017f52c367 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -160,7 +160,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_error *error) {
grpc_closure *cb = tcp->read_cb;
- if (grpc_tcp_trace) {
+ if (false && grpc_tcp_trace) {
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -175,7 +175,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
- grpc_exec_ctx_push(exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
#define MAX_READ_IOVEC 4
@@ -277,7 +277,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->finished_edge = false;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
} else {
- grpc_exec_ctx_push(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL);
}
}
@@ -394,7 +394,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_error *error = GRPC_ERROR_NONE;
- if (grpc_tcp_trace) {
+ if (false && grpc_tcp_trace) {
size_t i;
for (i = 0; i < buf->count; i++) {
@@ -410,7 +410,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
- grpc_exec_ctx_push(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, grpc_fd_is_shutdown(tcp->em_fd)
+ ? GRPC_ERROR_CREATE("EOF")
+ : GRPC_ERROR_NONE,
+ NULL);
return;
}
tcp->outgoing_buffer = buf;
@@ -422,7 +425,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
- grpc_exec_ctx_push(exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
GPR_TIMER_END("tcp_write", 0);
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index aac3beb8bd..a1a463550a 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -132,6 +132,9 @@ struct grpc_tcp_server {
grpc_pollset **pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
+
+ /* next pollset to assign a channel to */
+ size_t next_pollset_to_assign;
};
static gpr_once check_init = GPR_ONCE_INIT;
@@ -178,13 +181,14 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
s->head = NULL;
s->tail = NULL;
s->nports = 0;
+ s->next_pollset_to_assign = 0;
*server = s;
return GRPC_ERROR_NONE;
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_push(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
}
gpr_mu_destroy(&s->mu);
@@ -364,7 +368,9 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
goto error;
}
- read_notifier_pollset = grpc_fd_get_read_notifier_pollset(exec_ctx, sp->emfd);
+ read_notifier_pollset =
+ sp->server->pollsets[(sp->server->next_pollset_to_assign++) %
+ sp->server->pollset_count];
/* loop until accept4 returns EAGAIN, and then re-arm notification */
for (;;) {
@@ -588,9 +594,9 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
if (port == 0 && sp != NULL) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
}
- addr = (struct sockaddr *)&wild4;
- addr_len = sizeof(wild4);
}
+ addr = (struct sockaddr *)&wild4;
+ addr_len = sizeof(wild4);
}
errs[1] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 481111c9f9..2a51671ec7 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -41,7 +41,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -121,7 +121,7 @@ grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
- grpc_exec_ctx_push(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
}
/* Now that the accepts have been aborted, we can destroy the sockets.
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 88c9354f6e..b2af8030aa 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -37,11 +37,11 @@
#include <limits.h>
-#include "src/core/lib/iomgr/sockaddr_win32.h"
+#include "src/core/lib/iomgr/sockaddr_windows.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -183,7 +183,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
tcp->read_cb = NULL;
TCP_UNREF(tcp, "read");
- grpc_exec_ctx_push(exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -197,8 +197,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
- grpc_exec_ctx_push(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
return;
}
@@ -222,7 +222,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* Did we get data immediately ? Yay. */
if (info->wsa_error != WSAEWOULDBLOCK) {
info->bytes_transfered = bytes_read;
- grpc_exec_ctx_push(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
return;
}
@@ -235,8 +235,8 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
info->wsa_error = wsa_error;
- grpc_exec_ctx_push(exec_ctx, &tcp->on_read,
- GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
+ GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
return;
}
}
@@ -267,7 +267,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
}
TCP_UNREF(tcp, "write");
- grpc_exec_ctx_push(exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
/* Initiates a write. */
@@ -285,8 +285,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len;
if (tcp->shutting_down) {
- grpc_exec_ctx_push(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
return;
}
@@ -317,7 +317,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *error = status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend");
- grpc_exec_ctx_push(exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
return;
}
@@ -334,8 +334,8 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
TCP_UNREF(tcp, "write");
- grpc_exec_ctx_push(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
+ NULL);
return;
}
}
diff --git a/src/core/lib/iomgr/timer.c b/src/core/lib/iomgr/timer.c
index b46c6df287..9975fa1671 100644
--- a/src/core/lib/iomgr/timer.c
+++ b/src/core/lib/iomgr/timer.c
@@ -186,7 +186,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (!g_initialized) {
timer->triggered = 1;
- grpc_exec_ctx_push(
+ grpc_exec_ctx_sched(
exec_ctx, &timer->closure,
GRPC_ERROR_CREATE("Attempt to create timer before initialization"),
NULL);
@@ -195,7 +195,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (gpr_time_cmp(deadline, now) <= 0) {
timer->triggered = 1;
- grpc_exec_ctx_push(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
return;
}
@@ -247,7 +247,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
shard_type *shard = &g_shards[shard_idx(timer)];
gpr_mu_lock(&shard->mu);
if (!timer->triggered) {
- grpc_exec_ctx_push(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
timer->triggered = 1;
if (timer->heap_index == INVALID_HEAP_INDEX) {
list_remove(timer);
@@ -287,8 +287,8 @@ static int refill_queue(shard_type *shard, gpr_timespec now) {
return !grpc_timer_heap_is_empty(&shard->heap);
}
-/* This pops the next non-cancelled timer with deadline <= now from the queue,
- or returns NULL if there isn't one.
+/* This pops the next non-cancelled timer with deadline <= now from the
+ queue, or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
grpc_timer *timer;
@@ -313,7 +313,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
- grpc_exec_ctx_push(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL);
n++;
}
*new_min_deadline = compute_min_deadline(shard);
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 98ffccd59b..16150687d3 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -210,6 +210,8 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
size_t addr_len) {
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
+ /* Set send/receive socket buffers to 1 MB */
+ int buffer_size_bytes = 1024 * 1024;
if (fd < 0) {
goto error;
@@ -239,6 +241,18 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
+ if (!grpc_set_socket_sndbuf(fd, buffer_size_bytes)) {
+ gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes",
+ buffer_size_bytes);
+ goto error;
+ }
+
+ if (!grpc_set_socket_rcvbuf(fd, buffer_size_bytes)) {
+ gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes",
+ buffer_size_bytes);
+ goto error;
+ }
+
return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
error:
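[Editor's sketch] The udp_server.c hunk above raises the socket buffers to 1 MB through grpc_set_socket_sndbuf/grpc_set_socket_rcvbuf; those helpers are assumed to be thin wrappers over setsockopt. Below is a standalone POSIX sketch of the same idea, not the grpc implementation itself.

#include <stdio.h>
#include <sys/socket.h>

/* Bump a UDP socket's kernel send/receive buffers to the given size. */
static int set_socket_buffers(int fd, int buffer_size_bytes) {
  if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffer_size_bytes,
                 sizeof(buffer_size_bytes)) != 0) {
    perror("setsockopt(SO_SNDBUF)");
    return 0;
  }
  if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buffer_size_bytes,
                 sizeof(buffer_size_bytes)) != 0) {
    perror("setsockopt(SO_RCVBUF)");
    return 0;
  }
  return 1;
}

int main(void) {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) { perror("socket"); return 1; }
  return set_socket_buffers(fd, 1024 * 1024) ? 0 : 1;
}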
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index 6e8b1218be..5cc40eea50 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -43,7 +43,7 @@
#include "src/core/lib/iomgr/workqueue_posix.h"
#endif
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
#endif
@@ -78,7 +78,7 @@ void grpc_workqueue_add_to_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset);
/** Add a work item to a workqueue */
-void grpc_workqueue_push(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error);
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error);
#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
diff --git a/src/core/lib/iomgr/workqueue_posix.c b/src/core/lib/iomgr/workqueue_posix.c
index 297e8d3102..45e0f6063b 100644
--- a/src/core/lib/iomgr/workqueue_posix.c
+++ b/src/core/lib/iomgr/workqueue_posix.c
@@ -137,8 +137,8 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
}
-void grpc_workqueue_push(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
grpc_error *push_error = GRPC_ERROR_NONE;
gpr_mu_lock(&workqueue->mu);
if (grpc_closure_list_empty(workqueue->closure_list)) {
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index c3c0446a57..275f040b1c 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -33,8 +33,8 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue.h"
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c
index 18189a8fb8..07db8bfd75 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <string.h>
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/surface/api_trace.h"
#include <grpc/support/alloc.h>
@@ -49,7 +50,7 @@ typedef struct {
grpc_credentials_md_store *md_elems;
grpc_auth_metadata_context auth_md_context;
void *user_data;
- grpc_pollset *pollset;
+ grpc_polling_entity *pollent;
grpc_credentials_metadata_cb cb;
} grpc_composite_call_credentials_metadata_context;
@@ -93,7 +94,7 @@ static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
grpc_call_credentials_get_request_metadata(
- exec_ctx, inner_creds, ctx->pollset, ctx->auth_md_context,
+ exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context,
composite_call_metadata_cb, ctx);
return;
}
@@ -106,7 +107,7 @@ static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *user_data,
static void composite_call_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context auth_md_context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context auth_md_context,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
grpc_composite_call_credentials_metadata_context *ctx;
@@ -117,10 +118,10 @@ static void composite_call_get_request_metadata(
ctx->user_data = user_data;
ctx->cb = cb;
ctx->composite_creds = c;
- ctx->pollset = pollset;
+ ctx->pollent = pollent;
ctx->md_elems = grpc_credentials_md_store_create(c->inner.num_creds);
grpc_call_credentials_get_request_metadata(
- exec_ctx, c->inner.creds_array[ctx->creds_index++], pollset,
+ exec_ctx, c->inner.creds_array[ctx->creds_index++], ctx->pollent,
auth_md_context, composite_call_metadata_cb, ctx);
}
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index 43974a9623..0d8966f464 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -68,4 +68,5 @@ typedef struct {
grpc_call_credentials_array inner;
} grpc_composite_call_credentials;
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \
+ */
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.c
index ce31919f74..0eadaec191 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.c
@@ -113,7 +113,7 @@ void grpc_call_credentials_release(grpc_call_credentials *creds) {
void grpc_call_credentials_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data) {
if (creds == NULL || creds->vtable->get_request_metadata == NULL) {
if (cb != NULL) {
@@ -121,7 +121,7 @@ void grpc_call_credentials_get_request_metadata(
}
return;
}
- creds->vtable->get_request_metadata(exec_ctx, creds, pollset, context, cb,
+ creds->vtable->get_request_metadata(exec_ctx, creds, pollent, context, cb,
user_data);
}
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 675e02b58d..ce235e3a1d 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -41,6 +41,7 @@
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/transport/security_connector.h"
struct grpc_http_response;
@@ -164,7 +165,8 @@ typedef void (*grpc_credentials_metadata_cb)(grpc_exec_ctx *exec_ctx,
typedef struct {
void (*destruct)(grpc_call_credentials *c);
void (*get_request_metadata)(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *c, grpc_pollset *pollset,
+ grpc_call_credentials *c,
+ grpc_polling_entity *pollent,
grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data);
@@ -180,7 +182,7 @@ grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds);
void grpc_call_credentials_unref(grpc_call_credentials *creds);
void grpc_call_credentials_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data);
/* Metadata-only credentials with the specified key and value where
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index 1ff7bd14a5..ee6d964de1 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -106,7 +106,7 @@ static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
static void md_only_test_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.h b/src/core/lib/security/credentials/fake/fake_credentials.h
index adeba94bdb..9cf38084a3 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.h
+++ b/src/core/lib/security/credentials/fake/fake_credentials.h
@@ -53,4 +53,4 @@ typedef struct {
int is_async;
} grpc_md_only_test_credentials;
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/google_default/credentials_win32.c b/src/core/lib/security/credentials/google_default/credentials_windows.c
index cd8b48080a..208b8fd9ad 100644
--- a/src/core/lib/security/credentials/google_default/credentials_win32.c
+++ b/src/core/lib/security/credentials/google_default/credentials_windows.c
@@ -33,7 +33,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
@@ -58,4 +58,4 @@ char *grpc_get_well_known_google_credentials_file_path_impl(void) {
return result;
}
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index 29f818b8c8..312a3d4f90 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -42,6 +42,7 @@
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/load_file.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/credentials/jwt/jwt_credentials.h"
#include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h"
#include "src/core/lib/support/env.h"
@@ -63,7 +64,7 @@ static gpr_once g_once = GPR_ONCE_INIT;
static void init_default_credentials(void) { gpr_mu_init(&g_state_mu); }
typedef struct {
- grpc_pollset *pollset;
+ grpc_polling_entity pollent;
int is_done;
int success;
grpc_http_response response;
@@ -89,7 +90,9 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
}
gpr_mu_lock(g_polling_mu);
detector->is_done = 1;
- GRPC_LOG_IF_ERROR("Pollset kick", grpc_pollset_kick(detector->pollset, NULL));
+ GRPC_LOG_IF_ERROR(
+ "Pollset kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
gpr_mu_unlock(g_polling_mu);
}
@@ -108,8 +111,9 @@ static int is_stack_running_on_compute_engine(void) {
on compute engine. */
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
- detector.pollset = gpr_malloc(grpc_pollset_size());
- grpc_pollset_init(detector.pollset, &g_polling_mu);
+ grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
+ grpc_pollset_init(pollset, &g_polling_mu);
+ detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
detector.is_done = 0;
detector.success = 0;
@@ -121,7 +125,7 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
grpc_httpcli_get(
- &exec_ctx, &context, detector.pollset, &request,
+ &exec_ctx, &context, &detector.pollent, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector),
&detector.response);
@@ -135,8 +139,9 @@ static int is_stack_running_on_compute_engine(void) {
grpc_pollset_worker *worker = NULL;
if (!GRPC_LOG_IF_ERROR(
"pollset_work",
- grpc_pollset_work(&exec_ctx, detector.pollset, &worker,
- gpr_now(GPR_CLOCK_MONOTONIC),
+ grpc_pollset_work(&exec_ctx,
+ grpc_polling_entity_pollset(&detector.pollent),
+ &worker, gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC)))) {
detector.is_done = 1;
detector.success = 0;
@@ -145,12 +150,15 @@ static int is_stack_running_on_compute_engine(void) {
gpr_mu_unlock(g_polling_mu);
grpc_httpcli_context_destroy(&context);
- grpc_closure_init(&destroy_closure, destroy_pollset, detector.pollset);
- grpc_pollset_shutdown(&exec_ctx, detector.pollset, &destroy_closure);
+ grpc_closure_init(&destroy_closure, destroy_pollset,
+ grpc_polling_entity_pollset(&detector.pollent));
+ grpc_pollset_shutdown(&exec_ctx,
+ grpc_polling_entity_pollset(&detector.pollent),
+ &destroy_closure);
grpc_exec_ctx_finish(&exec_ctx);
g_polling_mu = NULL;
- gpr_free(detector.pollset);
+ gpr_free(grpc_polling_entity_pollset(&detector.pollent));
grpc_http_response_destroy(&detector.response);
return detector.success;
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index 0d14cbfdad..fac4377e2c 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -42,4 +42,5 @@
void grpc_flush_cached_google_default_credentials(void);
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
+ */
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.c b/src/core/lib/security/credentials/iam/iam_credentials.c
index 89defa7c60..64d5871844 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.c
+++ b/src/core/lib/security/credentials/iam/iam_credentials.c
@@ -49,7 +49,7 @@ static void iam_destruct(grpc_call_credentials *creds) {
static void iam_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data) {
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.h b/src/core/lib/security/credentials/iam/iam_credentials.h
index e989bfc28e..af54faa586 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.h
+++ b/src/core/lib/security/credentials/iam/iam_credentials.h
@@ -41,4 +41,4 @@ typedef struct {
grpc_credentials_md_store *iam_md;
} grpc_google_iam_credentials;
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_IAM_IAM_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_IAM_IAM_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.c b/src/core/lib/security/credentials/jwt/jwt_credentials.c
index 8755a96af4..973fb75eaa 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.c
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.c
@@ -64,7 +64,7 @@ static void jwt_destruct(grpc_call_credentials *creds) {
static void jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data) {
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.h b/src/core/lib/security/credentials/jwt/jwt_credentials.h
index dfa4a1ebd9..d572606179 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.h
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.h
@@ -59,4 +59,4 @@ grpc_call_credentials *
grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_auth_json_key key, gpr_timespec token_lifetime);
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index bdee6c85f7..73eb2e3258 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -37,6 +37,7 @@
#include <string.h>
#include "src/core/lib/http/httpcli.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/util/b64.h"
#include "src/core/lib/tsi/ssl_types.h"
@@ -320,9 +321,15 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
/* --- verifier_cb_ctx object. --- */
+typedef enum {
+ HTTP_RESPONSE_OPENID = 0,
+ HTTP_RESPONSE_KEYS,
+ HTTP_RESPONSE_COUNT /* must be last */
+} http_response_index;
+
typedef struct {
grpc_jwt_verifier *verifier;
- grpc_pollset *pollset;
+ grpc_polling_entity pollent;
jose_header *header;
grpc_jwt_claims *claims;
char *audience;
@@ -330,7 +337,7 @@ typedef struct {
gpr_slice signed_data;
void *user_data;
grpc_jwt_verification_done_cb user_cb;
- grpc_http_response responses[2];
+ grpc_http_response responses[HTTP_RESPONSE_COUNT];
} verifier_cb_ctx;
/* Takes ownership of the header, claims and signature. */
@@ -339,10 +346,11 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
grpc_jwt_claims *claims, const char *audience, gpr_slice signature,
const char *signed_jwt, size_t signed_jwt_len, void *user_data,
grpc_jwt_verification_done_cb cb) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
verifier_cb_ctx *ctx = gpr_malloc(sizeof(verifier_cb_ctx));
memset(ctx, 0, sizeof(verifier_cb_ctx));
ctx->verifier = verifier;
- ctx->pollset = pollset;
+ ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
ctx->header = header;
ctx->audience = gpr_strdup(audience);
ctx->claims = claims;
@@ -350,6 +358,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
ctx->signed_data = gpr_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
ctx->user_data = user_data;
ctx->user_cb = cb;
+ grpc_exec_ctx_finish(&exec_ctx);
return ctx;
}
@@ -359,7 +368,7 @@ void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) {
gpr_slice_unref(ctx->signature);
gpr_slice_unref(ctx->signed_data);
jose_header_destroy(ctx->header);
- for (size_t i = 0; i < GPR_ARRAY_SIZE(ctx->responses); i++) {
+ for (size_t i = 0; i < HTTP_RESPONSE_COUNT; i++) {
grpc_http_response_destroy(&ctx->responses[i]);
}
/* TODO: see what to do with claims... */
@@ -578,7 +587,7 @@ end:
static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
- grpc_json *json = json_from_http(&ctx->responses[1]);
+ grpc_json *json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]);
EVP_PKEY *verification_key = NULL;
grpc_jwt_verifier_status status = GRPC_JWT_VERIFIER_GENERIC_ERROR;
grpc_jwt_claims *claims = NULL;
@@ -620,7 +629,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
const grpc_json *cur;
verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
- const grpc_http_response *response = &ctx->responses[0];
+ const grpc_http_response *response = &ctx->responses[HTTP_RESPONSE_OPENID];
grpc_json *json = json_from_http(response);
grpc_httpcli_request req;
const char *jwks_uri;
@@ -649,9 +658,10 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
}
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
- grpc_closure_create(on_keys_retrieved, ctx), &ctx->responses[1]);
+ grpc_closure_create(on_keys_retrieved, ctx),
+ &ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_json_destroy(json);
gpr_free(req.host);
return;
@@ -699,7 +709,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
grpc_httpcli_request req;
memset(&req, 0, sizeof(grpc_httpcli_request));
req.handshaker = &grpc_httpcli_ssl;
- int rsp_idx;
+ http_response_index rsp_idx;
GPR_ASSERT(ctx != NULL && ctx->header != NULL && ctx->claims != NULL);
iss = ctx->claims->iss;
@@ -739,7 +749,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
}
http_cb = grpc_closure_create(on_keys_retrieved, ctx);
- rsp_idx = 1;
+ rsp_idx = HTTP_RESPONSE_KEYS;
} else {
req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
path_prefix = strchr(req.host, '/');
@@ -751,11 +761,11 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
GRPC_OPENID_CONFIG_URL_SUFFIX);
}
http_cb = grpc_closure_create(on_openid_config_retrieved, ctx);
- rsp_idx = 0;
+ rsp_idx = HTTP_RESPONSE_OPENID;
}
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, ctx->pollset, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
http_cb, &ctx->responses[rsp_idx]);
gpr_free(req.host);
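[Editor's sketch] The jwt_verifier.c change above replaces the magic indices 0/1 into `responses[]` with the `http_response_index` enum. A tiny standalone example of that pattern, using toy response strings rather than grpc_http_response:

#include <stdio.h>

/* An enum whose trailing _COUNT member both sizes the array and names
   the indices, so adding a new response kind cannot silently overflow. */
typedef enum {
  HTTP_RESPONSE_OPENID = 0,
  HTTP_RESPONSE_KEYS,
  HTTP_RESPONSE_COUNT /* must be last */
} http_response_index;

int main(void) {
  const char *responses[HTTP_RESPONSE_COUNT] = {0};
  responses[HTTP_RESPONSE_OPENID] = "openid discovery document";
  responses[HTTP_RESPONSE_KEYS] = "JWKS key set";
  for (int i = 0; i < HTTP_RESPONSE_COUNT; i++) {
    printf("%d: %s\n", i, responses[i]);
  }
  return 0;
}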
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index 671dea31e6..1102553dd3 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -226,6 +226,8 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
gpr_timespec token_lifetime;
grpc_credentials_status status;
+ GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error));
+
gpr_mu_lock(&c->mu);
status = grpc_oauth2_token_fetcher_credentials_parse_server_response(
&r->response, &c->access_token_md, &token_lifetime);
@@ -244,7 +246,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
static void oauth2_token_fetcher_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
@@ -270,7 +272,7 @@ static void oauth2_token_fetcher_get_request_metadata(
c->fetch_func(
exec_ctx,
grpc_credentials_metadata_request_create(creds, cb, user_data),
- &c->httpcli_context, pollset, on_oauth2_token_fetcher_http_response,
+ &c->httpcli_context, pollent, on_oauth2_token_fetcher_http_response,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), refresh_threshold));
}
}
@@ -295,7 +297,7 @@ static grpc_call_credentials_vtable compute_engine_vtable = {
static void compute_engine_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
- grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
+ grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
grpc_http_header header = {"Metadata-Flavor", "Google"};
grpc_httpcli_request request;
@@ -304,7 +306,7 @@ static void compute_engine_fetch_oauth2(
request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
- grpc_httpcli_get(exec_ctx, httpcli_context, pollset, &request, deadline,
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollent, &request, deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
}
@@ -337,7 +339,7 @@ static grpc_call_credentials_vtable refresh_token_vtable = {
static void refresh_token_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
- grpc_httpcli_context *httpcli_context, grpc_pollset *pollset,
+ grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
grpc_google_refresh_token_credentials *c =
(grpc_google_refresh_token_credentials *)metadata_req->creds;
@@ -354,7 +356,7 @@ static void refresh_token_fetch_oauth2(
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
- grpc_httpcli_post(exec_ctx, httpcli_context, pollset, &request, body,
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollent, &request, body,
strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
@@ -399,7 +401,7 @@ static void access_token_destruct(grpc_call_credentials *creds) {
static void access_token_get_request_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_pollset *pollset, grpc_auth_metadata_context context,
+ grpc_polling_entity *pollent, grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
cb(exec_ctx, user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
index be7bc81626..7f6f205c22 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
@@ -70,7 +70,7 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token);
typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
grpc_credentials_metadata_request *req,
grpc_httpcli_context *http_context,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_iomgr_cb_func cb,
gpr_timespec deadline);
typedef struct {
@@ -106,4 +106,4 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_http_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime);
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c
index bae357321e..9fb55e8466 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c
@@ -94,7 +94,7 @@ static void plugin_md_request_metadata_ready(void *request,
static void plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds,
- grpc_pollset *pollset,
+ grpc_polling_entity *pollent,
grpc_auth_metadata_context context,
grpc_credentials_metadata_cb cb,
void *user_data) {
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.h b/src/core/lib/security/credentials/plugin/plugin_credentials.h
index b5dd3ad5e2..89073cb3d1 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.h
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.h
@@ -42,4 +42,4 @@ typedef struct {
grpc_credentials_md_store *plugin_md;
} grpc_plugin_credentials;
-#endif // GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H */
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index e3cbcb4433..76be2acd72 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -54,11 +54,11 @@ typedef struct {
grpc_call_credentials *creds;
grpc_mdstr *host;
grpc_mdstr *method;
- /* pollset bound to this call; if we need to make external
- network requests, they should be done under this pollset
- so that work can progress when this call wants work to
- progress */
- grpc_pollset *pollset;
+ /* pollset{_set} bound to this call; if we need to make external
+ network requests, they should be done under a pollset added to this
+ pollset_set so that work can progress when this call wants work to progress
+ */
+ grpc_polling_entity *pollent;
grpc_transport_stream_op op;
uint8_t security_context_set;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
@@ -184,9 +184,9 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
calld->op = *op; /* Copy op (originates from the caller's stack). */
- GPR_ASSERT(calld->pollset);
+ GPR_ASSERT(calld->pollent != NULL);
grpc_call_credentials_get_request_metadata(
- exec_ctx, calld->creds, calld->pollset, calld->auth_md_context,
+ exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
on_credentials_metadata, elem);
}
@@ -270,15 +270,16 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
memset(calld, 0, sizeof(*calld));
}
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_pollset *pollset) {
+static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
- calld->pollset = pollset;
+ calld->pollent = pollent;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {
+ const grpc_call_stats *stats, void *ignored) {
call_data *calld = elem->call_data;
grpc_call_credentials_unref(calld->creds);
if (calld->host != NULL) {
@@ -329,8 +330,14 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter");
}
-const grpc_channel_filter grpc_client_auth_filter = {
- auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, set_pollset, destroy_call_elem,
- sizeof(channel_data), init_channel_elem, destroy_channel_elem,
- grpc_call_next_get_peer, "client-auth"};
+const grpc_channel_filter grpc_client_auth_filter = {auth_start_transport_op,
+ grpc_channel_next_op,
+ sizeof(call_data),
+ init_call_elem,
+ set_pollset_or_pollset_set,
+ destroy_call_elem,
+ sizeof(channel_data),
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_call_next_get_peer,
+ "client-auth"};
diff --git a/src/core/lib/security/transport/handshake.c b/src/core/lib/security/transport/handshake.c
index c87262f124..b374ca7633 100644
--- a/src/core/lib/security/transport/handshake.c
+++ b/src/core/lib/security/transport/handshake.c
@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
+#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
@@ -61,6 +62,8 @@ typedef struct {
grpc_closure on_handshake_data_sent_to_peer;
grpc_closure on_handshake_data_received_from_peer;
grpc_auth_context *auth_context;
+ grpc_timer timer;
+ gpr_refcount refs;
} grpc_security_handshake;
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
@@ -97,9 +100,23 @@ static void security_connector_remove_handshake(grpc_security_handshake *h) {
gpr_mu_unlock(&sc->mu);
}
+static void unref_handshake(grpc_security_handshake *h) {
+ if (gpr_unref(&h->refs)) {
+ if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
+ if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
+ gpr_slice_buffer_destroy(&h->left_overs);
+ gpr_slice_buffer_destroy(&h->outgoing);
+ gpr_slice_buffer_destroy(&h->incoming);
+ GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
+ GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
+ gpr_free(h);
+ }
+}
+
static void security_handshake_done(grpc_exec_ctx *exec_ctx,
grpc_security_handshake *h,
grpc_error *error) {
+ grpc_timer_cancel(exec_ctx, &h->timer);
if (!h->is_client_side) {
security_connector_remove_handshake(h);
}
@@ -119,14 +136,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx,
}
h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, NULL, NULL);
}
- if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
- if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
- gpr_slice_buffer_destroy(&h->left_overs);
- gpr_slice_buffer_destroy(&h->outgoing);
- gpr_slice_buffer_destroy(&h->incoming);
- GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
- GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
- gpr_free(h);
+ unref_handshake(h);
GRPC_ERROR_UNREF(error);
}
@@ -149,7 +159,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *user_data,
if (result != TSI_OK) {
security_handshake_done(
exec_ctx, h,
- grpc_set_tsi_error_bits(
+ grpc_set_tsi_error_result(
GRPC_ERROR_CREATE("Frame protector creation failed"), result));
return;
}
@@ -168,7 +178,7 @@ static void check_peer(grpc_exec_ctx *exec_ctx, grpc_security_handshake *h) {
if (result != TSI_OK) {
security_handshake_done(
- exec_ctx, h, grpc_set_tsi_error_bits(
+ exec_ctx, h, grpc_set_tsi_error_result(
GRPC_ERROR_CREATE("Peer extraction failed"), result));
return;
}
@@ -195,9 +205,9 @@ static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
} while (result == TSI_INCOMPLETE_DATA);
if (result != TSI_OK) {
- security_handshake_done(
- exec_ctx, h,
- grpc_set_tsi_error_bits(GRPC_ERROR_CREATE("Handshake failed"), result));
+ security_handshake_done(exec_ctx, h,
+ grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE("Handshake failed"), result));
return;
}
@@ -249,9 +259,9 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
}
if (result != TSI_OK) {
- security_handshake_done(
- exec_ctx, h,
- grpc_set_tsi_error_bits(GRPC_ERROR_CREATE("Handshake failed"), result));
+ security_handshake_done(exec_ctx, h,
+ grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE("Handshake failed"), result));
return;
}
@@ -304,13 +314,19 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
- tsi_handshaker *handshaker,
- grpc_security_connector *connector,
- bool is_client_side,
- grpc_endpoint *nonsecure_endpoint,
- grpc_security_handshake_done_cb cb,
- void *user_data) {
+static void on_timeout(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_security_handshake *h = arg;
+ if (error == GRPC_ERROR_NONE) {
+ grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
+ }
+ unref_handshake(h);
+}
+
+void grpc_do_security_handshake(
+ grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
+ grpc_security_connector *connector, bool is_client_side,
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
+ grpc_security_handshake_done_cb cb, void *user_data) {
grpc_security_connector_handshake_list *handshake_node;
grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
memset(h, 0, sizeof(grpc_security_handshake));
@@ -322,6 +338,7 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
h->wrapped_endpoint = nonsecure_endpoint;
h->user_data = user_data;
h->cb = cb;
+ gpr_ref_init(&h->refs, 2); /* timer and handshake proper each get a ref */
grpc_closure_init(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h);
grpc_closure_init(&h->on_handshake_data_received_from_peer,
@@ -340,6 +357,8 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&server_connector->mu);
}
send_handshake_bytes_to_peer(exec_ctx, h);
+ grpc_timer_init(exec_ctx, &h->timer, deadline, on_timeout, h,
+ gpr_now(deadline.clock_type));
}
void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/security/transport/handshake.h b/src/core/lib/security/transport/handshake.h
index 6ed850b315..c0906dd6af 100644
--- a/src/core/lib/security/transport/handshake.h
+++ b/src/core/lib/security/transport/handshake.h
@@ -38,13 +38,11 @@
#include "src/core/lib/security/transport/security_connector.h"
/* Calls the callback upon completion. Takes ownership of handshaker. */
-void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
- tsi_handshaker *handshaker,
- grpc_security_connector *connector,
- bool is_client_side,
- grpc_endpoint *nonsecure_endpoint,
- grpc_security_handshake_done_cb cb,
- void *user_data);
+void grpc_do_security_handshake(
+ grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
+ grpc_security_connector *connector, bool is_client_side,
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
+ grpc_security_handshake_done_cb cb, void *user_data);
void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, void *handshake);
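[Editor's sketch] The handshake.c change above gives the handshake a deadline timer and a refcount of 2, so that whichever of the two paths (timer callback, handshake completion) finishes last frees the shared state. A minimal standalone sketch of that two-owner pattern, using C11 atomics in place of gpr_refcount:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  atomic_int refs;
  /* ... buffers, endpoints, auth context ... */
} toy_handshake;

static toy_handshake *handshake_create(void) {
  toy_handshake *h = malloc(sizeof(*h));
  atomic_init(&h->refs, 2); /* one for the handshake, one for the timer */
  return h;
}

static void handshake_unref(toy_handshake *h) {
  /* atomic_fetch_sub returns the previous value; 1 means we were last. */
  if (atomic_fetch_sub(&h->refs, 1) == 1) {
    printf("last owner done, freeing\n");
    free(h);
  }
}

int main(void) {
  toy_handshake *h = handshake_create();
  handshake_unref(h); /* e.g. the deadline timer fired or was cancelled */
  handshake_unref(h); /* e.g. the handshake proper completed */
  return 0;
}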
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 23b51f33f6..7650d68e89 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -128,7 +128,7 @@ static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
grpc_error *error) {
- if (grpc_trace_secure_endpoint) {
+ if (false && grpc_trace_secure_endpoint) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
char *data = gpr_dump_slice(ep->read_buffer->slices[i],
@@ -138,7 +138,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
}
}
ep->read_buffer = NULL;
- grpc_exec_ctx_push(exec_ctx, ep->read_cb, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL);
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
@@ -211,7 +211,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (result != TSI_OK) {
gpr_slice_buffer_reset_and_unref(ep->read_buffer);
- call_read_cb(exec_ctx, ep, grpc_set_tsi_error_bits(
+ call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
GRPC_ERROR_CREATE("Unwrap failed"), result));
return;
}
@@ -256,7 +256,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
- if (grpc_trace_secure_endpoint) {
+ if (false && grpc_trace_secure_endpoint) {
for (i = 0; i < slices->count; i++) {
char *data =
gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
@@ -319,9 +319,9 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
- grpc_exec_ctx_push(
+ grpc_exec_ctx_sched(
exec_ctx, cb,
- grpc_set_tsi_error_bits(GRPC_ERROR_CREATE("Wrap failed"), result),
+ grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
NULL);
return;
}
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 03b64c5121..f0ee6770e5 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -127,23 +127,25 @@ void grpc_server_security_connector_shutdown(
void grpc_channel_security_connector_do_handshake(
grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint, grpc_security_handshake_done_cb cb,
- void *user_data) {
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
+ grpc_security_handshake_done_cb cb, void *user_data) {
if (sc == NULL || nonsecure_endpoint == NULL) {
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
} else {
- sc->do_handshake(exec_ctx, sc, nonsecure_endpoint, cb, user_data);
+ sc->do_handshake(exec_ctx, sc, nonsecure_endpoint, deadline, cb, user_data);
}
}
void grpc_server_security_connector_do_handshake(
grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- grpc_security_handshake_done_cb cb, void *user_data) {
+ gpr_timespec deadline, grpc_security_handshake_done_cb cb,
+ void *user_data) {
if (sc == NULL || nonsecure_endpoint == NULL) {
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL, NULL);
} else {
- sc->do_handshake(exec_ctx, sc, acceptor, nonsecure_endpoint, cb, user_data);
+ sc->do_handshake(exec_ctx, sc, acceptor, nonsecure_endpoint, deadline, cb,
+ user_data);
}
}
@@ -310,20 +312,23 @@ static void fake_channel_check_call_host(grpc_exec_ctx *exec_ctx,
static void fake_channel_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
+ gpr_timespec deadline,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(1), &sc->base,
- true, nonsecure_endpoint, cb, user_data);
+ true, nonsecure_endpoint, deadline, cb, user_data);
}
static void fake_server_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc,
grpc_tcp_server_acceptor *acceptor,
grpc_endpoint *nonsecure_endpoint,
+ gpr_timespec deadline,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_do_security_handshake(exec_ctx, tsi_create_fake_handshaker(0), &sc->base,
- false, nonsecure_endpoint, cb, user_data);
+ false, nonsecure_endpoint, deadline, cb,
+ user_data);
}
static grpc_security_connector_vtable fake_channel_vtable = {
@@ -413,6 +418,7 @@ static grpc_security_status ssl_create_handshaker(
static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
+ gpr_timespec deadline,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_ssl_channel_security_connector *c =
@@ -427,7 +433,7 @@ static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
cb(exec_ctx, user_data, status, NULL, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, true,
- nonsecure_endpoint, cb, user_data);
+ nonsecure_endpoint, deadline, cb, user_data);
}
}
@@ -435,6 +441,7 @@ static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc,
grpc_tcp_server_acceptor *acceptor,
grpc_endpoint *nonsecure_endpoint,
+ gpr_timespec deadline,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_ssl_server_security_connector *c =
@@ -446,7 +453,7 @@ static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
cb(exec_ctx, user_data, status, NULL, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, &sc->base, false,
- nonsecure_endpoint, cb, user_data);
+ nonsecure_endpoint, deadline, cb, user_data);
}
}
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 84e586deaa..c2ddf5ee1e 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -143,7 +143,7 @@ struct grpc_channel_security_connector {
grpc_security_call_host_check_cb cb, void *user_data);
void (*do_handshake)(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
- grpc_endpoint *nonsecure_endpoint,
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
grpc_security_handshake_done_cb cb, void *user_data);
};
@@ -156,8 +156,8 @@ void grpc_channel_security_connector_check_call_host(
/* Handshake. */
void grpc_channel_security_connector_do_handshake(
grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
- grpc_endpoint *nonsecure_endpoint, grpc_security_handshake_done_cb cb,
- void *user_data);
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
+ grpc_security_handshake_done_cb cb, void *user_data);
/* --- server_security_connector object. ---
@@ -174,14 +174,14 @@ struct grpc_server_security_connector {
void (*do_handshake)(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc,
grpc_tcp_server_acceptor *acceptor,
- grpc_endpoint *nonsecure_endpoint,
+ grpc_endpoint *nonsecure_endpoint, gpr_timespec deadline,
grpc_security_handshake_done_cb cb, void *user_data);
};
void grpc_server_security_connector_do_handshake(
grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
grpc_tcp_server_acceptor *acceptor, grpc_endpoint *nonsecure_endpoint,
- grpc_security_handshake_done_cb cb, void *user_data);
+ gpr_timespec deadline, grpc_security_handshake_done_cb cb, void *user_data);
void grpc_server_security_connector_shutdown(
grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector);
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index d45ec4020c..12e789bde9 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -128,7 +128,7 @@ static void on_md_processing_done(
grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
elem);
grpc_metadata_array_destroy(&calld->md);
- grpc_exec_ctx_push(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
} else {
gpr_slice message;
grpc_transport_stream_op close_op;
@@ -146,10 +146,10 @@ static void on_md_processing_done(
calld->transport_op.send_trailing_metadata = NULL;
grpc_transport_stream_op_add_close(&close_op, status, &message);
grpc_call_next_op(&exec_ctx, elem, &close_op);
- grpc_exec_ctx_push(&exec_ctx, calld->on_done_recv,
- grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status),
- NULL);
+ grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv,
+ grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status),
+ NULL);
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -169,8 +169,8 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
}
- grpc_exec_ctx_push(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error),
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error),
+ NULL);
}
static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@@ -224,12 +224,9 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_server_security_context_destroy;
}
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_pollset *pollset) {}
-
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {}
+ const grpc_call_stats *stats, void *ignored) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -262,7 +259,14 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
const grpc_channel_filter grpc_server_auth_filter = {
- auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
- init_call_elem, set_pollset, destroy_call_elem,
- sizeof(channel_data), init_channel_elem, destroy_channel_elem,
- grpc_call_next_get_peer, "server-auth"};
+ auth_start_transport_op,
+ grpc_channel_next_op,
+ sizeof(call_data),
+ init_call_elem,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
+ destroy_call_elem,
+ sizeof(channel_data),
+ init_channel_elem,
+ destroy_channel_elem,
+ grpc_call_next_get_peer,
+ "server-auth"};
diff --git a/src/core/lib/security/transport/tsi_error.c b/src/core/lib/security/transport/tsi_error.c
index b9fb814905..afc1733567 100644
--- a/src/core/lib/security/transport/tsi_error.c
+++ b/src/core/lib/security/transport/tsi_error.c
@@ -33,7 +33,7 @@
#include "src/core/lib/security/transport/tsi_error.h"
-grpc_error *grpc_set_tsi_error_bits(grpc_error *error, tsi_result result) {
+grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result) {
return grpc_error_set_int(grpc_error_set_str(error, GRPC_ERROR_STR_TSI_ERROR,
tsi_result_to_string(result)),
GRPC_ERROR_INT_TSI_CODE, result);
diff --git a/src/core/lib/security/transport/tsi_error.h b/src/core/lib/security/transport/tsi_error.h
index 0b691a427a..636fbb89cf 100644
--- a/src/core/lib/security/transport/tsi_error.h
+++ b/src/core/lib/security/transport/tsi_error.h
@@ -37,6 +37,6 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/tsi/transport_security_interface.h"
-grpc_error *grpc_set_tsi_error_bits(grpc_error *error, tsi_result result);
+grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result);
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H */
diff --git a/src/core/lib/security/util/json_util.h b/src/core/lib/security/util/json_util.h
index 5959626a5f..137900593f 100644
--- a/src/core/lib/security/util/json_util.h
+++ b/src/core/lib/security/util/json_util.h
@@ -52,4 +52,4 @@ const char *grpc_json_get_string_property(const grpc_json *json,
bool grpc_copy_json_string_property(const grpc_json *json,
const char *prop_name, char **copied_value);
-#endif // GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H
+#endif /* GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H */
diff --git a/src/core/lib/support/cpu_windows.c b/src/core/lib/support/cpu_windows.c
index ce32eb0a9d..34d006bfc8 100644
--- a/src/core/lib/support/cpu_windows.c
+++ b/src/core/lib/support/cpu_windows.c
@@ -33,7 +33,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include <grpc/support/log.h>
unsigned gpr_cpu_num_cores(void) {
@@ -44,4 +44,4 @@ unsigned gpr_cpu_num_cores(void) {
unsigned gpr_cpu_current_cpu(void) { return GetCurrentProcessorNumber(); }
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/support/env_win32.c b/src/core/lib/support/env_windows.c
index e670e1e8d0..9116959442 100644
--- a/src/core/lib/support/env_win32.c
+++ b/src/core/lib/support/env_windows.c
@@ -33,13 +33,13 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32_ENV
+#ifdef GPR_WINDOWS_ENV
#include <windows.h>
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
-#include "src/core/lib/support/string_win32.h"
+#include "src/core/lib/support/string_windows.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -76,4 +76,4 @@ void gpr_setenv(const char *name, const char *value) {
GPR_ASSERT(res);
}
-#endif /* GPR_WIN32_ENV */
+#endif /* GPR_WINDOWS_ENV */
diff --git a/src/core/lib/support/log.c b/src/core/lib/support/log.c
index 882abf673c..bae0957df7 100644
--- a/src/core/lib/support/log.c
+++ b/src/core/lib/support/log.c
@@ -96,4 +96,6 @@ void gpr_log_verbosity_init() {
}
}
-void gpr_set_log_function(gpr_log_func f) { g_log_func = f; }
+void gpr_set_log_function(gpr_log_func f) {
+ g_log_func = f ? f : gpr_default_log;
+}
diff --git a/src/core/lib/support/log_linux.c b/src/core/lib/support/log_linux.c
index ca04c022e3..508fae4eec 100644
--- a/src/core/lib/support/log_linux.c
+++ b/src/core/lib/support/log_linux.c
@@ -95,9 +95,9 @@ void gpr_default_log(gpr_log_func_args *args) {
strcpy(time_buffer, "error:strftime");
}
- gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]",
+ gpr_asprintf(&prefix, "%s%s.%09" PRId32 " %7ld %s:%d]",
gpr_log_severity_string(args->severity), time_buffer,
- (int)(now.tv_nsec), gettid(), display_file, args->line);
+ now.tv_nsec, gettid(), display_file, args->line);
fprintf(stderr, "%-60s %s\n", prefix, args->message);
gpr_free(prefix);
diff --git a/src/core/lib/support/log_win32.c b/src/core/lib/support/log_windows.c
index 29735bd18c..ea898c359d 100644
--- a/src/core/lib/support/log_win32.c
+++ b/src/core/lib/support/log_windows.c
@@ -33,19 +33,19 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32_LOG
+#ifdef GPR_WINDOWS_LOG
#include <stdarg.h>
#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/support/string.h"
-#include "src/core/lib/support/string_win32.h"
+#include "src/core/lib/support/string_windows.h"
void gpr_log(const char *file, int line, gpr_log_severity severity,
const char *format, ...) {
@@ -109,4 +109,4 @@ void gpr_default_log(gpr_log_func_args *args) {
fflush(stderr);
}
-#endif /* GPR_WIN32_LOG */
+#endif /* GPR_WINDOWS_LOG */
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index a2ab6c5f1f..30c1e67647 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -194,6 +194,16 @@ int int64_ttoa(int64_t value, char *string) {
return i;
}
+char *gpr_leftpad(const char *str, char flag, size_t length) {
+ const size_t str_length = strlen(str);
+ const size_t out_length = str_length > length ? str_length : length;
+ char *out = gpr_malloc(out_length + 1);
+ memset(out, flag, out_length - str_length);
+ memcpy(out + out_length - str_length, str, str_length);
+ out[out_length] = 0;
+ return out;
+}
+
char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
return gpr_strjoin_sep(strs, nstrs, "", final_length);
}
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index ea58610914..2b6bb3eec6 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -83,6 +83,10 @@ int int64_ttoa(int64_t value, char *output);
/* Reverse a run of bytes */
void gpr_reverse_bytes(char *str, int len);
+/* Pad a string with flag characters. The given length specifies the minimum
+ field width. The input string is never truncated. */
+char *gpr_leftpad(const char *str, char flag, size_t length);
+
/* Join a set of strings, returning the resulting string.
Total combined length (excluding null terminator) is returned in total_length
if it is non-null. */
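The new gpr_leftpad allocates its result with gpr_malloc, pads to a minimum field width, and never truncates. A small usage sketch under those assumptions; example_leftpad is an illustrative wrapper:

#include <grpc/support/alloc.h>
#include "src/core/lib/support/string.h"

void example_leftpad(void) {
  char *padded = gpr_leftpad("42", '0', 5);      /* -> "00042" */
  char *longer = gpr_leftpad("123456", '0', 5);  /* wider than the field: "123456" */
  gpr_free(padded); /* results are gpr_malloc'd and must be freed */
  gpr_free(longer);
}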
diff --git a/src/core/lib/support/string_util_win32.c b/src/core/lib/support/string_util_windows.c
index 0d7bcdb5aa..049c9a8c04 100644
--- a/src/core/lib/support/string_util_win32.c
+++ b/src/core/lib/support/string_util_windows.c
@@ -35,7 +35,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
/* Some platforms (namely msys) need wchar to be included BEFORE
anything else, especially strsafe.h. */
@@ -91,4 +91,4 @@ char *gpr_format_message(int messageid) {
return message;
}
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/support/string_win32.c b/src/core/lib/support/string_windows.c
index 6b92f79253..ecc2a3a4e5 100644
--- a/src/core/lib/support/string_win32.c
+++ b/src/core/lib/support/string_windows.c
@@ -35,7 +35,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32_STRING
+#ifdef GPR_WINDOWS_STRING
#include <stdarg.h>
#include <stdio.h>
@@ -80,4 +80,4 @@ int gpr_asprintf(char **strp, const char *format, ...) {
return -1;
}
-#endif /* GPR_WIN32_STRING */
+#endif /* GPR_WINDOWS_STRING */
diff --git a/src/core/lib/support/string_win32.h b/src/core/lib/support/string_windows.h
index ff4a694ca9..899563b72d 100644
--- a/src/core/lib/support/string_win32.h
+++ b/src/core/lib/support/string_windows.h
@@ -31,17 +31,17 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_STRING_WIN32_H
-#define GRPC_CORE_LIB_SUPPORT_STRING_WIN32_H
+#ifndef GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H
+#define GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
/* These allocate new strings using gpr_malloc to convert from and to utf-8. */
LPTSTR gpr_char_to_tchar(LPCSTR input);
LPSTR gpr_tchar_to_char(LPCTSTR input);
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
-#endif /* GRPC_CORE_LIB_SUPPORT_STRING_WIN32_H */
+#endif /* GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H */
diff --git a/src/core/lib/support/subprocess_windows.c b/src/core/lib/support/subprocess_windows.c
index 264306f1bd..dee8c44ac1 100644
--- a/src/core/lib/support/subprocess_windows.c
+++ b/src/core/lib/support/subprocess_windows.c
@@ -43,7 +43,7 @@
#include <grpc/support/log.h>
#include <grpc/support/subprocess.h>
#include "src/core/lib/support/string.h"
-#include "src/core/lib/support/string_win32.h"
+#include "src/core/lib/support/string_windows.h"
struct gpr_subprocess {
PROCESS_INFORMATION pi;
diff --git a/src/core/lib/support/sync_win32.c b/src/core/lib/support/sync_windows.c
index 470a9f9704..8f0e8ff69f 100644
--- a/src/core/lib/support/sync_win32.c
+++ b/src/core/lib/support/sync_windows.c
@@ -35,7 +35,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
@@ -130,4 +130,4 @@ void gpr_once_init(gpr_once *once, void (*init_function)(void)) {
InitOnceExecuteOnce(once, run_once_func, &arg, &dummy);
}
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/support/thd_win32.c b/src/core/lib/support/thd_windows.c
index 6deb3140eb..74d2250df4 100644
--- a/src/core/lib/support/thd_win32.c
+++ b/src/core/lib/support/thd_windows.c
@@ -35,7 +35,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -114,4 +114,4 @@ void gpr_thd_join(gpr_thd_id t) {
destroy_thread(info);
}
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/core/lib/support/time_win32.c b/src/core/lib/support/time_windows.c
index 9e924ab3f4..6459732879 100644
--- a/src/core/lib/support/time_win32.c
+++ b/src/core/lib/support/time_windows.c
@@ -35,7 +35,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32_TIME
+#ifdef GPR_WINDOWS_TIME
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@@ -107,4 +107,4 @@ void gpr_sleep_until(gpr_timespec until) {
}
}
-#endif /* GPR_WIN32_TIME */
+#endif /* GPR_WINDOWS_TIME */
diff --git a/src/core/lib/support/tmpfile_msys.c b/src/core/lib/support/tmpfile_msys.c
index 2fdc89a64f..4f566c4c28 100644
--- a/src/core/lib/support/tmpfile_msys.c
+++ b/src/core/lib/support/tmpfile_msys.c
@@ -44,7 +44,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/support/string_win32.h"
+#include "src/core/lib/support/string_windows.h"
#include "src/core/lib/support/tmpfile.h"
FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) {
diff --git a/src/core/lib/support/tmpfile_win32.c b/src/core/lib/support/tmpfile_windows.c
index 9ac73128c3..542f53e589 100644
--- a/src/core/lib/support/tmpfile_win32.c
+++ b/src/core/lib/support/tmpfile_windows.c
@@ -33,7 +33,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32_TMPFILE
+#ifdef GPR_WINDOWS_TMPFILE
#include <io.h>
#include <stdio.h>
@@ -44,7 +44,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/support/string_win32.h"
+#include "src/core/lib/support/string_windows.h"
#include "src/core/lib/support/tmpfile.h"
FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) {
@@ -81,4 +81,4 @@ end:
return result;
}
-#endif /* GPR_WIN32_TMPFILE */
+#endif /* GPR_WINDOWS_TMPFILE */
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 3915ba57a2..04291b0ee0 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -40,6 +40,7 @@
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/slice.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
@@ -52,7 +53,9 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
+#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/transport.h"
/** The maximum number of concurrent batches possible.
Based upon the maximum number of individually queueable ops in the batch
@@ -65,12 +68,6 @@
- status/close recv (depending on client/server) */
#define MAX_CONCURRENT_BATCHES 6
-typedef struct {
- grpc_ioreq_completion_func on_complete;
- void *user_data;
- int success;
-} completed_request;
-
#define MAX_SEND_EXTRA_METADATA_COUNT 3
/* Status data for a request can come from several sources; this
@@ -97,25 +94,6 @@ typedef struct {
grpc_mdstr *details;
} received_status;
-/* How far through the GRPC stream have we read? */
-typedef enum {
- /* We are still waiting for initial metadata to complete */
- READ_STATE_INITIAL = 0,
- /* We have gotten initial metadata, and are reading either
- messages or trailing metadata */
- READ_STATE_GOT_INITIAL_METADATA,
- /* The stream is closed for reading */
- READ_STATE_READ_CLOSED,
- /* The stream is closed for reading & writing */
- READ_STATE_STREAM_CLOSED
-} read_state;
-
-typedef enum {
- WRITE_STATE_INITIAL = 0,
- WRITE_STATE_STARTED,
- WRITE_STATE_WRITE_CLOSED
-} write_state;
-
typedef struct batch_control {
grpc_call *call;
grpc_cq_completion cq_completion;
@@ -135,6 +113,7 @@ typedef struct batch_control {
struct grpc_call {
grpc_completion_queue *cq;
+ grpc_polling_entity pollent;
grpc_channel *channel;
grpc_call *parent;
grpc_call *first_child;
@@ -176,10 +155,10 @@ struct grpc_call {
received_status status[STATUS_SOURCE_COUNT];
/* Call stats: only valid after trailing metadata received */
- grpc_transport_stream_stats stats;
+ grpc_call_stats stats;
- /* Compression algorithm for the call */
- grpc_compression_algorithm compression_algorithm;
+ /* Compression algorithm for *incoming* data */
+ grpc_compression_algorithm incoming_compression_algorithm;
/* Supported encodings (compression algorithms), a bitset */
uint32_t encodings_accepted_by_peer;
@@ -238,18 +217,19 @@ static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
grpc_status_code status,
const char *description);
+static grpc_call_error close_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
+ const char *description);
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
grpc_error *error);
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
-grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask,
- grpc_completion_queue *cq,
- const void *server_transport_data,
- grpc_mdelem **add_initial_metadata,
- size_t add_initial_metadata_count,
- gpr_timespec send_deadline) {
+grpc_call *grpc_call_create(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
+ const void *server_transport_data, grpc_mdelem **add_initial_metadata,
+ size_t add_initial_metadata_count, gpr_timespec send_deadline) {
size_t i, j;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -286,9 +266,20 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
call->context, server_transport_data,
CALL_STACK_FROM_CALL(call));
if (cq != NULL) {
+ GPR_ASSERT(
+ pollset_set_alternative == NULL &&
+ "Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
GRPC_CQ_INTERNAL_REF(cq, "bind");
- grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
- grpc_cq_pollset(cq));
+ call->pollent =
+ grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
+ }
+ if (pollset_set_alternative != NULL) {
+ call->pollent =
+ grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
+ }
+ if (!grpc_polling_entity_is_empty(&call->pollent)) {
+ grpc_call_stack_set_pollset_or_pollset_set(
+ &exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
if (parent_call != NULL) {
GRPC_CALL_INTERNAL_REF(parent_call, "child");
@@ -343,10 +334,16 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq) {
GPR_ASSERT(cq);
+
+ if (grpc_polling_entity_pollset_set(&call->pollent) != NULL) {
+ gpr_log(GPR_ERROR, "A pollset_set is already registered for this call.");
+ abort();
+ }
call->cq = cq;
GRPC_CQ_INTERNAL_REF(cq, "bind");
- grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
- grpc_cq_pollset(cq));
+ call->pollent = grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
+ grpc_call_stack_set_pollset_or_pollset_set(
+ exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
@@ -394,7 +391,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
grpc_channel *channel = c->channel;
- grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), c);
+ grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->stats, c);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
GPR_TIMER_END("destroy_call", 0);
}
@@ -409,21 +406,27 @@ static void set_status_code(grpc_call *call, status_source source,
/* TODO(ctiller): what to do about the flush that was previously here */
}
-static void set_compression_algorithm(grpc_call *call,
- grpc_compression_algorithm algo) {
+static void set_incoming_compression_algorithm(
+ grpc_call *call, grpc_compression_algorithm algo) {
GPR_ASSERT(algo < GRPC_COMPRESS_ALGORITHMS_COUNT);
- call->compression_algorithm = algo;
+ call->incoming_compression_algorithm = algo;
}
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call) {
grpc_compression_algorithm algorithm;
gpr_mu_lock(&call->mu);
- algorithm = call->compression_algorithm;
+ algorithm = call->incoming_compression_algorithm;
gpr_mu_unlock(&call->mu);
return algorithm;
}
+static grpc_compression_algorithm compression_algorithm_for_level_locked(
+ grpc_call *call, grpc_compression_level level) {
+ return grpc_compression_algorithm_for_level(level,
+ call->encodings_accepted_by_peer);
+}
+
uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
uint32_t flags;
gpr_mu_lock(&call->mu);
@@ -549,15 +552,28 @@ static grpc_linked_mdelem *linked_from_md(grpc_metadata *md) {
return (grpc_linked_mdelem *)&md->internal_data;
}
+static grpc_metadata *get_md_elem(grpc_metadata *metadata,
+ grpc_metadata *additional_metadata, int i,
+ int count) {
+ grpc_metadata *res =
+ i < count ? &metadata[i] : &additional_metadata[i - count];
+ GPR_ASSERT(res);
+ return res;
+}
+
static int prepare_application_metadata(grpc_call *call, int count,
grpc_metadata *metadata,
int is_trailing,
- int prepend_extra_metadata) {
+ int prepend_extra_metadata,
+ grpc_metadata *additional_metadata,
+ int additional_metadata_count) {
+ int total_count = count + additional_metadata_count;
int i;
grpc_metadata_batch *batch =
&call->metadata_batch[0 /* is_receiving */][is_trailing];
- for (i = 0; i < count; i++) {
- grpc_metadata *md = &metadata[i];
+ for (i = 0; i < total_count; i++) {
+ const grpc_metadata *md =
+ get_md_elem(metadata, additional_metadata, i, count);
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
l->md = grpc_mdelem_from_string_and_buffer(
@@ -576,9 +592,10 @@ static int prepare_application_metadata(grpc_call *call, int count,
break;
}
}
- if (i != count) {
+ if (i != total_count) {
for (int j = 0; j <= i; j++) {
- grpc_metadata *md = &metadata[j];
+ const grpc_metadata *md =
+ get_md_elem(metadata, additional_metadata, j, count);
grpc_linked_mdelem *l = (grpc_linked_mdelem *)&md->internal_data;
GRPC_MDELEM_UNREF(l->md);
}
@@ -599,24 +616,36 @@ static int prepare_application_metadata(grpc_call *call, int count,
}
}
}
- for (i = 1; i < count; i++) {
- linked_from_md(&metadata[i])->prev = linked_from_md(&metadata[i - 1]);
+ for (i = 1; i < total_count; i++) {
+ grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count);
+ grpc_metadata *prev_md =
+ get_md_elem(metadata, additional_metadata, i - 1, count);
+ linked_from_md(md)->prev = linked_from_md(prev_md);
}
- for (i = 0; i < count - 1; i++) {
- linked_from_md(&metadata[i])->next = linked_from_md(&metadata[i + 1]);
+ for (i = 0; i < total_count - 1; i++) {
+ grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count);
+ grpc_metadata *next_md =
+ get_md_elem(metadata, additional_metadata, i + 1, count);
+ linked_from_md(md)->next = linked_from_md(next_md);
}
- switch (prepend_extra_metadata * 2 + (count != 0)) {
+
+ switch (prepend_extra_metadata * 2 + (total_count != 0)) {
case 0:
/* no prepend, no metadata => nothing to do */
batch->list.head = batch->list.tail = NULL;
break;
- case 1:
+ case 1: {
/* metadata, but no prepend */
- batch->list.head = linked_from_md(&metadata[0]);
- batch->list.tail = linked_from_md(&metadata[count - 1]);
+ grpc_metadata *first_md =
+ get_md_elem(metadata, additional_metadata, 0, count);
+ grpc_metadata *last_md =
+ get_md_elem(metadata, additional_metadata, total_count - 1, count);
+ batch->list.head = linked_from_md(first_md);
+ batch->list.tail = linked_from_md(last_md);
batch->list.head->prev = NULL;
batch->list.tail->next = NULL;
break;
+ }
case 2:
/* prepend, but no md */
batch->list.head = &call->send_extra_metadata[0];
@@ -625,17 +654,22 @@ static int prepare_application_metadata(grpc_call *call, int count,
batch->list.head->prev = NULL;
batch->list.tail->next = NULL;
break;
- case 3:
+ case 3: {
/* prepend AND md */
+ grpc_metadata *first_md =
+ get_md_elem(metadata, additional_metadata, 0, count);
+ grpc_metadata *last_md =
+ get_md_elem(metadata, additional_metadata, total_count - 1, count);
batch->list.head = &call->send_extra_metadata[0];
call->send_extra_metadata[call->send_extra_metadata_count - 1].next =
- linked_from_md(&metadata[0]);
- linked_from_md(&metadata[0])->prev =
+ linked_from_md(first_md);
+ linked_from_md(first_md)->prev =
&call->send_extra_metadata[call->send_extra_metadata_count - 1];
- batch->list.tail = linked_from_md(&metadata[count - 1]);
+ batch->list.tail = linked_from_md(last_md);
batch->list.head->prev = NULL;
batch->list.tail->next = NULL;
break;
+ }
default:
GPR_UNREACHABLE_CODE(return 0);
}
@@ -704,48 +738,103 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return r;
}
-typedef struct cancel_closure {
+typedef struct termination_closure {
grpc_closure closure;
grpc_call *call;
grpc_status_code status;
-} cancel_closure;
+ gpr_slice optional_message;
+ grpc_closure *op_closure;
+ enum { TC_CANCEL, TC_CLOSE } type;
+} termination_closure;
+
+static void done_termination(grpc_exec_ctx *exec_ctx, void *tcp,
+ grpc_error *error) {
+ termination_closure *tc = tcp;
+ switch (tc->type) {
+ case TC_CANCEL:
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "cancel");
+ break;
+ case TC_CLOSE:
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, tc->call, "close");
+ break;
+ }
+ gpr_slice_unref(tc->optional_message);
+ grpc_exec_ctx_sched(exec_ctx, tc->op_closure, GRPC_ERROR_NONE, NULL);
+ gpr_free(tc);
+}
-static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, grpc_error *error) {
- cancel_closure *cc = ccp;
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, cc->call, "cancel");
- gpr_free(cc);
+static void send_cancel(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
+ grpc_transport_stream_op op;
+ termination_closure *tc = tcp;
+ memset(&op, 0, sizeof(op));
+ op.cancel_with_status = tc->status;
+ /* reuse closure to catch completion */
+ grpc_closure_init(&tc->closure, done_termination, tc);
+ op.on_complete = &tc->closure;
+ execute_op(exec_ctx, tc->call, &op);
}
-static void send_cancel(grpc_exec_ctx *exec_ctx, void *ccp, grpc_error *error) {
+static void send_close(grpc_exec_ctx *exec_ctx, void *tcp, grpc_error *error) {
grpc_transport_stream_op op;
- cancel_closure *cc = ccp;
+ termination_closure *tc = tcp;
memset(&op, 0, sizeof(op));
- op.cancel_with_status = cc->status;
+ tc->optional_message = gpr_slice_ref(tc->optional_message);
+ grpc_transport_stream_op_add_close(&op, tc->status, &tc->optional_message);
/* reuse closure to catch completion */
- grpc_closure_init(&cc->closure, done_cancel, cc);
- op.on_complete = &cc->closure;
- execute_op(exec_ctx, cc->call, &op);
+ grpc_closure_init(&tc->closure, done_termination, tc);
+ tc->op_closure = op.on_complete;
+ op.on_complete = &tc->closure;
+ execute_op(exec_ctx, tc->call, &op);
+}
+
+static grpc_call_error terminate_with_status(grpc_exec_ctx *exec_ctx,
+ termination_closure *tc) {
+ grpc_mdstr *details = NULL;
+ if (GPR_SLICE_LENGTH(tc->optional_message) > 0) {
+ tc->optional_message = gpr_slice_ref(tc->optional_message);
+ details = grpc_mdstr_from_slice(tc->optional_message);
+ }
+
+ set_status_code(tc->call, STATUS_FROM_API_OVERRIDE, (uint32_t)tc->status);
+ set_status_details(tc->call, STATUS_FROM_API_OVERRIDE, details);
+
+ if (tc->type == TC_CANCEL) {
+ grpc_closure_init(&tc->closure, send_cancel, tc);
+ GRPC_CALL_INTERNAL_REF(tc->call, "cancel");
+ } else if (tc->type == TC_CLOSE) {
+ grpc_closure_init(&tc->closure, send_close, tc);
+ GRPC_CALL_INTERNAL_REF(tc->call, "close");
+ }
+ grpc_exec_ctx_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE, NULL);
+ return GRPC_CALL_OK;
}
static grpc_call_error cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
grpc_status_code status,
const char *description) {
- grpc_mdstr *details =
- description ? grpc_mdstr_from_string(description) : NULL;
- cancel_closure *cc = gpr_malloc(sizeof(*cc));
-
+ termination_closure *tc = gpr_malloc(sizeof(*tc));
+ memset(tc, 0, sizeof(termination_closure));
+ tc->type = TC_CANCEL;
+ tc->call = c;
+ tc->optional_message = gpr_slice_from_copied_string(description);
GPR_ASSERT(status != GRPC_STATUS_OK);
+ tc->status = status;
- set_status_code(c, STATUS_FROM_API_OVERRIDE, (uint32_t)status);
- set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
+ return terminate_with_status(exec_ctx, tc);
+}
- grpc_closure_init(&cc->closure, send_cancel, cc);
- cc->call = c;
- cc->status = status;
- GRPC_CALL_INTERNAL_REF(c, "cancel");
- grpc_exec_ctx_push(exec_ctx, &cc->closure, GRPC_ERROR_NONE, NULL);
+static grpc_call_error close_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+ grpc_status_code status,
+ const char *description) {
+ termination_closure *tc = gpr_malloc(sizeof(*tc));
+ memset(tc, 0, sizeof(termination_closure));
+ tc->type = TC_CLOSE;
+ tc->call = c;
+ tc->optional_message = gpr_slice_from_copied_string(description);
+ GPR_ASSERT(status != GRPC_STATUS_OK);
+ tc->status = status;
- return GRPC_CALL_OK;
+ return terminate_with_status(exec_ctx, tc);
}
static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
@@ -886,9 +975,9 @@ static grpc_mdelem *recv_initial_filter(void *callp, grpc_mdelem *elem) {
if (elem == NULL) {
return NULL;
} else if (elem->key == GRPC_MDSTR_GRPC_ENCODING) {
- GPR_TIMER_BEGIN("compression_algorithm", 0);
- set_compression_algorithm(call, decode_compression(elem));
- GPR_TIMER_END("compression_algorithm", 0);
+ GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
+ set_incoming_compression_algorithm(call, decode_compression(elem));
+ GPR_TIMER_END("incoming_compression_algorithm", 0);
return NULL;
} else if (elem->key == GRPC_MDSTR_GRPC_ACCEPT_ENCODING) {
GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
@@ -970,7 +1059,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
grpc_call *call = bctl->call;
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
- grpc_exec_ctx_push(exec_ctx, bctl->notify_tag, bctl->error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, bctl->notify_tag, bctl->error, NULL);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@@ -1020,6 +1109,9 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
call->receiving_slice);
continue_receiving_slices(exec_ctx, bctl);
} else {
+ if (grpc_trace_operation_failures) {
+ GRPC_LOG_IF_ERROR("receiving_slice_ready", GRPC_ERROR_REF(error));
+ }
grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
call->receiving_stream = NULL;
grpc_byte_buffer_destroy(*call->receiving_buffer);
@@ -1053,9 +1145,9 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl,
} else {
call->test_only_last_message_flags = call->receiving_stream->flags;
if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
- (call->compression_algorithm > GRPC_COMPRESS_NONE)) {
+ (call->incoming_compression_algorithm > GRPC_COMPRESS_NONE)) {
*call->receiving_buffer = grpc_raw_compressed_byte_buffer_create(
- NULL, 0, call->compression_algorithm);
+ NULL, 0, call->incoming_compression_algorithm);
} else {
*call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
}
@@ -1083,6 +1175,56 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
}
+static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
+ batch_control *bctl) {
+ grpc_call *call = bctl->call;
+ /* validate call->incoming_compression_algorithm */
+ if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
+ const grpc_compression_algorithm algo =
+ call->incoming_compression_algorithm;
+ char *error_msg = NULL;
+ const grpc_compression_options compression_options =
+ grpc_channel_compression_options(call->channel);
+ /* check if algorithm is known */
+ if (algo >= GRPC_COMPRESS_ALGORITHMS_COUNT) {
+ gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.",
+ algo);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ close_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ } else if (grpc_compression_options_is_algorithm_enabled(
+ &compression_options, algo) == 0) {
+ /* check if algorithm is supported by current channel config */
+ char *algo_name;
+ grpc_compression_algorithm_name(algo, &algo_name);
+ gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
+ algo_name);
+ gpr_log(GPR_ERROR, "%s", error_msg);
+ close_with_status(exec_ctx, call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
+ } else {
+ call->incoming_compression_algorithm = algo;
+ }
+ gpr_free(error_msg);
+ }
+
+ /* make sure the received grpc-encoding is amongst the ones listed in
+ * grpc-accept-encoding */
+ GPR_ASSERT(call->encodings_accepted_by_peer != 0);
+ if (!GPR_BITGET(call->encodings_accepted_by_peer,
+ call->incoming_compression_algorithm)) {
+ extern int grpc_compression_trace;
+ if (grpc_compression_trace) {
+ char *algo_name;
+ grpc_compression_algorithm_name(call->incoming_compression_algorithm,
+ &algo_name);
+ gpr_log(GPR_ERROR,
+ "Compression algorithm (grpc-encoding = '%s') not present in "
+ "the bitset of accepted encodings (grpc-accept-encodings: "
+ "'0x%x')",
+ algo_name, call->encodings_accepted_by_peer);
+ }
+ }
+}
+
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
batch_control *bctl = bctlp;
@@ -1097,24 +1239,10 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_initial_filter, call);
- /* make sure the received grpc-encoding is amongst the ones listed in
- * grpc-accept-encoding */
-
- GPR_ASSERT(call->encodings_accepted_by_peer != 0);
- if (!GPR_BITGET(call->encodings_accepted_by_peer,
- call->compression_algorithm)) {
- extern int grpc_compression_trace;
- if (grpc_compression_trace) {
- char *algo_name;
- grpc_compression_algorithm_name(call->compression_algorithm,
- &algo_name);
- gpr_log(GPR_ERROR,
- "Compression algorithm (grpc-encoding = '%s') not present in "
- "the bitset of accepted encodings (grpc-accept-encodings: "
- "'0x%x')",
- algo_name, call->encodings_accepted_by_peer);
- }
- }
+ GPR_TIMER_BEGIN("validate_filtered_metadata", 0);
+ validate_filtered_metadata(exec_ctx, bctl);
+ GPR_TIMER_END("validate_filtered_metadata", 0);
+
if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
0 &&
!call->is_client) {
@@ -1129,7 +1257,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
grpc_closure *saved_rsr_closure = grpc_closure_create(
receiving_stream_ready, call->saved_receiving_stream_ready_bctlp);
call->saved_receiving_stream_ready_bctlp = NULL;
- grpc_exec_ctx_push(exec_ctx, saved_rsr_closure, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, saved_rsr_closure, error, NULL);
}
gpr_mu_unlock(&call->mu);
@@ -1264,7 +1392,40 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
- if (op->data.send_initial_metadata.count > INT_MAX) {
+ /* process compression level */
+ grpc_metadata compression_md;
+ memset(&compression_md, 0, sizeof(grpc_metadata));
+ size_t additional_metadata_count = 0;
+ grpc_compression_level effective_compression_level;
+ bool level_set = false;
+ if (op->data.send_initial_metadata.maybe_compression_level.is_set) {
+ effective_compression_level =
+ op->data.send_initial_metadata.maybe_compression_level.level;
+ level_set = true;
+ } else {
+ const grpc_compression_options copts =
+ grpc_channel_compression_options(call->channel);
+ level_set = copts.default_level.is_set;
+ if (level_set) {
+ effective_compression_level = copts.default_level.level;
+ }
+ }
+ if (level_set && !call->is_client) {
+ const grpc_compression_algorithm calgo =
+ compression_algorithm_for_level_locked(
+ call, effective_compression_level);
+ char *calgo_name;
+ grpc_compression_algorithm_name(calgo, &calgo_name);
+ // the following will be picked up by the compress filter and used as
+ // the call's compression algorithm.
+ compression_md.key = GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY;
+ compression_md.value = calgo_name;
+ compression_md.value_length = strlen(calgo_name);
+ additional_metadata_count++;
+ }
+
+ if (op->data.send_initial_metadata.count + additional_metadata_count >
+ INT_MAX) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1272,7 +1433,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->sent_initial_metadata = 1;
if (!prepare_application_metadata(
call, (int)op->data.send_initial_metadata.count,
- op->data.send_initial_metadata.metadata, 0, call->is_client)) {
+ op->data.send_initial_metadata.metadata, 0, call->is_client,
+ &compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1360,7 +1522,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata(
call,
(int)op->data.send_status_from_server.trailing_metadata_count,
- op->data.send_status_from_server.trailing_metadata, 1, 1)) {
+ op->data.send_status_from_server.trailing_metadata, 1, 1, NULL,
+ 0)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1432,7 +1595,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->recv_final_op = 1;
stream_op.recv_trailing_metadata =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
- stream_op.collect_stats = &call->stats;
+ stream_op.collect_stats = &call->stats.transport_stream_stats;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
/* Flag validation: currently allow no flags */
@@ -1454,7 +1617,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->recv_final_op = 1;
stream_op.recv_trailing_metadata =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
- stream_op.collect_stats = &call->stats;
+ stream_op.collect_stats = &call->stats.transport_stream_stats;
break;
}
}
@@ -1549,9 +1712,10 @@ uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; }
grpc_compression_algorithm grpc_call_compression_for_level(
grpc_call *call, grpc_compression_level level) {
gpr_mu_lock(&call->mu);
- const uint32_t accepted_encodings = call->encodings_accepted_by_peer;
+ grpc_compression_algorithm algo =
+ compression_algorithm_for_level_locked(call, level);
gpr_mu_unlock(&call->mu);
- return grpc_compression_algorithm_for_level(level, accepted_encodings);
+ return algo;
}
const char *grpc_call_error_to_string(grpc_call_error error) {
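The larger theme in call.c is that per-call compression levels now travel as metadata: when send_initial_metadata carries maybe_compression_level, or the channel supplies a default level, call_start_batch resolves it against the peer's accepted encodings and appends a GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY entry for the compress filter. A hedged sketch of what a server-side batch might set, using the grpc_op fields read by the hunk above; example_request_high_compression is illustrative:

#include <string.h>
#include <grpc/grpc.h>
#include <grpc/compression.h>

/* Illustrative: fill one op of a batch so the compress filter receives a
   request-algorithm entry derived from the HIGH level. */
void example_request_high_compression(grpc_op *op) {
  memset(op, 0, sizeof(*op));
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->data.send_initial_metadata.metadata = NULL;
  op->data.send_initial_metadata.maybe_compression_level.is_set = 1;
  op->data.send_initial_metadata.maybe_compression_level.level =
      GRPC_COMPRESS_LEVEL_HIGH;
  /* the caller then queues this op with the rest of the batch via
     grpc_call_start_batch */
}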
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 2725e060b8..b640345c21 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -53,6 +53,8 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask,
grpc_completion_queue *cq,
+ /* if not NULL, it'll be used in lieu of \a cq */
+ grpc_pollset_set *pollset_set_alternative,
const void *server_transport_data,
grpc_mdelem **add_initial_metadata,
size_t add_initial_metadata_count,
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c
index a6d1d5149f..31c074f15d 100644
--- a/src/core/lib/surface/call_log_batch.c
+++ b/src/core/lib/surface/call_log_batch.c
@@ -112,7 +112,7 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
size_t i;
for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]);
- gpr_log(file, line, severity, "ops[%d]: %s", i, tmp);
+ gpr_log(file, line, severity, "ops[%" PRIuPTR "]: %s", i, tmp);
gpr_free(tmp);
}
}
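This is one of several log sites in the change that swap "%d" plus a cast for an <inttypes.h> macro so the format width matches the argument on both 32- and 64-bit builds; the tree prints size_t values with PRIuPTR. A tiny sketch of the pattern, assuming only gpr_log as used above:

#include <inttypes.h>
#include <stddef.h>
#include <grpc/support/log.h>

void example_log_index(size_t i, const char *what) {
  /* size_t formatted with PRIuPTR, matching the call_log_batch change */
  gpr_log(GPR_DEBUG, "ops[%" PRIuPTR "]: %s", i, what);
}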
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 3c68dc0c4b..8936e7164f 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -36,16 +36,17 @@
#include <stdlib.h>
#include <string.h>
+#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
-#include "src/core/lib/surface/init.h"
#include "src/core/lib/transport/static_metadata.h"
/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
@@ -64,10 +65,12 @@ typedef struct registered_call {
struct grpc_channel {
int is_client;
uint32_t max_message_length;
+ grpc_compression_options compression_options;
grpc_mdelem *default_authority;
gpr_mu registered_call_mu;
registered_call *registered_calls;
+
char *target;
};
@@ -112,6 +115,7 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
channel->registered_calls = NULL;
channel->max_message_length = DEFAULT_MAX_MESSAGE_LENGTH;
+ grpc_compression_options_init(&channel->compression_options);
if (args) {
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_MESSAGE_LENGTH)) {
@@ -152,6 +156,27 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
":authority", args->args[i].value.string);
}
}
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
+ channel->compression_options.default_level.is_set = true;
+ GPR_ASSERT(args->args[i].value.integer >= 0 &&
+ args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT);
+ channel->compression_options.default_level.level =
+ (grpc_compression_level)args->args[i].value.integer;
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
+ channel->compression_options.default_algorithm.is_set = true;
+ GPR_ASSERT(args->args[i].value.integer >= 0 &&
+ args->args[i].value.integer <
+ GRPC_COMPRESS_ALGORITHMS_COUNT);
+ channel->compression_options.default_algorithm.algorithm =
+ (grpc_compression_algorithm)args->args[i].value.integer;
+ } else if (0 ==
+ strcmp(args->args[i].key,
+ GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
+ channel->compression_options.enabled_algorithms_bitset =
+ (uint32_t)args->args[i].value.integer |
+ 0x1; /* always support no compression */
}
}
grpc_channel_args_destroy(args);
@@ -167,12 +192,14 @@ char *grpc_channel_get_target(grpc_channel *channel) {
static grpc_call *grpc_channel_create_call_internal(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
- grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
- grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
+ grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
+ grpc_mdelem *path_mdelem, grpc_mdelem *authority_mdelem,
+ gpr_timespec deadline) {
grpc_mdelem *send_metadata[2];
size_t num_metadata = 0;
GPR_ASSERT(channel->is_client);
+ GPR_ASSERT(!(cq != NULL && pollset_set_alternative != NULL));
send_metadata[num_metadata++] = path_mdelem;
if (authority_mdelem != NULL) {
@@ -181,8 +208,9 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
- return grpc_call_create(channel, parent_call, propagation_mask, cq, NULL,
- send_metadata, num_metadata, deadline);
+ return grpc_call_create(channel, parent_call, propagation_mask, cq,
+ pollset_set_alternative, NULL, send_metadata,
+ num_metadata, deadline);
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,
@@ -202,7 +230,22 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
(int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
- channel, parent_call, propagation_mask, cq,
+ channel, parent_call, propagation_mask, cq, NULL,
+ grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
+ grpc_mdstr_from_string(method)),
+ host ? grpc_mdelem_from_metadata_strings(GRPC_MDSTR_AUTHORITY,
+ grpc_mdstr_from_string(host))
+ : NULL,
+ deadline);
+}
+
+grpc_call *grpc_channel_create_pollset_set_call(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_pollset_set *pollset_set, const char *method, const char *host,
+ gpr_timespec deadline, void *reserved) {
+ GPR_ASSERT(!reserved);
+ return grpc_channel_create_call_internal(
+ channel, parent_call, propagation_mask, NULL, pollset_set,
grpc_mdelem_from_metadata_strings(GRPC_MDSTR_PATH,
grpc_mdstr_from_string(method)),
host ? grpc_mdelem_from_metadata_strings(GRPC_MDSTR_AUTHORITY,
@@ -246,7 +289,7 @@ grpc_call *grpc_channel_create_registered_call(
(int)deadline.tv_nsec, (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
- channel, parent_call, propagation_mask, completion_queue,
+ channel, parent_call, propagation_mask, completion_queue, NULL,
GRPC_MDELEM_REF(rc->path),
rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
}
@@ -307,6 +350,11 @@ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
return CHANNEL_STACK_FROM_CHANNEL(channel);
}
+grpc_compression_options grpc_channel_compression_options(
+ const grpc_channel *channel) {
+ return channel->compression_options;
+}
+
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
switch (i) {
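grpc_channel_create now seeds per-channel compression options from channel args (default level, default algorithm, enabled-algorithms bitset) and exposes them through grpc_channel_compression_options. A hedged sketch of a client setting a channel-wide default level; grpc_insecure_channel_create is existing public API and the target string is a placeholder:

#include <grpc/grpc.h>
#include <grpc/compression.h>

grpc_channel *example_channel_with_default_compression(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL; /* the key parsed above */
  arg.value.integer = GRPC_COMPRESS_LEVEL_MED;
  grpc_channel_args args = {1, &arg};
  /* placeholder target; insecure creation for brevity */
  return grpc_insecure_channel_create("localhost:50051", &args, NULL);
}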
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 22dae930e4..7eff7b8883 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -42,6 +42,11 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
grpc_channel_stack_type channel_stack_type,
grpc_transport *optional_transport);
+grpc_call *grpc_channel_create_pollset_set_call(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_pollset_set *pollset_set, const char *method, const char *host,
+ gpr_timespec deadline, void *reserved);
+
/** Get a (borrowed) pointer to this channels underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
@@ -71,4 +76,8 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_channel_internal_unref(exec_ctx, channel)
#endif
+/** Return the channel's compression options. */
+grpc_compression_options grpc_channel_compression_options(
+ const grpc_channel *channel);
+
#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index ecc127876b..2cc6aa74e0 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -50,6 +50,8 @@
#include "src/core/lib/surface/event_string.h"
#include "src/core/lib/surface/surface_trace.h"
+int grpc_trace_operation_failures;
+
typedef struct {
grpc_pollset_worker **worker;
void *tag;
@@ -231,12 +233,16 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
#endif
GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
- if (grpc_api_trace) {
+ if (grpc_api_trace ||
+ (grpc_trace_operation_failures && error != GRPC_ERROR_NONE)) {
const char *errmsg = grpc_error_string(error);
GRPC_API_TRACE(
"grpc_cq_end_op(exec_ctx=%p, cc=%p, tag=%p, error=%s, done=%p, "
"done_arg=%p, storage=%p)",
7, (exec_ctx, cc, tag, errmsg, done, done_arg, storage));
+ if (grpc_trace_operation_failures) {
+ gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
+ }
grpc_error_free_string(errmsg);
}
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index a866858cb5..b9dd1092b6 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -39,6 +39,8 @@
#include <grpc/grpc.h>
#include "src/core/lib/iomgr/pollset.h"
+extern int grpc_trace_operation_failures;
+
typedef struct grpc_cq_completion {
/** user supplied tag */
void *tag;
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 1c8b709015..f07039cb94 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -165,6 +165,7 @@ void grpc_init(void) {
&grpc_trace_channel_stack_builder);
grpc_register_tracer("http1", &grpc_http1_trace);
grpc_register_tracer("compression", &grpc_compression_trace);
+ grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();
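The newly registered op_failure tracer drives grpc_trace_operation_failures, which grpc_cq_end_op and receiving_slice_ready consult before logging failures. Like the other tracers registered here it is selected through the GRPC_TRACE environment variable before grpc_init runs; a small sketch using plain POSIX setenv rather than any internal helper:

#include <stdlib.h>
#include <grpc/grpc.h>

void example_enable_op_failure_tracing(void) {
  setenv("GRPC_TRACE", "op_failure", 1); /* POSIX setenv, read by grpc_init */
  grpc_init();
  /* failed completions are now logged at ERROR severity */
  grpc_shutdown();
}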
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 5c15b676f8..5ea4cba5d1 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -92,17 +92,17 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
if (op->on_connectivity_state_change) {
- GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE);
- *op->connectivity_state = GRPC_CHANNEL_FATAL_FAILURE;
- grpc_exec_ctx_push(exec_ctx, op->on_connectivity_state_change,
- GRPC_ERROR_NONE, NULL);
+ GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
+ *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
+ grpc_exec_ctx_sched(exec_ctx, op->on_connectivity_state_change,
+ GRPC_ERROR_NONE, NULL);
}
if (op->on_consumed != NULL) {
- grpc_exec_ctx_push(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
}
if (op->send_ping != NULL) {
- grpc_exec_ctx_push(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("lame client channel"), NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE("lame client channel"), NULL);
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
@@ -111,6 +111,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ const grpc_call_stats *stats,
void *and_free_memory) {
gpr_free(and_free_memory);
}
@@ -130,7 +131,7 @@ const grpc_channel_filter grpc_lame_filter = {
lame_start_transport_op,
sizeof(call_data),
init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index f261321750..def6e5068b 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -344,8 +344,8 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_push(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
+ NULL);
}
}
@@ -526,7 +526,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_push(exec_ctx, &calld->kill_zombie_closure, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, error, NULL);
return;
}
@@ -571,8 +571,8 @@ static void finish_start_new_rpc(
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_push(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
+ NULL);
return;
}
@@ -757,8 +757,8 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
}
- grpc_exec_ctx_push(exec_ctx, calld->on_done_recv_initial_metadata, error,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv_initial_metadata, error,
+ NULL);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -794,8 +794,8 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
- grpc_exec_ctx_push(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_NONE, NULL);
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
@@ -812,9 +812,9 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
- grpc_call *call =
- grpc_call_create(chand->channel, NULL, 0, NULL, transport_server_data,
- NULL, 0, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call *call = grpc_call_create(chand->channel, NULL, 0, NULL, NULL,
+ transport_server_data, NULL, 0,
+ gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
call_data *calld = elem->call_data;
@@ -831,7 +831,7 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
channel_data *chand = cd;
grpc_server *server = chand->server;
- if (chand->connectivity_state != GRPC_CHANNEL_FATAL_FAILURE) {
+ if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op op;
memset(&op, 0, sizeof(op));
op.on_connectivity_state_change = &chand->channel_connectivity_changed,
@@ -864,7 +864,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- void *ignored) {
+ const grpc_call_stats *stats, void *ignored) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
@@ -929,7 +929,7 @@ const grpc_channel_filter grpc_server_top_filter = {
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
- grpc_call_stack_ignore_set_pollset,
+ grpc_call_stack_ignore_set_pollset_or_pollset_set,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
@@ -1339,8 +1339,8 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
grpc_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
- grpc_exec_ctx_push(exec_ctx, &calld->kill_zombie_closure,
- GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
+ GRPC_ERROR_NONE, NULL);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 3286af9fb2..054f112127 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -51,8 +51,8 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
return "READY";
case GRPC_CHANNEL_TRANSIENT_FAILURE:
return "TRANSIENT_FAILURE";
- case GRPC_CHANNEL_FATAL_FAILURE:
- return "FATAL_FAILURE";
+ case GRPC_CHANNEL_SHUTDOWN:
+ return "SHUTDOWN";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
@@ -73,13 +73,13 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
while ((w = tracker->watchers)) {
tracker->watchers = w->next;
- if (GRPC_CHANNEL_FATAL_FAILURE != *w->current) {
- *w->current = GRPC_CHANNEL_FATAL_FAILURE;
+ if (GRPC_CHANNEL_SHUTDOWN != *w->current) {
+ *w->current = GRPC_CHANNEL_SHUTDOWN;
error = GRPC_ERROR_NONE;
} else {
error = GRPC_ERROR_CREATE("Shutdown connectivity owner");
}
- grpc_exec_ctx_push(exec_ctx, w->notify, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, w->notify, error, NULL);
gpr_free(w);
}
GRPC_ERROR_UNREF(tracker->current_error);
@@ -114,7 +114,7 @@ int grpc_connectivity_state_notify_on_state_change(
if (current == NULL) {
grpc_connectivity_state_watcher *w = tracker->watchers;
if (w != NULL && w->notify == notify) {
- grpc_exec_ctx_push(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
tracker->watchers = w->next;
gpr_free(w);
return 0;
@@ -122,7 +122,7 @@ int grpc_connectivity_state_notify_on_state_change(
while (w != NULL) {
grpc_connectivity_state_watcher *rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) {
- grpc_exec_ctx_push(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
w->next = w->next->next;
gpr_free(rm_candidate);
return 0;
@@ -133,8 +133,8 @@ int grpc_connectivity_state_notify_on_state_change(
} else {
if (tracker->current_state != *current) {
*current = tracker->current_state;
- grpc_exec_ctx_push(exec_ctx, notify,
- GRPC_ERROR_REF(tracker->current_error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, notify,
+ GRPC_ERROR_REF(tracker->current_error), NULL);
} else {
grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
w->current = current;
@@ -164,7 +164,7 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
case GRPC_CHANNEL_READY:
GPR_ASSERT(error == GRPC_ERROR_NONE);
break;
- case GRPC_CHANNEL_FATAL_FAILURE:
+ case GRPC_CHANNEL_SHUTDOWN:
case GRPC_CHANNEL_TRANSIENT_FAILURE:
GPR_ASSERT(error != GRPC_ERROR_NONE);
break;
@@ -174,13 +174,13 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
if (tracker->current_state == state) {
return;
}
- GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_FATAL_FAILURE);
+ GPR_ASSERT(tracker->current_state != GRPC_CHANNEL_SHUTDOWN);
tracker->current_state = state;
while ((w = tracker->watchers) != NULL) {
*w->current = tracker->current_state;
tracker->watchers = w->next;
- grpc_exec_ctx_push(exec_ctx, w->notify,
- GRPC_ERROR_REF(tracker->current_error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, w->notify,
+ GRPC_ERROR_REF(tracker->current_error), NULL);
gpr_free(w);
}
}
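GRPC_CHANNEL_FATAL_FAILURE is renamed GRPC_CHANNEL_SHUTDOWN across the tree. For application code that polls connectivity through the public API, the terminal-state check now reads against the new name; a minimal sketch using grpc_channel_check_connectivity_state:

#include <grpc/grpc.h>

int example_channel_is_shut_down(grpc_channel *channel) {
  grpc_connectivity_state st =
      grpc_channel_check_connectivity_state(channel, 0 /* try_to_connect */);
  return st == GRPC_CHANNEL_SHUTDOWN; /* formerly GRPC_CHANNEL_FATAL_FAILURE */
}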
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.c
index 82c8e239f6..0677f29766 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.c
@@ -129,7 +129,10 @@ typedef struct mdtab_shard {
internal_metadata **elems;
size_t count;
size_t capacity;
- size_t free;
+ /** Estimate of the number of unreferenced mdelems in the hash table.
+      This will eventually converge to the exact number, but its instantaneous
+      accuracy is not guaranteed */
+ gpr_atm free_estimate;
} mdtab_shard;
#define LOG2_STRTAB_SHARD_COUNT 5
@@ -217,7 +220,7 @@ void grpc_mdctx_global_init(void) {
mdtab_shard *shard = &g_mdtab_shard[i];
gpr_mu_init(&shard->mu);
shard->count = 0;
- shard->free = 0;
+ gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_MDTAB_CAPACITY;
shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
@@ -232,7 +235,7 @@ void grpc_mdctx_global_shutdown(void) {
gc_mdtab(shard);
/* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
if (shard->count != 0) {
- gpr_log(GPR_DEBUG, "WARNING: %d metadata elements were leaked",
+ gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked",
shard->count);
if (grpc_iomgr_abort_on_leaks()) {
abort();
@@ -245,7 +248,7 @@ void grpc_mdctx_global_shutdown(void) {
gpr_mu_destroy(&shard->mu);
/* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
if (shard->count != 0) {
- gpr_log(GPR_DEBUG, "WARNING: %d metadata strings were leaked",
+ gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked",
shard->count);
for (size_t j = 0; j < shard->capacity; j++) {
for (internal_string *s = shard->strs[j]; s; s = s->bucket_next) {
@@ -281,10 +284,8 @@ static void ref_md_locked(mdtab_shard *shard,
grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
- if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 2)) {
- shard->free--;
- } else {
- GPR_ASSERT(1 != gpr_atm_no_barrier_fetch_add(&md->refcnt, -1));
+ if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
+ gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
}
}
@@ -447,6 +448,7 @@ static void gc_mdtab(mdtab_shard *shard) {
size_t i;
internal_metadata **prev_next;
internal_metadata *md, *next;
+ gpr_atm num_freed = 0;
GPR_TIMER_BEGIN("gc_mdtab", 0);
for (i = 0; i < shard->capacity; i++) {
@@ -463,13 +465,14 @@ static void gc_mdtab(mdtab_shard *shard) {
}
gpr_free(md);
*prev_next = next;
- shard->free--;
+ num_freed++;
shard->count--;
} else {
prev_next = &md->bucket_next;
}
}
}
+ gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -num_freed);
GPR_TIMER_END("gc_mdtab", 0);
}
@@ -504,7 +507,8 @@ static void grow_mdtab(mdtab_shard *shard) {
}
static void rehash_mdtab(mdtab_shard *shard) {
- if (shard->free > shard->capacity / 4) {
+ if (gpr_atm_no_barrier_load(&shard->free_estimate) >
+ (gpr_atm)(shard->capacity / 4)) {
gc_mdtab(shard);
} else {
grow_mdtab(shard);
@@ -553,7 +557,7 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *mkey,
/* not found: create a new pair */
md = gpr_malloc(sizeof(internal_metadata));
- gpr_atm_rel_store(&md->refcnt, 2);
+ gpr_atm_rel_store(&md->refcnt, 1);
md->key = key;
md->value = value;
md->user_data = 0;
@@ -645,7 +649,7 @@ grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
this function - meaning that no adjustment to mdtab_free is necessary,
simplifying the logic here to be just an atomic increment */
/* use C assert to have this removed in opt builds */
- assert(gpr_atm_no_barrier_load(&md->refcnt) >= 2);
+ assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
return gmd;
}
@@ -662,18 +666,13 @@ void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
- if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
- uint32_t hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
+ uint32_t hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
+ if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
+ /* once the refcount hits zero, some other thread can come along and
+ free md at any time: it's unsafe from this point on to access it */
mdtab_shard *shard =
&g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
- GPR_TIMER_BEGIN("grpc_mdelem_unref.to_zero", 0);
- gpr_mu_lock(&shard->mu);
- if (1 == gpr_atm_no_barrier_load(&md->refcnt)) {
- shard->free++;
- gpr_atm_no_barrier_store(&md->refcnt, 0);
- }
- gpr_mu_unlock(&shard->mu);
- GPR_TIMER_END("grpc_mdelem_unref.to_zero", 0);
+ gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
}
}
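The mdtab shard drops its mutex-protected exact free count in favor of a lock-free free_estimate: unref paths bump it with an atomic add and gc_mdtab reconciles it in bulk. A generic sketch of that pattern with the gpr_atm helpers used above; cache_shard and entry_unref are illustrative names, not part of the tree:

#include <stddef.h>
#include <grpc/support/atm.h>

typedef struct {
  gpr_atm free_estimate; /* approximate count of unreferenced entries */
} cache_shard;

static void entry_unref(cache_shard *shard, gpr_atm *refcnt) {
  if (1 == gpr_atm_full_fetch_add(refcnt, -1)) {
    /* dropped to zero: contention-free bookkeeping only, no shard mutex */
    gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
  }
}

static int gc_should_run(cache_shard *shard, size_t capacity) {
  return gpr_atm_no_barrier_load(&shard->free_estimate) >
         (gpr_atm)(capacity / 4);
}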
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 73b0041fd4..c5f16e530d 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -48,7 +48,7 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4, 8, 6, 2, 4, 8, 6, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
{11, 35, 10, 35, 12, 35, 12, 49, 13, 35, 14, 35, 15, 35, 16, 35, 17, 35,
@@ -56,10 +56,10 @@ const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2] =
30, 18, 30, 35, 31, 35, 32, 35, 36, 35, 37, 35, 38, 35, 39, 35, 42, 33,
42, 34, 42, 48, 42, 53, 42, 54, 42, 55, 42, 56, 43, 33, 43, 48, 43, 53,
46, 0, 46, 1, 46, 2, 50, 35, 57, 35, 58, 35, 59, 35, 60, 35, 61, 35,
- 62, 35, 63, 35, 64, 35, 65, 35, 66, 40, 66, 68, 66, 71, 67, 79, 67, 80,
- 69, 35, 70, 35, 72, 35, 73, 35, 74, 35, 75, 35, 76, 41, 76, 51, 76, 52,
- 77, 35, 78, 35, 81, 3, 81, 4, 81, 5, 81, 6, 81, 7, 81, 8, 81, 9,
- 82, 35, 83, 84, 85, 35, 86, 35, 87, 35, 88, 35, 89, 35};
+ 62, 35, 63, 35, 64, 35, 65, 35, 66, 35, 67, 40, 67, 69, 67, 72, 68, 80,
+ 68, 81, 70, 35, 71, 35, 73, 35, 74, 35, 75, 35, 76, 35, 77, 41, 77, 51,
+ 77, 52, 78, 35, 79, 35, 82, 3, 82, 4, 82, 5, 82, 6, 82, 7, 82, 8,
+ 82, 9, 83, 35, 84, 85, 86, 35, 87, 35, 88, 35, 89, 35, 90, 35};
const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"0",
@@ -126,6 +126,7 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"if-unmodified-since",
"last-modified",
"link",
+ "load-reporting",
"location",
"max-forwards",
":method",
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index f9d8bcdc8f..5ff0d2f3bc 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -44,7 +44,7 @@
#include "src/core/lib/transport/metadata.h"
-#define GRPC_STATIC_MDSTR_COUNT 90
+#define GRPC_STATIC_MDSTR_COUNT 91
extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
/* "0" */
#define GRPC_MDSTR_0 (&grpc_static_mdstr_table[0])
@@ -175,60 +175,62 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[62])
/* "link" */
#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[63])
+/* "load-reporting" */
+#define GRPC_MDSTR_LOAD_REPORTING (&grpc_static_mdstr_table[64])
/* "location" */
-#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[64])
+#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[65])
/* "max-forwards" */
-#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[65])
+#define GRPC_MDSTR_MAX_FORWARDS (&grpc_static_mdstr_table[66])
/* ":method" */
-#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_METHOD (&grpc_static_mdstr_table[67])
/* ":path" */
-#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[67])
+#define GRPC_MDSTR_PATH (&grpc_static_mdstr_table[68])
/* "POST" */
-#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[68])
+#define GRPC_MDSTR_POST (&grpc_static_mdstr_table[69])
/* "proxy-authenticate" */
-#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[69])
+#define GRPC_MDSTR_PROXY_AUTHENTICATE (&grpc_static_mdstr_table[70])
/* "proxy-authorization" */
-#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[70])
+#define GRPC_MDSTR_PROXY_AUTHORIZATION (&grpc_static_mdstr_table[71])
/* "PUT" */
-#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[71])
+#define GRPC_MDSTR_PUT (&grpc_static_mdstr_table[72])
/* "range" */
-#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[72])
+#define GRPC_MDSTR_RANGE (&grpc_static_mdstr_table[73])
/* "referer" */
-#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[73])
+#define GRPC_MDSTR_REFERER (&grpc_static_mdstr_table[74])
/* "refresh" */
-#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[74])
+#define GRPC_MDSTR_REFRESH (&grpc_static_mdstr_table[75])
/* "retry-after" */
-#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[75])
+#define GRPC_MDSTR_RETRY_AFTER (&grpc_static_mdstr_table[76])
/* ":scheme" */
-#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[76])
+#define GRPC_MDSTR_SCHEME (&grpc_static_mdstr_table[77])
/* "server" */
-#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[77])
+#define GRPC_MDSTR_SERVER (&grpc_static_mdstr_table[78])
/* "set-cookie" */
-#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[78])
+#define GRPC_MDSTR_SET_COOKIE (&grpc_static_mdstr_table[79])
/* "/" */
-#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[79])
+#define GRPC_MDSTR_SLASH (&grpc_static_mdstr_table[80])
/* "/index.html" */
-#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[80])
+#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (&grpc_static_mdstr_table[81])
/* ":status" */
-#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[81])
+#define GRPC_MDSTR_STATUS (&grpc_static_mdstr_table[82])
/* "strict-transport-security" */
-#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[82])
+#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (&grpc_static_mdstr_table[83])
/* "te" */
-#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[83])
+#define GRPC_MDSTR_TE (&grpc_static_mdstr_table[84])
/* "trailers" */
-#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[84])
+#define GRPC_MDSTR_TRAILERS (&grpc_static_mdstr_table[85])
/* "transfer-encoding" */
-#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[85])
+#define GRPC_MDSTR_TRANSFER_ENCODING (&grpc_static_mdstr_table[86])
/* "user-agent" */
-#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[86])
+#define GRPC_MDSTR_USER_AGENT (&grpc_static_mdstr_table[87])
/* "vary" */
-#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[87])
+#define GRPC_MDSTR_VARY (&grpc_static_mdstr_table[88])
/* "via" */
-#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[88])
+#define GRPC_MDSTR_VIA (&grpc_static_mdstr_table[89])
/* "www-authenticate" */
-#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[89])
+#define GRPC_MDSTR_WWW_AUTHENTICATE (&grpc_static_mdstr_table[90])
-#define GRPC_STATIC_MDELEM_COUNT 79
+#define GRPC_STATIC_MDELEM_COUNT 80
extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
/* "accept-charset": "" */
@@ -333,71 +335,73 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
/* "link": "" */
#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
+/* "load-reporting": "" */
+#define GRPC_MDELEM_LOAD_REPORTING_EMPTY (&grpc_static_mdelem_table[47])
/* "location": "" */
-#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[47])
+#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[48])
/* "max-forwards": "" */
-#define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[48])
+#define GRPC_MDELEM_MAX_FORWARDS_EMPTY (&grpc_static_mdelem_table[49])
/* ":method": "GET" */
-#define GRPC_MDELEM_METHOD_GET (&grpc_static_mdelem_table[49])
+#define GRPC_MDELEM_METHOD_GET (&grpc_static_mdelem_table[50])
/* ":method": "POST" */
-#define GRPC_MDELEM_METHOD_POST (&grpc_static_mdelem_table[50])
+#define GRPC_MDELEM_METHOD_POST (&grpc_static_mdelem_table[51])
/* ":method": "PUT" */
-#define GRPC_MDELEM_METHOD_PUT (&grpc_static_mdelem_table[51])
+#define GRPC_MDELEM_METHOD_PUT (&grpc_static_mdelem_table[52])
/* ":path": "/" */
-#define GRPC_MDELEM_PATH_SLASH (&grpc_static_mdelem_table[52])
+#define GRPC_MDELEM_PATH_SLASH (&grpc_static_mdelem_table[53])
/* ":path": "/index.html" */
-#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML (&grpc_static_mdelem_table[53])
+#define GRPC_MDELEM_PATH_SLASH_INDEX_DOT_HTML (&grpc_static_mdelem_table[54])
/* "proxy-authenticate": "" */
-#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[54])
+#define GRPC_MDELEM_PROXY_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[55])
/* "proxy-authorization": "" */
-#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[55])
+#define GRPC_MDELEM_PROXY_AUTHORIZATION_EMPTY (&grpc_static_mdelem_table[56])
/* "range": "" */
-#define GRPC_MDELEM_RANGE_EMPTY (&grpc_static_mdelem_table[56])
+#define GRPC_MDELEM_RANGE_EMPTY (&grpc_static_mdelem_table[57])
/* "referer": "" */
-#define GRPC_MDELEM_REFERER_EMPTY (&grpc_static_mdelem_table[57])
+#define GRPC_MDELEM_REFERER_EMPTY (&grpc_static_mdelem_table[58])
/* "refresh": "" */
-#define GRPC_MDELEM_REFRESH_EMPTY (&grpc_static_mdelem_table[58])
+#define GRPC_MDELEM_REFRESH_EMPTY (&grpc_static_mdelem_table[59])
/* "retry-after": "" */
-#define GRPC_MDELEM_RETRY_AFTER_EMPTY (&grpc_static_mdelem_table[59])
+#define GRPC_MDELEM_RETRY_AFTER_EMPTY (&grpc_static_mdelem_table[60])
/* ":scheme": "grpc" */
-#define GRPC_MDELEM_SCHEME_GRPC (&grpc_static_mdelem_table[60])
+#define GRPC_MDELEM_SCHEME_GRPC (&grpc_static_mdelem_table[61])
/* ":scheme": "http" */
-#define GRPC_MDELEM_SCHEME_HTTP (&grpc_static_mdelem_table[61])
+#define GRPC_MDELEM_SCHEME_HTTP (&grpc_static_mdelem_table[62])
/* ":scheme": "https" */
-#define GRPC_MDELEM_SCHEME_HTTPS (&grpc_static_mdelem_table[62])
+#define GRPC_MDELEM_SCHEME_HTTPS (&grpc_static_mdelem_table[63])
/* "server": "" */
-#define GRPC_MDELEM_SERVER_EMPTY (&grpc_static_mdelem_table[63])
+#define GRPC_MDELEM_SERVER_EMPTY (&grpc_static_mdelem_table[64])
/* "set-cookie": "" */
-#define GRPC_MDELEM_SET_COOKIE_EMPTY (&grpc_static_mdelem_table[64])
+#define GRPC_MDELEM_SET_COOKIE_EMPTY (&grpc_static_mdelem_table[65])
/* ":status": "200" */
-#define GRPC_MDELEM_STATUS_200 (&grpc_static_mdelem_table[65])
+#define GRPC_MDELEM_STATUS_200 (&grpc_static_mdelem_table[66])
/* ":status": "204" */
-#define GRPC_MDELEM_STATUS_204 (&grpc_static_mdelem_table[66])
+#define GRPC_MDELEM_STATUS_204 (&grpc_static_mdelem_table[67])
/* ":status": "206" */
-#define GRPC_MDELEM_STATUS_206 (&grpc_static_mdelem_table[67])
+#define GRPC_MDELEM_STATUS_206 (&grpc_static_mdelem_table[68])
/* ":status": "304" */
-#define GRPC_MDELEM_STATUS_304 (&grpc_static_mdelem_table[68])
+#define GRPC_MDELEM_STATUS_304 (&grpc_static_mdelem_table[69])
/* ":status": "400" */
-#define GRPC_MDELEM_STATUS_400 (&grpc_static_mdelem_table[69])
+#define GRPC_MDELEM_STATUS_400 (&grpc_static_mdelem_table[70])
/* ":status": "404" */
-#define GRPC_MDELEM_STATUS_404 (&grpc_static_mdelem_table[70])
+#define GRPC_MDELEM_STATUS_404 (&grpc_static_mdelem_table[71])
/* ":status": "500" */
-#define GRPC_MDELEM_STATUS_500 (&grpc_static_mdelem_table[71])
+#define GRPC_MDELEM_STATUS_500 (&grpc_static_mdelem_table[72])
/* "strict-transport-security": "" */
#define GRPC_MDELEM_STRICT_TRANSPORT_SECURITY_EMPTY \
- (&grpc_static_mdelem_table[72])
+ (&grpc_static_mdelem_table[73])
/* "te": "trailers" */
-#define GRPC_MDELEM_TE_TRAILERS (&grpc_static_mdelem_table[73])
+#define GRPC_MDELEM_TE_TRAILERS (&grpc_static_mdelem_table[74])
/* "transfer-encoding": "" */
-#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY (&grpc_static_mdelem_table[74])
+#define GRPC_MDELEM_TRANSFER_ENCODING_EMPTY (&grpc_static_mdelem_table[75])
/* "user-agent": "" */
-#define GRPC_MDELEM_USER_AGENT_EMPTY (&grpc_static_mdelem_table[75])
+#define GRPC_MDELEM_USER_AGENT_EMPTY (&grpc_static_mdelem_table[76])
/* "vary": "" */
-#define GRPC_MDELEM_VARY_EMPTY (&grpc_static_mdelem_table[76])
+#define GRPC_MDELEM_VARY_EMPTY (&grpc_static_mdelem_table[77])
/* "via": "" */
-#define GRPC_MDELEM_VIA_EMPTY (&grpc_static_mdelem_table[77])
+#define GRPC_MDELEM_VIA_EMPTY (&grpc_static_mdelem_table[78])
/* "www-authenticate": "" */
-#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[78])
+#define GRPC_MDELEM_WWW_AUTHENTICATE_EMPTY (&grpc_static_mdelem_table[79])
extern const uint8_t
grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT * 2];
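
The static_metadata changes add a "load-reporting" string and a "load-reporting": "" element and renumber the tables around them. A hedged sketch, not part of this diff, of how a channel filter could attach the new static element to a call's initial metadata; it assumes the 2016-era grpc_metadata_batch_add_tail(batch, storage, elem) signature:

/* Hedged illustration; the helper name is hypothetical. */
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/static_metadata.h"

static void add_load_reporting_header(grpc_metadata_batch *initial_metadata,
                                      grpc_linked_mdelem *storage) {
  /* Static elements live for the life of the process, so no per-call
     ref/unref bookkeeping is needed here. */
  grpc_metadata_batch_add_tail(initial_metadata, storage,
                               GRPC_MDELEM_LOAD_REPORTING_EMPTY);
}
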
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index fdf0f4b2aa..1105494a85 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -60,7 +60,7 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
- grpc_exec_ctx_push(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL);
+ grpc_exec_ctx_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL);
}
}
@@ -125,10 +125,19 @@ void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
transport->vtable->perform_op(exec_ctx, transport, op);
}
-void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport, grpc_stream *stream,
- grpc_pollset *pollset) {
- transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
+void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
+ grpc_stream *stream,
+ grpc_polling_entity *pollent) {
+ grpc_pollset *pollset;
+ grpc_pollset_set *pollset_set;
+ if ((pollset = grpc_polling_entity_pollset(pollent)) != NULL) {
+ transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
+ } else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) != NULL) {
+ transport->vtable->set_pollset_set(exec_ctx, transport, stream,
+ pollset_set);
+ } else {
+ abort();
+ }
}
void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
@@ -146,11 +155,11 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
grpc_error *error) {
- grpc_exec_ctx_push(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error),
- NULL);
- grpc_exec_ctx_push(exec_ctx, op->recv_initial_metadata_ready,
- GRPC_ERROR_REF(error), NULL);
- grpc_exec_ctx_push(exec_ctx, op->on_complete, error, NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error),
+ NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->recv_initial_metadata_ready,
+ GRPC_ERROR_REF(error), NULL);
+ grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL);
}
void grpc_transport_stream_op_add_cancellation(grpc_transport_stream_op *op,
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index f2d750e870..a46ccb643c 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -37,6 +37,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/transport/byte_stream.h"
@@ -197,9 +198,8 @@ int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount,
const void *server_data);
-void grpc_transport_set_pollset(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport, grpc_stream *stream,
- grpc_pollset *pollset);
+void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
+ grpc_stream *stream, grpc_polling_entity *pollent);
/* Destroy transport data for a stream.
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 956155eec8..fc7140671b 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -53,6 +53,10 @@ typedef struct grpc_transport_vtable {
void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
grpc_stream *stream, grpc_pollset *pollset);
+ /* implementation of grpc_transport_set_pops for pollset_sets */
+ void (*set_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
+ grpc_stream *stream, grpc_pollset_set *pollset_set);
+
/* implementation of grpc_transport_perform_stream_op */
void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
grpc_stream *stream, grpc_transport_stream_op *op);
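
Together, the new set_pollset_set vtable slot and grpc_transport_set_pops amount to a tagged dispatch over a polling entity. A self-contained sketch of that shape; PollingEntity and Vtable are illustrative, not grpc_polling_entity or grpc_transport_vtable:

#include <cstdlib>

struct Pollset {};
struct PollsetSet {};

struct PollingEntity {
  Pollset* pollset = nullptr;        // at most one of these is non-null
  PollsetSet* pollset_set = nullptr;
};

struct Vtable {
  void (*set_pollset)(Pollset*);
  void (*set_pollset_set)(PollsetSet*);
};

void SetPops(const Vtable& vtable, PollingEntity* pollent) {
  if (pollent->pollset != nullptr) {
    vtable.set_pollset(pollent->pollset);
  } else if (pollent->pollset_set != nullptr) {
    vtable.set_pollset_set(pollent->pollset_set);
  } else {
    abort();  // an empty polling entity is a caller bug
  }
}
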
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
new file mode 100644
index 0000000000..d0b5f5c702
--- /dev/null
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/grpc.h>
+
+extern void grpc_chttp2_plugin_init(void);
+extern void grpc_chttp2_plugin_shutdown(void);
+extern void grpc_client_config_init(void);
+extern void grpc_client_config_shutdown(void);
+
+void grpc_register_built_in_plugins(void) {
+ grpc_register_plugin(grpc_chttp2_plugin_init,
+ grpc_chttp2_plugin_shutdown);
+ grpc_register_plugin(grpc_client_config_init,
+ grpc_client_config_shutdown);
+}
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.c
index 822aa6d8b7..905cd59e23 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.c
@@ -45,6 +45,8 @@ extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
extern void grpc_resolver_sockaddr_shutdown(void);
+extern void grpc_load_reporting_plugin_init(void);
+extern void grpc_load_reporting_plugin_shutdown(void);
extern void census_grpc_plugin_init(void);
extern void census_grpc_plugin_shutdown(void);
@@ -61,6 +63,8 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
grpc_resolver_sockaddr_shutdown);
+ grpc_register_plugin(grpc_load_reporting_plugin_init,
+ grpc_load_reporting_plugin_shutdown);
grpc_register_plugin(census_grpc_plugin_init,
census_grpc_plugin_shutdown);
}
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
index a6108ae7a9..7995078725 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
@@ -41,6 +41,8 @@ extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
extern void grpc_resolver_sockaddr_shutdown(void);
+extern void grpc_load_reporting_plugin_init(void);
+extern void grpc_load_reporting_plugin_shutdown(void);
extern void grpc_lb_policy_pick_first_init(void);
extern void grpc_lb_policy_pick_first_shutdown(void);
extern void grpc_lb_policy_round_robin_init(void);
@@ -57,6 +59,8 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
grpc_resolver_sockaddr_shutdown);
+ grpc_register_plugin(grpc_load_reporting_plugin_init,
+ grpc_load_reporting_plugin_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,
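
Both registries follow the same convention: each built-in module exposes an init/shutdown pair that is handed to grpc_register_plugin. A sketch of a hypothetical plugin written against that pattern; the my_plugin_* names are illustrative, not part of gRPC:

#include <grpc/grpc.h>

void my_plugin_init(void) { /* set up global state, register filters, etc. */ }
void my_plugin_shutdown(void) { /* tear down that state */ }

void register_my_plugin(void) {
  // grpc_register_plugin must run before grpc_init(); grpc_init() then invokes
  // every registered init hook, and grpc_shutdown() the shutdown hooks.
  grpc_register_plugin(my_plugin_init, my_plugin_shutdown);
}
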
diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc
index 32c7794ade..0ba77a5057 100644
--- a/src/cpp/client/client_context.cc
+++ b/src/cpp/client/client_context.cc
@@ -33,15 +33,14 @@
#include <grpc++/client_context.h>
-#include <grpc++/security/credentials.h>
-#include <grpc++/server_context.h>
-#include <grpc++/support/time.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/channel/compress_filter.h"
+#include <grpc++/security/credentials.h>
+#include <grpc++/server_context.h>
+#include <grpc++/support/time.h>
namespace grpc {
@@ -64,6 +63,7 @@ ClientContext::ClientContext()
call_(nullptr),
call_canceled_(false),
deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)),
+ census_context_(nullptr),
propagate_from_call_(nullptr) {
g_client_callbacks->DefaultConstructor(this);
}
@@ -112,7 +112,7 @@ void ClientContext::set_compression_algorithm(
abort();
}
GPR_ASSERT(algorithm_name != nullptr);
- AddMetadata(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, algorithm_name);
+ AddMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
}
void ClientContext::TryCancel() {
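
The client_context.cc hunk only renames the metadata key used to request per-call compression; the public entry point is still ClientContext::set_compression_algorithm. A brief usage sketch (the wrapper function name is an assumption):

#include <grpc++/client_context.h>
#include <grpc/compression.h>

void RequestGzip(grpc::ClientContext* context) {
  // Ends up as the GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY metadata entry
  // added above.
  context->set_compression_algorithm(GRPC_COMPRESS_GZIP);
}
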
diff --git a/src/cpp/client/create_channel_posix.cc b/src/cpp/client/create_channel_posix.cc
new file mode 100644
index 0000000000..60cfed3d62
--- /dev/null
+++ b/src/cpp/client/create_channel_posix.cc
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/channel.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/impl/grpc_library.h>
+#include <grpc/grpc.h>
+#include <grpc/grpc_posix.h>
+
+#include "src/cpp/client/create_channel_internal.h"
+
+namespace grpc {
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+std::shared_ptr<Channel> CreateInsecureChannelFromFd(const grpc::string& target,
+ int fd) {
+ internal::GrpcLibrary init_lib;
+ init_lib.init();
+ return CreateChannelInternal(
+ "", grpc_insecure_channel_create_from_fd(target.c_str(), fd, nullptr));
+}
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
+
+} // namespace grpc
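
A usage sketch for the new factory (POSIX only; it assumes fd is an already-connected socket and that the declaration lives in <grpc++/create_channel_posix.h>, the header added alongside this file):

#include <memory>

#include <grpc++/channel.h>
#include <grpc++/create_channel_posix.h>

std::shared_ptr<grpc::Channel> WrapConnectedSocket(int fd) {
  // The target string identifies the peer for naming purposes; no connection
  // is established because the fd already carries one.
  return grpc::CreateInsecureChannelFromFd("fd-channel", fd);
}
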
diff --git a/src/cpp/codegen/codegen_init.cc b/src/cpp/codegen/codegen_init.cc
index c5d22124b7..103a7c1258 100644
--- a/src/cpp/codegen/codegen_init.cc
+++ b/src/cpp/codegen/codegen_init.cc
@@ -34,12 +34,12 @@
#include <grpc++/impl/codegen/core_codegen_interface.h>
#include <grpc++/impl/codegen/grpc_library.h>
-/// Initializes the global gRPC variables for the codegen library. These will
+/// Null-initializes the global gRPC variables for the codegen library. These
/// stay null in the absence of the grpc++ library. In this case, no gRPC
/// features such as the ability to perform calls will be available. Trying to
/// perform them would result in a segmentation fault when trying to
/// dereference the following nulled globals. These should be associated with
/// actual implementations as part of the instantiation of a \a
/// grpc::GrpcLibraryInitializer variable.
-grpc::CoreCodegenInterface* grpc::g_core_codegen_interface = nullptr;
-grpc::GrpcLibraryInterface* grpc::g_glip = nullptr;
+grpc::CoreCodegenInterface* grpc::g_core_codegen_interface;
+grpc::GrpcLibraryInterface* grpc::g_glip;
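
A hedged sketch of what "associating the globals with actual implementations" looks like once the full grpc++ library is linked in; only the two globals come from this file, the parameter types passed in are an assumption:

#include <grpc++/impl/codegen/core_codegen_interface.h>
#include <grpc++/impl/codegen/grpc_library.h>

void BindCodegenGlobals(grpc::CoreCodegenInterface* codegen,
                        grpc::GrpcLibraryInterface* library) {
  // Leave any previously installed implementation in place.
  if (grpc::g_core_codegen_interface == nullptr) {
    grpc::g_core_codegen_interface = codegen;
  }
  if (grpc::g_glip == nullptr) {
    grpc::g_glip = library;
  }
}
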
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index 8e8d42eb29..cc35aa69ba 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -31,7 +31,7 @@
*
*/
-#include "src/cpp/common/core_codegen.h"
+#include <grpc++/impl/codegen/core_codegen.h>
#include <stdlib.h>
diff --git a/src/cpp/common/core_codegen.h b/src/cpp/common/core_codegen.h
deleted file mode 100644
index 656b11e7e7..0000000000
--- a/src/cpp/common/core_codegen.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// This file should be compiled as part of grpc++.
-
-#include <grpc++/impl/codegen/core_codegen_interface.h>
-#include <grpc/byte_buffer.h>
-#include <grpc/impl/codegen/grpc_types.h>
-
-namespace grpc {
-
-/// Implementation of the core codegen interface.
-class CoreCodegen : public CoreCodegenInterface {
- private:
- grpc_completion_queue* grpc_completion_queue_create(void* reserved) override;
- void grpc_completion_queue_destroy(grpc_completion_queue* cq) override;
- grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
- gpr_timespec deadline,
- void* reserved) override;
-
- void* gpr_malloc(size_t size) override;
- void gpr_free(void* p) override;
-
- void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
-
- void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
- grpc_byte_buffer* buffer) override;
- void grpc_byte_buffer_reader_destroy(
- grpc_byte_buffer_reader* reader) override;
- int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
- gpr_slice* slice) override;
-
- grpc_byte_buffer* grpc_raw_byte_buffer_create(gpr_slice* slice,
- size_t nslices) override;
-
- gpr_slice gpr_slice_malloc(size_t length) override;
- void gpr_slice_unref(gpr_slice slice) override;
- gpr_slice gpr_slice_split_tail(gpr_slice* s, size_t split) override;
- void gpr_slice_buffer_add(gpr_slice_buffer* sb, gpr_slice slice) override;
- void gpr_slice_buffer_pop(gpr_slice_buffer* sb) override;
-
- void grpc_metadata_array_init(grpc_metadata_array* array) override;
- void grpc_metadata_array_destroy(grpc_metadata_array* array) override;
-
- gpr_timespec gpr_inf_future(gpr_clock_type type) override;
-
- virtual const Status& ok() override;
- virtual const Status& cancelled() override;
-
- void assert_fail(const char* failed_assertion) override;
-};
-
-} // namespace grpc
diff --git a/src/cpp/ext/proto_server_reflection.cc b/src/cpp/ext/proto_server_reflection.cc
new file mode 100644
index 0000000000..3973bfb58e
--- /dev/null
+++ b/src/cpp/ext/proto_server_reflection.cc
@@ -0,0 +1,227 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <unordered_set>
+#include <vector>
+
+#include <grpc++/grpc++.h>
+
+#include "src/cpp/ext/proto_server_reflection.h"
+
+using grpc::Status;
+using grpc::StatusCode;
+using grpc::reflection::v1alpha::ServerReflectionRequest;
+using grpc::reflection::v1alpha::ExtensionRequest;
+using grpc::reflection::v1alpha::ServerReflectionResponse;
+using grpc::reflection::v1alpha::ListServiceResponse;
+using grpc::reflection::v1alpha::ServiceResponse;
+using grpc::reflection::v1alpha::ExtensionNumberResponse;
+using grpc::reflection::v1alpha::ErrorResponse;
+using grpc::reflection::v1alpha::FileDescriptorResponse;
+
+namespace grpc {
+
+ProtoServerReflection::ProtoServerReflection()
+ : descriptor_pool_(protobuf::DescriptorPool::generated_pool()) {}
+
+void ProtoServerReflection::SetServiceList(
+ const std::vector<grpc::string>* services) {
+ services_ = services;
+}
+
+Status ProtoServerReflection::ServerReflectionInfo(
+ ServerContext* context,
+ ServerReaderWriter<ServerReflectionResponse, ServerReflectionRequest>*
+ stream) {
+ ServerReflectionRequest request;
+ ServerReflectionResponse response;
+ Status status;
+ while (stream->Read(&request)) {
+ switch (request.message_request_case()) {
+ case ServerReflectionRequest::MessageRequestCase::kFileByFilename:
+ status = GetFileByName(context, request.file_by_filename(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::kFileContainingSymbol:
+ status = GetFileContainingSymbol(
+ context, request.file_containing_symbol(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::
+ kFileContainingExtension:
+ status = GetFileContainingExtension(
+ context, &request.file_containing_extension(), &response);
+ break;
+ case ServerReflectionRequest::MessageRequestCase::
+ kAllExtensionNumbersOfType:
+ status = GetAllExtensionNumbers(
+ context, request.all_extension_numbers_of_type(),
+ response.mutable_all_extension_numbers_response());
+ break;
+ case ServerReflectionRequest::MessageRequestCase::kListServices:
+ status =
+ ListService(context, response.mutable_list_services_response());
+ break;
+ default:
+ status = Status(StatusCode::UNIMPLEMENTED, "");
+ }
+
+ if (!status.ok()) {
+ FillErrorResponse(status, response.mutable_error_response());
+ }
+ response.set_valid_host(request.host());
+ response.set_allocated_original_request(
+ new ServerReflectionRequest(request));
+ stream->Write(response);
+ }
+
+ return Status::OK;
+}
+
+void ProtoServerReflection::FillErrorResponse(const Status& status,
+ ErrorResponse* error_response) {
+ error_response->set_error_code(status.error_code());
+ error_response->set_error_message(status.error_message());
+}
+
+Status ProtoServerReflection::ListService(ServerContext* context,
+ ListServiceResponse* response) {
+ if (services_ == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Services not found.");
+ }
+ for (auto it = services_->begin(); it != services_->end(); ++it) {
+ ServiceResponse* service_response = response->add_service();
+ service_response->set_name(*it);
+ }
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileByName(
+ ServerContext* context, const grpc::string& filename,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::FileDescriptor* file_desc =
+ descriptor_pool_->FindFileByName(filename);
+ if (file_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "File not found.");
+ }
+ std::unordered_set<grpc::string> seen_files;
+ FillFileDescriptorResponse(file_desc, response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileContainingSymbol(
+ ServerContext* context, const grpc::string& symbol,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::FileDescriptor* file_desc =
+ descriptor_pool_->FindFileContainingSymbol(symbol);
+ if (file_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Symbol not found.");
+ }
+ std::unordered_set<grpc::string> seen_files;
+ FillFileDescriptorResponse(file_desc, response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetFileContainingExtension(
+ ServerContext* context, const ExtensionRequest* request,
+ ServerReflectionResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::Descriptor* desc =
+ descriptor_pool_->FindMessageTypeByName(request->containing_type());
+ if (desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Type not found.");
+ }
+
+ const protobuf::FieldDescriptor* field_desc =
+ descriptor_pool_->FindExtensionByNumber(desc,
+ request->extension_number());
+ if (field_desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Extension not found.");
+ }
+ std::unordered_set<grpc::string> seen_files;
+ FillFileDescriptorResponse(field_desc->file(), response, &seen_files);
+ return Status::OK;
+}
+
+Status ProtoServerReflection::GetAllExtensionNumbers(
+ ServerContext* context, const grpc::string& type,
+ ExtensionNumberResponse* response) {
+ if (descriptor_pool_ == nullptr) {
+ return Status::CANCELLED;
+ }
+
+ const protobuf::Descriptor* desc =
+ descriptor_pool_->FindMessageTypeByName(type);
+ if (desc == nullptr) {
+ return Status(StatusCode::NOT_FOUND, "Type not found.");
+ }
+
+ std::vector<const protobuf::FieldDescriptor*> extensions;
+ descriptor_pool_->FindAllExtensions(desc, &extensions);
+ for (auto it = extensions.begin(); it != extensions.end(); it++) {
+ response->add_extension_number((*it)->number());
+ }
+ response->set_base_type_name(type);
+ return Status::OK;
+}
+
+void ProtoServerReflection::FillFileDescriptorResponse(
+ const protobuf::FileDescriptor* file_desc,
+ ServerReflectionResponse* response,
+ std::unordered_set<grpc::string>* seen_files) {
+ if (seen_files->find(file_desc->name()) != seen_files->end()) {
+ return;
+ }
+ seen_files->insert(file_desc->name());
+
+ protobuf::FileDescriptorProto file_desc_proto;
+ grpc::string data;
+ file_desc->CopyTo(&file_desc_proto);
+ file_desc_proto.SerializeToString(&data);
+ response->mutable_file_descriptor_response()->add_file_descriptor_proto(data);
+
+ for (int i = 0; i < file_desc->dependency_count(); ++i) {
+ FillFileDescriptorResponse(file_desc->dependency(i), response, seen_files);
+ }
+}
+
+} // namespace grpc
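
A client-side sketch of driving the stream that ServerReflectionInfo handles above, listing the server's services; it assumes the server registered ProtoServerReflection and uses only the request/response fields visible in this change:

#include <cstdio>
#include <memory>

#include <grpc++/ext/reflection.grpc.pb.h>
#include <grpc++/grpc++.h>

void ListRemoteServices(const std::shared_ptr<grpc::Channel>& channel) {
  auto stub = grpc::reflection::v1alpha::ServerReflection::NewStub(channel);
  grpc::ClientContext context;
  auto stream = stub->ServerReflectionInfo(&context);

  grpc::reflection::v1alpha::ServerReflectionRequest request;
  request.set_list_services("");  // any value selects the list_services case
  stream->Write(request);

  grpc::reflection::v1alpha::ServerReflectionResponse response;
  if (stream->Read(&response)) {
    for (const auto& service : response.list_services_response().service()) {
      std::printf("%s\n", service.name().c_str());
    }
  }
  stream->WritesDone();
  stream->Finish();
}
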
diff --git a/src/cpp/ext/proto_server_reflection.h b/src/cpp/ext/proto_server_reflection.h
new file mode 100644
index 0000000000..23c130513d
--- /dev/null
+++ b/src/cpp/ext/proto_server_reflection.h
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
+#define GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
+
+#include <unordered_set>
+#include <vector>
+
+#include <grpc++/ext/reflection.grpc.pb.h>
+#include <grpc++/grpc++.h>
+
+namespace grpc {
+
+class ProtoServerReflection GRPC_FINAL
+ : public reflection::v1alpha::ServerReflection::Service {
+ public:
+ ProtoServerReflection();
+
+ // Add the full names of registered services
+ void SetServiceList(const std::vector<grpc::string>* services);
+
+ // implementation of ServerReflectionInfo(stream ServerReflectionRequest) rpc
+ // in ServerReflection service
+ Status ServerReflectionInfo(
+ ServerContext* context,
+ ServerReaderWriter<reflection::v1alpha::ServerReflectionResponse,
+ reflection::v1alpha::ServerReflectionRequest>* stream)
+ GRPC_OVERRIDE;
+
+ private:
+ Status ListService(ServerContext* context,
+ reflection::v1alpha::ListServiceResponse* response);
+
+ Status GetFileByName(ServerContext* context, const grpc::string& file_name,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetFileContainingSymbol(
+ ServerContext* context, const grpc::string& symbol,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetFileContainingExtension(
+ ServerContext* context,
+ const reflection::v1alpha::ExtensionRequest* request,
+ reflection::v1alpha::ServerReflectionResponse* response);
+
+ Status GetAllExtensionNumbers(
+ ServerContext* context, const grpc::string& type,
+ reflection::v1alpha::ExtensionNumberResponse* response);
+
+ void FillFileDescriptorResponse(
+ const protobuf::FileDescriptor* file_desc,
+ reflection::v1alpha::ServerReflectionResponse* response,
+ std::unordered_set<grpc::string>* seen_files);
+
+ void FillErrorResponse(const Status& status,
+ reflection::v1alpha::ErrorResponse* error_response);
+
+ const protobuf::DescriptorPool* descriptor_pool_;
+ const std::vector<string>* services_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_EXT_PROTO_SERVER_REFLECTION_H
diff --git a/src/cpp/ext/proto_server_reflection_plugin.cc b/src/cpp/ext/proto_server_reflection_plugin.cc
new file mode 100644
index 0000000000..5b806ce1ae
--- /dev/null
+++ b/src/cpp/ext/proto_server_reflection_plugin.cc
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/ext/proto_server_reflection_plugin.h>
+#include <grpc++/impl/server_builder_plugin.h>
+#include <grpc++/impl/server_initializer.h>
+#include <grpc++/server.h>
+
+#include "src/cpp/ext/proto_server_reflection.h"
+
+namespace grpc {
+namespace reflection {
+
+ProtoServerReflectionPlugin::ProtoServerReflectionPlugin()
+ : reflection_service_(new grpc::ProtoServerReflection()) {}
+
+grpc::string ProtoServerReflectionPlugin::name() {
+ return "proto_server_reflection";
+}
+
+void ProtoServerReflectionPlugin::InitServer(grpc::ServerInitializer* si) {
+ si->RegisterService(reflection_service_);
+}
+
+void ProtoServerReflectionPlugin::Finish(grpc::ServerInitializer* si) {
+ reflection_service_->SetServiceList(si->GetServiceList());
+}
+
+void ProtoServerReflectionPlugin::ChangeArguments(const grpc::string& name,
+ void* value) {}
+
+bool ProtoServerReflectionPlugin::has_sync_methods() const {
+ if (reflection_service_) {
+ return reflection_service_->has_synchronous_methods();
+ }
+ return false;
+}
+
+bool ProtoServerReflectionPlugin::has_async_methods() const {
+ if (reflection_service_) {
+ return reflection_service_->has_async_methods();
+ }
+ return false;
+}
+
+static std::unique_ptr< ::grpc::ServerBuilderPlugin> CreateProtoReflection() {
+ return std::unique_ptr< ::grpc::ServerBuilderPlugin>(
+ new ProtoServerReflectionPlugin());
+}
+
+void InitProtoReflectionServerBuilderPlugin() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ ::grpc::ServerBuilder::InternalAddPluginFactory(&CreateProtoReflection);
+}
+
+// Force InitProtoReflectionServerBuilderPlugin() to be called at static
+// initialization time.
+struct StaticProtoReflectionPluginInitializer {
+ StaticProtoReflectionPluginInitializer() {
+ InitProtoReflectionServerBuilderPlugin();
+ }
+} static_proto_reflection_plugin_initializer;
+
+} // namespace reflection
+} // namespace grpc
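
With this translation unit linked in, the static initializer above already registers the plugin, so every ServerBuilder picks it up. A server-side sketch; the explicit init call and the listening address are illustrative, shown only to make the dependency visible:

#include <memory>

#include <grpc++/ext/proto_server_reflection_plugin.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

std::unique_ptr<grpc::Server> StartServerWithReflection() {
  grpc::reflection::InitProtoReflectionServerBuilderPlugin();
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  return builder.BuildAndStart();
}
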
diff --git a/src/cpp/ext/reflection.grpc.pb.cc b/src/cpp/ext/reflection.grpc.pb.cc
new file mode 100644
index 0000000000..b046dfc1b8
--- /dev/null
+++ b/src/cpp/ext/reflection.grpc.pb.cc
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+// Generated by the gRPC protobuf plugin.
+// If you make any local changes, they will be lost.
+// source: reflection.proto
+
+#include <grpc++/ext/reflection.pb.h>
+#include <grpc++/ext/reflection.grpc.pb.h>
+
+#include <grpc++/impl/codegen/async_stream.h>
+#include <grpc++/impl/codegen/async_unary_call.h>
+#include <grpc++/impl/codegen/channel_interface.h>
+#include <grpc++/impl/codegen/client_unary_call.h>
+#include <grpc++/impl/codegen/method_handler_impl.h>
+#include <grpc++/impl/codegen/rpc_service_method.h>
+#include <grpc++/impl/codegen/service_type.h>
+#include <grpc++/impl/codegen/sync_stream.h>
+namespace grpc {
+namespace reflection {
+namespace v1alpha {
+
+static const char* ServerReflection_method_names[] = {
+ "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo",
+};
+
+std::unique_ptr< ServerReflection::Stub> ServerReflection::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {
+ std::unique_ptr< ServerReflection::Stub> stub(new ServerReflection::Stub(channel));
+ return stub;
+}
+
+ServerReflection::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel)
+ : channel_(channel), rpcmethod_ServerReflectionInfo_(ServerReflection_method_names[0], ::grpc::RpcMethod::BIDI_STREAMING, channel)
+ {}
+
+::grpc::ClientReaderWriter< ::grpc::reflection::v1alpha::ServerReflectionRequest, ::grpc::reflection::v1alpha::ServerReflectionResponse>* ServerReflection::Stub::ServerReflectionInfoRaw(::grpc::ClientContext* context) {
+ return new ::grpc::ClientReaderWriter< ::grpc::reflection::v1alpha::ServerReflectionRequest, ::grpc::reflection::v1alpha::ServerReflectionResponse>(channel_.get(), rpcmethod_ServerReflectionInfo_, context);
+}
+
+::grpc::ClientAsyncReaderWriter< ::grpc::reflection::v1alpha::ServerReflectionRequest, ::grpc::reflection::v1alpha::ServerReflectionResponse>* ServerReflection::Stub::AsyncServerReflectionInfoRaw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) {
+ return new ::grpc::ClientAsyncReaderWriter< ::grpc::reflection::v1alpha::ServerReflectionRequest, ::grpc::reflection::v1alpha::ServerReflectionResponse>(channel_.get(), cq, rpcmethod_ServerReflectionInfo_, context, tag);
+}
+
+ServerReflection::Service::Service() {
+ (void)ServerReflection_method_names;
+ AddMethod(new ::grpc::RpcServiceMethod(
+ ServerReflection_method_names[0],
+ ::grpc::RpcMethod::BIDI_STREAMING,
+ new ::grpc::BidiStreamingHandler< ServerReflection::Service, ::grpc::reflection::v1alpha::ServerReflectionRequest, ::grpc::reflection::v1alpha::ServerReflectionResponse>(
+ std::mem_fn(&ServerReflection::Service::ServerReflectionInfo), this)));
+}
+
+ServerReflection::Service::~Service() {
+}
+
+::grpc::Status ServerReflection::Service::ServerReflectionInfo(::grpc::ServerContext* context, ::grpc::ServerReaderWriter< ::grpc::reflection::v1alpha::ServerReflectionResponse, ::grpc::reflection::v1alpha::ServerReflectionRequest>* stream) {
+ (void) context;
+ (void) stream;
+ return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+}
+
+
+} // namespace v1alpha
+} // namespace reflection
+} // namespace grpc
+
diff --git a/src/cpp/ext/reflection.pb.cc b/src/cpp/ext/reflection.pb.cc
new file mode 100644
index 0000000000..b73a65d0a0
--- /dev/null
+++ b/src/cpp/ext/reflection.pb.cc
@@ -0,0 +1,3946 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: reflection.proto
+
+#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
+#include <grpc++/ext/reflection.pb.h>
+
+#include <algorithm>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/port.h>
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/wire_format_lite_inl.h>
+#include <google/protobuf/descriptor.h>
+#include <google/protobuf/generated_message_reflection.h>
+#include <google/protobuf/reflection_ops.h>
+#include <google/protobuf/wire_format.h>
+// @@protoc_insertion_point(includes)
+
+namespace grpc {
+namespace reflection {
+namespace v1alpha {
+
+namespace {
+
+const ::google::protobuf::Descriptor* ServerReflectionRequest_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ServerReflectionRequest_reflection_ = NULL;
+struct ServerReflectionRequestOneofInstance {
+ ::google::protobuf::internal::ArenaStringPtr file_by_filename_;
+ ::google::protobuf::internal::ArenaStringPtr file_containing_symbol_;
+ const ::grpc::reflection::v1alpha::ExtensionRequest* file_containing_extension_;
+ ::google::protobuf::internal::ArenaStringPtr all_extension_numbers_of_type_;
+ ::google::protobuf::internal::ArenaStringPtr list_services_;
+}* ServerReflectionRequest_default_oneof_instance_ = NULL;
+const ::google::protobuf::Descriptor* ExtensionRequest_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ExtensionRequest_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ServerReflectionResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ServerReflectionResponse_reflection_ = NULL;
+struct ServerReflectionResponseOneofInstance {
+ const ::grpc::reflection::v1alpha::FileDescriptorResponse* file_descriptor_response_;
+ const ::grpc::reflection::v1alpha::ExtensionNumberResponse* all_extension_numbers_response_;
+ const ::grpc::reflection::v1alpha::ListServiceResponse* list_services_response_;
+ const ::grpc::reflection::v1alpha::ErrorResponse* error_response_;
+}* ServerReflectionResponse_default_oneof_instance_ = NULL;
+const ::google::protobuf::Descriptor* FileDescriptorResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ FileDescriptorResponse_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ExtensionNumberResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ExtensionNumberResponse_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ListServiceResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ListServiceResponse_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ServiceResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ServiceResponse_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ErrorResponse_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ ErrorResponse_reflection_ = NULL;
+
+} // namespace
+
+
+void protobuf_AssignDesc_reflection_2eproto() {
+ protobuf_AddDesc_reflection_2eproto();
+ const ::google::protobuf::FileDescriptor* file =
+ ::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
+ "reflection.proto");
+ GOOGLE_CHECK(file != NULL);
+ ServerReflectionRequest_descriptor_ = file->message_type(0);
+ static const int ServerReflectionRequest_offsets_[7] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionRequest, host_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionRequest_default_oneof_instance_, file_by_filename_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionRequest_default_oneof_instance_, file_containing_symbol_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionRequest_default_oneof_instance_, file_containing_extension_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionRequest_default_oneof_instance_, all_extension_numbers_of_type_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionRequest_default_oneof_instance_, list_services_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionRequest, message_request_),
+ };
+ ServerReflectionRequest_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ServerReflectionRequest_descriptor_,
+ ServerReflectionRequest::default_instance_,
+ ServerReflectionRequest_offsets_,
+ -1,
+ -1,
+ -1,
+ ServerReflectionRequest_default_oneof_instance_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionRequest, _oneof_case_[0]),
+ sizeof(ServerReflectionRequest),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionRequest, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionRequest, _is_default_instance_));
+ ExtensionRequest_descriptor_ = file->message_type(1);
+ static const int ExtensionRequest_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionRequest, containing_type_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionRequest, extension_number_),
+ };
+ ExtensionRequest_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ExtensionRequest_descriptor_,
+ ExtensionRequest::default_instance_,
+ ExtensionRequest_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(ExtensionRequest),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionRequest, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionRequest, _is_default_instance_));
+ ServerReflectionResponse_descriptor_ = file->message_type(2);
+ static const int ServerReflectionResponse_offsets_[7] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, valid_host_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, original_request_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionResponse_default_oneof_instance_, file_descriptor_response_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionResponse_default_oneof_instance_, all_extension_numbers_response_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionResponse_default_oneof_instance_, list_services_response_),
+ PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ServerReflectionResponse_default_oneof_instance_, error_response_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, message_response_),
+ };
+ ServerReflectionResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ServerReflectionResponse_descriptor_,
+ ServerReflectionResponse::default_instance_,
+ ServerReflectionResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ ServerReflectionResponse_default_oneof_instance_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, _oneof_case_[0]),
+ sizeof(ServerReflectionResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerReflectionResponse, _is_default_instance_));
+ FileDescriptorResponse_descriptor_ = file->message_type(3);
+ static const int FileDescriptorResponse_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FileDescriptorResponse, file_descriptor_proto_),
+ };
+ FileDescriptorResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ FileDescriptorResponse_descriptor_,
+ FileDescriptorResponse::default_instance_,
+ FileDescriptorResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(FileDescriptorResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FileDescriptorResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FileDescriptorResponse, _is_default_instance_));
+ ExtensionNumberResponse_descriptor_ = file->message_type(4);
+ static const int ExtensionNumberResponse_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionNumberResponse, base_type_name_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionNumberResponse, extension_number_),
+ };
+ ExtensionNumberResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ExtensionNumberResponse_descriptor_,
+ ExtensionNumberResponse::default_instance_,
+ ExtensionNumberResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(ExtensionNumberResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionNumberResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ExtensionNumberResponse, _is_default_instance_));
+ ListServiceResponse_descriptor_ = file->message_type(5);
+ static const int ListServiceResponse_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ListServiceResponse, service_),
+ };
+ ListServiceResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ListServiceResponse_descriptor_,
+ ListServiceResponse::default_instance_,
+ ListServiceResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(ListServiceResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ListServiceResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ListServiceResponse, _is_default_instance_));
+ ServiceResponse_descriptor_ = file->message_type(6);
+ static const int ServiceResponse_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServiceResponse, name_),
+ };
+ ServiceResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ServiceResponse_descriptor_,
+ ServiceResponse::default_instance_,
+ ServiceResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(ServiceResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServiceResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServiceResponse, _is_default_instance_));
+ ErrorResponse_descriptor_ = file->message_type(7);
+ static const int ErrorResponse_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ErrorResponse, error_code_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ErrorResponse, error_message_),
+ };
+ ErrorResponse_reflection_ =
+ ::google::protobuf::internal::GeneratedMessageReflection::NewGeneratedMessageReflection(
+ ErrorResponse_descriptor_,
+ ErrorResponse::default_instance_,
+ ErrorResponse_offsets_,
+ -1,
+ -1,
+ -1,
+ sizeof(ErrorResponse),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ErrorResponse, _internal_metadata_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ErrorResponse, _is_default_instance_));
+}
+
+namespace {
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
+inline void protobuf_AssignDescriptorsOnce() {
+ ::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
+ &protobuf_AssignDesc_reflection_2eproto);
+}
+
+void protobuf_RegisterTypes(const ::std::string&) {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ServerReflectionRequest_descriptor_, &ServerReflectionRequest::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ExtensionRequest_descriptor_, &ExtensionRequest::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ServerReflectionResponse_descriptor_, &ServerReflectionResponse::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ FileDescriptorResponse_descriptor_, &FileDescriptorResponse::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ExtensionNumberResponse_descriptor_, &ExtensionNumberResponse::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ListServiceResponse_descriptor_, &ListServiceResponse::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ServiceResponse_descriptor_, &ServiceResponse::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ ErrorResponse_descriptor_, &ErrorResponse::default_instance());
+}
+
+} // namespace
+
+void protobuf_ShutdownFile_reflection_2eproto() {
+ delete ServerReflectionRequest::default_instance_;
+ delete ServerReflectionRequest_default_oneof_instance_;
+ delete ServerReflectionRequest_reflection_;
+ delete ExtensionRequest::default_instance_;
+ delete ExtensionRequest_reflection_;
+ delete ServerReflectionResponse::default_instance_;
+ delete ServerReflectionResponse_default_oneof_instance_;
+ delete ServerReflectionResponse_reflection_;
+ delete FileDescriptorResponse::default_instance_;
+ delete FileDescriptorResponse_reflection_;
+ delete ExtensionNumberResponse::default_instance_;
+ delete ExtensionNumberResponse_reflection_;
+ delete ListServiceResponse::default_instance_;
+ delete ListServiceResponse_reflection_;
+ delete ServiceResponse::default_instance_;
+ delete ServiceResponse_reflection_;
+ delete ErrorResponse::default_instance_;
+ delete ErrorResponse_reflection_;
+}
+
+void protobuf_AddDesc_reflection_2eproto() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ ::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
+ "\n\020reflection.proto\022\027grpc.reflection.v1al"
+ "pha\"\212\002\n\027ServerReflectionRequest\022\014\n\004host\030"
+ "\001 \001(\t\022\032\n\020file_by_filename\030\003 \001(\tH\000\022 \n\026fil"
+ "e_containing_symbol\030\004 \001(\tH\000\022N\n\031file_cont"
+ "aining_extension\030\005 \001(\0132).grpc.reflection"
+ ".v1alpha.ExtensionRequestH\000\022\'\n\035all_exten"
+ "sion_numbers_of_type\030\006 \001(\tH\000\022\027\n\rlist_ser"
+ "vices\030\007 \001(\tH\000B\021\n\017message_request\"E\n\020Exte"
+ "nsionRequest\022\027\n\017containing_type\030\001 \001(\t\022\030\n"
+ "\020extension_number\030\002 \001(\005\"\321\003\n\030ServerReflec"
+ "tionResponse\022\022\n\nvalid_host\030\001 \001(\t\022J\n\020orig"
+ "inal_request\030\002 \001(\01320.grpc.reflection.v1a"
+ "lpha.ServerReflectionRequest\022S\n\030file_des"
+ "criptor_response\030\004 \001(\0132/.grpc.reflection"
+ ".v1alpha.FileDescriptorResponseH\000\022Z\n\036all"
+ "_extension_numbers_response\030\005 \001(\01320.grpc"
+ ".reflection.v1alpha.ExtensionNumberRespo"
+ "nseH\000\022N\n\026list_services_response\030\006 \001(\0132,."
+ "grpc.reflection.v1alpha.ListServiceRespo"
+ "nseH\000\022@\n\016error_response\030\007 \001(\0132&.grpc.ref"
+ "lection.v1alpha.ErrorResponseH\000B\022\n\020messa"
+ "ge_response\"7\n\026FileDescriptorResponse\022\035\n"
+ "\025file_descriptor_proto\030\001 \003(\014\"K\n\027Extensio"
+ "nNumberResponse\022\026\n\016base_type_name\030\001 \001(\t\022"
+ "\030\n\020extension_number\030\002 \003(\005\"P\n\023ListService"
+ "Response\0229\n\007service\030\001 \003(\0132(.grpc.reflect"
+ "ion.v1alpha.ServiceResponse\"\037\n\017ServiceRe"
+ "sponse\022\014\n\004name\030\001 \001(\t\":\n\rErrorResponse\022\022\n"
+ "\nerror_code\030\001 \001(\005\022\025\n\rerror_message\030\002 \001(\t"
+ "2\223\001\n\020ServerReflection\022\177\n\024ServerReflectio"
+ "nInfo\0220.grpc.reflection.v1alpha.ServerRe"
+ "flectionRequest\0321.grpc.reflection.v1alph"
+ "a.ServerReflectionResponse(\0010\001b\006proto3", 1318);
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
+ "reflection.proto", &protobuf_RegisterTypes);
+ ServerReflectionRequest::default_instance_ = new ServerReflectionRequest();
+ ServerReflectionRequest_default_oneof_instance_ = new ServerReflectionRequestOneofInstance();
+ ExtensionRequest::default_instance_ = new ExtensionRequest();
+ ServerReflectionResponse::default_instance_ = new ServerReflectionResponse();
+ ServerReflectionResponse_default_oneof_instance_ = new ServerReflectionResponseOneofInstance();
+ FileDescriptorResponse::default_instance_ = new FileDescriptorResponse();
+ ExtensionNumberResponse::default_instance_ = new ExtensionNumberResponse();
+ ListServiceResponse::default_instance_ = new ListServiceResponse();
+ ServiceResponse::default_instance_ = new ServiceResponse();
+ ErrorResponse::default_instance_ = new ErrorResponse();
+ ServerReflectionRequest::default_instance_->InitAsDefaultInstance();
+ ExtensionRequest::default_instance_->InitAsDefaultInstance();
+ ServerReflectionResponse::default_instance_->InitAsDefaultInstance();
+ FileDescriptorResponse::default_instance_->InitAsDefaultInstance();
+ ExtensionNumberResponse::default_instance_->InitAsDefaultInstance();
+ ListServiceResponse::default_instance_->InitAsDefaultInstance();
+ ServiceResponse::default_instance_->InitAsDefaultInstance();
+ ErrorResponse::default_instance_->InitAsDefaultInstance();
+ ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_reflection_2eproto);
+}
+
+// Force AddDescriptors() to be called at static initialization time.
+struct StaticDescriptorInitializer_reflection_2eproto {
+ StaticDescriptorInitializer_reflection_2eproto() {
+ protobuf_AddDesc_reflection_2eproto();
+ }
+} static_descriptor_initializer_reflection_2eproto_;
+
+namespace {
+
+static void MergeFromFail(int line) GOOGLE_ATTRIBUTE_COLD;
+static void MergeFromFail(int line) {
+ GOOGLE_CHECK(false) << __FILE__ << ":" << line;
+}
+
+} // namespace
+
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ServerReflectionRequest::kHostFieldNumber;
+const int ServerReflectionRequest::kFileByFilenameFieldNumber;
+const int ServerReflectionRequest::kFileContainingSymbolFieldNumber;
+const int ServerReflectionRequest::kFileContainingExtensionFieldNumber;
+const int ServerReflectionRequest::kAllExtensionNumbersOfTypeFieldNumber;
+const int ServerReflectionRequest::kListServicesFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ServerReflectionRequest::ServerReflectionRequest()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ServerReflectionRequest)
+}
+
+void ServerReflectionRequest::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+ ServerReflectionRequest_default_oneof_instance_->file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ ServerReflectionRequest_default_oneof_instance_->file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ ServerReflectionRequest_default_oneof_instance_->file_containing_extension_ = const_cast< ::grpc::reflection::v1alpha::ExtensionRequest*>(&::grpc::reflection::v1alpha::ExtensionRequest::default_instance());
+ ServerReflectionRequest_default_oneof_instance_->all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ ServerReflectionRequest_default_oneof_instance_->list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+ServerReflectionRequest::ServerReflectionRequest(const ServerReflectionRequest& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ServerReflectionRequest)
+}
+
+void ServerReflectionRequest::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ host_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_has_message_request();
+}
+
+ServerReflectionRequest::~ServerReflectionRequest() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ServerReflectionRequest)
+ SharedDtor();
+}
+
+void ServerReflectionRequest::SharedDtor() {
+ host_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (has_message_request()) {
+ clear_message_request();
+ }
+ if (this != default_instance_) {
+ }
+}
+
+void ServerReflectionRequest::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ServerReflectionRequest::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ServerReflectionRequest_descriptor_;
+}
+
+const ServerReflectionRequest& ServerReflectionRequest::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ServerReflectionRequest* ServerReflectionRequest::default_instance_ = NULL;
+
+ServerReflectionRequest* ServerReflectionRequest::New(::google::protobuf::Arena* arena) const {
+ ServerReflectionRequest* n = new ServerReflectionRequest;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ServerReflectionRequest::clear_message_request() {
+// @@protoc_insertion_point(one_of_clear_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ switch(message_request_case()) {
+ case kFileByFilename: {
+ message_request_.file_by_filename_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ break;
+ }
+ case kFileContainingSymbol: {
+ message_request_.file_containing_symbol_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ break;
+ }
+ case kFileContainingExtension: {
+ delete message_request_.file_containing_extension_;
+ break;
+ }
+ case kAllExtensionNumbersOfType: {
+ message_request_.all_extension_numbers_of_type_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ break;
+ }
+ case kListServices: {
+ message_request_.list_services_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ break;
+ }
+ case MESSAGE_REQUEST_NOT_SET: {
+ break;
+ }
+ }
+ _oneof_case_[0] = MESSAGE_REQUEST_NOT_SET;
+}
+
+
+void ServerReflectionRequest::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ host_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_message_request();
+}
+
+bool ServerReflectionRequest::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string host = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_host()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->host().data(), this->host().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.host"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_file_by_filename;
+ break;
+ }
+
+ // optional string file_by_filename = 3;
+ case 3: {
+ if (tag == 26) {
+ parse_file_by_filename:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_file_by_filename()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_by_filename().data(), this->file_by_filename().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(34)) goto parse_file_containing_symbol;
+ break;
+ }
+
+ // optional string file_containing_symbol = 4;
+ case 4: {
+ if (tag == 34) {
+ parse_file_containing_symbol:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_file_containing_symbol()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_containing_symbol().data(), this->file_containing_symbol().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(42)) goto parse_file_containing_extension;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionRequest file_containing_extension = 5;
+ case 5: {
+ if (tag == 42) {
+ parse_file_containing_extension:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_file_containing_extension()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(50)) goto parse_all_extension_numbers_of_type;
+ break;
+ }
+
+ // optional string all_extension_numbers_of_type = 6;
+ case 6: {
+ if (tag == 50) {
+ parse_all_extension_numbers_of_type:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_all_extension_numbers_of_type()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->all_extension_numbers_of_type().data(), this->all_extension_numbers_of_type().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(58)) goto parse_list_services;
+ break;
+ }
+
+ // optional string list_services = 7;
+ case 7: {
+ if (tag == 58) {
+ parse_list_services:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_list_services()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->list_services().data(), this->list_services().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.list_services"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ServerReflectionRequest)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ServerReflectionRequest)
+ return false;
+#undef DO_
+}
+
+void ServerReflectionRequest::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ // optional string host = 1;
+ if (this->host().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->host().data(), this->host().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.host");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->host(), output);
+ }
+
+ // optional string file_by_filename = 3;
+ if (has_file_by_filename()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_by_filename().data(), this->file_by_filename().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 3, this->file_by_filename(), output);
+ }
+
+ // optional string file_containing_symbol = 4;
+ if (has_file_containing_symbol()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_containing_symbol().data(), this->file_containing_symbol().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 4, this->file_containing_symbol(), output);
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionRequest file_containing_extension = 5;
+ if (has_file_containing_extension()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 5, *message_request_.file_containing_extension_, output);
+ }
+
+ // optional string all_extension_numbers_of_type = 6;
+ if (has_all_extension_numbers_of_type()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->all_extension_numbers_of_type().data(), this->all_extension_numbers_of_type().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 6, this->all_extension_numbers_of_type(), output);
+ }
+
+ // optional string list_services = 7;
+ if (has_list_services()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->list_services().data(), this->list_services().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.list_services");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 7, this->list_services(), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ServerReflectionRequest)
+}
+
+::google::protobuf::uint8* ServerReflectionRequest::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ // optional string host = 1;
+ if (this->host().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->host().data(), this->host().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.host");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->host(), target);
+ }
+
+ // optional string file_by_filename = 3;
+ if (has_file_by_filename()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_by_filename().data(), this->file_by_filename().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 3, this->file_by_filename(), target);
+ }
+
+ // optional string file_containing_symbol = 4;
+ if (has_file_containing_symbol()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->file_containing_symbol().data(), this->file_containing_symbol().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 4, this->file_containing_symbol(), target);
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionRequest file_containing_extension = 5;
+ if (has_file_containing_extension()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 5, *message_request_.file_containing_extension_, target);
+ }
+
+ // optional string all_extension_numbers_of_type = 6;
+ if (has_all_extension_numbers_of_type()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->all_extension_numbers_of_type().data(), this->all_extension_numbers_of_type().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 6, this->all_extension_numbers_of_type(), target);
+ }
+
+ // optional string list_services = 7;
+ if (has_list_services()) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->list_services().data(), this->list_services().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionRequest.list_services");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 7, this->list_services(), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ServerReflectionRequest)
+ return target;
+}
+
+int ServerReflectionRequest::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ int total_size = 0;
+
+ // optional string host = 1;
+ if (this->host().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->host());
+ }
+
+ switch (message_request_case()) {
+ // optional string file_by_filename = 3;
+ case kFileByFilename: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->file_by_filename());
+ break;
+ }
+ // optional string file_containing_symbol = 4;
+ case kFileContainingSymbol: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->file_containing_symbol());
+ break;
+ }
+ // optional .grpc.reflection.v1alpha.ExtensionRequest file_containing_extension = 5;
+ case kFileContainingExtension: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *message_request_.file_containing_extension_);
+ break;
+ }
+ // optional string all_extension_numbers_of_type = 6;
+ case kAllExtensionNumbersOfType: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->all_extension_numbers_of_type());
+ break;
+ }
+ // optional string list_services = 7;
+ case kListServices: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->list_services());
+ break;
+ }
+ case MESSAGE_REQUEST_NOT_SET: {
+ break;
+ }
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ServerReflectionRequest::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ServerReflectionRequest* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ServerReflectionRequest>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ServerReflectionRequest)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ServerReflectionRequest)
+ MergeFrom(*source);
+ }
+}
+
+void ServerReflectionRequest::MergeFrom(const ServerReflectionRequest& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ switch (from.message_request_case()) {
+ case kFileByFilename: {
+ set_file_by_filename(from.file_by_filename());
+ break;
+ }
+ case kFileContainingSymbol: {
+ set_file_containing_symbol(from.file_containing_symbol());
+ break;
+ }
+ case kFileContainingExtension: {
+ mutable_file_containing_extension()->::grpc::reflection::v1alpha::ExtensionRequest::MergeFrom(from.file_containing_extension());
+ break;
+ }
+ case kAllExtensionNumbersOfType: {
+ set_all_extension_numbers_of_type(from.all_extension_numbers_of_type());
+ break;
+ }
+ case kListServices: {
+ set_list_services(from.list_services());
+ break;
+ }
+ case MESSAGE_REQUEST_NOT_SET: {
+ break;
+ }
+ }
+ if (from.host().size() > 0) {
+
+ host_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.host_);
+ }
+}
+
+void ServerReflectionRequest::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ServerReflectionRequest::CopyFrom(const ServerReflectionRequest& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ServerReflectionRequest)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ServerReflectionRequest::IsInitialized() const {
+
+ return true;
+}
+
+void ServerReflectionRequest::Swap(ServerReflectionRequest* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ServerReflectionRequest::InternalSwap(ServerReflectionRequest* other) {
+ host_.Swap(&other->host_);
+ std::swap(message_request_, other->message_request_);
+ std::swap(_oneof_case_[0], other->_oneof_case_[0]);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ServerReflectionRequest::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ServerReflectionRequest_descriptor_;
+ metadata.reflection = ServerReflectionRequest_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ServerReflectionRequest
+
+// optional string host = 1;
+void ServerReflectionRequest::clear_host() {
+ host_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ServerReflectionRequest::host() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+ return host_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServerReflectionRequest::set_host(const ::std::string& value) {
+
+ host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+}
+ void ServerReflectionRequest::set_host(const char* value) {
+
+ host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+}
+ void ServerReflectionRequest::set_host(const char* value, size_t size) {
+
+ host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+}
+ ::std::string* ServerReflectionRequest::mutable_host() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+ return host_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionRequest::release_host() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+
+ return host_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServerReflectionRequest::set_allocated_host(::std::string* host) {
+ if (host != NULL) {
+
+ } else {
+
+ }
+ host_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), host);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.host)
+}
+
+// optional string file_by_filename = 3;
+bool ServerReflectionRequest::has_file_by_filename() const {
+ return message_request_case() == kFileByFilename;
+}
+void ServerReflectionRequest::set_has_file_by_filename() {
+ _oneof_case_[0] = kFileByFilename;
+}
+void ServerReflectionRequest::clear_file_by_filename() {
+ if (has_file_by_filename()) {
+ message_request_.file_by_filename_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_has_message_request();
+ }
+}
+ const ::std::string& ServerReflectionRequest::file_by_filename() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+ if (has_file_by_filename()) {
+ return message_request_.file_by_filename_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ return *&::google::protobuf::internal::GetEmptyStringAlreadyInited();
+}
+ void ServerReflectionRequest::set_file_by_filename(const ::std::string& value) {
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+ if (!has_file_by_filename()) {
+ clear_message_request();
+ set_has_file_by_filename();
+ message_request_.file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_by_filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+}
+ void ServerReflectionRequest::set_file_by_filename(const char* value) {
+ if (!has_file_by_filename()) {
+ clear_message_request();
+ set_has_file_by_filename();
+ message_request_.file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_by_filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+}
+ void ServerReflectionRequest::set_file_by_filename(const char* value, size_t size) {
+ if (!has_file_by_filename()) {
+ clear_message_request();
+ set_has_file_by_filename();
+ message_request_.file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_by_filename_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
+ reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+}
+ ::std::string* ServerReflectionRequest::mutable_file_by_filename() {
+ if (!has_file_by_filename()) {
+ clear_message_request();
+ set_has_file_by_filename();
+ message_request_.file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+ return message_request_.file_by_filename_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionRequest::release_file_by_filename() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+ if (has_file_by_filename()) {
+ clear_has_message_request();
+ return message_request_.file_by_filename_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ } else {
+ return NULL;
+ }
+}
+ void ServerReflectionRequest::set_allocated_file_by_filename(::std::string* file_by_filename) {
+ if (!has_file_by_filename()) {
+ message_request_.file_by_filename_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ clear_message_request();
+ if (file_by_filename != NULL) {
+ set_has_file_by_filename();
+ message_request_.file_by_filename_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ file_by_filename);
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.file_by_filename)
+}
+
+// optional string file_containing_symbol = 4;
+bool ServerReflectionRequest::has_file_containing_symbol() const {
+ return message_request_case() == kFileContainingSymbol;
+}
+void ServerReflectionRequest::set_has_file_containing_symbol() {
+ _oneof_case_[0] = kFileContainingSymbol;
+}
+void ServerReflectionRequest::clear_file_containing_symbol() {
+ if (has_file_containing_symbol()) {
+ message_request_.file_containing_symbol_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_has_message_request();
+ }
+}
+ const ::std::string& ServerReflectionRequest::file_containing_symbol() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+ if (has_file_containing_symbol()) {
+ return message_request_.file_containing_symbol_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ return *&::google::protobuf::internal::GetEmptyStringAlreadyInited();
+}
+ void ServerReflectionRequest::set_file_containing_symbol(const ::std::string& value) {
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+ if (!has_file_containing_symbol()) {
+ clear_message_request();
+ set_has_file_containing_symbol();
+ message_request_.file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_containing_symbol_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+}
+ void ServerReflectionRequest::set_file_containing_symbol(const char* value) {
+ if (!has_file_containing_symbol()) {
+ clear_message_request();
+ set_has_file_containing_symbol();
+ message_request_.file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_containing_symbol_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+}
+ void ServerReflectionRequest::set_file_containing_symbol(const char* value, size_t size) {
+ if (!has_file_containing_symbol()) {
+ clear_message_request();
+ set_has_file_containing_symbol();
+ message_request_.file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.file_containing_symbol_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
+ reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+}
+ ::std::string* ServerReflectionRequest::mutable_file_containing_symbol() {
+ if (!has_file_containing_symbol()) {
+ clear_message_request();
+ set_has_file_containing_symbol();
+ message_request_.file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+ return message_request_.file_containing_symbol_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionRequest::release_file_containing_symbol() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+ if (has_file_containing_symbol()) {
+ clear_has_message_request();
+ return message_request_.file_containing_symbol_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ } else {
+ return NULL;
+ }
+}
+ void ServerReflectionRequest::set_allocated_file_containing_symbol(::std::string* file_containing_symbol) {
+ if (!has_file_containing_symbol()) {
+ message_request_.file_containing_symbol_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ clear_message_request();
+ if (file_containing_symbol != NULL) {
+ set_has_file_containing_symbol();
+ message_request_.file_containing_symbol_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ file_containing_symbol);
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_symbol)
+}
+
+// optional .grpc.reflection.v1alpha.ExtensionRequest file_containing_extension = 5;
+bool ServerReflectionRequest::has_file_containing_extension() const {
+ return message_request_case() == kFileContainingExtension;
+}
+void ServerReflectionRequest::set_has_file_containing_extension() {
+ _oneof_case_[0] = kFileContainingExtension;
+}
+void ServerReflectionRequest::clear_file_containing_extension() {
+ if (has_file_containing_extension()) {
+ delete message_request_.file_containing_extension_;
+ clear_has_message_request();
+ }
+}
+ const ::grpc::reflection::v1alpha::ExtensionRequest& ServerReflectionRequest::file_containing_extension() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension)
+ return has_file_containing_extension()
+ ? *message_request_.file_containing_extension_
+ : ::grpc::reflection::v1alpha::ExtensionRequest::default_instance();
+}
+::grpc::reflection::v1alpha::ExtensionRequest* ServerReflectionRequest::mutable_file_containing_extension() {
+ if (!has_file_containing_extension()) {
+ clear_message_request();
+ set_has_file_containing_extension();
+ message_request_.file_containing_extension_ = new ::grpc::reflection::v1alpha::ExtensionRequest;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension)
+ return message_request_.file_containing_extension_;
+}
+::grpc::reflection::v1alpha::ExtensionRequest* ServerReflectionRequest::release_file_containing_extension() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension)
+ if (has_file_containing_extension()) {
+ clear_has_message_request();
+ ::grpc::reflection::v1alpha::ExtensionRequest* temp = message_request_.file_containing_extension_;
+ message_request_.file_containing_extension_ = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+void ServerReflectionRequest::set_allocated_file_containing_extension(::grpc::reflection::v1alpha::ExtensionRequest* file_containing_extension) {
+ clear_message_request();
+ if (file_containing_extension) {
+ set_has_file_containing_extension();
+ message_request_.file_containing_extension_ = file_containing_extension;
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension)
+}
+
+// optional string all_extension_numbers_of_type = 6;
+bool ServerReflectionRequest::has_all_extension_numbers_of_type() const {
+ return message_request_case() == kAllExtensionNumbersOfType;
+}
+void ServerReflectionRequest::set_has_all_extension_numbers_of_type() {
+ _oneof_case_[0] = kAllExtensionNumbersOfType;
+}
+void ServerReflectionRequest::clear_all_extension_numbers_of_type() {
+ if (has_all_extension_numbers_of_type()) {
+ message_request_.all_extension_numbers_of_type_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_has_message_request();
+ }
+}
+ const ::std::string& ServerReflectionRequest::all_extension_numbers_of_type() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+ if (has_all_extension_numbers_of_type()) {
+ return message_request_.all_extension_numbers_of_type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ return *&::google::protobuf::internal::GetEmptyStringAlreadyInited();
+}
+ void ServerReflectionRequest::set_all_extension_numbers_of_type(const ::std::string& value) {
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+ if (!has_all_extension_numbers_of_type()) {
+ clear_message_request();
+ set_has_all_extension_numbers_of_type();
+ message_request_.all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.all_extension_numbers_of_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+}
+ void ServerReflectionRequest::set_all_extension_numbers_of_type(const char* value) {
+ if (!has_all_extension_numbers_of_type()) {
+ clear_message_request();
+ set_has_all_extension_numbers_of_type();
+ message_request_.all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.all_extension_numbers_of_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+}
+ void ServerReflectionRequest::set_all_extension_numbers_of_type(const char* value, size_t size) {
+ if (!has_all_extension_numbers_of_type()) {
+ clear_message_request();
+ set_has_all_extension_numbers_of_type();
+ message_request_.all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.all_extension_numbers_of_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
+ reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+}
+ ::std::string* ServerReflectionRequest::mutable_all_extension_numbers_of_type() {
+ if (!has_all_extension_numbers_of_type()) {
+ clear_message_request();
+ set_has_all_extension_numbers_of_type();
+ message_request_.all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+ return message_request_.all_extension_numbers_of_type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionRequest::release_all_extension_numbers_of_type() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+ if (has_all_extension_numbers_of_type()) {
+ clear_has_message_request();
+ return message_request_.all_extension_numbers_of_type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ } else {
+ return NULL;
+ }
+}
+ void ServerReflectionRequest::set_allocated_all_extension_numbers_of_type(::std::string* all_extension_numbers_of_type) {
+ if (!has_all_extension_numbers_of_type()) {
+ message_request_.all_extension_numbers_of_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ clear_message_request();
+ if (all_extension_numbers_of_type != NULL) {
+ set_has_all_extension_numbers_of_type();
+ message_request_.all_extension_numbers_of_type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ all_extension_numbers_of_type);
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.all_extension_numbers_of_type)
+}
+
+// optional string list_services = 7;
+bool ServerReflectionRequest::has_list_services() const {
+ return message_request_case() == kListServices;
+}
+void ServerReflectionRequest::set_has_list_services() {
+ _oneof_case_[0] = kListServices;
+}
+void ServerReflectionRequest::clear_list_services() {
+ if (has_list_services()) {
+ message_request_.list_services_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ clear_has_message_request();
+ }
+}
+ const ::std::string& ServerReflectionRequest::list_services() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+ if (has_list_services()) {
+ return message_request_.list_services_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ return *&::google::protobuf::internal::GetEmptyStringAlreadyInited();
+}
+ void ServerReflectionRequest::set_list_services(const ::std::string& value) {
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+ if (!has_list_services()) {
+ clear_message_request();
+ set_has_list_services();
+ message_request_.list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.list_services_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+}
+ void ServerReflectionRequest::set_list_services(const char* value) {
+ if (!has_list_services()) {
+ clear_message_request();
+ set_has_list_services();
+ message_request_.list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.list_services_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+}
+ void ServerReflectionRequest::set_list_services(const char* value, size_t size) {
+ if (!has_list_services()) {
+ clear_message_request();
+ set_has_list_services();
+ message_request_.list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ message_request_.list_services_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(
+ reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+}
+ ::std::string* ServerReflectionRequest::mutable_list_services() {
+ if (!has_list_services()) {
+ clear_message_request();
+ set_has_list_services();
+ message_request_.list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+ return message_request_.list_services_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionRequest::release_list_services() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+ if (has_list_services()) {
+ clear_has_message_request();
+ return message_request_.list_services_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ } else {
+ return NULL;
+ }
+}
+ void ServerReflectionRequest::set_allocated_list_services(::std::string* list_services) {
+ if (!has_list_services()) {
+ message_request_.list_services_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ clear_message_request();
+ if (list_services != NULL) {
+ set_has_list_services();
+ message_request_.list_services_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ list_services);
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionRequest.list_services)
+}
+
+bool ServerReflectionRequest::has_message_request() const {
+ return message_request_case() != MESSAGE_REQUEST_NOT_SET;
+}
+void ServerReflectionRequest::clear_has_message_request() {
+ _oneof_case_[0] = MESSAGE_REQUEST_NOT_SET;
+}
+ServerReflectionRequest::MessageRequestCase ServerReflectionRequest::message_request_case() const {
+ return ServerReflectionRequest::MessageRequestCase(_oneof_case_[0]);
+}
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ExtensionRequest::kContainingTypeFieldNumber;
+const int ExtensionRequest::kExtensionNumberFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ExtensionRequest::ExtensionRequest()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ExtensionRequest)
+}
+
+void ExtensionRequest::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+ExtensionRequest::ExtensionRequest(const ExtensionRequest& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ExtensionRequest)
+}
+
+void ExtensionRequest::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ containing_type_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ extension_number_ = 0;
+}
+
+ExtensionRequest::~ExtensionRequest() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ExtensionRequest)
+ SharedDtor();
+}
+
+void ExtensionRequest::SharedDtor() {
+ containing_type_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (this != default_instance_) {
+ }
+}
+
+void ExtensionRequest::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ExtensionRequest::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ExtensionRequest_descriptor_;
+}
+
+const ExtensionRequest& ExtensionRequest::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ExtensionRequest* ExtensionRequest::default_instance_ = NULL;
+
+ExtensionRequest* ExtensionRequest::New(::google::protobuf::Arena* arena) const {
+ ExtensionRequest* n = new ExtensionRequest;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ExtensionRequest::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ExtensionRequest)
+ containing_type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ extension_number_ = 0;
+}
+
+bool ExtensionRequest::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ExtensionRequest)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string containing_type = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_containing_type()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->containing_type().data(), this->containing_type().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ExtensionRequest.containing_type"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(16)) goto parse_extension_number;
+ break;
+ }
+
+ // optional int32 extension_number = 2;
+ case 2: {
+ if (tag == 16) {
+ parse_extension_number:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &extension_number_)));
+
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ExtensionRequest)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ExtensionRequest)
+ return false;
+#undef DO_
+}
+
+void ExtensionRequest::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ExtensionRequest)
+ // optional string containing_type = 1;
+ if (this->containing_type().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->containing_type().data(), this->containing_type().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ExtensionRequest.containing_type");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->containing_type(), output);
+ }
+
+ // optional int32 extension_number = 2;
+ if (this->extension_number() != 0) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->extension_number(), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ExtensionRequest)
+}
+
+::google::protobuf::uint8* ExtensionRequest::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ExtensionRequest)
+ // optional string containing_type = 1;
+ if (this->containing_type().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->containing_type().data(), this->containing_type().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ExtensionRequest.containing_type");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->containing_type(), target);
+ }
+
+ // optional int32 extension_number = 2;
+ if (this->extension_number() != 0) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(2, this->extension_number(), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ExtensionRequest)
+ return target;
+}
+
+int ExtensionRequest::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ExtensionRequest)
+ int total_size = 0;
+
+ // optional string containing_type = 1;
+ if (this->containing_type().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->containing_type());
+ }
+
+ // optional int32 extension_number = 2;
+ if (this->extension_number() != 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->extension_number());
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ExtensionRequest::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ExtensionRequest)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ExtensionRequest* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ExtensionRequest>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ExtensionRequest)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ExtensionRequest)
+ MergeFrom(*source);
+ }
+}
+
+void ExtensionRequest::MergeFrom(const ExtensionRequest& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ExtensionRequest)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ if (from.containing_type().size() > 0) {
+
+ containing_type_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.containing_type_);
+ }
+ if (from.extension_number() != 0) {
+ set_extension_number(from.extension_number());
+ }
+}
+
+void ExtensionRequest::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ExtensionRequest)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ExtensionRequest::CopyFrom(const ExtensionRequest& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ExtensionRequest)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ExtensionRequest::IsInitialized() const {
+
+ return true;
+}
+
+void ExtensionRequest::Swap(ExtensionRequest* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ExtensionRequest::InternalSwap(ExtensionRequest* other) {
+ containing_type_.Swap(&other->containing_type_);
+ std::swap(extension_number_, other->extension_number_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ExtensionRequest::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ExtensionRequest_descriptor_;
+ metadata.reflection = ExtensionRequest_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ExtensionRequest
+
+// optional string containing_type = 1;
+void ExtensionRequest::clear_containing_type() {
+ containing_type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ExtensionRequest::containing_type() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+ return containing_type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ExtensionRequest::set_containing_type(const ::std::string& value) {
+
+ containing_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+}
+ void ExtensionRequest::set_containing_type(const char* value) {
+
+ containing_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+}
+ void ExtensionRequest::set_containing_type(const char* value, size_t size) {
+
+ containing_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+}
+ ::std::string* ExtensionRequest::mutable_containing_type() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+ return containing_type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ExtensionRequest::release_containing_type() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+
+ return containing_type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ExtensionRequest::set_allocated_containing_type(::std::string* containing_type) {
+ containing_type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), containing_type);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ExtensionRequest.containing_type)
+}
+
+// optional int32 extension_number = 2;
+void ExtensionRequest::clear_extension_number() {
+ extension_number_ = 0;
+}
+ ::google::protobuf::int32 ExtensionRequest::extension_number() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ExtensionRequest.extension_number)
+ return extension_number_;
+}
+ void ExtensionRequest::set_extension_number(::google::protobuf::int32 value) {
+
+ extension_number_ = value;
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ExtensionRequest.extension_number)
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ServerReflectionResponse::kValidHostFieldNumber;
+const int ServerReflectionResponse::kOriginalRequestFieldNumber;
+const int ServerReflectionResponse::kFileDescriptorResponseFieldNumber;
+const int ServerReflectionResponse::kAllExtensionNumbersResponseFieldNumber;
+const int ServerReflectionResponse::kListServicesResponseFieldNumber;
+const int ServerReflectionResponse::kErrorResponseFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ServerReflectionResponse::ServerReflectionResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ServerReflectionResponse)
+}
+
+void ServerReflectionResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+ original_request_ = const_cast< ::grpc::reflection::v1alpha::ServerReflectionRequest*>(&::grpc::reflection::v1alpha::ServerReflectionRequest::default_instance());
+ ServerReflectionResponse_default_oneof_instance_->file_descriptor_response_ = const_cast< ::grpc::reflection::v1alpha::FileDescriptorResponse*>(&::grpc::reflection::v1alpha::FileDescriptorResponse::default_instance());
+ ServerReflectionResponse_default_oneof_instance_->all_extension_numbers_response_ = const_cast< ::grpc::reflection::v1alpha::ExtensionNumberResponse*>(&::grpc::reflection::v1alpha::ExtensionNumberResponse::default_instance());
+ ServerReflectionResponse_default_oneof_instance_->list_services_response_ = const_cast< ::grpc::reflection::v1alpha::ListServiceResponse*>(&::grpc::reflection::v1alpha::ListServiceResponse::default_instance());
+ ServerReflectionResponse_default_oneof_instance_->error_response_ = const_cast< ::grpc::reflection::v1alpha::ErrorResponse*>(&::grpc::reflection::v1alpha::ErrorResponse::default_instance());
+}
+
+ServerReflectionResponse::ServerReflectionResponse(const ServerReflectionResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ServerReflectionResponse)
+}
+
+void ServerReflectionResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ valid_host_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ original_request_ = NULL;
+ clear_has_message_response();
+}
+
+ServerReflectionResponse::~ServerReflectionResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ServerReflectionResponse)
+ SharedDtor();
+}
+
+void ServerReflectionResponse::SharedDtor() {
+ valid_host_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (has_message_response()) {
+ clear_message_response();
+ }
+ if (this != default_instance_) {
+ delete original_request_;
+ }
+}
+
+void ServerReflectionResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ServerReflectionResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ServerReflectionResponse_descriptor_;
+}
+
+const ServerReflectionResponse& ServerReflectionResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ServerReflectionResponse* ServerReflectionResponse::default_instance_ = NULL;
+
+ServerReflectionResponse* ServerReflectionResponse::New(::google::protobuf::Arena* arena) const {
+ ServerReflectionResponse* n = new ServerReflectionResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ServerReflectionResponse::clear_message_response() {
+// @@protoc_insertion_point(one_of_clear_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ switch(message_response_case()) {
+ case kFileDescriptorResponse: {
+ delete message_response_.file_descriptor_response_;
+ break;
+ }
+ case kAllExtensionNumbersResponse: {
+ delete message_response_.all_extension_numbers_response_;
+ break;
+ }
+ case kListServicesResponse: {
+ delete message_response_.list_services_response_;
+ break;
+ }
+ case kErrorResponse: {
+ delete message_response_.error_response_;
+ break;
+ }
+ case MESSAGE_RESPONSE_NOT_SET: {
+ break;
+ }
+ }
+ _oneof_case_[0] = MESSAGE_RESPONSE_NOT_SET;
+}
+
+
+void ServerReflectionResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ valid_host_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (GetArenaNoVirtual() == NULL && original_request_ != NULL) delete original_request_;
+ original_request_ = NULL;
+ clear_message_response();
+}
+
+bool ServerReflectionResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string valid_host = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_valid_host()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->valid_host().data(), this->valid_host().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServerReflectionResponse.valid_host"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(18)) goto parse_original_request;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.ServerReflectionRequest original_request = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_original_request:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_original_request()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(34)) goto parse_file_descriptor_response;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.FileDescriptorResponse file_descriptor_response = 4;
+ case 4: {
+ if (tag == 34) {
+ parse_file_descriptor_response:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_file_descriptor_response()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(42)) goto parse_all_extension_numbers_response;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionNumberResponse all_extension_numbers_response = 5;
+ case 5: {
+ if (tag == 42) {
+ parse_all_extension_numbers_response:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_all_extension_numbers_response()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(50)) goto parse_list_services_response;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.ListServiceResponse list_services_response = 6;
+ case 6: {
+ if (tag == 50) {
+ parse_list_services_response:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_list_services_response()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(58)) goto parse_error_response;
+ break;
+ }
+
+ // optional .grpc.reflection.v1alpha.ErrorResponse error_response = 7;
+ case 7: {
+ if (tag == 58) {
+ parse_error_response:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_error_response()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ServerReflectionResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ServerReflectionResponse)
+ return false;
+#undef DO_
+}
+
+void ServerReflectionResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ // optional string valid_host = 1;
+ if (this->valid_host().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->valid_host().data(), this->valid_host().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionResponse.valid_host");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->valid_host(), output);
+ }
+
+ // optional .grpc.reflection.v1alpha.ServerReflectionRequest original_request = 2;
+ if (this->has_original_request()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 2, *this->original_request_, output);
+ }
+
+ // optional .grpc.reflection.v1alpha.FileDescriptorResponse file_descriptor_response = 4;
+ if (has_file_descriptor_response()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 4, *message_response_.file_descriptor_response_, output);
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionNumberResponse all_extension_numbers_response = 5;
+ if (has_all_extension_numbers_response()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 5, *message_response_.all_extension_numbers_response_, output);
+ }
+
+ // optional .grpc.reflection.v1alpha.ListServiceResponse list_services_response = 6;
+ if (has_list_services_response()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 6, *message_response_.list_services_response_, output);
+ }
+
+ // optional .grpc.reflection.v1alpha.ErrorResponse error_response = 7;
+ if (has_error_response()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 7, *message_response_.error_response_, output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ServerReflectionResponse)
+}
+
+::google::protobuf::uint8* ServerReflectionResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ // optional string valid_host = 1;
+ if (this->valid_host().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->valid_host().data(), this->valid_host().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServerReflectionResponse.valid_host");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->valid_host(), target);
+ }
+
+ // optional .grpc.reflection.v1alpha.ServerReflectionRequest original_request = 2;
+ if (this->has_original_request()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 2, *this->original_request_, target);
+ }
+
+ // optional .grpc.reflection.v1alpha.FileDescriptorResponse file_descriptor_response = 4;
+ if (has_file_descriptor_response()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 4, *message_response_.file_descriptor_response_, target);
+ }
+
+ // optional .grpc.reflection.v1alpha.ExtensionNumberResponse all_extension_numbers_response = 5;
+ if (has_all_extension_numbers_response()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 5, *message_response_.all_extension_numbers_response_, target);
+ }
+
+ // optional .grpc.reflection.v1alpha.ListServiceResponse list_services_response = 6;
+ if (has_list_services_response()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 6, *message_response_.list_services_response_, target);
+ }
+
+ // optional .grpc.reflection.v1alpha.ErrorResponse error_response = 7;
+ if (has_error_response()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 7, *message_response_.error_response_, target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ServerReflectionResponse)
+ return target;
+}
+
+int ServerReflectionResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ int total_size = 0;
+
+ // optional string valid_host = 1;
+ if (this->valid_host().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->valid_host());
+ }
+
+ // optional .grpc.reflection.v1alpha.ServerReflectionRequest original_request = 2;
+ if (this->has_original_request()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *this->original_request_);
+ }
+
+ switch (message_response_case()) {
+ // optional .grpc.reflection.v1alpha.FileDescriptorResponse file_descriptor_response = 4;
+ case kFileDescriptorResponse: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *message_response_.file_descriptor_response_);
+ break;
+ }
+ // optional .grpc.reflection.v1alpha.ExtensionNumberResponse all_extension_numbers_response = 5;
+ case kAllExtensionNumbersResponse: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *message_response_.all_extension_numbers_response_);
+ break;
+ }
+ // optional .grpc.reflection.v1alpha.ListServiceResponse list_services_response = 6;
+ case kListServicesResponse: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *message_response_.list_services_response_);
+ break;
+ }
+ // optional .grpc.reflection.v1alpha.ErrorResponse error_response = 7;
+ case kErrorResponse: {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ *message_response_.error_response_);
+ break;
+ }
+ case MESSAGE_RESPONSE_NOT_SET: {
+ break;
+ }
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ServerReflectionResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ServerReflectionResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ServerReflectionResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ServerReflectionResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ServerReflectionResponse)
+ MergeFrom(*source);
+ }
+}
+
+void ServerReflectionResponse::MergeFrom(const ServerReflectionResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ switch (from.message_response_case()) {
+ case kFileDescriptorResponse: {
+ mutable_file_descriptor_response()->::grpc::reflection::v1alpha::FileDescriptorResponse::MergeFrom(from.file_descriptor_response());
+ break;
+ }
+ case kAllExtensionNumbersResponse: {
+ mutable_all_extension_numbers_response()->::grpc::reflection::v1alpha::ExtensionNumberResponse::MergeFrom(from.all_extension_numbers_response());
+ break;
+ }
+ case kListServicesResponse: {
+ mutable_list_services_response()->::grpc::reflection::v1alpha::ListServiceResponse::MergeFrom(from.list_services_response());
+ break;
+ }
+ case kErrorResponse: {
+ mutable_error_response()->::grpc::reflection::v1alpha::ErrorResponse::MergeFrom(from.error_response());
+ break;
+ }
+ case MESSAGE_RESPONSE_NOT_SET: {
+ break;
+ }
+ }
+ if (from.valid_host().size() > 0) {
+
+ valid_host_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.valid_host_);
+ }
+ if (from.has_original_request()) {
+ mutable_original_request()->::grpc::reflection::v1alpha::ServerReflectionRequest::MergeFrom(from.original_request());
+ }
+}
+
+void ServerReflectionResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ServerReflectionResponse::CopyFrom(const ServerReflectionResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ServerReflectionResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ServerReflectionResponse::IsInitialized() const {
+
+ return true;
+}
+
+void ServerReflectionResponse::Swap(ServerReflectionResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ServerReflectionResponse::InternalSwap(ServerReflectionResponse* other) {
+ valid_host_.Swap(&other->valid_host_);
+ std::swap(original_request_, other->original_request_);
+ std::swap(message_response_, other->message_response_);
+ std::swap(_oneof_case_[0], other->_oneof_case_[0]);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ServerReflectionResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ServerReflectionResponse_descriptor_;
+ metadata.reflection = ServerReflectionResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ServerReflectionResponse
+
+// optional string valid_host = 1;
+void ServerReflectionResponse::clear_valid_host() {
+ valid_host_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ServerReflectionResponse::valid_host() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+ return valid_host_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServerReflectionResponse::set_valid_host(const ::std::string& value) {
+
+ valid_host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+}
+ void ServerReflectionResponse::set_valid_host(const char* value) {
+
+ valid_host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+}
+ void ServerReflectionResponse::set_valid_host(const char* value, size_t size) {
+
+ valid_host_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+}
+ ::std::string* ServerReflectionResponse::mutable_valid_host() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+ return valid_host_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServerReflectionResponse::release_valid_host() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+
+ return valid_host_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServerReflectionResponse::set_allocated_valid_host(::std::string* valid_host) {
+ valid_host_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), valid_host);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.valid_host)
+}
+
+// optional .grpc.reflection.v1alpha.ServerReflectionRequest original_request = 2;
+bool ServerReflectionResponse::has_original_request() const {
+ return !_is_default_instance_ && original_request_ != NULL;
+}
+void ServerReflectionResponse::clear_original_request() {
+ if (GetArenaNoVirtual() == NULL && original_request_ != NULL) delete original_request_;
+ original_request_ = NULL;
+}
+const ::grpc::reflection::v1alpha::ServerReflectionRequest& ServerReflectionResponse::original_request() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.original_request)
+ return original_request_ != NULL ? *original_request_ : *default_instance_->original_request_;
+}
+::grpc::reflection::v1alpha::ServerReflectionRequest* ServerReflectionResponse::mutable_original_request() {
+
+ if (original_request_ == NULL) {
+ original_request_ = new ::grpc::reflection::v1alpha::ServerReflectionRequest;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.original_request)
+ return original_request_;
+}
+::grpc::reflection::v1alpha::ServerReflectionRequest* ServerReflectionResponse::release_original_request() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.original_request)
+
+ ::grpc::reflection::v1alpha::ServerReflectionRequest* temp = original_request_;
+ original_request_ = NULL;
+ return temp;
+}
+void ServerReflectionResponse::set_allocated_original_request(::grpc::reflection::v1alpha::ServerReflectionRequest* original_request) {
+ delete original_request_;
+ original_request_ = original_request;
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.original_request)
+}
+
+// optional .grpc.reflection.v1alpha.FileDescriptorResponse file_descriptor_response = 4;
+bool ServerReflectionResponse::has_file_descriptor_response() const {
+ return message_response_case() == kFileDescriptorResponse;
+}
+void ServerReflectionResponse::set_has_file_descriptor_response() {
+ _oneof_case_[0] = kFileDescriptorResponse;
+}
+void ServerReflectionResponse::clear_file_descriptor_response() {
+ if (has_file_descriptor_response()) {
+ delete message_response_.file_descriptor_response_;
+ clear_has_message_response();
+ }
+}
+ const ::grpc::reflection::v1alpha::FileDescriptorResponse& ServerReflectionResponse::file_descriptor_response() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response)
+ return has_file_descriptor_response()
+ ? *message_response_.file_descriptor_response_
+ : ::grpc::reflection::v1alpha::FileDescriptorResponse::default_instance();
+}
+::grpc::reflection::v1alpha::FileDescriptorResponse* ServerReflectionResponse::mutable_file_descriptor_response() {
+ if (!has_file_descriptor_response()) {
+ clear_message_response();
+ set_has_file_descriptor_response();
+ message_response_.file_descriptor_response_ = new ::grpc::reflection::v1alpha::FileDescriptorResponse;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response)
+ return message_response_.file_descriptor_response_;
+}
+::grpc::reflection::v1alpha::FileDescriptorResponse* ServerReflectionResponse::release_file_descriptor_response() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response)
+ if (has_file_descriptor_response()) {
+ clear_has_message_response();
+ ::grpc::reflection::v1alpha::FileDescriptorResponse* temp = message_response_.file_descriptor_response_;
+ message_response_.file_descriptor_response_ = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+void ServerReflectionResponse::set_allocated_file_descriptor_response(::grpc::reflection::v1alpha::FileDescriptorResponse* file_descriptor_response) {
+ clear_message_response();
+ if (file_descriptor_response) {
+ set_has_file_descriptor_response();
+ message_response_.file_descriptor_response_ = file_descriptor_response;
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response)
+}
+
+// optional .grpc.reflection.v1alpha.ExtensionNumberResponse all_extension_numbers_response = 5;
+bool ServerReflectionResponse::has_all_extension_numbers_response() const {
+ return message_response_case() == kAllExtensionNumbersResponse;
+}
+void ServerReflectionResponse::set_has_all_extension_numbers_response() {
+ _oneof_case_[0] = kAllExtensionNumbersResponse;
+}
+void ServerReflectionResponse::clear_all_extension_numbers_response() {
+ if (has_all_extension_numbers_response()) {
+ delete message_response_.all_extension_numbers_response_;
+ clear_has_message_response();
+ }
+}
+ const ::grpc::reflection::v1alpha::ExtensionNumberResponse& ServerReflectionResponse::all_extension_numbers_response() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response)
+ return has_all_extension_numbers_response()
+ ? *message_response_.all_extension_numbers_response_
+ : ::grpc::reflection::v1alpha::ExtensionNumberResponse::default_instance();
+}
+::grpc::reflection::v1alpha::ExtensionNumberResponse* ServerReflectionResponse::mutable_all_extension_numbers_response() {
+ if (!has_all_extension_numbers_response()) {
+ clear_message_response();
+ set_has_all_extension_numbers_response();
+ message_response_.all_extension_numbers_response_ = new ::grpc::reflection::v1alpha::ExtensionNumberResponse;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response)
+ return message_response_.all_extension_numbers_response_;
+}
+::grpc::reflection::v1alpha::ExtensionNumberResponse* ServerReflectionResponse::release_all_extension_numbers_response() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response)
+ if (has_all_extension_numbers_response()) {
+ clear_has_message_response();
+ ::grpc::reflection::v1alpha::ExtensionNumberResponse* temp = message_response_.all_extension_numbers_response_;
+ message_response_.all_extension_numbers_response_ = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+void ServerReflectionResponse::set_allocated_all_extension_numbers_response(::grpc::reflection::v1alpha::ExtensionNumberResponse* all_extension_numbers_response) {
+ clear_message_response();
+ if (all_extension_numbers_response) {
+ set_has_all_extension_numbers_response();
+ message_response_.all_extension_numbers_response_ = all_extension_numbers_response;
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response)
+}
+
+// optional .grpc.reflection.v1alpha.ListServiceResponse list_services_response = 6;
+bool ServerReflectionResponse::has_list_services_response() const {
+ return message_response_case() == kListServicesResponse;
+}
+void ServerReflectionResponse::set_has_list_services_response() {
+ _oneof_case_[0] = kListServicesResponse;
+}
+void ServerReflectionResponse::clear_list_services_response() {
+ if (has_list_services_response()) {
+ delete message_response_.list_services_response_;
+ clear_has_message_response();
+ }
+}
+ const ::grpc::reflection::v1alpha::ListServiceResponse& ServerReflectionResponse::list_services_response() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response)
+ return has_list_services_response()
+ ? *message_response_.list_services_response_
+ : ::grpc::reflection::v1alpha::ListServiceResponse::default_instance();
+}
+::grpc::reflection::v1alpha::ListServiceResponse* ServerReflectionResponse::mutable_list_services_response() {
+ if (!has_list_services_response()) {
+ clear_message_response();
+ set_has_list_services_response();
+ message_response_.list_services_response_ = new ::grpc::reflection::v1alpha::ListServiceResponse;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response)
+ return message_response_.list_services_response_;
+}
+::grpc::reflection::v1alpha::ListServiceResponse* ServerReflectionResponse::release_list_services_response() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response)
+ if (has_list_services_response()) {
+ clear_has_message_response();
+ ::grpc::reflection::v1alpha::ListServiceResponse* temp = message_response_.list_services_response_;
+ message_response_.list_services_response_ = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+void ServerReflectionResponse::set_allocated_list_services_response(::grpc::reflection::v1alpha::ListServiceResponse* list_services_response) {
+ clear_message_response();
+ if (list_services_response) {
+ set_has_list_services_response();
+ message_response_.list_services_response_ = list_services_response;
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response)
+}
+
+// optional .grpc.reflection.v1alpha.ErrorResponse error_response = 7;
+bool ServerReflectionResponse::has_error_response() const {
+ return message_response_case() == kErrorResponse;
+}
+void ServerReflectionResponse::set_has_error_response() {
+ _oneof_case_[0] = kErrorResponse;
+}
+void ServerReflectionResponse::clear_error_response() {
+ if (has_error_response()) {
+ delete message_response_.error_response_;
+ clear_has_message_response();
+ }
+}
+ const ::grpc::reflection::v1alpha::ErrorResponse& ServerReflectionResponse::error_response() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServerReflectionResponse.error_response)
+ return has_error_response()
+ ? *message_response_.error_response_
+ : ::grpc::reflection::v1alpha::ErrorResponse::default_instance();
+}
+::grpc::reflection::v1alpha::ErrorResponse* ServerReflectionResponse::mutable_error_response() {
+ if (!has_error_response()) {
+ clear_message_response();
+ set_has_error_response();
+ message_response_.error_response_ = new ::grpc::reflection::v1alpha::ErrorResponse;
+ }
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServerReflectionResponse.error_response)
+ return message_response_.error_response_;
+}
+::grpc::reflection::v1alpha::ErrorResponse* ServerReflectionResponse::release_error_response() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServerReflectionResponse.error_response)
+ if (has_error_response()) {
+ clear_has_message_response();
+ ::grpc::reflection::v1alpha::ErrorResponse* temp = message_response_.error_response_;
+ message_response_.error_response_ = NULL;
+ return temp;
+ } else {
+ return NULL;
+ }
+}
+void ServerReflectionResponse::set_allocated_error_response(::grpc::reflection::v1alpha::ErrorResponse* error_response) {
+ clear_message_response();
+ if (error_response) {
+ set_has_error_response();
+ message_response_.error_response_ = error_response;
+ }
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServerReflectionResponse.error_response)
+}
+
+bool ServerReflectionResponse::has_message_response() const {
+ return message_response_case() != MESSAGE_RESPONSE_NOT_SET;
+}
+void ServerReflectionResponse::clear_has_message_response() {
+ _oneof_case_[0] = MESSAGE_RESPONSE_NOT_SET;
+}
+ServerReflectionResponse::MessageResponseCase ServerReflectionResponse::message_response_case() const {
+ return ServerReflectionResponse::MessageResponseCase(_oneof_case_[0]);
+}
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int FileDescriptorResponse::kFileDescriptorProtoFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+FileDescriptorResponse::FileDescriptorResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.FileDescriptorResponse)
+}
+
+void FileDescriptorResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+FileDescriptorResponse::FileDescriptorResponse(const FileDescriptorResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.FileDescriptorResponse)
+}
+
+void FileDescriptorResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+}
+
+FileDescriptorResponse::~FileDescriptorResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.FileDescriptorResponse)
+ SharedDtor();
+}
+
+void FileDescriptorResponse::SharedDtor() {
+}
+
+void FileDescriptorResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* FileDescriptorResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return FileDescriptorResponse_descriptor_;
+}
+
+const FileDescriptorResponse& FileDescriptorResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+FileDescriptorResponse* FileDescriptorResponse::default_instance_ = NULL;
+
+FileDescriptorResponse* FileDescriptorResponse::New(::google::protobuf::Arena* arena) const {
+ FileDescriptorResponse* n = new FileDescriptorResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void FileDescriptorResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ file_descriptor_proto_.Clear();
+}
+
+bool FileDescriptorResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // repeated bytes file_descriptor_proto = 1;
+ case 1: {
+ if (tag == 10) {
+ parse_file_descriptor_proto:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadBytes(
+ input, this->add_file_descriptor_proto()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(10)) goto parse_file_descriptor_proto;
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.FileDescriptorResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.FileDescriptorResponse)
+ return false;
+#undef DO_
+}
+
+void FileDescriptorResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ // repeated bytes file_descriptor_proto = 1;
+ for (int i = 0; i < this->file_descriptor_proto_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteBytes(
+ 1, this->file_descriptor_proto(i), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.FileDescriptorResponse)
+}
+
+::google::protobuf::uint8* FileDescriptorResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ // repeated bytes file_descriptor_proto = 1;
+ for (int i = 0; i < this->file_descriptor_proto_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteBytesToArray(1, this->file_descriptor_proto(i), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.FileDescriptorResponse)
+ return target;
+}
+
+int FileDescriptorResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ int total_size = 0;
+
+ // repeated bytes file_descriptor_proto = 1;
+ total_size += 1 * this->file_descriptor_proto_size();
+ for (int i = 0; i < this->file_descriptor_proto_size(); i++) {
+ total_size += ::google::protobuf::internal::WireFormatLite::BytesSize(
+ this->file_descriptor_proto(i));
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void FileDescriptorResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const FileDescriptorResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const FileDescriptorResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.FileDescriptorResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.FileDescriptorResponse)
+ MergeFrom(*source);
+ }
+}
+
+void FileDescriptorResponse::MergeFrom(const FileDescriptorResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ file_descriptor_proto_.MergeFrom(from.file_descriptor_proto_);
+}
+
+void FileDescriptorResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void FileDescriptorResponse::CopyFrom(const FileDescriptorResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.FileDescriptorResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool FileDescriptorResponse::IsInitialized() const {
+
+ return true;
+}
+
+void FileDescriptorResponse::Swap(FileDescriptorResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void FileDescriptorResponse::InternalSwap(FileDescriptorResponse* other) {
+ file_descriptor_proto_.UnsafeArenaSwap(&other->file_descriptor_proto_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata FileDescriptorResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = FileDescriptorResponse_descriptor_;
+ metadata.reflection = FileDescriptorResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// FileDescriptorResponse
+
+// repeated bytes file_descriptor_proto = 1;
+int FileDescriptorResponse::file_descriptor_proto_size() const {
+ return file_descriptor_proto_.size();
+}
+void FileDescriptorResponse::clear_file_descriptor_proto() {
+ file_descriptor_proto_.Clear();
+}
+ const ::std::string& FileDescriptorResponse::file_descriptor_proto(int index) const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ return file_descriptor_proto_.Get(index);
+}
+ ::std::string* FileDescriptorResponse::mutable_file_descriptor_proto(int index) {
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ return file_descriptor_proto_.Mutable(index);
+}
+ void FileDescriptorResponse::set_file_descriptor_proto(int index, const ::std::string& value) {
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ file_descriptor_proto_.Mutable(index)->assign(value);
+}
+ void FileDescriptorResponse::set_file_descriptor_proto(int index, const char* value) {
+ file_descriptor_proto_.Mutable(index)->assign(value);
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+}
+ void FileDescriptorResponse::set_file_descriptor_proto(int index, const void* value, size_t size) {
+ file_descriptor_proto_.Mutable(index)->assign(
+ reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+}
+ ::std::string* FileDescriptorResponse::add_file_descriptor_proto() {
+ // @@protoc_insertion_point(field_add_mutable:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ return file_descriptor_proto_.Add();
+}
+ void FileDescriptorResponse::add_file_descriptor_proto(const ::std::string& value) {
+ file_descriptor_proto_.Add()->assign(value);
+ // @@protoc_insertion_point(field_add:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+}
+ void FileDescriptorResponse::add_file_descriptor_proto(const char* value) {
+ file_descriptor_proto_.Add()->assign(value);
+ // @@protoc_insertion_point(field_add_char:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+}
+ void FileDescriptorResponse::add_file_descriptor_proto(const void* value, size_t size) {
+ file_descriptor_proto_.Add()->assign(reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_add_pointer:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+}
+ const ::google::protobuf::RepeatedPtrField< ::std::string>&
+FileDescriptorResponse::file_descriptor_proto() const {
+ // @@protoc_insertion_point(field_list:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ return file_descriptor_proto_;
+}
+ ::google::protobuf::RepeatedPtrField< ::std::string>*
+FileDescriptorResponse::mutable_file_descriptor_proto() {
+ // @@protoc_insertion_point(field_mutable_list:grpc.reflection.v1alpha.FileDescriptorResponse.file_descriptor_proto)
+ return &file_descriptor_proto_;
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ExtensionNumberResponse::kBaseTypeNameFieldNumber;
+const int ExtensionNumberResponse::kExtensionNumberFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ExtensionNumberResponse::ExtensionNumberResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ExtensionNumberResponse)
+}
+
+void ExtensionNumberResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+ExtensionNumberResponse::ExtensionNumberResponse(const ExtensionNumberResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ExtensionNumberResponse)
+}
+
+void ExtensionNumberResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ base_type_name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+ExtensionNumberResponse::~ExtensionNumberResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ SharedDtor();
+}
+
+void ExtensionNumberResponse::SharedDtor() {
+ base_type_name_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+void ExtensionNumberResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ExtensionNumberResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ExtensionNumberResponse_descriptor_;
+}
+
+const ExtensionNumberResponse& ExtensionNumberResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ExtensionNumberResponse* ExtensionNumberResponse::default_instance_ = NULL;
+
+ExtensionNumberResponse* ExtensionNumberResponse::New(::google::protobuf::Arena* arena) const {
+ ExtensionNumberResponse* n = new ExtensionNumberResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ExtensionNumberResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ base_type_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ extension_number_.Clear();
+}
+
+bool ExtensionNumberResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string base_type_name = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_base_type_name()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->base_type_name().data(), this->base_type_name().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(18)) goto parse_extension_number;
+ break;
+ }
+
+ // repeated int32 extension_number = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_extension_number:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPackedPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, this->mutable_extension_number())));
+ } else if (tag == 16) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ 1, 18, input, this->mutable_extension_number())));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ return false;
+#undef DO_
+}
+
+void ExtensionNumberResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ // optional string base_type_name = 1;
+ if (this->base_type_name().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->base_type_name().data(), this->base_type_name().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->base_type_name(), output);
+ }
+
+ // repeated int32 extension_number = 2;
+ if (this->extension_number_size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::WriteTag(2, ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
+ output->WriteVarint32(_extension_number_cached_byte_size_);
+ }
+ for (int i = 0; i < this->extension_number_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32NoTag(
+ this->extension_number(i), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ExtensionNumberResponse)
+}
+
+::google::protobuf::uint8* ExtensionNumberResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ // optional string base_type_name = 1;
+ if (this->base_type_name().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->base_type_name().data(), this->base_type_name().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->base_type_name(), target);
+ }
+
+ // repeated int32 extension_number = 2;
+ if (this->extension_number_size() > 0) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteTagToArray(
+ 2,
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
+ target);
+ target = ::google::protobuf::io::CodedOutputStream::WriteVarint32ToArray(
+ _extension_number_cached_byte_size_, target);
+ }
+ for (int i = 0; i < this->extension_number_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteInt32NoTagToArray(this->extension_number(i), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ return target;
+}
+
+int ExtensionNumberResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ int total_size = 0;
+
+ // optional string base_type_name = 1;
+ if (this->base_type_name().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->base_type_name());
+ }
+
+ // repeated int32 extension_number = 2;
+ {
+ int data_size = 0;
+ for (int i = 0; i < this->extension_number_size(); i++) {
+ data_size += ::google::protobuf::internal::WireFormatLite::
+ Int32Size(this->extension_number(i));
+ }
+ if (data_size > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(data_size);
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _extension_number_cached_byte_size_ = data_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ total_size += data_size;
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ExtensionNumberResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ExtensionNumberResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ExtensionNumberResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ MergeFrom(*source);
+ }
+}
+
+void ExtensionNumberResponse::MergeFrom(const ExtensionNumberResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ extension_number_.MergeFrom(from.extension_number_);
+ if (from.base_type_name().size() > 0) {
+
+ base_type_name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.base_type_name_);
+ }
+}
+
+void ExtensionNumberResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ExtensionNumberResponse::CopyFrom(const ExtensionNumberResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ExtensionNumberResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ExtensionNumberResponse::IsInitialized() const {
+
+ return true;
+}
+
+void ExtensionNumberResponse::Swap(ExtensionNumberResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ExtensionNumberResponse::InternalSwap(ExtensionNumberResponse* other) {
+ base_type_name_.Swap(&other->base_type_name_);
+ extension_number_.UnsafeArenaSwap(&other->extension_number_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ExtensionNumberResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ExtensionNumberResponse_descriptor_;
+ metadata.reflection = ExtensionNumberResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ExtensionNumberResponse
+
+// optional string base_type_name = 1;
+void ExtensionNumberResponse::clear_base_type_name() {
+ base_type_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ExtensionNumberResponse::base_type_name() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+ return base_type_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ExtensionNumberResponse::set_base_type_name(const ::std::string& value) {
+
+ base_type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+}
+ void ExtensionNumberResponse::set_base_type_name(const char* value) {
+
+ base_type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+}
+ void ExtensionNumberResponse::set_base_type_name(const char* value, size_t size) {
+
+ base_type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+}
+ ::std::string* ExtensionNumberResponse::mutable_base_type_name() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+ return base_type_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ExtensionNumberResponse::release_base_type_name() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+
+ return base_type_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ExtensionNumberResponse::set_allocated_base_type_name(::std::string* base_type_name) {
+ if (base_type_name != NULL) {
+
+ } else {
+
+ }
+ base_type_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), base_type_name);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ExtensionNumberResponse.base_type_name)
+}
+
+// repeated int32 extension_number = 2;
+int ExtensionNumberResponse::extension_number_size() const {
+ return extension_number_.size();
+}
+void ExtensionNumberResponse::clear_extension_number() {
+ extension_number_.Clear();
+}
+ ::google::protobuf::int32 ExtensionNumberResponse::extension_number(int index) const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ExtensionNumberResponse.extension_number)
+ return extension_number_.Get(index);
+}
+ void ExtensionNumberResponse::set_extension_number(int index, ::google::protobuf::int32 value) {
+ extension_number_.Set(index, value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ExtensionNumberResponse.extension_number)
+}
+ void ExtensionNumberResponse::add_extension_number(::google::protobuf::int32 value) {
+ extension_number_.Add(value);
+ // @@protoc_insertion_point(field_add:grpc.reflection.v1alpha.ExtensionNumberResponse.extension_number)
+}
+ const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
+ExtensionNumberResponse::extension_number() const {
+ // @@protoc_insertion_point(field_list:grpc.reflection.v1alpha.ExtensionNumberResponse.extension_number)
+ return extension_number_;
+}
+ ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
+ExtensionNumberResponse::mutable_extension_number() {
+ // @@protoc_insertion_point(field_mutable_list:grpc.reflection.v1alpha.ExtensionNumberResponse.extension_number)
+ return &extension_number_;
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ListServiceResponse::kServiceFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ListServiceResponse::ListServiceResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ListServiceResponse)
+}
+
+void ListServiceResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+ListServiceResponse::ListServiceResponse(const ListServiceResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ListServiceResponse)
+}
+
+void ListServiceResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ _cached_size_ = 0;
+}
+
+ListServiceResponse::~ListServiceResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ListServiceResponse)
+ SharedDtor();
+}
+
+void ListServiceResponse::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void ListServiceResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ListServiceResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ListServiceResponse_descriptor_;
+}
+
+const ListServiceResponse& ListServiceResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ListServiceResponse* ListServiceResponse::default_instance_ = NULL;
+
+ListServiceResponse* ListServiceResponse::New(::google::protobuf::Arena* arena) const {
+ ListServiceResponse* n = new ListServiceResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ListServiceResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ListServiceResponse)
+ service_.Clear();
+}
+
+bool ListServiceResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ListServiceResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // repeated .grpc.reflection.v1alpha.ServiceResponse service = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(input->IncrementRecursionDepth());
+ parse_loop_service:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtualNoRecursionDepth(
+ input, add_service()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(10)) goto parse_loop_service;
+ input->UnsafeDecrementRecursionDepth();
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ListServiceResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ListServiceResponse)
+ return false;
+#undef DO_
+}
+
+void ListServiceResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ListServiceResponse)
+ // repeated .grpc.reflection.v1alpha.ServiceResponse service = 1;
+ for (unsigned int i = 0, n = this->service_size(); i < n; i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 1, this->service(i), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ListServiceResponse)
+}
+
+::google::protobuf::uint8* ListServiceResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ListServiceResponse)
+ // repeated .grpc.reflection.v1alpha.ServiceResponse service = 1;
+ for (unsigned int i = 0, n = this->service_size(); i < n; i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 1, this->service(i), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ListServiceResponse)
+ return target;
+}
+
+int ListServiceResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ListServiceResponse)
+ int total_size = 0;
+
+ // repeated .grpc.reflection.v1alpha.ServiceResponse service = 1;
+ total_size += 1 * this->service_size();
+ for (int i = 0; i < this->service_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->service(i));
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ListServiceResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ListServiceResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ListServiceResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ListServiceResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ListServiceResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ListServiceResponse)
+ MergeFrom(*source);
+ }
+}
+
+void ListServiceResponse::MergeFrom(const ListServiceResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ListServiceResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ service_.MergeFrom(from.service_);
+}
+
+void ListServiceResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ListServiceResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ListServiceResponse::CopyFrom(const ListServiceResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ListServiceResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ListServiceResponse::IsInitialized() const {
+
+ return true;
+}
+
+void ListServiceResponse::Swap(ListServiceResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ListServiceResponse::InternalSwap(ListServiceResponse* other) {
+ service_.UnsafeArenaSwap(&other->service_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ListServiceResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ListServiceResponse_descriptor_;
+ metadata.reflection = ListServiceResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ListServiceResponse
+
+// repeated .grpc.reflection.v1alpha.ServiceResponse service = 1;
+int ListServiceResponse::service_size() const {
+ return service_.size();
+}
+void ListServiceResponse::clear_service() {
+ service_.Clear();
+}
+const ::grpc::reflection::v1alpha::ServiceResponse& ListServiceResponse::service(int index) const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ListServiceResponse.service)
+ return service_.Get(index);
+}
+::grpc::reflection::v1alpha::ServiceResponse* ListServiceResponse::mutable_service(int index) {
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ListServiceResponse.service)
+ return service_.Mutable(index);
+}
+::grpc::reflection::v1alpha::ServiceResponse* ListServiceResponse::add_service() {
+ // @@protoc_insertion_point(field_add:grpc.reflection.v1alpha.ListServiceResponse.service)
+ return service_.Add();
+}
+::google::protobuf::RepeatedPtrField< ::grpc::reflection::v1alpha::ServiceResponse >*
+ListServiceResponse::mutable_service() {
+ // @@protoc_insertion_point(field_mutable_list:grpc.reflection.v1alpha.ListServiceResponse.service)
+ return &service_;
+}
+const ::google::protobuf::RepeatedPtrField< ::grpc::reflection::v1alpha::ServiceResponse >&
+ListServiceResponse::service() const {
+ // @@protoc_insertion_point(field_list:grpc.reflection.v1alpha.ListServiceResponse.service)
+ return service_;
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ServiceResponse::kNameFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ServiceResponse::ServiceResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ServiceResponse)
+}
+
+void ServiceResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+ServiceResponse::ServiceResponse(const ServiceResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ServiceResponse)
+}
+
+void ServiceResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ name_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+ServiceResponse::~ServiceResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ServiceResponse)
+ SharedDtor();
+}
+
+void ServiceResponse::SharedDtor() {
+ name_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (this != default_instance_) {
+ }
+}
+
+void ServiceResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ServiceResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ServiceResponse_descriptor_;
+}
+
+const ServiceResponse& ServiceResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ServiceResponse* ServiceResponse::default_instance_ = NULL;
+
+ServiceResponse* ServiceResponse::New(::google::protobuf::Arena* arena) const {
+ ServiceResponse* n = new ServiceResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ServiceResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ServiceResponse)
+ name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+bool ServiceResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ServiceResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string name = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_name()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ServiceResponse.name"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ServiceResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ServiceResponse)
+ return false;
+#undef DO_
+}
+
+void ServiceResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ServiceResponse)
+ // optional string name = 1;
+ if (this->name().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServiceResponse.name");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->name(), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ServiceResponse)
+}
+
+::google::protobuf::uint8* ServiceResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ServiceResponse)
+ // optional string name = 1;
+ if (this->name().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ServiceResponse.name");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->name(), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ServiceResponse)
+ return target;
+}
+
+int ServiceResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ServiceResponse)
+ int total_size = 0;
+
+ // optional string name = 1;
+ if (this->name().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->name());
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ServiceResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ServiceResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ServiceResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ServiceResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ServiceResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ServiceResponse)
+ MergeFrom(*source);
+ }
+}
+
+void ServiceResponse::MergeFrom(const ServiceResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ServiceResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ if (from.name().size() > 0) {
+
+ name_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.name_);
+ }
+}
+
+void ServiceResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ServiceResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ServiceResponse::CopyFrom(const ServiceResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ServiceResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ServiceResponse::IsInitialized() const {
+
+ return true;
+}
+
+void ServiceResponse::Swap(ServiceResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ServiceResponse::InternalSwap(ServiceResponse* other) {
+ name_.Swap(&other->name_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ServiceResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ServiceResponse_descriptor_;
+ metadata.reflection = ServiceResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ServiceResponse
+
+// optional string name = 1;
+void ServiceResponse::clear_name() {
+ name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ServiceResponse::name() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ServiceResponse.name)
+ return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServiceResponse::set_name(const ::std::string& value) {
+
+ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ServiceResponse.name)
+}
+ void ServiceResponse::set_name(const char* value) {
+
+ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ServiceResponse.name)
+}
+ void ServiceResponse::set_name(const char* value, size_t size) {
+
+ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ServiceResponse.name)
+}
+ ::std::string* ServiceResponse::mutable_name() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ServiceResponse.name)
+ return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ServiceResponse::release_name() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ServiceResponse.name)
+
+ return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ServiceResponse::set_allocated_name(::std::string* name) {
+ if (name != NULL) {
+
+ } else {
+
+ }
+ name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ServiceResponse.name)
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// ===================================================================
+
+#if !defined(_MSC_VER) || _MSC_VER >= 1900
+const int ErrorResponse::kErrorCodeFieldNumber;
+const int ErrorResponse::kErrorMessageFieldNumber;
+#endif // !defined(_MSC_VER) || _MSC_VER >= 1900
+
+ErrorResponse::ErrorResponse()
+ : ::google::protobuf::Message(), _internal_metadata_(NULL) {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:grpc.reflection.v1alpha.ErrorResponse)
+}
+
+void ErrorResponse::InitAsDefaultInstance() {
+ _is_default_instance_ = true;
+}
+
+ErrorResponse::ErrorResponse(const ErrorResponse& from)
+ : ::google::protobuf::Message(),
+ _internal_metadata_(NULL) {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:grpc.reflection.v1alpha.ErrorResponse)
+}
+
+void ErrorResponse::SharedCtor() {
+ _is_default_instance_ = false;
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ error_code_ = 0;
+ error_message_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+ErrorResponse::~ErrorResponse() {
+ // @@protoc_insertion_point(destructor:grpc.reflection.v1alpha.ErrorResponse)
+ SharedDtor();
+}
+
+void ErrorResponse::SharedDtor() {
+ error_message_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ if (this != default_instance_) {
+ }
+}
+
+void ErrorResponse::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ErrorResponse::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return ErrorResponse_descriptor_;
+}
+
+const ErrorResponse& ErrorResponse::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_reflection_2eproto();
+ return *default_instance_;
+}
+
+ErrorResponse* ErrorResponse::default_instance_ = NULL;
+
+ErrorResponse* ErrorResponse::New(::google::protobuf::Arena* arena) const {
+ ErrorResponse* n = new ErrorResponse;
+ if (arena != NULL) {
+ arena->Own(n);
+ }
+ return n;
+}
+
+void ErrorResponse::Clear() {
+// @@protoc_insertion_point(message_clear_start:grpc.reflection.v1alpha.ErrorResponse)
+ error_code_ = 0;
+ error_message_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+
+bool ErrorResponse::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!GOOGLE_PREDICT_TRUE(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:grpc.reflection.v1alpha.ErrorResponse)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional int32 error_code = 1;
+ case 1: {
+ if (tag == 8) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &error_code_)));
+
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(18)) goto parse_error_message;
+ break;
+ }
+
+ // optional string error_message = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_error_message:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_error_message()));
+ DO_(::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->error_message().data(), this->error_message().length(),
+ ::google::protobuf::internal::WireFormatLite::PARSE,
+ "grpc.reflection.v1alpha.ErrorResponse.error_message"));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:grpc.reflection.v1alpha.ErrorResponse)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:grpc.reflection.v1alpha.ErrorResponse)
+ return false;
+#undef DO_
+}
+
+void ErrorResponse::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:grpc.reflection.v1alpha.ErrorResponse)
+ // optional int32 error_code = 1;
+ if (this->error_code() != 0) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->error_code(), output);
+ }
+
+ // optional string error_message = 2;
+ if (this->error_message().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->error_message().data(), this->error_message().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ErrorResponse.error_message");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 2, this->error_message(), output);
+ }
+
+ // @@protoc_insertion_point(serialize_end:grpc.reflection.v1alpha.ErrorResponse)
+}
+
+::google::protobuf::uint8* ErrorResponse::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:grpc.reflection.v1alpha.ErrorResponse)
+ // optional int32 error_code = 1;
+ if (this->error_code() != 0) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->error_code(), target);
+ }
+
+ // optional string error_message = 2;
+ if (this->error_message().size() > 0) {
+ ::google::protobuf::internal::WireFormatLite::VerifyUtf8String(
+ this->error_message().data(), this->error_message().length(),
+ ::google::protobuf::internal::WireFormatLite::SERIALIZE,
+ "grpc.reflection.v1alpha.ErrorResponse.error_message");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 2, this->error_message(), target);
+ }
+
+ // @@protoc_insertion_point(serialize_to_array_end:grpc.reflection.v1alpha.ErrorResponse)
+ return target;
+}
+
+int ErrorResponse::ByteSize() const {
+// @@protoc_insertion_point(message_byte_size_start:grpc.reflection.v1alpha.ErrorResponse)
+ int total_size = 0;
+
+ // optional int32 error_code = 1;
+ if (this->error_code() != 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->error_code());
+ }
+
+ // optional string error_message = 2;
+ if (this->error_message().size() > 0) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->error_message());
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void ErrorResponse::MergeFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_merge_from_start:grpc.reflection.v1alpha.ErrorResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ const ErrorResponse* source =
+ ::google::protobuf::internal::DynamicCastToGenerated<const ErrorResponse>(
+ &from);
+ if (source == NULL) {
+ // @@protoc_insertion_point(generalized_merge_from_cast_fail:grpc.reflection.v1alpha.ErrorResponse)
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ // @@protoc_insertion_point(generalized_merge_from_cast_success:grpc.reflection.v1alpha.ErrorResponse)
+ MergeFrom(*source);
+ }
+}
+
+void ErrorResponse::MergeFrom(const ErrorResponse& from) {
+// @@protoc_insertion_point(class_specific_merge_from_start:grpc.reflection.v1alpha.ErrorResponse)
+ if (GOOGLE_PREDICT_FALSE(&from == this)) MergeFromFail(__LINE__);
+ if (from.error_code() != 0) {
+ set_error_code(from.error_code());
+ }
+ if (from.error_message().size() > 0) {
+
+ error_message_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.error_message_);
+ }
+}
+
+void ErrorResponse::CopyFrom(const ::google::protobuf::Message& from) {
+// @@protoc_insertion_point(generalized_copy_from_start:grpc.reflection.v1alpha.ErrorResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void ErrorResponse::CopyFrom(const ErrorResponse& from) {
+// @@protoc_insertion_point(class_specific_copy_from_start:grpc.reflection.v1alpha.ErrorResponse)
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool ErrorResponse::IsInitialized() const {
+
+ return true;
+}
+
+void ErrorResponse::Swap(ErrorResponse* other) {
+ if (other == this) return;
+ InternalSwap(other);
+}
+void ErrorResponse::InternalSwap(ErrorResponse* other) {
+ std::swap(error_code_, other->error_code_);
+ error_message_.Swap(&other->error_message_);
+ _internal_metadata_.Swap(&other->_internal_metadata_);
+ std::swap(_cached_size_, other->_cached_size_);
+}
+
+::google::protobuf::Metadata ErrorResponse::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = ErrorResponse_descriptor_;
+ metadata.reflection = ErrorResponse_reflection_;
+ return metadata;
+}
+
+#if PROTOBUF_INLINE_NOT_IN_HEADERS
+// ErrorResponse
+
+// optional int32 error_code = 1;
+void ErrorResponse::clear_error_code() {
+ error_code_ = 0;
+}
+ ::google::protobuf::int32 ErrorResponse::error_code() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ErrorResponse.error_code)
+ return error_code_;
+}
+ void ErrorResponse::set_error_code(::google::protobuf::int32 value) {
+
+ error_code_ = value;
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ErrorResponse.error_code)
+}
+
+// optional string error_message = 2;
+void ErrorResponse::clear_error_message() {
+ error_message_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ const ::std::string& ErrorResponse::error_message() const {
+ // @@protoc_insertion_point(field_get:grpc.reflection.v1alpha.ErrorResponse.error_message)
+ return error_message_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ErrorResponse::set_error_message(const ::std::string& value) {
+
+ error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+ // @@protoc_insertion_point(field_set:grpc.reflection.v1alpha.ErrorResponse.error_message)
+}
+ void ErrorResponse::set_error_message(const char* value) {
+
+ error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+ // @@protoc_insertion_point(field_set_char:grpc.reflection.v1alpha.ErrorResponse.error_message)
+}
+ void ErrorResponse::set_error_message(const char* value, size_t size) {
+
+ error_message_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+ ::std::string(reinterpret_cast<const char*>(value), size));
+ // @@protoc_insertion_point(field_set_pointer:grpc.reflection.v1alpha.ErrorResponse.error_message)
+}
+ ::std::string* ErrorResponse::mutable_error_message() {
+
+ // @@protoc_insertion_point(field_mutable:grpc.reflection.v1alpha.ErrorResponse.error_message)
+ return error_message_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ ::std::string* ErrorResponse::release_error_message() {
+ // @@protoc_insertion_point(field_release:grpc.reflection.v1alpha.ErrorResponse.error_message)
+
+ return error_message_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+ void ErrorResponse::set_allocated_error_message(::std::string* error_message) {
+ if (error_message != NULL) {
+
+ } else {
+
+ }
+ error_message_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), error_message);
+ // @@protoc_insertion_point(field_set_allocated:grpc.reflection.v1alpha.ErrorResponse.error_message)
+}
+
+#endif // PROTOBUF_INLINE_NOT_IN_HEADERS
+
+// @@protoc_insertion_point(namespace_scope)
+
+} // namespace v1alpha
+} // namespace reflection
+} // namespace grpc
+
+// @@protoc_insertion_point(global_scope)
diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc
index f6c3e5747c..fb4c68ebe4 100644
--- a/src/cpp/server/server.cc
+++ b/src/cpp/server/server.cc
@@ -67,7 +67,7 @@ static std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
static gpr_once g_once_init_callbacks = GPR_ONCE_INIT;
static void InitGlobalCallbacks() {
- if (g_callbacks == nullptr) {
+ if (!g_callbacks) {
g_callbacks.reset(new DefaultGlobalCallbacks());
}
}
@@ -324,11 +324,15 @@ Server::~Server() {
}
void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
- GPR_ASSERT(g_callbacks == nullptr);
- GPR_ASSERT(callbacks != nullptr);
+ GPR_ASSERT(!g_callbacks);
+ GPR_ASSERT(callbacks);
g_callbacks.reset(callbacks);
}
+grpc_server* Server::c_server() { return server_; }
+
+CompletionQueue* Server::completion_queue() { return &cq_; }
+
static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
RpcServiceMethod* method) {
switch (method->method_type()) {
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index 54feac3982..45bb858e2e 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -37,6 +37,8 @@
#include <grpc++/server.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
+
+#include "include/grpc/support/useful.h"
#include "src/cpp/server/thread_pool_interface.h"
namespace grpc {
@@ -52,12 +54,19 @@ static void do_plugin_list_init(void) {
ServerBuilder::ServerBuilder()
: max_message_size_(-1), generic_service_(nullptr) {
- grpc_compression_options_init(&compression_options_);
gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
- for (auto factory : (*g_plugin_factory_list)) {
- std::unique_ptr<ServerBuilderPlugin> plugin = factory();
- plugins_[plugin->name()] = std::move(plugin);
+ for (auto it = g_plugin_factory_list->begin();
+ it != g_plugin_factory_list->end(); it++) {
+ auto& factory = *it;
+ plugins_.emplace_back(factory());
}
+ // All compression algorithms are enabled by default.
+ enabled_compression_algorithms_bitset_ =
+ (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+ memset(&maybe_default_compression_level_, 0,
+ sizeof(maybe_default_compression_level_));
+ memset(&maybe_default_compression_algorithm_, 0,
+ sizeof(maybe_default_compression_algorithm_));
}
std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
@@ -67,35 +76,65 @@ std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
return std::unique_ptr<ServerCompletionQueue>(cq);
}
-void ServerBuilder::RegisterService(Service* service) {
+ServerBuilder& ServerBuilder::RegisterService(Service* service) {
services_.emplace_back(new NamedService(service));
+ return *this;
}
-void ServerBuilder::RegisterService(const grpc::string& addr,
- Service* service) {
+ServerBuilder& ServerBuilder::RegisterService(const grpc::string& addr,
+ Service* service) {
services_.emplace_back(new NamedService(addr, service));
+ return *this;
}
-void ServerBuilder::RegisterAsyncGenericService(AsyncGenericService* service) {
+ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
+ AsyncGenericService* service) {
if (generic_service_) {
gpr_log(GPR_ERROR,
"Adding multiple AsyncGenericService is unsupported for now. "
"Dropping the service %p",
service);
- return;
+ } else {
+ generic_service_ = service;
}
- generic_service_ = service;
+ return *this;
}
-void ServerBuilder::SetOption(std::unique_ptr<ServerBuilderOption> option) {
+ServerBuilder& ServerBuilder::SetOption(
+ std::unique_ptr<ServerBuilderOption> option) {
options_.push_back(std::move(option));
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetCompressionAlgorithmSupportStatus(
+ grpc_compression_algorithm algorithm, bool enabled) {
+ if (enabled) {
+ GPR_BITSET(&enabled_compression_algorithms_bitset_, algorithm);
+ } else {
+ GPR_BITCLEAR(&enabled_compression_algorithms_bitset_, algorithm);
+ }
+ return *this;
}
-void ServerBuilder::AddListeningPort(const grpc::string& addr,
- std::shared_ptr<ServerCredentials> creds,
- int* selected_port) {
+ServerBuilder& ServerBuilder::SetDefaultCompressionLevel(
+ grpc_compression_level level) {
+ maybe_default_compression_level_.level = level;
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm(
+ grpc_compression_algorithm algorithm) {
+ maybe_default_compression_algorithm_.is_set = true;
+ maybe_default_compression_algorithm_.algorithm = algorithm;
+ return *this;
+}
+
+ServerBuilder& ServerBuilder::AddListeningPort(
+ const grpc::string& addr, std::shared_ptr<ServerCredentials> creds,
+ int* selected_port) {
Port port = {addr, creds, selected_port};
ports_.push_back(port);
+ return *this;
}
std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
@@ -103,7 +142,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
bool has_sync_methods = false;
for (auto it = services_.begin(); it != services_.end(); ++it) {
if ((*it)->service->has_synchronous_methods()) {
- if (thread_pool == nullptr) {
+ if (!thread_pool) {
thread_pool.reset(CreateDefaultThreadPool());
has_sync_methods = true;
break;
@@ -115,9 +154,9 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
(*option)->UpdateArguments(&args);
(*option)->UpdatePlugins(&plugins_);
}
- if (thread_pool == nullptr) {
+ if (!thread_pool) {
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
- if ((*plugin).second->has_sync_methods()) {
+ if ((*plugin)->has_sync_methods()) {
thread_pool.reset(CreateDefaultThreadPool());
has_sync_methods = true;
break;
@@ -128,7 +167,15 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, max_message_size_);
}
args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
- compression_options_.enabled_algorithms_bitset);
+ enabled_compression_algorithms_bitset_);
+ if (maybe_default_compression_level_.is_set) {
+ args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL,
+ maybe_default_compression_level_.level);
+ }
+ if (maybe_default_compression_algorithm_.is_set) {
+ args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
+ maybe_default_compression_algorithm_.algorithm);
+ }
std::unique_ptr<Server> server(
new Server(thread_pool.release(), true, max_message_size_, &args));
ServerInitializer* initializer = server->initializer();
@@ -166,7 +213,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
}
}
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
- (*plugin).second->InitServer(initializer);
+ (*plugin)->InitServer(initializer);
}
if (generic_service_) {
server->RegisterAsyncGenericService(generic_service_);
@@ -192,7 +239,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
return nullptr;
}
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
- (*plugin).second->Finish(initializer);
+ (*plugin)->Finish(initializer);
}
return server;
}
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index 204fef1b09..43117fd1e9 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -42,7 +42,6 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/lib/channel/compress_filter.h"
#include "src/core/lib/surface/call.h"
namespace grpc {
@@ -196,6 +195,9 @@ bool ServerContext::IsCancelled() const {
}
void ServerContext::set_compression_level(grpc_compression_level level) {
+ // TODO(dgq): get rid of grpc_call_compression_for_level and propagate the
+ // compression level by adding a new argument to
+ // CallOpSendInitialMetadata::SendInitialMetadata.
const grpc_compression_algorithm algorithm_for_level =
grpc_call_compression_for_level(call_, level);
set_compression_algorithm(algorithm_for_level);
@@ -210,7 +212,7 @@ void ServerContext::set_compression_algorithm(
abort();
}
GPR_ASSERT(algorithm_name != NULL);
- AddInitialMetadata(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, algorithm_name);
+ AddInitialMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
}
grpc::string ServerContext::peer() const {
diff --git a/src/cpp/server/server_posix.cc b/src/cpp/server/server_posix.cc
new file mode 100644
index 0000000000..c3aa2adc60
--- /dev/null
+++ b/src/cpp/server/server_posix.cc
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/server_posix.h>
+
+#include <grpc/grpc_posix.h>
+
+namespace grpc {
+
+#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
+
+void AddInsecureChannelFromFd(Server* server, int fd) {
+ grpc_server_add_insecure_channel_from_fd(
+ server->c_server(), server->completion_queue()->cq(), fd);
+}
+
+#endif // GPR_SUPPORT_CHANNELS_FROM_FD
+
+} // namespace grpc
diff --git a/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
new file mode 100644
index 0000000000..e605a310f9
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/AppDomainUnloadTest.cs
@@ -0,0 +1,90 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Diagnostics;
+using System.Linq;
+using System.Reflection;
+using System.Threading;
+using System.Threading.Tasks;
+using Grpc.Core;
+using Grpc.Core.Internal;
+using Grpc.Core.Utils;
+using NUnit.Framework;
+
+namespace Grpc.Core.Tests
+{
+ public class AppDomainUnloadTest
+ {
+ [Test]
+ public void AppDomainUnloadHookCanCleanupAbandonedCall()
+ {
+ var setup = new AppDomainSetup
+ {
+ ApplicationBase = AppDomain.CurrentDomain.BaseDirectory
+ };
+ var childDomain = AppDomain.CreateDomain("test", null, setup);
+ var remoteObj = childDomain.CreateInstance(typeof(AppDomainTestClass).Assembly.GetName().Name, typeof(AppDomainTestClass).FullName);
+
+ // Try to unload the appdomain once we've created a server and a channel inside the appdomain.
+ AppDomain.Unload(childDomain);
+ }
+
+ public class AppDomainTestClass
+ {
+ const string Host = "127.0.0.1";
+
+ /// <summary>
+ /// Creates a server and a channel and initiates a call. The code is invoked from inside an AppDomain
+ /// to test whether AppDomain.Unload() works while gRPC is being used.
+ /// </summary>
+ public AppDomainTestClass()
+ {
+ var helper = new MockServiceHelper(Host);
+ var server = helper.GetServer();
+ server.Start();
+ var channel = helper.GetChannel();
+
+ var readyToShutdown = new TaskCompletionSource<object>();
+ helper.DuplexStreamingHandler = new DuplexStreamingServerMethod<string, string>(async (requestStream, responseStream, context) =>
+ {
+ readyToShutdown.SetResult(null);
+ await requestStream.ToListAsync();
+ });
+
+ var call = Calls.AsyncDuplexStreamingCall(helper.CreateDuplexStreamingCall());
+ readyToShutdown.Task.Wait(); // make sure handler is running
+ }
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core.Tests/ChannelTest.cs b/src/csharp/Grpc.Core.Tests/ChannelTest.cs
index 850d70ce92..db0ef3a4cd 100644
--- a/src/csharp/Grpc.Core.Tests/ChannelTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ChannelTest.cs
@@ -71,7 +71,7 @@ namespace Grpc.Core.Tests
public void WaitForStateChangedAsync_InvalidArgument()
{
var channel = new Channel("localhost", ChannelCredentials.Insecure);
- Assert.ThrowsAsync(typeof(ArgumentException), async () => await channel.WaitForStateChangedAsync(ChannelState.FatalFailure));
+ Assert.ThrowsAsync(typeof(ArgumentException), async () => await channel.WaitForStateChangedAsync(ChannelState.Shutdown));
channel.ShutdownAsync().Wait();
}
@@ -102,11 +102,11 @@ namespace Grpc.Core.Tests
}
[Test]
- public async Task StateIsFatalFailureAfterShutdown()
+ public async Task StateIsShutdownAfterShutdown()
{
var channel = new Channel("localhost", ChannelCredentials.Insecure);
await channel.ShutdownAsync();
- Assert.AreEqual(ChannelState.FatalFailure, channel.State);
+ Assert.AreEqual(ChannelState.Shutdown, channel.State);
}
[Test]
diff --git a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
index d92addbf54..dcdddc769e 100644
--- a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
@@ -235,8 +235,16 @@ namespace Grpc.Core.Tests
await barrier.Task; // make sure the handler has started.
cts.Cancel();
- var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseAsync);
- Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+ try
+ {
+ // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+ await call.ResponseAsync;
+ Assert.Fail();
+ }
+ catch (RpcException ex)
+ {
+ Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+ }
}
[Test]
@@ -265,9 +273,15 @@ namespace Grpc.Core.Tests
await handlerStartedBarrier.Task;
cts.Cancel();
- var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseAsync);
- Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
-
+ try
+ {
+ await call.ResponseAsync;
+ Assert.Fail();
+ }
+ catch (RpcException ex)
+ {
+ Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+ }
Assert.AreEqual("SUCCESS", await successTcs.Task);
}
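The try/await/catch blocks above work around NUnit's Assert.ThrowsAsync, which blocks on Task.Wait and can deadlock these handler-driven tests. A hedged sketch of a non-blocking helper expressing the same intent (the helper itself is illustrative and not part of this change):

using System;
using System.Threading.Tasks;
using NUnit.Framework;

public static class AsyncAssert
{
    // Awaits the asynchronous action and asserts that it faults with TException,
    // without ever calling Task.Wait on the test thread.
    public static async Task<TException> ThrowsAsync<TException>(Func<Task> action)
        where TException : Exception
    {
        try
        {
            await action();
        }
        catch (TException ex)
        {
            return ex;
        }
        Assert.Fail("Expected exception of type " + typeof(TException).Name);
        return null;  // unreachable, keeps the compiler satisfied
    }
}

// Usage mirroring the tests above:
// var ex = await AsyncAssert.ThrowsAsync<RpcException>(async () => await call.ResponseAsync);
// Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);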
diff --git a/src/csharp/Grpc.Core.Tests/ContextPropagationTest.cs b/src/csharp/Grpc.Core.Tests/ContextPropagationTest.cs
index cec8c7ce6b..6a156293ad 100644
--- a/src/csharp/Grpc.Core.Tests/ContextPropagationTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ContextPropagationTest.cs
@@ -105,7 +105,15 @@ namespace Grpc.Core.Tests
var parentCall = Calls.AsyncClientStreamingCall(helper.CreateClientStreamingCall(new CallOptions(cancellationToken: cts.Token)));
await readyToCancelTcs.Task;
cts.Cancel();
- Assert.ThrowsAsync(typeof(RpcException), async () => await parentCall);
+ try
+ {
+ // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+ await parentCall;
+ Assert.Fail();
+ }
+ catch (RpcException)
+ {
+ }
Assert.AreEqual("CHILD_CALL_CANCELLED", await successTcs.Task);
}
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index 47131fc454..074c9603dc 100644
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -86,6 +86,10 @@
<Compile Include="NUnitMain.cs" />
<Compile Include="Internal\FakeNativeCall.cs" />
<Compile Include="Internal\AsyncCallServerTest.cs" />
+ <Compile Include="ShutdownHookServerTest.cs" />
+ <Compile Include="ShutdownHookPendingCallTest.cs" />
+ <Compile Include="ShutdownHookClientTest.cs" />
+ <Compile Include="AppDomainUnloadTest.cs" />
</ItemGroup>
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
<ItemGroup>
diff --git a/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs b/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
index ab12c120cb..3ec2cf48cd 100644
--- a/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
+++ b/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
@@ -32,7 +32,7 @@
#endregion
using System;
-using System.Threading;
+using System.Linq;
using Grpc.Core;
using NUnit.Framework;
@@ -44,8 +44,12 @@ namespace Grpc.Core.Tests
public void InitializeAndShutdownGrpcEnvironment()
{
var env = GrpcEnvironment.AddRef();
- Assert.IsNotNull(env.CompletionQueue);
- GrpcEnvironment.Release();
+ Assert.IsTrue(env.CompletionQueues.Count > 0);
+ for (int i = 0; i < env.CompletionQueues.Count; i++)
+ {
+ Assert.IsNotNull(env.CompletionQueues.ElementAt(i));
+ }
+ GrpcEnvironment.ReleaseAsync().Wait();
}
[Test]
@@ -54,8 +58,8 @@ namespace Grpc.Core.Tests
var env1 = GrpcEnvironment.AddRef();
var env2 = GrpcEnvironment.AddRef();
Assert.AreSame(env1, env2);
- GrpcEnvironment.Release();
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
+ GrpcEnvironment.ReleaseAsync().Wait();
}
[Test]
@@ -64,10 +68,10 @@ namespace Grpc.Core.Tests
Assert.AreEqual(0, GrpcEnvironment.GetRefCount());
var env1 = GrpcEnvironment.AddRef();
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
var env2 = GrpcEnvironment.AddRef();
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
Assert.AreNotSame(env1, env2);
}
@@ -76,7 +80,7 @@ namespace Grpc.Core.Tests
public void ReleaseWithoutAddRef()
{
Assert.AreEqual(0, GrpcEnvironment.GetRefCount());
- Assert.Throws(typeof(InvalidOperationException), () => GrpcEnvironment.Release());
+ Assert.ThrowsAsync(typeof(InvalidOperationException), async () => await GrpcEnvironment.ReleaseAsync());
}
[Test]
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
index 0e204761f6..c35aaf680f 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
@@ -53,8 +53,6 @@ namespace Grpc.Core.Internal.Tests
[SetUp]
public void Init()
{
- var environment = GrpcEnvironment.AddRef();
-
// Create a fake server just so we have an instance to refer to.
// The server won't actually be used at all.
server = new Server()
@@ -66,7 +64,6 @@ namespace Grpc.Core.Internal.Tests
fakeCall = new FakeNativeCall();
asyncCallServer = new AsyncCallServer<string, string>(
Marshallers.StringMarshaller.Serializer, Marshallers.StringMarshaller.Deserializer,
- environment,
server);
asyncCallServer.InitializeForTesting(fakeCall);
}
@@ -75,7 +72,6 @@ namespace Grpc.Core.Internal.Tests
public void Cleanup()
{
server.ShutdownAsync().Wait();
- GrpcEnvironment.Release();
}
[Test]
@@ -136,7 +132,6 @@ namespace Grpc.Core.Internal.Tests
public void WriteAfterCancelNotificationFails()
{
var finishedTask = asyncCallServer.ServerSideCallAsync();
- var requestStream = new ServerRequestStream<string, string>(asyncCallServer);
var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
@@ -181,6 +176,21 @@ namespace Grpc.Core.Internal.Tests
AssertFinished(asyncCallServer, fakeCall, finishedTask);
}
+ [Test]
+ public void WriteAfterWriteStatusThrowsInvalidOperationException()
+ {
+ var finishedTask = asyncCallServer.ServerSideCallAsync();
+ var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
+
+ asyncCallServer.SendStatusFromServerAsync(Status.DefaultSuccess, new Metadata(), null);
+ Assert.ThrowsAsync(typeof(InvalidOperationException), async () => await responseStream.WriteAsync("request1"));
+
+ fakeCall.SendStatusFromServerHandler(true);
+ fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+
+ AssertFinished(asyncCallServer, fakeCall, finishedTask);
+ }
+
static void AssertFinished(AsyncCallServer<string, string> asyncCallServer, FakeNativeCall fakeCall, Task finishedTask)
{
Assert.IsTrue(fakeCall.IsDisposed);
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
index 777a1c8c50..98e27a17a1 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
@@ -33,7 +33,6 @@
using System;
using System.Collections.Generic;
-using System.Runtime.InteropServices;
using System.Threading.Tasks;
using Grpc.Core.Internal;
@@ -82,7 +81,7 @@ namespace Grpc.Core.Internal.Tests
Assert.ThrowsAsync(typeof(InvalidOperationException),
async () => await asyncCall.ReadMessageAsync());
Assert.Throws(typeof(InvalidOperationException),
- () => asyncCall.StartSendMessage("abc", new WriteFlags(), (x,y) => {}));
+ () => asyncCall.SendMessageAsync("abc", new WriteFlags()));
}
[Test]
@@ -103,7 +102,7 @@ namespace Grpc.Core.Internal.Tests
var resultTask = asyncCall.UnaryCallAsync("request1");
fakeCall.UnaryResponseClientHandler(true,
CreateClientSideStatus(StatusCode.InvalidArgument),
- CreateResponsePayload(),
+ null,
new Metadata());
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.InvalidArgument);
@@ -148,7 +147,7 @@ namespace Grpc.Core.Internal.Tests
var resultTask = asyncCall.ClientStreamingCallAsync();
fakeCall.UnaryResponseClientHandler(true,
CreateClientSideStatus(StatusCode.InvalidArgument),
- CreateResponsePayload(),
+ null,
new Metadata());
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.InvalidArgument);
@@ -193,7 +192,7 @@ namespace Grpc.Core.Internal.Tests
fakeCall.UnaryResponseClientHandler(true,
CreateClientSideStatus(StatusCode.Internal),
- CreateResponsePayload(),
+ null,
new Metadata());
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Internal);
@@ -211,7 +210,9 @@ namespace Grpc.Core.Internal.Tests
new Metadata());
AssertUnaryResponseSuccess(asyncCall, fakeCall, resultTask);
- var ex = Assert.Throws<RpcException>(() => requestStream.WriteAsync("request1"));
+
+ var writeTask = requestStream.WriteAsync("request1");
+ var ex = Assert.ThrowsAsync<RpcException>(async () => await writeTask);
Assert.AreEqual(Status.DefaultSuccess, ex.Status);
}
@@ -227,7 +228,9 @@ namespace Grpc.Core.Internal.Tests
new Metadata());
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.OutOfRange);
- var ex = Assert.Throws<RpcException>(() => requestStream.WriteAsync("request1"));
+
+ var writeTask = requestStream.WriteAsync("request1");
+ var ex = Assert.ThrowsAsync<RpcException>(async () => await writeTask);
Assert.AreEqual(StatusCode.OutOfRange, ex.Status.StatusCode);
}
@@ -267,7 +270,7 @@ namespace Grpc.Core.Internal.Tests
}
[Test]
- public void ClientStreaming_WriteAfterCancellationRequestThrowsOperationCancelledException()
+ public void ClientStreaming_WriteAfterCancellationRequestThrowsTaskCanceledException()
{
var resultTask = asyncCall.ClientStreamingCallAsync();
var requestStream = new ClientRequestStream<string, string>(asyncCall);
@@ -275,11 +278,12 @@ namespace Grpc.Core.Internal.Tests
asyncCall.Cancel();
Assert.IsTrue(fakeCall.IsCancelled);
- Assert.Throws(typeof(OperationCanceledException), () => requestStream.WriteAsync("request1"));
+ var writeTask = requestStream.WriteAsync("request1");
+ Assert.ThrowsAsync(typeof(TaskCanceledException), async () => await writeTask);
fakeCall.UnaryResponseClientHandler(true,
CreateClientSideStatus(StatusCode.Cancelled),
- CreateResponsePayload(),
+ null,
new Metadata());
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Cancelled);
@@ -290,7 +294,7 @@ namespace Grpc.Core.Internal.Tests
{
asyncCall.StartServerStreamingCall("request1");
Assert.Throws(typeof(InvalidOperationException),
- () => asyncCall.StartSendMessage("abc", new WriteFlags(), (x,y) => {}));
+ () => asyncCall.SendMessageAsync("abc", new WriteFlags()));
}
[Test]
@@ -390,12 +394,13 @@ namespace Grpc.Core.Internal.Tests
AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
- var ex = Assert.ThrowsAsync<RpcException>(async () => await requestStream.WriteAsync("request1"));
+ var writeTask = requestStream.WriteAsync("request1");
+ var ex = Assert.ThrowsAsync<RpcException>(async () => await writeTask);
Assert.AreEqual(Status.DefaultSuccess, ex.Status);
}
[Test]
- public void DuplexStreaming_CompleteAfterReceivingStatusFails()
+ public void DuplexStreaming_CompleteAfterReceivingStatusSuceeds()
{
asyncCall.StartDuplexStreamingCall();
var requestStream = new ClientRequestStream<string, string>(asyncCall);
@@ -411,7 +416,7 @@ namespace Grpc.Core.Internal.Tests
}
[Test]
- public void DuplexStreaming_WriteAfterCancellationRequestThrowsOperationCancelledException()
+ public void DuplexStreaming_WriteAfterCancellationRequestThrowsTaskCanceledException()
{
asyncCall.StartDuplexStreamingCall();
var requestStream = new ClientRequestStream<string, string>(asyncCall);
@@ -419,7 +424,9 @@ namespace Grpc.Core.Internal.Tests
asyncCall.Cancel();
Assert.IsTrue(fakeCall.IsCancelled);
- Assert.Throws(typeof(OperationCanceledException), () => requestStream.WriteAsync("request1"));
+
+ var writeTask = requestStream.WriteAsync("request1");
+ Assert.ThrowsAsync(typeof(TaskCanceledException), async () => await writeTask);
var readTask = responseStream.MoveNext();
fakeCall.ReceivedMessageHandler(true, null);
diff --git a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
index 195119f920..e9ec59eb3d 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
@@ -48,7 +48,7 @@ namespace Grpc.Core.Internal.Tests
GrpcEnvironment.AddRef();
var cq = CompletionQueueSafeHandle.Create();
cq.Dispose();
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
}
[Test]
@@ -59,7 +59,7 @@ namespace Grpc.Core.Internal.Tests
cq.Shutdown();
var ev = cq.Next();
cq.Dispose();
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
Assert.AreEqual(CompletionQueueEvent.CompletionType.Shutdown, ev.type);
Assert.AreNotEqual(IntPtr.Zero, ev.success);
Assert.AreEqual(IntPtr.Zero, ev.tag);
diff --git a/src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs b/src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs
index 0663e77d1e..d770f82390 100644
--- a/src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs
+++ b/src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs
@@ -134,7 +134,15 @@ namespace Grpc.Core.Tests
{
helper.ClientStreamingHandler = new ClientStreamingServerMethod<string, string>(async (requestStream, context) =>
{
- Assert.ThrowsAsync<IOException>(async () => await requestStream.MoveNext());
+ try
+ {
+ // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+ await requestStream.MoveNext();
+ Assert.Fail();
+ }
+ catch (IOException)
+ {
+ }
return "RESPONSE";
});
diff --git a/src/csharp/Grpc.Core.Tests/MockServiceHelper.cs b/src/csharp/Grpc.Core.Tests/MockServiceHelper.cs
index 3047314345..4d90470056 100644
--- a/src/csharp/Grpc.Core.Tests/MockServiceHelper.cs
+++ b/src/csharp/Grpc.Core.Tests/MockServiceHelper.cs
@@ -102,7 +102,7 @@ namespace Grpc.Core.Tests
marshaller,
marshaller);
- serviceDefinition = ServerServiceDefinition.CreateBuilder(ServiceName)
+ serviceDefinition = ServerServiceDefinition.CreateBuilder()
.AddMethod(unaryMethod, (request, context) => unaryHandler(request, context))
.AddMethod(clientStreamingMethod, (requestStream, context) => clientStreamingHandler(requestStream, context))
.AddMethod(serverStreamingMethod, (request, responseStream, context) => serverStreamingHandler(request, responseStream, context))
diff --git a/src/csharp/Grpc.Core.Tests/PInvokeTest.cs b/src/csharp/Grpc.Core.Tests/PInvokeTest.cs
index d2b2fc6a66..d3735c7880 100644
--- a/src/csharp/Grpc.Core.Tests/PInvokeTest.cs
+++ b/src/csharp/Grpc.Core.Tests/PInvokeTest.cs
@@ -65,7 +65,7 @@ namespace Grpc.Core.Tests
cq.Dispose();
});
- GrpcEnvironment.Release();
+ GrpcEnvironment.ReleaseAsync().Wait();
}
/// <summary>
diff --git a/src/csharp/Grpc.Core.Tests/ServerTest.cs b/src/csharp/Grpc.Core.Tests/ServerTest.cs
index b40508accc..3b51aa6330 100644
--- a/src/csharp/Grpc.Core.Tests/ServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ServerTest.cs
@@ -89,9 +89,24 @@ namespace Grpc.Core.Tests
};
server.Start();
Assert.Throws(typeof(InvalidOperationException), () => server.Ports.Add("localhost", 9999, ServerCredentials.Insecure));
- Assert.Throws(typeof(InvalidOperationException), () => server.Services.Add(ServerServiceDefinition.CreateBuilder("serviceName").Build()));
+ Assert.Throws(typeof(InvalidOperationException), () => server.Services.Add(ServerServiceDefinition.CreateBuilder().Build()));
server.ShutdownAsync().Wait();
}
+
+ [Test]
+ public void UnstartedServerCanBeShutdown()
+ {
+ var server = new Server();
+ server.ShutdownAsync().Wait();
+ Assert.Throws(typeof(InvalidOperationException), () => server.Start());
+ }
+
+ [Test]
+ public void UnstartedServerDoesNotPreventShutdown()
+ {
+ // just create a server, don't start it, and make sure it doesn't prevent shutdown.
+ var server = new Server();
+ }
}
}
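The new tests above pin down the Server lifecycle: an unstarted server can still be shut down, and Start() is rejected afterwards. For comparison, a typical lifecycle sketch (the service definition and port are placeholders; a real application would add a generated BindService(...) result):

var server = new Server
{
    Services = { ServerServiceDefinition.CreateBuilder().Build() },  // placeholder service definition
    Ports = { { "localhost", 50051, ServerCredentials.Insecure } }   // placeholder port
};
server.Start();
// ... serve traffic ...
await server.ShutdownAsync();  // after this, calling server.Start() throws InvalidOperationException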
diff --git a/src/proto/grpc/testing/perf_db.proto b/src/csharp/Grpc.Core.Tests/ShutdownHookClientTest.cs
index 0ba8596fe9..12b8452f64 100644
--- a/src/proto/grpc/testing/perf_db.proto
+++ b/src/csharp/Grpc.Core.Tests/ShutdownHookClientTest.cs
@@ -1,3 +1,5 @@
+#region Copyright notice and license
+
// Copyright 2015, Google Inc.
// All rights reserved.
//
@@ -27,43 +29,29 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-syntax = "proto3";
-
-import "src/proto/grpc/testing/control.proto";
-
-package grpc.testing;
+#endregion
-service PerfDbTransfer {
- // Sends client info
- rpc RecordSingleClientData(SingleUserRecordRequest)
- returns (SingleUserRecordReply) {}
-}
+using System;
+using System.Diagnostics;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Grpc.Core;
+using Grpc.Core.Internal;
+using Grpc.Core.Utils;
+using NUnit.Framework;
-// Metrics to be stored
-message Metrics {
- double qps = 1;
- double qps_per_core = 2;
- double perc_lat_50 = 3;
- double perc_lat_90 = 4;
- double perc_lat_95 = 5;
- double perc_lat_99 = 6;
- double perc_lat_99_point_9 = 7;
- double server_system_time = 8;
- double server_user_time = 9;
- double client_system_time = 10;
- double client_user_time = 11;
-}
+namespace Grpc.Core.Tests
+{
+ public class ShutdownHookClientTest
+ {
+ const string Host = "127.0.0.1";
-// Request for storing a single user's data
-message SingleUserRecordRequest {
- string hashed_id = 1;
- string test_name = 2;
- string sys_info = 3;
- string tag = 4;
- Metrics metrics = 5;
- ClientConfig client_config = 6;
- ServerConfig server_config = 7;
+ [Test]
+ public void ProcessExitHookCanCleanupAbandonedChannels()
+ {
+ var channel = new Channel(Host, 1000, ChannelCredentials.Insecure);
+ var channel2 = new Channel(Host, 1001, ChannelCredentials.Insecure);
+ }
+ }
}
-
-// Reply to request for storing single user's data
-message SingleUserRecordReply {}
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCompletion.cs b/src/csharp/Grpc.Core.Tests/ShutdownHookPendingCallTest.cs
index 7e86fddb4d..175233840d 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCompletion.cs
+++ b/src/csharp/Grpc.Core.Tests/ShutdownHookPendingCallTest.cs
@@ -2,11 +2,11 @@
// Copyright 2015, Google Inc.
// All rights reserved.
-//
+//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
-//
+//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
@@ -16,7 +16,7 @@
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
-//
+//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -33,62 +33,37 @@
using System;
using System.Diagnostics;
-using System.Runtime.CompilerServices;
-using System.Runtime.InteropServices;
+using System.Linq;
using System.Threading;
using System.Threading.Tasks;
+using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
+using NUnit.Framework;
-namespace Grpc.Core.Internal
+namespace Grpc.Core.Tests
{
- /// <summary>
- /// If error != null, there's been an error or operation has been cancelled.
- /// </summary>
- internal delegate void AsyncCompletionDelegate<T>(T result, Exception error);
-
- /// <summary>
- /// Helper for transforming AsyncCompletionDelegate into full-fledged Task.
- /// </summary>
- internal class AsyncCompletionTaskSource<T>
+ public class ShutdownHookPendingCallTest
{
- readonly TaskCompletionSource<T> tcs = new TaskCompletionSource<T>();
- readonly AsyncCompletionDelegate<T> completionDelegate;
+ const string Host = "127.0.0.1";
- public AsyncCompletionTaskSource()
+ [Test]
+ public void ProcessExitHookCanCleanupAbandonedCall()
{
- completionDelegate = new AsyncCompletionDelegate<T>(HandleCompletion);
- }
+ var helper = new MockServiceHelper(Host);
+ var server = helper.GetServer();
+ server.Start();
+ var channel = helper.GetChannel();
- public Task<T> Task
- {
- get
+ var readyToShutdown = new TaskCompletionSource<object>();
+ helper.DuplexStreamingHandler = new DuplexStreamingServerMethod<string, string>(async (requestStream, responseStream, context) =>
{
- return tcs.Task;
- }
- }
+ readyToShutdown.SetResult(null);
+ await requestStream.ToListAsync();
+ });
- public AsyncCompletionDelegate<T> CompletionDelegate
- {
- get
- {
- return completionDelegate;
- }
- }
-
- private void HandleCompletion(T value, Exception error)
- {
- if (error == null)
- {
- tcs.SetResult(value);
- return;
- }
- if (error is OperationCanceledException)
- {
- tcs.SetCanceled();
- return;
- }
- tcs.SetException(error);
+ var call = Calls.AsyncDuplexStreamingCall(helper.CreateDuplexStreamingCall());
+ readyToShutdown.Task.Wait(); // make sure handler is running
}
}
}
diff --git a/src/csharp/Grpc.Core.Tests/ShutdownHookServerTest.cs b/src/csharp/Grpc.Core.Tests/ShutdownHookServerTest.cs
new file mode 100644
index 0000000000..e7ea7a0bf5
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/ShutdownHookServerTest.cs
@@ -0,0 +1,58 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Diagnostics;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Grpc.Core;
+using Grpc.Core.Internal;
+using Grpc.Core.Utils;
+using NUnit.Framework;
+
+namespace Grpc.Core.Tests
+{
+ public class ShutdownHookServerTest
+ {
+ const string Host = "127.0.0.1";
+
+ [Test]
+ public void ProcessExitHookCanCleanupAbandonedServers()
+ {
+ var helper = new MockServiceHelper(Host);
+ var server = helper.GetServer();
+ server.Start();
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
index 5646fed3d9..02b08d2a10 100644
--- a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
@@ -127,6 +127,10 @@ namespace Grpc.Core
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
+ /// <remarks>
+ /// Normally, there is no need for you to dispose the call unless you want to utilize the
+ /// "Cancel" semantics of invoking <c>Dispose</c>.
+ /// </remarks>
public void Dispose()
{
disposeAction.Invoke();
diff --git a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
index e75108c7e5..68fd6d0b05 100644
--- a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
@@ -117,6 +117,10 @@ namespace Grpc.Core
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
+ /// <remarks>
+ /// Normally, there is no need for you to dispose the call unless you want to utilize the
+ /// "Cancel" semantics of invoking <c>Dispose</c>.
+ /// </remarks>
public void Dispose()
{
disposeAction.Invoke();
diff --git a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
index f953091984..5777c72615 100644
--- a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
@@ -103,6 +103,10 @@ namespace Grpc.Core
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
+ /// <remarks>
+ /// Normally, there is no need for you to dispose the call unless you want to utilize the
+ /// "Cancel" semantics of invoking <c>Dispose</c>.
+ /// </remarks>
public void Dispose()
{
disposeAction.Invoke();
diff --git a/src/csharp/Grpc.Core/AsyncUnaryCall.cs b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
index 97df8f5e91..d180c27922 100644
--- a/src/csharp/Grpc.Core/AsyncUnaryCall.cs
+++ b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
@@ -112,6 +112,10 @@ namespace Grpc.Core
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
+ /// <remarks>
+ /// Normally, there is no need for you to dispose the call unless you want to utilize the
+ /// "Cancel" semantics of invoking <c>Dispose</c>.
+ /// </remarks>
public void Dispose()
{
disposeAction.Invoke();
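The <remarks> added to the four call types above all make the same point: Dispose is only needed when you want dispose-to-cancel behaviour. A rough client-side sketch under that assumption (the stub method and message type are placeholders):

using (var call = client.PingPong())                  // placeholder duplex-streaming stub method
{
    await call.RequestStream.WriteAsync(new Ping());  // placeholder message type
    await call.RequestStream.CompleteAsync();

    while (await call.ResponseStream.MoveNext())
    {
        Console.WriteLine(call.ResponseStream.Current);
    }
}  // leaving the block early (e.g. on exception) cancels the call; after normal completion Dispose is a no-op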
diff --git a/src/csharp/Grpc.Core/CallOptions.cs b/src/csharp/Grpc.Core/CallOptions.cs
index 9ca88849ee..35548cfc96 100644
--- a/src/csharp/Grpc.Core/CallOptions.cs
+++ b/src/csharp/Grpc.Core/CallOptions.cs
@@ -88,7 +88,13 @@ namespace Grpc.Core
}
/// <summary>
- /// Token that can be used for cancelling the call.
+ /// Token that can be used for cancelling the call on the client side.
+ /// Cancelling the token will request cancellation
+ /// of the remote call. Best effort will be made to deliver the cancellation
+ /// notification to the server, and the call's interaction with the server side
+ /// will be terminated. Unless the call finishes before the cancellation could
+ /// happen (there is an inherent race),
+ /// the call will finish with <c>StatusCode.Cancelled</c> status.
/// </summary>
public CancellationToken CancellationToken
{
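The expanded CancellationToken documentation above describes a race between cancellation and normal completion. A small sketch of the described behaviour (the stub method and request type are placeholders):

var cts = new CancellationTokenSource();
var call = client.StreamingOutputCall(new Request(),        // placeholder stub and message type
                                      new CallOptions(cancellationToken: cts.Token));
cts.CancelAfter(TimeSpan.FromSeconds(2));
try
{
    while (await call.ResponseStream.MoveNext())
    {
        // consume responses until the stream ends or the token fires
    }
}
catch (RpcException)
{
    // unless the call finished before cancellation could take effect,
    // the call ends with StatusCode.Cancelled
}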
diff --git a/src/csharp/Grpc.Core/Channel.cs b/src/csharp/Grpc.Core/Channel.cs
index 93a6e6a3d9..4f29c35b32 100644
--- a/src/csharp/Grpc.Core/Channel.cs
+++ b/src/csharp/Grpc.Core/Channel.cs
@@ -31,7 +31,6 @@
using System;
using System.Collections.Generic;
-using System.Linq;
using System.Threading;
using System.Threading.Tasks;
@@ -56,6 +55,7 @@ namespace Grpc.Core
readonly string target;
readonly GrpcEnvironment environment;
+ readonly CompletionQueueSafeHandle completionQueue;
readonly ChannelSafeHandle handle;
readonly Dictionary<string, ChannelOption> options;
@@ -67,14 +67,26 @@ namespace Grpc.Core
/// </summary>
/// <param name="target">Target of the channel.</param>
/// <param name="credentials">Credentials to secure the channel.</param>
+ public Channel(string target, ChannelCredentials credentials) :
+ this(target, credentials, null)
+ {
+ }
+
+ /// <summary>
+ /// Creates a channel that connects to a specific host.
+ /// Port will default to 80 for an unsecure channel and to 443 for a secure channel.
+ /// </summary>
+ /// <param name="target">Target of the channel.</param>
+ /// <param name="credentials">Credentials to secure the channel.</param>
/// <param name="options">Channel options.</param>
- public Channel(string target, ChannelCredentials credentials, IEnumerable<ChannelOption> options = null)
+ public Channel(string target, ChannelCredentials credentials, IEnumerable<ChannelOption> options)
{
this.target = GrpcPreconditions.CheckNotNull(target, "target");
this.options = CreateOptionsDictionary(options);
EnsureUserAgentChannelOption(this.options);
this.environment = GrpcEnvironment.AddRef();
+ this.completionQueue = this.environment.PickCompletionQueue();
using (var nativeCredentials = credentials.ToNativeCredentials())
using (var nativeChannelArgs = ChannelOptions.CreateChannelArgs(this.options.Values))
{
@@ -87,6 +99,18 @@ namespace Grpc.Core
this.handle = ChannelSafeHandle.CreateInsecure(target, nativeChannelArgs);
}
}
+ GrpcEnvironment.RegisterChannel(this);
+ }
+
+ /// <summary>
+ /// Creates a channel that connects to a specific host and port.
+ /// </summary>
+ /// <param name="host">The name or IP address of the host.</param>
+ /// <param name="port">The port.</param>
+ /// <param name="credentials">Credentials to secure the channel.</param>
+ public Channel(string host, int port, ChannelCredentials credentials) :
+ this(host, port, credentials, null)
+ {
}
/// <summary>
@@ -96,14 +120,14 @@ namespace Grpc.Core
/// <param name="port">The port.</param>
/// <param name="credentials">Credentials to secure the channel.</param>
/// <param name="options">Channel options.</param>
- public Channel(string host, int port, ChannelCredentials credentials, IEnumerable<ChannelOption> options = null) :
+ public Channel(string host, int port, ChannelCredentials credentials, IEnumerable<ChannelOption> options) :
this(string.Format("{0}:{1}", host, port), credentials, options)
{
}
/// <summary>
/// Gets current connectivity state of this channel.
- /// After channel is has been shutdown, <c>ChannelState.FatalFailure</c> will be returned.
+ /// After the channel has been shut down, <c>ChannelState.Shutdown</c> will be returned.
/// </summary>
public ChannelState State
{
@@ -120,8 +144,8 @@ namespace Grpc.Core
/// </summary>
public Task WaitForStateChangedAsync(ChannelState lastObservedState, DateTime? deadline = null)
{
- GrpcPreconditions.CheckArgument(lastObservedState != ChannelState.FatalFailure,
- "FatalFailure is a terminal state. No further state changes can occur.");
+ GrpcPreconditions.CheckArgument(lastObservedState != ChannelState.Shutdown,
+ "Shutdown is a terminal state. No further state changes can occur.");
var tcs = new TaskCompletionSource<object>();
var deadlineTimespec = deadline.HasValue ? Timespec.FromDateTime(deadline.Value) : Timespec.InfFuture;
var handler = new BatchCompletionDelegate((success, ctx) =>
@@ -135,7 +159,7 @@ namespace Grpc.Core
tcs.SetCanceled();
}
});
- handle.WatchConnectivityState(lastObservedState, deadlineTimespec, environment.CompletionQueue, environment.CompletionRegistry, handler);
+ handle.WatchConnectivityState(lastObservedState, deadlineTimespec, completionQueue, handler);
return tcs.Task;
}
@@ -171,7 +195,7 @@ namespace Grpc.Core
/// <summary>
/// Allows explicitly requesting channel to connect without starting an RPC.
/// Returned task completes once state Ready was seen. If the deadline is reached,
- /// or channel enters the FatalFailure state, the task is cancelled.
+ /// or channel enters the Shutdown state, the task is cancelled.
/// There is no need to call this explicitly unless your use case requires that.
/// Starting an RPC on a new channel will request connection implicitly.
/// </summary>
@@ -181,9 +205,9 @@ namespace Grpc.Core
var currentState = GetConnectivityState(true);
while (currentState != ChannelState.Ready)
{
- if (currentState == ChannelState.FatalFailure)
+ if (currentState == ChannelState.Shutdown)
{
- throw new OperationCanceledException("Channel has reached FatalFailure state.");
+ throw new OperationCanceledException("Channel has reached Shutdown state.");
}
await WaitForStateChangedAsync(currentState, deadline).ConfigureAwait(false);
currentState = GetConnectivityState(false);
@@ -191,9 +215,16 @@ namespace Grpc.Core
}
/// <summary>
- /// Waits until there are no more active calls for this channel and then cleans up
- /// resources used by this channel.
+ /// Shuts down the channel cleanly. It is strongly recommended to shut down
+ /// all previously created channels before exiting from the process.
/// </summary>
+ /// <remarks>
+ /// This method doesn't wait for all calls on this channel to finish (nor does
+ /// it explicitly cancel all outstanding calls). It is the user's responsibility to make sure
+ /// all the calls on this channel have finished (successfully or with an error)
+ /// before shutting down the channel to ensure channel shutdown won't impact
+ /// the outcome of those remote calls.
+ /// </remarks>
public async Task ShutdownAsync()
{
lock (myLock)
@@ -201,6 +232,7 @@ namespace Grpc.Core
GrpcPreconditions.CheckState(!shutdownRequested);
shutdownRequested = true;
}
+ GrpcEnvironment.UnregisterChannel(this);
shutdownTokenSource.Cancel();
@@ -212,7 +244,7 @@ namespace Grpc.Core
handle.Dispose();
- await Task.Run(() => GrpcEnvironment.Release()).ConfigureAwait(false);
+ await GrpcEnvironment.ReleaseAsync().ConfigureAwait(false);
}
internal ChannelSafeHandle Handle
@@ -231,6 +263,14 @@ namespace Grpc.Core
}
}
+ internal CompletionQueueSafeHandle CompletionQueue
+ {
+ get
+ {
+ return this.completionQueue;
+ }
+ }
+
internal void AddCallReference(object call)
{
activeCallCounter.Increment();
@@ -255,7 +295,7 @@ namespace Grpc.Core
}
catch (ObjectDisposedException)
{
- return ChannelState.FatalFailure;
+ return ChannelState.Shutdown;
}
}
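The reworked ShutdownAsync remarks above move responsibility for draining in-flight calls to the application. A hedged sketch of the recommended ordering (address and RPC body are placeholders):

var channel = new Channel("127.0.0.1:50051", ChannelCredentials.Insecure);
try
{
    // issue RPCs on the channel and await their completion here
}
finally
{
    // ShutdownAsync neither cancels nor waits for outstanding calls,
    // so only call it once every call created on this channel has finished.
    await channel.ShutdownAsync();
}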
diff --git a/src/csharp/Grpc.Core/ChannelState.cs b/src/csharp/Grpc.Core/ChannelState.cs
index d293b98f75..a6c3b2a488 100644
--- a/src/csharp/Grpc.Core/ChannelState.cs
+++ b/src/csharp/Grpc.Core/ChannelState.cs
@@ -64,6 +64,6 @@ namespace Grpc.Core
/// <summary>
/// Channel has seen a failure that it cannot recover from
/// </summary>
- FatalFailure
+ Shutdown
}
}
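With FatalFailure renamed to Shutdown, code that watches connectivity treats Shutdown as the terminal state, mirroring the loop in Channel.ConnectAsync above:

var state = channel.State;
while (state != ChannelState.Ready)
{
    if (state == ChannelState.Shutdown)
    {
        // terminal: WaitForStateChangedAsync rejects Shutdown as the last observed state
        throw new OperationCanceledException("Channel has reached Shutdown state.");
    }
    await channel.WaitForStateChangedAsync(state);
    state = channel.State;
}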
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index 4bf30e83c1..a796911b99 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -86,7 +86,6 @@
<Compile Include="Utils\BenchmarkUtil.cs" />
<Compile Include="ChannelCredentials.cs" />
<Compile Include="Internal\ChannelArgsSafeHandle.cs" />
- <Compile Include="Internal\AsyncCompletion.cs" />
<Compile Include="Internal\AsyncCallBase.cs" />
<Compile Include="Internal\AsyncCallServer.cs" />
<Compile Include="Internal\AsyncCall.cs" />
@@ -137,6 +136,8 @@
<Compile Include="Internal\ClientSideStatus.cs" />
<Compile Include="Internal\ClockType.cs" />
<Compile Include="Internal\CallError.cs" />
+ <Compile Include="Logging\LogLevel.cs" />
+ <Compile Include="Logging\LogLevelFilterLogger.cs" />
</ItemGroup>
<ItemGroup>
<None Include="Grpc.Core.nuspec" />
diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs
index bee0ef1d62..e9e4cb4cbb 100644
--- a/src/csharp/Grpc.Core/GrpcEnvironment.cs
+++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs
@@ -32,6 +32,8 @@
#endregion
using System;
+using System.Collections.Generic;
+using System.Linq;
using System.Runtime.InteropServices;
using System.Threading.Tasks;
using Grpc.Core.Internal;
@@ -45,18 +47,24 @@ namespace Grpc.Core
/// </summary>
public class GrpcEnvironment
{
+ const LogLevel DefaultLogLevel = LogLevel.Info;
const int MinDefaultThreadPoolSize = 4;
static object staticLock = new object();
static GrpcEnvironment instance;
static int refCount;
static int? customThreadPoolSize;
+ static int? customCompletionQueueCount;
+ static readonly HashSet<Channel> registeredChannels = new HashSet<Channel>();
+ static readonly HashSet<Server> registeredServers = new HashSet<Server>();
- static ILogger logger = new ConsoleLogger();
+ static ILogger logger = new LogLevelFilterLogger(new ConsoleLogger(), DefaultLogLevel);
+ readonly object myLock = new object();
readonly GrpcThreadPool threadPool;
- readonly CompletionRegistry completionRegistry;
readonly DebugStats debugStats = new DebugStats();
+ readonly AtomicCounter cqPickerCounter = new AtomicCounter();
+
bool isClosed;
/// <summary>
@@ -65,6 +73,8 @@ namespace Grpc.Core
/// </summary>
internal static GrpcEnvironment AddRef()
{
+ ShutdownHooks.Register();
+
lock (staticLock)
{
refCount++;
@@ -77,21 +87,26 @@ namespace Grpc.Core
}
/// <summary>
- /// Decrements the reference count for currently active environment and shuts down the gRPC environment if reference count drops to zero.
- /// (and blocks until the environment has been fully shutdown).
+ /// Decrements the reference count for currently active environment and asynchronously shuts down the gRPC environment if reference count drops to zero.
/// </summary>
- internal static void Release()
+ internal static async Task ReleaseAsync()
{
+ GrpcEnvironment instanceToShutdown = null;
lock (staticLock)
{
GrpcPreconditions.CheckState(refCount > 0);
refCount--;
if (refCount == 0)
{
- instance.Close();
+ instanceToShutdown = instance;
instance = null;
}
}
+
+ if (instanceToShutdown != null)
+ {
+ await instanceToShutdown.ShutdownAsync();
+ }
}
internal static int GetRefCount()
@@ -102,6 +117,68 @@ namespace Grpc.Core
}
}
+ internal static void RegisterChannel(Channel channel)
+ {
+ lock (staticLock)
+ {
+ GrpcPreconditions.CheckNotNull(channel);
+ registeredChannels.Add(channel);
+ }
+ }
+
+ internal static void UnregisterChannel(Channel channel)
+ {
+ lock (staticLock)
+ {
+ GrpcPreconditions.CheckNotNull(channel);
+ GrpcPreconditions.CheckArgument(registeredChannels.Remove(channel), "Channel not found in the registered channels set.");
+ }
+ }
+
+ internal static void RegisterServer(Server server)
+ {
+ lock (staticLock)
+ {
+ GrpcPreconditions.CheckNotNull(server);
+ registeredServers.Add(server);
+ }
+ }
+
+ internal static void UnregisterServer(Server server)
+ {
+ lock (staticLock)
+ {
+ GrpcPreconditions.CheckNotNull(server);
+ GrpcPreconditions.CheckArgument(registeredServers.Remove(server), "Server not found in the registered servers set.");
+ }
+ }
+
+ /// <summary>
+ /// Requests shutdown of all channels created by the current process.
+ /// </summary>
+ public static Task ShutdownChannelsAsync()
+ {
+ HashSet<Channel> snapshot = null;
+ lock (staticLock)
+ {
+ snapshot = new HashSet<Channel>(registeredChannels);
+ }
+ return Task.WhenAll(snapshot.Select((channel) => channel.ShutdownAsync()));
+ }
+
+ /// <summary>
+ /// Requests immediate shutdown of all servers created by the current process.
+ /// </summary>
+ public static Task KillServersAsync()
+ {
+ HashSet<Server> snapshot = null;
+ lock (staticLock)
+ {
+ snapshot = new HashSet<Server>(registeredServers);
+ }
+ return Task.WhenAll(snapshot.Select((server) => server.KillAsync()));
+ }
+
/// <summary>
/// Gets application-wide logger used by gRPC.
/// </summary>
@@ -141,39 +218,62 @@ namespace Grpc.Core
}
/// <summary>
+ /// Sets the number of completion queues in the gRPC thread pool that polls for internal RPC events.
+ /// Can only be invoked before the <c>GrpcEnvironment</c> is started and cannot be changed afterwards.
+ /// Setting the number of completion queues is an advanced setting and you should only use it if you know what you are doing.
+ /// Most users should rely on the default value provided by gRPC library.
+ /// Note: this method is part of an experimental API that can change or be removed without any prior notice.
+ /// </summary>
+ public static void SetCompletionQueueCount(int completionQueueCount)
+ {
+ lock (staticLock)
+ {
+ GrpcPreconditions.CheckState(instance == null, "Can only be set before GrpcEnvironment is initialized");
+ GrpcPreconditions.CheckArgument(completionQueueCount > 0, "completionQueueCount needs to be a positive number");
+ customCompletionQueueCount = completionQueueCount;
+ }
+ }
+
+ /// <summary>
/// Creates gRPC environment.
/// </summary>
private GrpcEnvironment()
{
GrpcNativeInit();
- completionRegistry = new CompletionRegistry(this);
- threadPool = new GrpcThreadPool(this, GetThreadPoolSizeOrDefault());
+ threadPool = new GrpcThreadPool(this, GetThreadPoolSizeOrDefault(), GetCompletionQueueCountOrDefault());
threadPool.Start();
}
/// <summary>
- /// Gets the completion registry used by this gRPC environment.
+ /// Gets the completion queues used by this gRPC environment.
/// </summary>
- internal CompletionRegistry CompletionRegistry
+ internal IReadOnlyCollection<CompletionQueueSafeHandle> CompletionQueues
{
get
{
- return this.completionRegistry;
+ return this.threadPool.CompletionQueues;
}
}
- /// <summary>
- /// Gets the completion queue used by this gRPC environment.
- /// </summary>
- internal CompletionQueueSafeHandle CompletionQueue
+ internal bool IsAlive
{
get
{
- return this.threadPool.CompletionQueue;
+ return this.threadPool.IsAlive;
}
}
/// <summary>
+ /// Picks a completion queue in a round-robin fashion.
+ /// Shouldn't be invoked on a per-call basis (it is used on a per-channel basis).
+ /// </summary>
+ internal CompletionQueueSafeHandle PickCompletionQueue()
+ {
+ var cqIndex = (int) ((cqPickerCounter.Increment() - 1) % this.threadPool.CompletionQueues.Count);
+ return this.threadPool.CompletionQueues.ElementAt(cqIndex);
+ }
+
+ /// <summary>
/// Gets the completion queue used by this gRPC environment.
/// </summary>
internal DebugStats DebugStats
@@ -206,13 +306,13 @@ namespace Grpc.Core
/// <summary>
/// Shuts down this environment.
/// </summary>
- private void Close()
+ private async Task ShutdownAsync()
{
if (isClosed)
{
throw new InvalidOperationException("Close has already been called");
}
- threadPool.Stop();
+ await threadPool.StopAsync().ConfigureAwait(false);
GrpcNativeShutdown();
isClosed = true;
@@ -230,5 +330,42 @@ namespace Grpc.Core
// more work, but seems to work reasonably well for a start.
return Math.Max(MinDefaultThreadPoolSize, Environment.ProcessorCount / 2);
}
+
+ private int GetCompletionQueueCountOrDefault()
+ {
+ if (customCompletionQueueCount.HasValue)
+ {
+ return customCompletionQueueCount.Value;
+ }
+ // by default, create a completion queue for each thread
+ return GetThreadPoolSizeOrDefault();
+ }
+
+ private static class ShutdownHooks
+ {
+ static object staticLock = new object();
+ static bool hooksRegistered;
+
+ public static void Register()
+ {
+ lock (staticLock)
+ {
+ if (!hooksRegistered)
+ {
+ AppDomain.CurrentDomain.ProcessExit += ShutdownHookHandler;
+ AppDomain.CurrentDomain.DomainUnload += ShutdownHookHandler;
+ }
+ hooksRegistered = true;
+ }
+ }
+
+ /// <summary>
+ /// Handler for AppDomain.DomainUnload and AppDomain.ProcessExit hooks.
+ /// </summary>
+ private static void ShutdownHookHandler(object sender, EventArgs e)
+ {
+ Task.WaitAll(GrpcEnvironment.ShutdownChannelsAsync(), GrpcEnvironment.KillServersAsync());
+ }
+ }
}
}
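The new static surface on GrpcEnvironment above adds an experimental completion-queue knob plus process-wide cleanup that the ProcessExit/DomainUnload hooks invoke. A hedged usage sketch (the queue count is an arbitrary example value):

// Must run before the first channel or server is created; afterwards the
// precondition check in SetCompletionQueueCount throws.
GrpcEnvironment.SetCompletionQueueCount(2);

// The registered shutdown hooks run the same cleanup automatically, but an
// application can also request it explicitly on its way out:
await Task.WhenAll(GrpcEnvironment.ShutdownChannelsAsync(),
                   GrpcEnvironment.KillServersAsync());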
diff --git a/src/csharp/Grpc.Core/IAsyncStreamReader.cs b/src/csharp/Grpc.Core/IAsyncStreamReader.cs
index 49e1ea7832..aa3b802a50 100644
--- a/src/csharp/Grpc.Core/IAsyncStreamReader.cs
+++ b/src/csharp/Grpc.Core/IAsyncStreamReader.cs
@@ -41,10 +41,24 @@ namespace Grpc.Core
{
/// <summary>
/// A stream of messages to be read.
+ /// Messages are read one at a time via <c>await reader.MoveNext()</c>, which returns <c>true</c>
+ /// if there is a message available and <c>false</c> if there are no more messages
+ /// (i.e. the stream has been closed).
+ /// <para>
+ /// On the client side, the last invocation of <c>MoveNext()</c> either returns <c>false</c>
+ /// if the call has finished successfully or throws <c>RpcException</c> if the call finished
+ /// with an error. Once the call finishes, subsequent invocations of <c>MoveNext()</c> will
+ /// continue yielding the same result (returning <c>false</c> or throwing an exception).
+ /// </para>
+ /// <para>
+ /// On the server side, <c>MoveNext()</c> does not throw exceptions.
+ /// In case of a failure, the request stream will appear to be finished
+ /// (<c>MoveNext</c> will return <c>false</c>) and the <c>CancellationToken</c>
+ /// associated with the call will be cancelled to signal the failure.
+ /// </para>
/// </summary>
/// <typeparam name="T">The message type.</typeparam>
public interface IAsyncStreamReader<T> : IAsyncEnumerator<T>
{
- // TODO(jtattermusch): consider just using IAsyncEnumerator instead of this interface.
}
}
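The expanded IAsyncStreamReader summary above implies the canonical consumption loop below (responseStream stands for any reader obtained from a call object):

// Client side: MoveNext() returns false at end of stream and throws RpcException
// if the call failed; on the server side it never throws and simply returns false.
while (await responseStream.MoveNext())
{
    var message = responseStream.Current;
    Console.WriteLine(message);
}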
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index 55351869b5..f549c52876 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -32,12 +32,7 @@
#endregion
using System;
-using System.Diagnostics;
-using System.Runtime.CompilerServices;
-using System.Runtime.InteropServices;
-using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core.Internal;
using Grpc.Core.Logging;
using Grpc.Core.Profiling;
using Grpc.Core.Utils;
@@ -57,9 +52,11 @@ namespace Grpc.Core.Internal
// Completion of a pending unary response if not null.
TaskCompletionSource<TResponse> unaryResponseTcs;
+ // TODO(jtattermusch): this field doesn't need to be initialized for unary response calls.
// Indicates that response streaming call has finished.
TaskCompletionSource<object> streamingCallFinishedTcs = new TaskCompletionSource<object>();
+ // TODO(jtattermusch): this field could be lazy-initialized (only if someone requests the response headers).
// Response headers set here once received.
TaskCompletionSource<Metadata> responseHeadersTcs = new TaskCompletionSource<Metadata>();
@@ -67,7 +64,7 @@ namespace Grpc.Core.Internal
ClientSideStatus? finishedStatus;
public AsyncCall(CallInvocationDetails<TRequest, TResponse> callDetails)
- : base(callDetails.RequestMarshaller.Serializer, callDetails.ResponseMarshaller.Deserializer, callDetails.Channel.Environment)
+ : base(callDetails.RequestMarshaller.Serializer, callDetails.ResponseMarshaller.Deserializer)
{
this.details = callDetails.WithOptions(callDetails.Options.Normalize());
this.initialMetadataSent = true; // we always send metadata at the very beginning of the call.
@@ -144,7 +141,7 @@ namespace Grpc.Core.Internal
GrpcPreconditions.CheckState(!started);
started = true;
- Initialize(environment.CompletionQueue);
+ Initialize(details.Channel.CompletionQueue);
halfcloseRequested = true;
readingDone = true;
@@ -171,7 +168,7 @@ namespace Grpc.Core.Internal
GrpcPreconditions.CheckState(!started);
started = true;
- Initialize(environment.CompletionQueue);
+ Initialize(details.Channel.CompletionQueue);
readingDone = true;
@@ -195,7 +192,7 @@ namespace Grpc.Core.Internal
GrpcPreconditions.CheckState(!started);
started = true;
- Initialize(environment.CompletionQueue);
+ Initialize(details.Channel.CompletionQueue);
halfcloseRequested = true;
@@ -220,7 +217,7 @@ namespace Grpc.Core.Internal
GrpcPreconditions.CheckState(!started);
started = true;
- Initialize(environment.CompletionQueue);
+ Initialize(details.Channel.CompletionQueue);
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
@@ -232,11 +229,10 @@ namespace Grpc.Core.Internal
/// <summary>
/// Sends a streaming request. Only one pending send action is allowed at any given time.
- /// completionDelegate is called when the operation finishes.
/// </summary>
- public void StartSendMessage(TRequest msg, WriteFlags writeFlags, AsyncCompletionDelegate<object> completionDelegate)
+ public Task SendMessageAsync(TRequest msg, WriteFlags writeFlags)
{
- StartSendMessageInternal(msg, writeFlags, completionDelegate);
+ return SendMessageInternalAsync(msg, writeFlags);
}
/// <summary>
@@ -250,29 +246,32 @@ namespace Grpc.Core.Internal
/// <summary>
/// Sends halfclose, indicating client is done with streaming requests.
/// Only one pending send action is allowed at any given time.
- /// completionDelegate is called when the operation finishes.
/// </summary>
- public void StartSendCloseFromClient(AsyncCompletionDelegate<object> completionDelegate)
+ public Task SendCloseFromClientAsync()
{
lock (myLock)
{
- GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
- CheckSendingAllowed(allowFinished: true);
+ GrpcPreconditions.CheckState(started);
- if (!disposed && !finished)
+ var earlyResult = CheckSendPreconditionsClientSide();
+ if (earlyResult != null)
{
- call.StartSendCloseFromClient(HandleSendCloseFromClientFinished);
+ return earlyResult;
}
- else
+
+ if (disposed || finished)
{
// In case the call has already been finished by the serverside,
- // the halfclose has already been done implicitly, so we only
- // emit the notification for the completion delegate.
- Task.Run(() => HandleSendCloseFromClientFinished(true));
+ // the halfclose has already been done implicitly, so just return
+ // completed task here.
+ halfcloseRequested = true;
+ return Task.FromResult<object>(null);
}
+ call.StartSendCloseFromClient(HandleSendFinished);
halfcloseRequested = true;
- sendCompletionDelegate = completionDelegate;
+ streamingWriteTcs = new TaskCompletionSource<object>();
+ return streamingWriteTcs.Task;
}
}
@@ -342,6 +341,45 @@ namespace Grpc.Core.Internal
get { return true; }
}
+ protected override Task CheckSendAllowedOrEarlyResult()
+ {
+ var earlyResult = CheckSendPreconditionsClientSide();
+ if (earlyResult != null)
+ {
+ return earlyResult;
+ }
+
+ if (finishedStatus.HasValue)
+ {
+ // throwing RpcException if we already received status on client
+ // side makes the most sense.
+ // Note that this throws even for StatusCode.OK.
+ // Writing after the call has finished is not a programming error because the server can close
+ // the call at any time, so don't throw directly, but let the write task finish with an error.
+ var tcs = new TaskCompletionSource<object>();
+ tcs.SetException(new RpcException(finishedStatus.Value.Status));
+ return tcs.Task;
+ }
+
+ return null;
+ }
+
+ private Task CheckSendPreconditionsClientSide()
+ {
+ GrpcPreconditions.CheckState(!halfcloseRequested, "Request stream has already been completed.");
+ GrpcPreconditions.CheckState(streamingWriteTcs == null, "Only one write can be pending at a time.");
+
+ if (cancelRequested)
+ {
+ // Return a cancelled task.
+ var tcs = new TaskCompletionSource<object>();
+ tcs.SetCanceled();
+ return tcs.Task;
+ }
+
+ return null;
+ }
+
private void Initialize(CompletionQueueSafeHandle cq)
{
using (Profilers.ForCurrentThread().NewScope("AsyncCall.Initialize"))
@@ -368,7 +406,7 @@ namespace Grpc.Core.Internal
var credentials = details.Options.Credentials;
using (var nativeCredentials = credentials != null ? credentials.ToNativeCredentials() : null)
{
- var result = details.Channel.Handle.CreateCall(environment.CompletionRegistry,
+ var result = details.Channel.Handle.CreateCall(
parentCall, ContextPropagationToken.DefaultMask, cq,
details.Method, details.Host, Timespec.FromDateTime(details.Options.Deadline.Value), nativeCredentials);
return result;
@@ -400,6 +438,7 @@ namespace Grpc.Core.Internal
/// </summary>
private void HandleReceivedResponseHeaders(bool success, Metadata responseHeaders)
{
+ // TODO(jtattermusch): handle success==false
responseHeadersTcs.SetResult(responseHeaders);
}
@@ -443,19 +482,6 @@ namespace Grpc.Core.Internal
}
}
- protected override void CheckSendingAllowed(bool allowFinished)
- {
- base.CheckSendingAllowed(true);
-
- // throwing RpcException if we already received status on client
- // side makes the most sense.
- // Note that this throws even for StatusCode.OK.
- if (!allowFinished && finishedStatus.HasValue)
- {
- throw new RpcException(finishedStatus.Value.Status);
- }
- }
-
/// <summary>
/// Handles receive status completion for calls with streaming response.
/// </summary>
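CheckSendAllowedOrEarlyResult above replaces precondition exceptions with tasks that already carry the outcome, so awaiting the returned write task observes the failure instead of a synchronous throw. A stripped-down sketch of the pattern (a free-standing illustration, not the actual method shape):

// Returns null when the write may proceed; otherwise a task that is already
// canceled or faulted, which the caller hands back as the WriteAsync result.
static Task CheckSendAllowedOrEarlyResult(bool cancelRequested, Status? finishedStatus)
{
    if (cancelRequested)
    {
        var tcs = new TaskCompletionSource<object>();
        tcs.SetCanceled();  // awaiting this surfaces TaskCanceledException
        return tcs.Task;
    }
    if (finishedStatus.HasValue)
    {
        var tcs = new TaskCompletionSource<object>();
        tcs.SetException(new RpcException(finishedStatus.Value));
        return tcs.Task;
    }
    return null;  // no early result: start the native send operation
}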
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
index 4de23706b2..eb9c3ea62d 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
@@ -58,7 +58,6 @@ namespace Grpc.Core.Internal
readonly Func<TWrite, byte[]> serializer;
readonly Func<byte[], TRead> deserializer;
- protected readonly GrpcEnvironment environment;
protected readonly object myLock = new object();
protected INativeCall call;
@@ -67,8 +66,8 @@ namespace Grpc.Core.Internal
protected bool started;
protected bool cancelRequested;
- protected AsyncCompletionDelegate<object> sendCompletionDelegate; // Completion of a pending send or sendclose if not null.
protected TaskCompletionSource<TRead> streamingReadTcs; // Completion of a pending streaming read if not null.
+ protected TaskCompletionSource<object> streamingWriteTcs; // Completion of a pending streaming write or send close from client if not null.
protected TaskCompletionSource<object> sendStatusFromServerTcs;
protected bool readingDone; // True if last read (i.e. read with null payload) was already received.
@@ -78,11 +77,10 @@ namespace Grpc.Core.Internal
protected bool initialMetadataSent;
protected long streamingWritesCounter; // Number of streaming send operations started so far.
- public AsyncCallBase(Func<TWrite, byte[]> serializer, Func<byte[], TRead> deserializer, GrpcEnvironment environment)
+ public AsyncCallBase(Func<TWrite, byte[]> serializer, Func<byte[], TRead> deserializer)
{
this.serializer = GrpcPreconditions.CheckNotNull(serializer);
this.deserializer = GrpcPreconditions.CheckNotNull(deserializer);
- this.environment = GrpcPreconditions.CheckNotNull(environment);
}
/// <summary>
@@ -128,28 +126,31 @@ namespace Grpc.Core.Internal
/// <summary>
/// Initiates sending a message. Only one send operation can be active at a time.
- /// completionDelegate is invoked upon completion.
/// </summary>
- protected void StartSendMessageInternal(TWrite msg, WriteFlags writeFlags, AsyncCompletionDelegate<object> completionDelegate)
+ protected Task SendMessageInternalAsync(TWrite msg, WriteFlags writeFlags)
{
byte[] payload = UnsafeSerialize(msg);
lock (myLock)
{
- GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
- CheckSendingAllowed(allowFinished: false);
+ GrpcPreconditions.CheckState(started);
+ var earlyResult = CheckSendAllowedOrEarlyResult();
+ if (earlyResult != null)
+ {
+ return earlyResult;
+ }
call.StartSendMessage(HandleSendFinished, payload, writeFlags, !initialMetadataSent);
- sendCompletionDelegate = completionDelegate;
initialMetadataSent = true;
streamingWritesCounter++;
+ streamingWriteTcs = new TaskCompletionSource<object>();
+ return streamingWriteTcs.Task;
}
}
/// <summary>
/// Initiates reading a message. Only one read operation can be active at a time.
- /// completionDelegate is invoked upon completion.
/// </summary>
protected Task<TRead> ReadMessageInternalAsync()
{
@@ -159,7 +160,7 @@ namespace Grpc.Core.Internal
if (readingDone)
{
// the last read that returns null or throws an exception is idempotent
- // and maintain its state.
+ // and maintains its state.
GrpcPreconditions.CheckState(streamingReadTcs != null, "Call does not support streaming reads.");
return streamingReadTcs.Task;
}
@@ -183,7 +184,7 @@ namespace Grpc.Core.Internal
{
if (!disposed && call != null)
{
- bool noMoreSendCompletions = sendCompletionDelegate == null && (halfcloseRequested || cancelRequested || finished);
+ bool noMoreSendCompletions = streamingWriteTcs == null && (halfcloseRequested || cancelRequested || finished);
if (noMoreSendCompletions && readingDone && finished)
{
ReleaseResources();
@@ -213,24 +214,11 @@ namespace Grpc.Core.Internal
{
}
- protected virtual void CheckSendingAllowed(bool allowFinished)
- {
- GrpcPreconditions.CheckState(started);
- CheckNotCancelled();
- GrpcPreconditions.CheckState(!disposed || allowFinished);
-
- GrpcPreconditions.CheckState(!halfcloseRequested, "Already halfclosed.");
- GrpcPreconditions.CheckState(!finished || allowFinished, "Already finished.");
- GrpcPreconditions.CheckState(sendCompletionDelegate == null, "Only one write can be pending at a time");
- }
-
- protected void CheckNotCancelled()
- {
- if (cancelRequested)
- {
- throw new OperationCanceledException("Remote call has been cancelled.");
- }
- }
+ /// <summary>
+ /// Checks if sending is allowed and possibly returns a Task that allows short-circuiting the send
+ /// logic by directly returning the write operation result task. Normally, null is returned.
+ /// </summary>
+ protected abstract Task CheckSendAllowedOrEarlyResult();
protected byte[] UnsafeSerialize(TWrite msg)
{
@@ -259,63 +247,27 @@ namespace Grpc.Core.Internal
}
}
- protected void FireCompletion<T>(AsyncCompletionDelegate<T> completionDelegate, T value, Exception error)
- {
- try
- {
- completionDelegate(value, error);
- }
- catch (Exception e)
- {
- Logger.Error(e, "Exception occured while invoking completion delegate.");
- }
- }
-
/// <summary>
- /// Handles send completion.
+ /// Handles send completion (including SendCloseFromClient).
/// </summary>
protected void HandleSendFinished(bool success)
{
- AsyncCompletionDelegate<object> origCompletionDelegate = null;
- lock (myLock)
- {
- origCompletionDelegate = sendCompletionDelegate;
- sendCompletionDelegate = null;
-
- ReleaseResourcesIfPossible();
- }
-
- if (!success)
- {
- FireCompletion(origCompletionDelegate, null, new InvalidOperationException("Send failed"));
- }
- else
- {
- FireCompletion(origCompletionDelegate, null, null);
- }
- }
-
- /// <summary>
- /// Handles halfclose (send close from client) completion.
- /// </summary>
- protected void HandleSendCloseFromClientFinished(bool success)
- {
- AsyncCompletionDelegate<object> origCompletionDelegate = null;
+ TaskCompletionSource<object> origTcs = null;
lock (myLock)
{
- origCompletionDelegate = sendCompletionDelegate;
- sendCompletionDelegate = null;
+ origTcs = streamingWriteTcs;
+ streamingWriteTcs = null;
ReleaseResourcesIfPossible();
}
if (!success)
{
- FireCompletion(origCompletionDelegate, null, new InvalidOperationException("Sending close from client has failed."));
+ origTcs.SetException(new InvalidOperationException("Send failed"));
}
else
{
- FireCompletion(origCompletionDelegate, null, null);
+ origTcs.SetResult(null);
}
}
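The hunk above replaces the old completion-delegate plumbing with a single pending TaskCompletionSource per write. A self-contained sketch of that pattern, using hypothetical names rather than the actual Grpc.Core members: the pending TCS is taken out under the lock, cleared, and completed outside the lock so continuations never run while the lock is held.

using System;
using System.Threading.Tasks;

class PendingWriteSketch
{
    readonly object myLock = new object();
    TaskCompletionSource<object> streamingWriteTcs;

    // Caller side: start a (simulated) native write and hand back the task
    // that the completion callback will eventually resolve.
    public Task SendAsync(Action startNativeWrite)
    {
        lock (myLock)
        {
            if (streamingWriteTcs != null)
            {
                throw new InvalidOperationException("Only one write can be pending at a time");
            }
            startNativeWrite();
            streamingWriteTcs = new TaskCompletionSource<object>();
            return streamingWriteTcs.Task;
        }
    }

    // Completion side: take the pending TCS under the lock, clear it, then
    // complete it outside the lock so continuations don't run under the lock.
    public void HandleSendFinished(bool success)
    {
        TaskCompletionSource<object> origTcs;
        lock (myLock)
        {
            origTcs = streamingWriteTcs;
            streamingWriteTcs = null;
        }
        if (success)
        {
            origTcs.SetResult(null);
        }
        else
        {
            origTcs.SetException(new InvalidOperationException("Send failed"));
        }
    }
}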
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
index b1566b44a7..56c23ba3ef 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
@@ -51,14 +51,14 @@ namespace Grpc.Core.Internal
readonly CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();
readonly Server server;
- public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer, GrpcEnvironment environment, Server server) : base(serializer, deserializer, environment)
+ public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer, Server server) : base(serializer, deserializer)
{
this.server = GrpcPreconditions.CheckNotNull(server);
}
- public void Initialize(CallSafeHandle call)
+ public void Initialize(CallSafeHandle call, CompletionQueueSafeHandle completionQueue)
{
- call.Initialize(environment.CompletionRegistry, environment.CompletionQueue);
+ call.Initialize(completionQueue);
server.AddCallReference(this);
InitializeInternal(call);
@@ -91,11 +91,10 @@ namespace Grpc.Core.Internal
/// <summary>
/// Sends a streaming response. Only one pending send action is allowed at any given time.
- /// completionDelegate is called when the operation finishes.
/// </summary>
- public void StartSendMessage(TResponse msg, WriteFlags writeFlags, AsyncCompletionDelegate<object> completionDelegate)
+ public Task SendMessageAsync(TResponse msg, WriteFlags writeFlags)
{
- StartSendMessageInternal(msg, writeFlags, completionDelegate);
+ return SendMessageInternalAsync(msg, writeFlags);
}
/// <summary>
@@ -110,20 +109,22 @@ namespace Grpc.Core.Internal
/// Initiates sending initial metadata.
/// Even though C-core allows sending metadata in parallel to sending messages, we will treat sending metadata as a send message operation
/// to make things simpler.
- /// completionDelegate is invoked upon completion.
/// </summary>
- public void StartSendInitialMetadata(Metadata headers, AsyncCompletionDelegate<object> completionDelegate)
+ public Task SendInitialMetadataAsync(Metadata headers)
{
lock (myLock)
{
GrpcPreconditions.CheckNotNull(headers, "metadata");
- GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
+ GrpcPreconditions.CheckState(started);
GrpcPreconditions.CheckState(!initialMetadataSent, "Response headers can only be sent once per call.");
GrpcPreconditions.CheckState(streamingWritesCounter == 0, "Response headers can only be sent before the first write starts.");
- CheckSendingAllowed(allowFinished: false);
- GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
+ var earlyResult = CheckSendAllowedOrEarlyResult();
+ if (earlyResult != null)
+ {
+ return earlyResult;
+ }
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
@@ -131,7 +132,8 @@ namespace Grpc.Core.Internal
}
this.initialMetadataSent = true;
- sendCompletionDelegate = completionDelegate;
+ streamingWriteTcs = new TaskCompletionSource<object>();
+ return streamingWriteTcs.Task;
}
}
@@ -196,6 +198,16 @@ namespace Grpc.Core.Internal
server.RemoveCallReference(this);
}
+ protected override Task CheckSendAllowedOrEarlyResult()
+ {
+ GrpcPreconditions.CheckState(!halfcloseRequested, "Response stream has already been completed.");
+ GrpcPreconditions.CheckState(!finished, "Already finished.");
+ GrpcPreconditions.CheckState(streamingWriteTcs == null, "Only one write can be pending at a time");
+ GrpcPreconditions.CheckState(!disposed);
+
+ return null;
+ }
+
/// <summary>
/// Handles the server side close completion.
/// </summary>
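The CheckSendAllowedOrEarlyResult contract (return null to proceed, return a Task to short-circuit) is easiest to see in isolation. The server-side override above always returns null; the client-side counterpart lives outside this hunk. A minimal, purely hypothetical sketch of the contract with made-up state fields, not the real Grpc.Core override:

using System;
using System.Threading.Tasks;

class EarlyResultSketch
{
    public bool Finished;
    public Exception FinishedException;

    public Task CheckSendAllowedOrEarlyResult()
    {
        if (Finished)
        {
            var tcs = new TaskCompletionSource<object>();
            if (FinishedException != null)
            {
                tcs.SetException(FinishedException);   // the write fails with the call's error
            }
            else
            {
                tcs.SetResult(null);                   // the write completes as a no-op
            }
            return tcs.Task;   // early result: skip the native send entirely
        }
        return null;           // normal path: the caller starts the native send
    }
}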
diff --git a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
index 244b97d4a4..82361f5797 100644
--- a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
@@ -47,16 +47,14 @@ namespace Grpc.Core.Internal
static readonly NativeMethods Native = NativeMethods.Get();
const uint GRPC_WRITE_BUFFER_HINT = 1;
- CompletionRegistry completionRegistry;
CompletionQueueSafeHandle completionQueue;
private CallSafeHandle()
{
}
- public void Initialize(CompletionRegistry completionRegistry, CompletionQueueSafeHandle completionQueue)
+ public void Initialize(CompletionQueueSafeHandle completionQueue)
{
- this.completionRegistry = completionRegistry;
this.completionQueue = completionQueue;
}
@@ -70,7 +68,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
Native.grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags)
.CheckOk();
}
@@ -90,7 +88,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
Native.grpcsharp_call_start_client_streaming(this, ctx, metadataArray).CheckOk();
}
}
@@ -100,7 +98,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
Native.grpcsharp_call_start_server_streaming(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags).CheckOk();
}
}
@@ -110,7 +108,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
Native.grpcsharp_call_start_duplex_streaming(this, ctx, metadataArray).CheckOk();
}
}
@@ -120,7 +118,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
Native.grpcsharp_call_send_message(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, sendEmptyInitialMetadata).CheckOk();
}
}
@@ -130,7 +128,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
Native.grpcsharp_call_send_close_from_client(this, ctx).CheckOk();
}
}
@@ -142,7 +140,7 @@ namespace Grpc.Core.Internal
{
var ctx = BatchContextSafeHandle.Create();
var optionalPayloadLength = optionalPayload != null ? new UIntPtr((ulong)optionalPayload.Length) : UIntPtr.Zero;
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
Native.grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail, metadataArray, sendEmptyInitialMetadata,
optionalPayload, optionalPayloadLength, writeFlags).CheckOk();
}
@@ -153,7 +151,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedMessage()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedMessage()));
Native.grpcsharp_call_recv_message(this, ctx).CheckOk();
}
}
@@ -163,7 +161,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedInitialMetadata()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedInitialMetadata()));
Native.grpcsharp_call_recv_initial_metadata(this, ctx).CheckOk();
}
}
@@ -173,7 +171,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedCloseOnServerCancelled()));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedCloseOnServerCancelled()));
Native.grpcsharp_call_start_serverside(this, ctx).CheckOk();
}
}
@@ -183,7 +181,7 @@ namespace Grpc.Core.Internal
using (completionQueue.NewScope())
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
Native.grpcsharp_call_send_initial_metadata(this, ctx, metadataArray).CheckOk();
}
}
diff --git a/src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs
index 1dbd1f4e34..62864dff0c 100644
--- a/src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/ChannelSafeHandle.cs
@@ -63,7 +63,7 @@ namespace Grpc.Core.Internal
return Native.grpcsharp_secure_channel_create(credentials, target, channelArgs);
}
- public CallSafeHandle CreateCall(CompletionRegistry registry, CallSafeHandle parentCall, ContextPropagationFlags propagationMask, CompletionQueueSafeHandle cq, string method, string host, Timespec deadline, CallCredentialsSafeHandle credentials)
+ public CallSafeHandle CreateCall(CallSafeHandle parentCall, ContextPropagationFlags propagationMask, CompletionQueueSafeHandle cq, string method, string host, Timespec deadline, CallCredentialsSafeHandle credentials)
{
using (Profilers.ForCurrentThread().NewScope("ChannelSafeHandle.CreateCall"))
{
@@ -72,7 +72,7 @@ namespace Grpc.Core.Internal
{
result.SetCredentials(credentials);
}
- result.Initialize(registry, cq);
+ result.Initialize(cq);
return result;
}
}
@@ -82,11 +82,10 @@ namespace Grpc.Core.Internal
return Native.grpcsharp_channel_check_connectivity_state(this, tryToConnect ? 1 : 0);
}
- public void WatchConnectivityState(ChannelState lastObservedState, Timespec deadline, CompletionQueueSafeHandle cq,
- CompletionRegistry completionRegistry, BatchCompletionDelegate callback)
+ public void WatchConnectivityState(ChannelState lastObservedState, Timespec deadline, CompletionQueueSafeHandle cq, BatchCompletionDelegate callback)
{
var ctx = BatchContextSafeHandle.Create();
- completionRegistry.RegisterBatchCompletion(ctx, callback);
+ cq.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
Native.grpcsharp_channel_watch_connectivity_state(this, lastObservedState, deadline, cq, ctx);
}
diff --git a/src/csharp/Grpc.Core/Internal/ClientRequestStream.cs b/src/csharp/Grpc.Core/Internal/ClientRequestStream.cs
index 013f00ff6f..924de028f5 100644
--- a/src/csharp/Grpc.Core/Internal/ClientRequestStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ClientRequestStream.cs
@@ -50,16 +50,12 @@ namespace Grpc.Core.Internal
public Task WriteAsync(TRequest message)
{
- var taskSource = new AsyncCompletionTaskSource<object>();
- call.StartSendMessage(message, GetWriteFlags(), taskSource.CompletionDelegate);
- return taskSource.Task;
+ return call.SendMessageAsync(message, GetWriteFlags());
}
public Task CompleteAsync()
{
- var taskSource = new AsyncCompletionTaskSource<object>();
- call.StartSendCloseFromClient(taskSource.CompletionDelegate);
- return taskSource.Task;
+ return call.SendCloseFromClientAsync();
}
public WriteOptions WriteOptions
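The visible effect of the delegate-to-Task change on client code is that request-stream operations are awaited directly. A small usage sketch against the public Grpc.Core client-streaming surface; the call object and message types would come from generated code, which is assumed here:

using System.Threading.Tasks;
using Grpc.Core;

static class ClientStreamingSketch
{
    // Writes a batch of requests, half-closes, and returns the single response.
    // Only one write is pending at a time, matching the precondition above.
    public static async Task<TResp> SendAllAsync<TReq, TResp>(
        AsyncClientStreamingCall<TReq, TResp> call, params TReq[] requests)
    {
        foreach (var request in requests)
        {
            await call.RequestStream.WriteAsync(request);
        }
        await call.RequestStream.CompleteAsync();   // send close from client
        return await call.ResponseAsync;
    }
}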
diff --git a/src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs b/src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs
index 91364cdc70..46f5624223 100644
--- a/src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs
@@ -45,6 +45,7 @@ namespace Grpc.Core.Internal
static readonly NativeMethods Native = NativeMethods.Get();
AtomicCounter shutdownRefcount = new AtomicCounter(1);
+ CompletionRegistry completionRegistry;
private CompletionQueueSafeHandle()
{
@@ -53,7 +54,13 @@ namespace Grpc.Core.Internal
public static CompletionQueueSafeHandle Create()
{
return Native.grpcsharp_completion_queue_create();
+ }
+ public static CompletionQueueSafeHandle Create(CompletionRegistry completionRegistry)
+ {
+ var cq = Native.grpcsharp_completion_queue_create();
+ cq.completionRegistry = completionRegistry;
+ return cq;
}
public CompletionQueueEvent Next()
@@ -83,6 +90,15 @@ namespace Grpc.Core.Internal
DecrementShutdownRefcount();
}
+ /// <summary>
+ /// Completion registry associated with this completion queue.
+ /// Doesn't need to be set if only using Pluck() operations.
+ /// </summary>
+ public CompletionRegistry CompletionRegistry
+ {
+ get { return completionRegistry; }
+ }
+
protected override bool ReleaseHandle()
{
Native.grpcsharp_completion_queue_destroy(handle);
diff --git a/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs b/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
index b538726fa1..a446c1f99f 100644
--- a/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
+++ b/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
@@ -33,15 +33,16 @@
using System;
using System.Collections.Generic;
-using System.Runtime.InteropServices;
+using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core.Logging;
+using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
/// <summary>
- /// Pool of threads polling on the same completion queue.
+ /// Pool of threads polling on a set of completion queues.
/// </summary>
internal class GrpcThreadPool
{
@@ -51,25 +52,33 @@ namespace Grpc.Core.Internal
readonly object myLock = new object();
readonly List<Thread> threads = new List<Thread>();
readonly int poolSize;
+ readonly int completionQueueCount;
- CompletionQueueSafeHandle cq;
+ bool stopRequested;
- public GrpcThreadPool(GrpcEnvironment environment, int poolSize)
+ IReadOnlyCollection<CompletionQueueSafeHandle> completionQueues;
+
+ /// <summary>
+ /// Creates a pool of threads polling on a set of completion queues.
+ /// </summary>
+ /// <param name="environment">Environment.</param>
+ /// <param name="poolSize">Pool size.</param>
+ /// <param name="completionQueueCount">Completion queue count.</param>
+ public GrpcThreadPool(GrpcEnvironment environment, int poolSize, int completionQueueCount)
{
this.environment = environment;
this.poolSize = poolSize;
+ this.completionQueueCount = completionQueueCount;
+ GrpcPreconditions.CheckArgument(poolSize >= completionQueueCount,
+ "Thread pool size cannot be smaller than the number of completion queues used.");
}
public void Start()
{
lock (myLock)
{
- if (cq != null)
- {
- throw new InvalidOperationException("Already started.");
- }
-
- cq = CompletionQueueSafeHandle.Create();
+ GrpcPreconditions.CheckState(completionQueues == null, "Already started.");
+ completionQueues = CreateCompletionQueueList(environment, completionQueueCount);
for (int i = 0; i < poolSize; i++)
{
@@ -78,41 +87,73 @@ namespace Grpc.Core.Internal
}
}
- public void Stop()
+ public Task StopAsync()
{
lock (myLock)
{
- cq.Shutdown();
+ GrpcPreconditions.CheckState(!stopRequested, "Stop already requested.");
+ stopRequested = true;
+
+ foreach (var cq in completionQueues)
+ {
+ cq.Shutdown();
+ }
+ }
+
+ return Task.Run(() =>
+ {
foreach (var thread in threads)
{
thread.Join();
}
- cq.Dispose();
+ foreach (var cq in completionQueues)
+ {
+ cq.Dispose();
+ }
+ });
+ }
+
+ /// <summary>
+ /// Returns true if there is at least one thread pool thread that hasn't
+ /// already stopped.
+ /// Threads can either stop because all completion queues shut down or
+ /// because all foreground threads have already shut down and the process
+ /// is going to exit.
+ /// </summary>
+ internal bool IsAlive
+ {
+ get
+ {
+ return threads.Any(t => t.ThreadState != ThreadState.Stopped);
}
}
- internal CompletionQueueSafeHandle CompletionQueue
+ internal IReadOnlyCollection<CompletionQueueSafeHandle> CompletionQueues
{
get
{
- return cq;
+ return completionQueues;
}
}
- private Thread CreateAndStartThread(int i)
+ private Thread CreateAndStartThread(int threadIndex)
{
- var thread = new Thread(new ThreadStart(RunHandlerLoop));
- thread.IsBackground = false;
+ var cqIndex = threadIndex % completionQueues.Count;
+ var cq = completionQueues.ElementAt(cqIndex);
+
+ var thread = new Thread(new ThreadStart(() => RunHandlerLoop(cq)));
+ thread.IsBackground = true;
+ thread.Name = string.Format("grpc {0} (cq {1})", threadIndex, cqIndex);
thread.Start();
- thread.Name = "grpc " + i;
+
return thread;
}
/// <summary>
/// Body of the polling thread.
/// </summary>
- private void RunHandlerLoop()
+ private void RunHandlerLoop(CompletionQueueSafeHandle cq)
{
CompletionQueueEvent ev;
do
@@ -124,7 +165,7 @@ namespace Grpc.Core.Internal
IntPtr tag = ev.tag;
try
{
- var callback = environment.CompletionRegistry.Extract(tag);
+ var callback = cq.CompletionRegistry.Extract(tag);
callback(success);
}
catch (Exception e)
@@ -135,5 +176,16 @@ namespace Grpc.Core.Internal
}
while (ev.type != CompletionQueueEvent.CompletionType.Shutdown);
}
+
+ private static IReadOnlyCollection<CompletionQueueSafeHandle> CreateCompletionQueueList(GrpcEnvironment environment, int completionQueueCount)
+ {
+ var list = new List<CompletionQueueSafeHandle>();
+ for (int i = 0; i < completionQueueCount; i++)
+ {
+ var completionRegistry = new CompletionRegistry(environment);
+ list.Add(CompletionQueueSafeHandle.Create(completionRegistry));
+ }
+ return list.AsReadOnly();
+ }
}
}
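The reworked GrpcThreadPool pins each poller thread to one completion queue (threadIndex % completionQueueCount) and dispatches each event through that queue's own CompletionRegistry. A self-contained sketch of the same scheme using stand-in types, not the Grpc.Core classes:

using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Threading;

// Stand-in for a completion queue that owns its own tag -> callback registry.
class FakeCompletionQueue
{
    readonly BlockingCollection<long> events = new BlockingCollection<long>();
    public readonly ConcurrentDictionary<long, Action<bool>> CompletionRegistry
        = new ConcurrentDictionary<long, Action<bool>>();

    public void Post(long tag, Action<bool> callback)
    {
        CompletionRegistry[tag] = callback;
        events.Add(tag);
    }

    public void Shutdown() { events.CompleteAdding(); }

    // Returns false once the queue has shut down and drained.
    public bool Next(out long tag) { return events.TryTake(out tag, Timeout.Infinite); }
}

class ThreadPoolSketch
{
    public static void Main()
    {
        int poolSize = 4, cqCount = 2;
        var cqs = Enumerable.Range(0, cqCount).Select(_ => new FakeCompletionQueue()).ToList();

        // Each thread is pinned to one queue, round-robin, like threadIndex % completionQueueCount.
        var threads = Enumerable.Range(0, poolSize).Select(i =>
        {
            var cq = cqs[i % cqCount];
            var t = new Thread(() =>
            {
                long tag;
                while (cq.Next(out tag))
                {
                    Action<bool> callback;
                    if (cq.CompletionRegistry.TryRemove(tag, out callback))
                    {
                        callback(true);   // dispatch using this queue's registry
                    }
                }
            }) { IsBackground = true, Name = "grpc-sketch " + i };
            t.Start();
            return t;
        }).ToList();

        cqs[0].Post(1, ok => Console.WriteLine("completion on cq 0: " + ok));
        cqs[1].Post(2, ok => Console.WriteLine("completion on cq 1: " + ok));

        foreach (var cq in cqs) cq.Shutdown();
        foreach (var t in threads) t.Join();
    }
}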
diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
index 42fd4d4dc6..65607ed120 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
@@ -137,6 +137,7 @@ namespace Grpc.Core.Internal
public readonly Delegates.grpcsharp_server_credentials_release_delegate grpcsharp_server_credentials_release;
public readonly Delegates.grpcsharp_server_create_delegate grpcsharp_server_create;
+ public readonly Delegates.grpcsharp_server_register_completion_queue_delegate grpcsharp_server_register_completion_queue;
public readonly Delegates.grpcsharp_server_add_insecure_http2_port_delegate grpcsharp_server_add_insecure_http2_port;
public readonly Delegates.grpcsharp_server_add_secure_http2_port_delegate grpcsharp_server_add_secure_http2_port;
public readonly Delegates.grpcsharp_server_start_delegate grpcsharp_server_start;
@@ -244,6 +245,7 @@ namespace Grpc.Core.Internal
this.grpcsharp_server_credentials_release = GetMethodDelegate<Delegates.grpcsharp_server_credentials_release_delegate>(library);
this.grpcsharp_server_create = GetMethodDelegate<Delegates.grpcsharp_server_create_delegate>(library);
+ this.grpcsharp_server_register_completion_queue = GetMethodDelegate<Delegates.grpcsharp_server_register_completion_queue_delegate>(library);
this.grpcsharp_server_add_insecure_http2_port = GetMethodDelegate<Delegates.grpcsharp_server_add_insecure_http2_port_delegate>(library);
this.grpcsharp_server_add_secure_http2_port = GetMethodDelegate<Delegates.grpcsharp_server_add_secure_http2_port_delegate>(library);
this.grpcsharp_server_start = GetMethodDelegate<Delegates.grpcsharp_server_start_delegate>(library);
@@ -348,6 +350,7 @@ namespace Grpc.Core.Internal
this.grpcsharp_server_credentials_release = PInvokeMethods.grpcsharp_server_credentials_release;
this.grpcsharp_server_create = PInvokeMethods.grpcsharp_server_create;
+ this.grpcsharp_server_register_completion_queue = PInvokeMethods.grpcsharp_server_register_completion_queue;
this.grpcsharp_server_add_insecure_http2_port = PInvokeMethods.grpcsharp_server_add_insecure_http2_port;
this.grpcsharp_server_add_secure_http2_port = PInvokeMethods.grpcsharp_server_add_secure_http2_port;
this.grpcsharp_server_start = PInvokeMethods.grpcsharp_server_start;
@@ -493,7 +496,8 @@ namespace Grpc.Core.Internal
public delegate ServerCredentialsSafeHandle grpcsharp_ssl_server_credentials_create_delegate(string pemRootCerts, string[] keyCertPairCertChainArray, string[] keyCertPairPrivateKeyArray, UIntPtr numKeyCertPairs, bool forceClientAuth);
public delegate void grpcsharp_server_credentials_release_delegate(IntPtr credentials);
- public delegate ServerSafeHandle grpcsharp_server_create_delegate(CompletionQueueSafeHandle cq, ChannelArgsSafeHandle args);
+ public delegate ServerSafeHandle grpcsharp_server_create_delegate(ChannelArgsSafeHandle args);
+ public delegate void grpcsharp_server_register_completion_queue_delegate(ServerSafeHandle server, CompletionQueueSafeHandle cq);
public delegate int grpcsharp_server_add_insecure_http2_port_delegate(ServerSafeHandle server, string addr);
public delegate int grpcsharp_server_add_secure_http2_port_delegate(ServerSafeHandle server, string addr, ServerCredentialsSafeHandle creds);
public delegate void grpcsharp_server_start_delegate(ServerSafeHandle server);
@@ -773,7 +777,10 @@ namespace Grpc.Core.Internal
// ServerSafeHandle
[DllImport("grpc_csharp_ext.dll")]
- public static extern ServerSafeHandle grpcsharp_server_create(CompletionQueueSafeHandle cq, ChannelArgsSafeHandle args);
+ public static extern ServerSafeHandle grpcsharp_server_create(ChannelArgsSafeHandle args);
+
+ [DllImport("grpc_csharp_ext.dll")]
+ public static extern void grpcsharp_server_register_completion_queue(ServerSafeHandle server, CompletionQueueSafeHandle cq);
[DllImport("grpc_csharp_ext.dll")]
public static extern int grpcsharp_server_add_insecure_http2_port(ServerSafeHandle server, string addr);
diff --git a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
index febebba209..6a2f520163 100644
--- a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
@@ -44,7 +44,7 @@ namespace Grpc.Core.Internal
{
internal interface IServerCallHandler
{
- Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment);
+ Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq);
}
internal class UnaryServerCallHandler<TRequest, TResponse> : IServerCallHandler
@@ -62,14 +62,14 @@ namespace Grpc.Core.Internal
this.handler = handler;
}
- public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
+ public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
- environment, newRpc.Server);
+ newRpc.Server);
- asyncCall.Initialize(newRpc.Call);
+ asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
@@ -121,14 +121,14 @@ namespace Grpc.Core.Internal
this.handler = handler;
}
- public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
+ public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
- environment, newRpc.Server);
+ newRpc.Server);
- asyncCall.Initialize(newRpc.Call);
+ asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
@@ -179,14 +179,14 @@ namespace Grpc.Core.Internal
this.handler = handler;
}
- public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
+ public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
- environment, newRpc.Server);
+ newRpc.Server);
- asyncCall.Initialize(newRpc.Call);
+ asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
@@ -237,14 +237,14 @@ namespace Grpc.Core.Internal
this.handler = handler;
}
- public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
+ public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
- environment, newRpc.Server);
+ newRpc.Server);
- asyncCall.Initialize(newRpc.Call);
+ asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
@@ -281,13 +281,13 @@ namespace Grpc.Core.Internal
{
public static readonly NoSuchMethodCallHandler Instance = new NoSuchMethodCallHandler();
- public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
+ public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
// We don't care about the payload type here.
var asyncCall = new AsyncCallServer<byte[], byte[]>(
- (payload) => payload, (payload) => payload, environment, newRpc.Server);
+ (payload) => payload, (payload) => payload, newRpc.Server);
- asyncCall.Initialize(newRpc.Call);
+ asyncCall.Initialize(newRpc.Call, cq);
var finishedTask = asyncCall.ServerSideCallAsync();
await asyncCall.SendStatusFromServerAsync(new Status(StatusCode.Unimplemented, ""), Metadata.Empty, null).ConfigureAwait(false);
await finishedTask.ConfigureAwait(false);
diff --git a/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs b/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
index ecfee0bfdd..25b79b4398 100644
--- a/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
@@ -52,16 +52,12 @@ namespace Grpc.Core.Internal
public Task WriteAsync(TResponse message)
{
- var taskSource = new AsyncCompletionTaskSource<object>();
- call.StartSendMessage(message, GetWriteFlags(), taskSource.CompletionDelegate);
- return taskSource.Task;
+ return call.SendMessageAsync(message, GetWriteFlags());
}
public Task WriteResponseHeadersAsync(Metadata responseHeaders)
{
- var taskSource = new AsyncCompletionTaskSource<object>();
- call.StartSendInitialMetadata(responseHeaders, taskSource.CompletionDelegate);
- return taskSource.Task;
+ return call.SendInitialMetadataAsync(responseHeaders);
}
public WriteOptions WriteOptions
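On the server side the simplification shows up the same way: response headers and streamed messages are plain awaited Tasks. A short sketch against the public server-side API, assuming the response type and the generated handler wiring exist elsewhere:

using System.Threading.Tasks;
using Grpc.Core;

static class ServerStreamingHandlerSketch
{
    // Sends custom response headers, then one streamed message.
    public static async Task WriteWithHeadersAsync<TResponse>(
        IServerStreamWriter<TResponse> responseStream, ServerCallContext context, TResponse response)
    {
        await context.WriteResponseHeadersAsync(new Metadata { { "sketch-header", "1" } });
        await responseStream.WriteAsync(response);
    }
}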
diff --git a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
index 6b5f70e220..8581302706 100644
--- a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
@@ -31,12 +31,6 @@
#endregion
-using System;
-using System.Collections.Concurrent;
-using System.Diagnostics;
-using System.Runtime.InteropServices;
-using Grpc.Core.Utils;
-
namespace Grpc.Core.Internal
{
/// <summary>
@@ -50,12 +44,17 @@ namespace Grpc.Core.Internal
{
}
- public static ServerSafeHandle NewServer(CompletionQueueSafeHandle cq, ChannelArgsSafeHandle args)
+ public static ServerSafeHandle NewServer(ChannelArgsSafeHandle args)
{
// Increment reference count for the native gRPC environment to make sure we don't do grpc_shutdown() before destroying the server handle.
// Doing so would make object finalizer crash if we end up abandoning the handle.
GrpcEnvironment.GrpcNativeInit();
- return Native.grpcsharp_server_create(cq, args);
+ return Native.grpcsharp_server_create(args);
+ }
+
+ public void RegisterCompletionQueue(CompletionQueueSafeHandle cq)
+ {
+ Native.grpcsharp_server_register_completion_queue(this, cq);
}
public int AddInsecurePort(string addr)
@@ -73,18 +72,18 @@ namespace Grpc.Core.Internal
Native.grpcsharp_server_start(this);
}
- public void ShutdownAndNotify(BatchCompletionDelegate callback, GrpcEnvironment environment)
+ public void ShutdownAndNotify(BatchCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
{
var ctx = BatchContextSafeHandle.Create();
- environment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
- Native.grpcsharp_server_shutdown_and_notify_callback(this, environment.CompletionQueue, ctx);
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
+ Native.grpcsharp_server_shutdown_and_notify_callback(this, completionQueue, ctx);
}
- public void RequestCall(BatchCompletionDelegate callback, GrpcEnvironment environment)
+ public void RequestCall(BatchCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
{
var ctx = BatchContextSafeHandle.Create();
- environment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
- Native.grpcsharp_server_request_call(this, environment.CompletionQueue, ctx).CheckOk();
+ completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
+ Native.grpcsharp_server_request_call(this, completionQueue, ctx).CheckOk();
}
protected override bool ReleaseHandle()
diff --git a/src/csharp/Grpc.Core/Logging/LogLevel.cs b/src/csharp/Grpc.Core/Logging/LogLevel.cs
new file mode 100644
index 0000000000..d64e1f5fd0
--- /dev/null
+++ b/src/csharp/Grpc.Core/Logging/LogLevel.cs
@@ -0,0 +1,59 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+
+namespace Grpc.Core.Logging
+{
+ /// <summary>Standard logging levels.</summary>
+ public enum LogLevel
+ {
+ /// <summary>
+ /// Debug severity.
+ /// </summary>
+ Debug = 0,
+ /// <summary>
+ /// Info severity.
+ /// </summary>
+ Info,
+ /// <summary>
+ /// Warning severity.
+ /// </summary>
+ Warning,
+ /// <summary>
+ /// Error severity.
+ /// </summary>
+ Error
+ }
+}
diff --git a/src/csharp/Grpc.Core/Logging/LogLevelFilterLogger.cs b/src/csharp/Grpc.Core/Logging/LogLevelFilterLogger.cs
new file mode 100644
index 0000000000..4eeb79c783
--- /dev/null
+++ b/src/csharp/Grpc.Core/Logging/LogLevelFilterLogger.cs
@@ -0,0 +1,160 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using Grpc.Core.Utils;
+
+namespace Grpc.Core.Logging
+{
+ /// <summary>Logger that filters out messages below a certain log level.</summary>
+ public class LogLevelFilterLogger : ILogger
+ {
+ readonly ILogger innerLogger;
+ readonly LogLevel logLevel;
+
+ /// <summary>
+ /// Creates an instance of <c>LogLevelFilterLogger</c>.
+ /// </summary>
+ public LogLevelFilterLogger(ILogger logger, LogLevel logLevel)
+ {
+ this.innerLogger = GrpcPreconditions.CheckNotNull(logger);
+ this.logLevel = logLevel;
+ }
+
+ /// <summary>
+ /// Returns a logger associated with the specified type.
+ /// </summary>
+ public virtual ILogger ForType<T>()
+ {
+ var newInnerLogger = innerLogger.ForType<T>();
+ if (object.ReferenceEquals(this.innerLogger, newInnerLogger))
+ {
+ return this;
+ }
+ return new LogLevelFilterLogger(newInnerLogger, logLevel);
+ }
+
+ /// <summary>Logs a message with severity Debug.</summary>
+ public void Debug(string message)
+ {
+ if (logLevel <= LogLevel.Debug)
+ {
+ innerLogger.Debug(message);
+ }
+ }
+
+ /// <summary>Logs a formatted message with severity Debug.</summary>
+ public void Debug(string format, params object[] formatArgs)
+ {
+ if (logLevel <= LogLevel.Debug)
+ {
+ innerLogger.Debug(format, formatArgs);
+ }
+ }
+
+ /// <summary>Logs a message with severity Info.</summary>
+ public void Info(string message)
+ {
+ if (logLevel <= LogLevel.Info)
+ {
+ innerLogger.Info(message);
+ }
+ }
+
+ /// <summary>Logs a formatted message with severity Info.</summary>
+ public void Info(string format, params object[] formatArgs)
+ {
+ if (logLevel <= LogLevel.Info)
+ {
+ innerLogger.Info(format, formatArgs);
+ }
+ }
+
+ /// <summary>Logs a message with severity Warning.</summary>
+ public void Warning(string message)
+ {
+ if (logLevel <= LogLevel.Warning)
+ {
+ innerLogger.Warning(message);
+ }
+ }
+
+ /// <summary>Logs a formatted message with severity Warning.</summary>
+ public void Warning(string format, params object[] formatArgs)
+ {
+ if (logLevel <= LogLevel.Warning)
+ {
+ innerLogger.Warning(format, formatArgs);
+ }
+ }
+
+ /// <summary>Logs a message and an associated exception with severity Warning.</summary>
+ public void Warning(Exception exception, string message)
+ {
+ if (logLevel <= LogLevel.Warning)
+ {
+ innerLogger.Warning(exception, message);
+ }
+ }
+
+ /// <summary>Logs a message with severity Error.</summary>
+ public void Error(string message)
+ {
+ if (logLevel <= LogLevel.Error)
+ {
+ innerLogger.Error(message);
+ }
+ }
+
+ /// <summary>Logs a formatted message with severity Error.</summary>
+ public void Error(string format, params object[] formatArgs)
+ {
+ if (logLevel <= LogLevel.Error)
+ {
+ innerLogger.Error(format, formatArgs);
+ }
+ }
+
+ /// <summary>Logs a message and an associated exception with severity Error.</summary>
+ public void Error(Exception exception, string message)
+ {
+ if (logLevel <= LogLevel.Error)
+ {
+ innerLogger.Error(exception, message);
+ }
+ }
+ }
+}
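A plausible way to plug in the new filter logger — assuming the ConsoleLogger and GrpcEnvironment.SetLogger members that ship elsewhere in Grpc.Core — is to wrap whatever logger is in use and drop everything below a chosen level:

using Grpc.Core;
using Grpc.Core.Logging;

class LoggingSetupSketch
{
    static void Main()
    {
        // Keep only warnings and errors; Debug/Info are swallowed by the filter.
        GrpcEnvironment.SetLogger(new LogLevelFilterLogger(new ConsoleLogger(), LogLevel.Warning));

        var logger = GrpcEnvironment.Logger.ForType<LoggingSetupSketch>();
        logger.Debug("filtered out");           // below Warning, never reaches the inner logger
        logger.Error("this one gets through");  // forwarded to the ConsoleLogger
    }
}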
diff --git a/src/csharp/Grpc.Core/Metadata.cs b/src/csharp/Grpc.Core/Metadata.cs
index e982fa0c48..f73f720094 100644
--- a/src/csharp/Grpc.Core/Metadata.cs
+++ b/src/csharp/Grpc.Core/Metadata.cs
@@ -95,6 +95,7 @@ namespace Grpc.Core
public void Insert(int index, Metadata.Entry item)
{
+ GrpcPreconditions.CheckNotNull(item);
CheckWriteable();
entries.Insert(index, item);
}
@@ -114,6 +115,7 @@ namespace Grpc.Core
set
{
+ GrpcPreconditions.CheckNotNull(value);
CheckWriteable();
entries[index] = value;
}
@@ -121,6 +123,7 @@ namespace Grpc.Core
public void Add(Metadata.Entry item)
{
+ GrpcPreconditions.CheckNotNull(item);
CheckWriteable();
entries.Add(item);
}
@@ -187,7 +190,7 @@ namespace Grpc.Core
/// <summary>
/// Metadata entry
/// </summary>
- public struct Entry
+ public class Entry
{
private static readonly Encoding Encoding = Encoding.ASCII;
private static readonly Regex ValidKeyRegex = new Regex("^[a-z0-9_-]+$");
diff --git a/src/csharp/Grpc.Core/Server.cs b/src/csharp/Grpc.Core/Server.cs
index d538a4671f..3b554e5e87 100644
--- a/src/csharp/Grpc.Core/Server.cs
+++ b/src/csharp/Grpc.Core/Server.cs
@@ -34,8 +34,7 @@
using System;
using System.Collections;
using System.Collections.Generic;
-using System.Diagnostics;
-using System.Runtime.InteropServices;
+using System.Linq;
using System.Threading.Tasks;
using Grpc.Core.Internal;
using Grpc.Core.Logging;
@@ -48,7 +47,7 @@ namespace Grpc.Core
/// </summary>
public class Server
{
- const int InitialAllowRpcTokenCount = 10;
+ const int InitialAllowRpcTokenCountPerCq = 10;
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<Server>();
readonly AtomicCounter activeCallCounter = new AtomicCounter();
@@ -68,11 +67,19 @@ namespace Grpc.Core
bool startRequested;
volatile bool shutdownRequested;
+
/// <summary>
- /// Create a new server.
+ /// Creates a new server.
+ /// </summary>
+ public Server() : this(null)
+ {
+ }
+
+ /// <summary>
+ /// Creates a new server.
/// </summary>
/// <param name="options">Channel options.</param>
- public Server(IEnumerable<ChannelOption> options = null)
+ public Server(IEnumerable<ChannelOption> options)
{
this.serviceDefinitions = new ServiceDefinitionCollection(this);
this.ports = new ServerPortCollection(this);
@@ -80,8 +87,14 @@ namespace Grpc.Core
this.options = options != null ? new List<ChannelOption>(options) : new List<ChannelOption>();
using (var channelArgs = ChannelOptions.CreateChannelArgs(this.options))
{
- this.handle = ServerSafeHandle.NewServer(environment.CompletionQueue, channelArgs);
+ this.handle = ServerSafeHandle.NewServer(channelArgs);
+ }
+
+ foreach (var cq in environment.CompletionQueues)
+ {
+ this.handle.RegisterCompletionQueue(cq);
}
+ GrpcEnvironment.RegisterServer(this);
}
/// <summary>
@@ -127,15 +140,19 @@ namespace Grpc.Core
lock (myLock)
{
GrpcPreconditions.CheckState(!startRequested);
+ GrpcPreconditions.CheckState(!shutdownRequested);
startRequested = true;
handle.Start();
// Starting with more than one AllowOneRpc token can significantly increase
// unary RPC throughput.
- for (int i = 0; i < InitialAllowRpcTokenCount; i++)
+ for (int i = 0; i < InitialAllowRpcTokenCountPerCq; i++)
{
- AllowOneRpc();
+ foreach (var cq in environment.CompletionQueues)
+ {
+ AllowOneRpc(cq);
+ }
}
}
}
@@ -145,41 +162,24 @@ namespace Grpc.Core
/// cleans up used resources. The returned task finishes when shutdown procedure
/// is complete.
/// </summary>
- public async Task ShutdownAsync()
+ /// <remarks>
+ /// It is strongly recommended to shut down all previously created servers before exiting the process.
+ /// </remarks>
+ public Task ShutdownAsync()
{
- lock (myLock)
- {
- GrpcPreconditions.CheckState(startRequested);
- GrpcPreconditions.CheckState(!shutdownRequested);
- shutdownRequested = true;
- }
-
- handle.ShutdownAndNotify(HandleServerShutdown, environment);
- await shutdownTcs.Task.ConfigureAwait(false);
- DisposeHandle();
-
- await Task.Run(() => GrpcEnvironment.Release()).ConfigureAwait(false);
+ return ShutdownInternalAsync(false);
}
/// <summary>
/// Requests server shutdown while cancelling all the in-progress calls.
/// The returned task finishes when shutdown procedure is complete.
/// </summary>
- public async Task KillAsync()
+ /// <remarks>
+ /// It is strongly recommended to shut down all previously created servers before exiting the process.
+ /// </remarks>
+ public Task KillAsync()
{
- lock (myLock)
- {
- GrpcPreconditions.CheckState(startRequested);
- GrpcPreconditions.CheckState(!shutdownRequested);
- shutdownRequested = true;
- }
-
- handle.ShutdownAndNotify(HandleServerShutdown, environment);
- handle.CancelAllCalls();
- await shutdownTcs.Task.ConfigureAwait(false);
- DisposeHandle();
-
- await Task.Run(() => GrpcEnvironment.Release()).ConfigureAwait(false);
+ return ShutdownInternalAsync(true);
}
internal void AddCallReference(object call)
@@ -198,6 +198,51 @@ namespace Grpc.Core
}
/// <summary>
+ /// Shuts down the server.
+ /// </summary>
+ private async Task ShutdownInternalAsync(bool kill)
+ {
+ lock (myLock)
+ {
+ GrpcPreconditions.CheckState(!shutdownRequested);
+ shutdownRequested = true;
+ }
+ GrpcEnvironment.UnregisterServer(this);
+
+ var cq = environment.CompletionQueues.First(); // any cq will do
+ handle.ShutdownAndNotify(HandleServerShutdown, cq);
+ if (kill)
+ {
+ handle.CancelAllCalls();
+ }
+ await ShutdownCompleteOrEnvironmentDeadAsync().ConfigureAwait(false);
+
+ DisposeHandle();
+
+ await GrpcEnvironment.ReleaseAsync().ConfigureAwait(false);
+ }
+
+ /// <summary>
+ /// In case the environment's threadpool becomes dead, the shutdown completion will
+ /// never be delivered, but we need to release the environment's handle anyway.
+ /// </summary>
+ private async Task ShutdownCompleteOrEnvironmentDeadAsync()
+ {
+ while (true)
+ {
+ var task = await Task.WhenAny(shutdownTcs.Task, Task.Delay(20)).ConfigureAwait(false);
+ if (shutdownTcs.Task == task)
+ {
+ return;
+ }
+ if (!environment.IsAlive)
+ {
+ return;
+ }
+ }
+ }
+
+ /// <summary>
/// Adds a service definition.
/// </summary>
private void AddServiceDefinitionInternal(ServerServiceDefinition serviceDefinition)
@@ -244,11 +289,11 @@ namespace Grpc.Core
/// <summary>
/// Allows one new RPC call to be received by server.
/// </summary>
- private void AllowOneRpc()
+ private void AllowOneRpc(CompletionQueueSafeHandle cq)
{
if (!shutdownRequested)
{
- handle.RequestCall(HandleNewServerRpc, environment);
+ handle.RequestCall((success, ctx) => HandleNewServerRpc(success, ctx, cq), cq);
}
}
@@ -265,7 +310,7 @@ namespace Grpc.Core
/// <summary>
/// Selects corresponding handler for given call and handles the call.
/// </summary>
- private async Task HandleCallAsync(ServerRpcNew newRpc)
+ private async Task HandleCallAsync(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
{
try
{
@@ -274,7 +319,7 @@ namespace Grpc.Core
{
callHandler = NoSuchMethodCallHandler.Instance;
}
- await callHandler.HandleCall(newRpc, environment).ConfigureAwait(false);
+ await callHandler.HandleCall(newRpc, cq).ConfigureAwait(false);
}
catch (Exception e)
{
@@ -285,9 +330,9 @@ namespace Grpc.Core
/// <summary>
/// Handles the native callback.
/// </summary>
- private void HandleNewServerRpc(bool success, BatchContextSafeHandle ctx)
+ private void HandleNewServerRpc(bool success, BatchContextSafeHandle ctx, CompletionQueueSafeHandle cq)
{
- Task.Run(() => AllowOneRpc());
+ Task.Run(() => AllowOneRpc(cq));
if (success)
{
@@ -296,7 +341,7 @@ namespace Grpc.Core
// after server shutdown, the callback returns with null call
if (!newRpc.Call.IsInvalid)
{
- HandleCallAsync(newRpc); // we don't need to await.
+ HandleCallAsync(newRpc, cq); // we don't need to await.
}
}
}
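Pulling the public-facing Server changes together, here is a minimal hosting sketch using the new parameterless Server() constructor; the service registration line is a commented placeholder, since the generated bindings live in other files:

using Grpc.Core;

class ServerSetupSketch
{
    static void Main()
    {
        var server = new Server   // parameterless constructor added above
        {
            Ports = { new ServerPort("localhost", 50051, ServerCredentials.Insecure) },
            // Services = { SomeGeneratedService.BindService(new SomeServiceImpl()) },
        };
        server.Start();

        // ... serve traffic ...

        // Shut the server down before the process exits, as the remarks above recommend.
        server.ShutdownAsync().Wait();
    }
}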
diff --git a/src/csharp/Grpc.Core/ServerServiceDefinition.cs b/src/csharp/Grpc.Core/ServerServiceDefinition.cs
index deb1431ca3..ac08c04bf6 100644
--- a/src/csharp/Grpc.Core/ServerServiceDefinition.cs
+++ b/src/csharp/Grpc.Core/ServerServiceDefinition.cs
@@ -63,11 +63,10 @@ namespace Grpc.Core
/// <summary>
/// Creates a new builder object for <c>ServerServiceDefinition</c>.
/// </summary>
- /// <param name="serviceName">The service name.</param>
/// <returns>The builder object.</returns>
- public static Builder CreateBuilder(string serviceName)
+ public static Builder CreateBuilder()
{
- return new Builder(serviceName);
+ return new Builder();
}
/// <summary>
@@ -75,16 +74,13 @@ namespace Grpc.Core
/// </summary>
public class Builder
{
- readonly string serviceName;
readonly Dictionary<string, IServerCallHandler> callHandlers = new Dictionary<string, IServerCallHandler>();
/// <summary>
/// Creates a new instance of builder.
/// </summary>
- /// <param name="serviceName">The service name.</param>
- public Builder(string serviceName)
+ public Builder()
{
- this.serviceName = serviceName;
}
/// <summary>
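With the service name removed from the builder, it now comes only from the Method descriptors themselves. A hand-rolled binding sketch using public Grpc.Core APIs (Marshallers.Create, Method<,>); the "EchoService" method and names are made up for illustration:

using System.Threading.Tasks;
using Grpc.Core;

static class ManualBindingSketch
{
    // Trivial byte[] pass-through marshaller for a toy unary "echo" method.
    static readonly Marshaller<byte[]> ByteMarshaller = Marshallers.Create(b => b, b => b);

    static readonly Method<byte[], byte[]> EchoMethod = new Method<byte[], byte[]>(
        MethodType.Unary, "sketch.EchoService", "Echo", ByteMarshaller, ByteMarshaller);

    public static ServerServiceDefinition Bind()
    {
        // CreateBuilder() no longer takes a service name; each AddMethod call
        // carries its own fully-qualified method descriptor.
        return ServerServiceDefinition.CreateBuilder()
            .AddMethod(EchoMethod, (request, context) => Task.FromResult(request))
            .Build();
    }
}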
diff --git a/src/csharp/Grpc.Core/WriteOptions.cs b/src/csharp/Grpc.Core/WriteOptions.cs
index 7523ada84a..4c9706d966 100644
--- a/src/csharp/Grpc.Core/WriteOptions.cs
+++ b/src/csharp/Grpc.Core/WriteOptions.cs
@@ -1,6 +1,6 @@
#region Copyright notice and license
-// Copyright 2015, Google Inc.
+// Copyright 2015-2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,7 @@ namespace Grpc.Core
/// </summary>
public static readonly WriteOptions Default = new WriteOptions();
- private WriteFlags flags;
+ private readonly WriteFlags flags;
/// <summary>
/// Initializes a new instance of <c>WriteOptions</c> class.
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index d700a18778..4bbefcbe01 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -81,103 +81,12 @@ namespace Math {
get { return global::Math.MathReflection.Descriptor.Services[0]; }
}
- /// <summary>Client for Math</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IMathClient
- {
- /// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
- /// </summary>
- global::Math.DivReply Div(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
- /// </summary>
- global::Math.DivReply Div(global::Math.DivArgs request, CallOptions options);
- /// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
- /// </summary>
- AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
- /// </summary>
- AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, CallOptions options);
- /// <summary>
- /// DivMany accepts an arbitrary number of division args from the client stream
- /// and sends back the results in the reply stream. The stream continues until
- /// the client closes its end; the server does the same after sending all the
- /// replies. The stream ends immediately if either end aborts.
- /// </summary>
- AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// DivMany accepts an arbitrary number of division args from the client stream
- /// and sends back the results in the reply stream. The stream continues until
- /// the client closes its end; the server does the same after sending all the
- /// replies. The stream ends immediately if either end aborts.
- /// </summary>
- AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(CallOptions options);
- /// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
- /// generates up to limit numbers; otherwise it continues until the call is
- /// canceled. Unlike Fib above, Fib has no final FibReply.
- /// </summary>
- AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
- /// generates up to limit numbers; otherwise it continues until the call is
- /// canceled. Unlike Fib above, Fib has no final FibReply.
- /// </summary>
- AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, CallOptions options);
- /// <summary>
- /// Sum sums a stream of numbers, returning the final result once the stream
- /// is closed.
- /// </summary>
- AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Sum sums a stream of numbers, returning the final result once the stream
- /// is closed.
- /// </summary>
- AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of Math</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IMath
- {
- /// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, ServerCallContext context);
- /// <summary>
- /// DivMany accepts an arbitrary number of division args from the client stream
- /// and sends back the results in the reply stream. The stream continues until
- /// the client closes its end; the server does the same after sending all the
- /// replies. The stream ends immediately if either end aborts.
- /// </summary>
- global::System.Threading.Tasks.Task DivMany(IAsyncStreamReader<global::Math.DivArgs> requestStream, IServerStreamWriter<global::Math.DivReply> responseStream, ServerCallContext context);
- /// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
- /// generates up to limit numbers; otherwise it continues until the call is
- /// canceled. Unlike Fib above, Fib has no final FibReply.
- /// </summary>
- global::System.Threading.Tasks.Task Fib(global::Math.FibArgs request, IServerStreamWriter<global::Math.Num> responseStream, ServerCallContext context);
- /// <summary>
- /// Sum sums a stream of numbers, returning the final result once the stream
- /// is closed.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Math.Num> Sum(IAsyncStreamReader<global::Math.Num> requestStream, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of Math</summary>
public abstract class MathBase
{
/// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ /// and remainder.
/// </summary>
public virtual global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, ServerCallContext context)
{
@@ -196,7 +105,7 @@ namespace Math {
}
/// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
/// generates up to limit numbers; otherwise it continues until the call is
/// canceled. Unlike Fib above, Fib has no final FibReply.
/// </summary>
@@ -217,9 +126,7 @@ namespace Math {
}
/// <summary>Client for Math</summary>
- #pragma warning disable 0618
- public class MathClient : ClientBase<MathClient>, IMathClient
- #pragma warning restore 0618
+ public class MathClient : ClientBase<MathClient>
{
public MathClient(Channel channel) : base(channel)
{
@@ -237,32 +144,32 @@ namespace Math {
}
/// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ /// and remainder.
/// </summary>
public virtual global::Math.DivReply Div(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
return Div(request, new CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ /// and remainder.
/// </summary>
public virtual global::Math.DivReply Div(global::Math.DivArgs request, CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Div, null, options, request);
}
/// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ /// and remainder.
/// </summary>
public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
return DivAsync(request, new CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
- /// Div divides args.dividend by args.divisor and returns the quotient and
- /// remainder.
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ /// and remainder.
/// </summary>
public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, CallOptions options)
{
@@ -289,7 +196,7 @@ namespace Math {
return CallInvoker.AsyncDuplexStreamingCall(__Method_DivMany, null, options);
}
/// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
/// generates up to limit numbers; otherwise it continues until the call is
/// canceled. Unlike Fib above, Fib has no final FibReply.
/// </summary>
@@ -298,7 +205,7 @@ namespace Math {
return Fib(request, new CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
- /// Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
/// generates up to limit numbers; otherwise it continues until the call is
/// canceled. Unlike Fib above, Fib has no final FibReply.
/// </summary>
@@ -335,23 +242,9 @@ namespace Math {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IMath serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_Div, serviceImpl.Div)
- .AddMethod(__Method_DivMany, serviceImpl.DivMany)
- .AddMethod(__Method_Fib, serviceImpl.Fib)
- .AddMethod(__Method_Sum, serviceImpl.Sum).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(MathBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Div, serviceImpl.Div)
.AddMethod(__Method_DivMany, serviceImpl.DivMany)
.AddMethod(__Method_Fib, serviceImpl.Fib)
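The regenerated MathGrpc.cs above drops the obsolete IMath/IMathClient interfaces: servers now derive from the generated MathBase class, clients use MathClient directly, and ServerServiceDefinition.CreateBuilder() no longer takes the service name. A minimal hosting sketch of the new pattern follows; it assumes the generated container class is Math.Math (nesting MathBase and BindService) and the usual generated property names for the math.proto fields, so treat it as an illustration rather than the repository's own sample code.

using System;
using System.Threading.Tasks;
using Grpc.Core;

// Hypothetical server implementation: derive from the generated abstract base
// class instead of implementing the removed IMath interface.
class MathImpl : global::Math.Math.MathBase
{
    // Override only the methods the server needs; the virtual base methods
    // shown in the diff supply the default behavior for the rest.
    public override Task<global::Math.DivReply> Div(global::Math.DivArgs request,
                                                    ServerCallContext context)
    {
        // Assumed generated property names for the DivArgs/DivReply fields.
        return Task.FromResult(new global::Math.DivReply
        {
            Quotient = request.Dividend / request.Divisor,
            Remainder = request.Dividend % request.Divisor
        });
    }
}

class MathServerProgram
{
    static void Main()
    {
        var server = new Server
        {
            // BindService now accepts only the MathBase-derived implementation.
            Services = { global::Math.Math.BindService(new MathImpl()) },
            Ports = { new ServerPort("localhost", 23456, ServerCredentials.Insecure) }
        };
        server.Start();
        Console.WriteLine("Math server listening; press any key to shut down.");
        Console.ReadKey();
        server.ShutdownAsync().Wait();
    }
}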
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 51c6a39b1d..d0ade7d02b 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -58,23 +58,6 @@ namespace Grpc.Health.V1 {
get { return global::Grpc.Health.V1.HealthReflection.Descriptor.Services[0]; }
}
- /// <summary>Client for Health</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IHealthClient
- {
- global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, CallOptions options);
- AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of Health</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IHealth
- {
- global::System.Threading.Tasks.Task<global::Grpc.Health.V1.HealthCheckResponse> Check(global::Grpc.Health.V1.HealthCheckRequest request, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of Health</summary>
public abstract class HealthBase
{
@@ -86,9 +69,7 @@ namespace Grpc.Health.V1 {
}
/// <summary>Client for Health</summary>
- #pragma warning disable 0618
- public class HealthClient : ClientBase<HealthClient>, IHealthClient
- #pragma warning restore 0618
+ public class HealthClient : ClientBase<HealthClient>
{
public HealthClient(Channel channel) : base(channel)
{
@@ -134,20 +115,9 @@ namespace Grpc.Health.V1 {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IHealth serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_Check, serviceImpl.Check).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(HealthBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Check, serviceImpl.Check).Build();
}
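Health is the simplest of the regenerated services, so it is a convenient place to show the client side of the same change: callers use HealthClient directly instead of the removed IHealthClient interface. A short, hedged usage sketch; it assumes the generated container class Grpc.Health.V1.Health nests HealthClient, and the HealthCheckRequest.Service field and response Status property are assumptions about the v1 health proto rather than something visible in this diff.

using System;
using Grpc.Core;
using Grpc.Health.V1;

class HealthCheckExample
{
    static void Main()
    {
        // Plain insecure channel to a locally running server (hypothetical address).
        var channel = new Channel("localhost:50051", ChannelCredentials.Insecure);

        // The concrete client class is used directly; IHealthClient no longer exists.
        var client = new Health.HealthClient(channel);

        // Assumed field/property names from the v1 health-checking proto.
        var response = client.Check(new HealthCheckRequest { Service = "math.Math" });
        Console.WriteLine("Serving status: " + response.Status);

        channel.ShutdownAsync().Wait();
    }
}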
diff --git a/src/csharp/Grpc.IntegrationTesting/GenericService.cs b/src/csharp/Grpc.IntegrationTesting/GenericService.cs
index c6128264ac..53fa1ee5f6 100644
--- a/src/csharp/Grpc.IntegrationTesting/GenericService.cs
+++ b/src/csharp/Grpc.IntegrationTesting/GenericService.cs
@@ -64,7 +64,7 @@ namespace Grpc.IntegrationTesting
public static ServerServiceDefinition BindHandler(DuplexStreamingServerMethod<byte[], byte[]> handler)
{
- return ServerServiceDefinition.CreateBuilder(StreamingCallMethod.ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(StreamingCallMethod, handler).Build();
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
index 1541cfd7bb..aea40afee2 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
@@ -471,8 +471,16 @@ namespace Grpc.IntegrationTesting
cts.Cancel();
- var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseStream.MoveNext());
- Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+ try
+ {
+ // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+ await call.ResponseStream.MoveNext();
+ Assert.Fail();
+ }
+ catch (RpcException ex)
+ {
+ Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+ }
}
Console.WriteLine("Passed!");
}
@@ -497,9 +505,16 @@ namespace Grpc.IntegrationTesting
// Deadline was reached before write has started. Eat the exception and continue.
}
- var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseStream.MoveNext());
- // We can't guarantee the status code always DeadlineExceeded. See issue #2685.
- Assert.Contains(ex.Status.StatusCode, new[] { StatusCode.DeadlineExceeded, StatusCode.Internal });
+ try
+ {
+ await call.ResponseStream.MoveNext();
+ Assert.Fail();
+ }
+ catch (RpcException ex)
+ {
+ // We can't guarantee the status code always DeadlineExceeded. See issue #2685.
+ Assert.Contains(ex.Status.StatusCode, new[] { StatusCode.DeadlineExceeded, StatusCode.Internal });
+ }
}
Console.WriteLine("Passed!");
}
@@ -577,9 +592,17 @@ namespace Grpc.IntegrationTesting
await call.RequestStream.WriteAsync(request);
await call.RequestStream.CompleteAsync();
- var e = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseStream.ToListAsync());
- Assert.AreEqual(StatusCode.Unknown, e.Status.StatusCode);
- Assert.AreEqual(echoStatus.Message, e.Status.Detail);
+ try
+ {
+ // cannot use Assert.ThrowsAsync because it uses Task.Wait and would deadlock.
+ await call.ResponseStream.ToListAsync();
+ Assert.Fail();
+ }
+ catch (RpcException e)
+ {
+ Assert.AreEqual(StatusCode.Unknown, e.Status.StatusCode);
+ Assert.AreEqual(echoStatus.Message, e.Status.Detail);
+ }
}
Console.WriteLine("Passed!");
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 9d31d1c514..22bd27ec0a 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -72,53 +72,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.MetricsReflection.Descriptor.Services[0]; }
}
- /// <summary>Client for MetricsService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IMetricsServiceClient
- {
- /// <summary>
- /// Returns the values of all the gauges that are currently being maintained by
- /// the service
- /// </summary>
- AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Returns the values of all the gauges that are currently being maintained by
- /// the service
- /// </summary>
- AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, CallOptions options);
- /// <summary>
- /// Returns the value of one gauge
- /// </summary>
- global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Returns the value of one gauge
- /// </summary>
- global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, CallOptions options);
- /// <summary>
- /// Returns the value of one gauge
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Returns the value of one gauge
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of MetricsService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IMetricsService
- {
- /// <summary>
- /// Returns the values of all the gauges that are currently being maintained by
- /// the service
- /// </summary>
- global::System.Threading.Tasks.Task GetAllGauges(global::Grpc.Testing.EmptyMessage request, IServerStreamWriter<global::Grpc.Testing.GaugeResponse> responseStream, ServerCallContext context);
- /// <summary>
- /// Returns the value of one gauge
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.GaugeResponse> GetGauge(global::Grpc.Testing.GaugeRequest request, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of MetricsService</summary>
public abstract class MetricsServiceBase
{
@@ -142,9 +95,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for MetricsService</summary>
- #pragma warning disable 0618
- public class MetricsServiceClient : ClientBase<MetricsServiceClient>, IMetricsServiceClient
- #pragma warning restore 0618
+ public class MetricsServiceClient : ClientBase<MetricsServiceClient>
{
public MetricsServiceClient(Channel channel) : base(channel)
{
@@ -218,21 +169,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IMetricsService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_GetAllGauges, serviceImpl.GetAllGauges)
- .AddMethod(__Method_GetGauge, serviceImpl.GetGauge).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(MetricsServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_GetAllGauges, serviceImpl.GetAllGauges)
.AddMethod(__Method_GetGauge, serviceImpl.GetGauge).Build();
}
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index f7071ebf6b..9c99296115 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -67,58 +67,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.ServicesReflection.Descriptor.Services[0]; }
}
- /// <summary>Client for BenchmarkService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IBenchmarkServiceClient
- {
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options);
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options);
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of BenchmarkService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IBenchmarkService
- {
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context);
- /// <summary>
- /// One request followed by one response.
- /// The server returns the client payload as-is.
- /// </summary>
- global::System.Threading.Tasks.Task StreamingCall(IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of BenchmarkService</summary>
public abstract class BenchmarkServiceBase
{
@@ -143,9 +91,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for BenchmarkService</summary>
- #pragma warning disable 0618
- public class BenchmarkServiceClient : ClientBase<BenchmarkServiceClient>, IBenchmarkServiceClient
- #pragma warning restore 0618
+ public class BenchmarkServiceClient : ClientBase<BenchmarkServiceClient>
{
public BenchmarkServiceClient(Channel channel) : base(channel)
{
@@ -223,21 +169,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IBenchmarkService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
- .AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
.AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall).Build();
}
@@ -289,112 +223,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.ServicesReflection.Descriptor.Services[1]; }
}
- /// <summary>Client for WorkerService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IWorkerServiceClient
- {
- /// <summary>
- /// Start server with specified workload.
- /// First request sent specifies the ServerConfig followed by ServerStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test server
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Start server with specified workload.
- /// First request sent specifies the ServerConfig followed by ServerStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test server
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(CallOptions options);
- /// <summary>
- /// Start client with specified workload.
- /// First request sent specifies the ClientConfig followed by ClientStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test client
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Start client with specified workload.
- /// First request sent specifies the ClientConfig followed by ClientStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test client
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(CallOptions options);
- /// <summary>
- /// Just return the core count - unary call
- /// </summary>
- global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Just return the core count - unary call
- /// </summary>
- global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, CallOptions options);
- /// <summary>
- /// Just return the core count - unary call
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Just return the core count - unary call
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, CallOptions options);
- /// <summary>
- /// Quit this worker
- /// </summary>
- global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Quit this worker
- /// </summary>
- global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, CallOptions options);
- /// <summary>
- /// Quit this worker
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// Quit this worker
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of WorkerService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IWorkerService
- {
- /// <summary>
- /// Start server with specified workload.
- /// First request sent specifies the ServerConfig followed by ServerStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test server
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- global::System.Threading.Tasks.Task RunServer(IAsyncStreamReader<global::Grpc.Testing.ServerArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ServerStatus> responseStream, ServerCallContext context);
- /// <summary>
- /// Start client with specified workload.
- /// First request sent specifies the ClientConfig followed by ClientStatus
- /// response. After that, a "Mark" can be sent anytime to request the latest
- /// stats. Closing the stream will initiate shutdown of the test client
- /// and once the shutdown has finished, the OK status is sent to terminate
- /// this RPC.
- /// </summary>
- global::System.Threading.Tasks.Task RunClient(IAsyncStreamReader<global::Grpc.Testing.ClientArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ClientStatus> responseStream, ServerCallContext context);
- /// <summary>
- /// Just return the core count - unary call
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.CoreResponse> CoreCount(global::Grpc.Testing.CoreRequest request, ServerCallContext context);
- /// <summary>
- /// Quit this worker
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> QuitWorker(global::Grpc.Testing.Void request, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of WorkerService</summary>
public abstract class WorkerServiceBase
{
@@ -443,9 +271,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for WorkerService</summary>
- #pragma warning disable 0618
- public class WorkerServiceClient : ClientBase<WorkerServiceClient>, IWorkerServiceClient
- #pragma warning restore 0618
+ public class WorkerServiceClient : ClientBase<WorkerServiceClient>
{
public WorkerServiceClient(Channel channel) : base(channel)
{
@@ -579,23 +405,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IWorkerService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_RunServer, serviceImpl.RunServer)
- .AddMethod(__Method_RunClient, serviceImpl.RunClient)
- .AddMethod(__Method_CoreCount, serviceImpl.CoreCount)
- .AddMethod(__Method_QuitWorker, serviceImpl.QuitWorker).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(WorkerServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_RunServer, serviceImpl.RunServer)
.AddMethod(__Method_RunClient, serviceImpl.RunClient)
.AddMethod(__Method_CoreCount, serviceImpl.CoreCount)
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index cf43a77118..6c252013f8 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -105,127 +105,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.TestReflection.Descriptor.Services[0]; }
}
- /// <summary>Client for TestService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface ITestServiceClient
- {
- /// <summary>
- /// One empty request followed by one empty response.
- /// </summary>
- global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One empty request followed by one empty response.
- /// </summary>
- global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, CallOptions options);
- /// <summary>
- /// One empty request followed by one empty response.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One empty request followed by one empty response.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, CallOptions options);
- /// <summary>
- /// One request followed by one response.
- /// </summary>
- global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by one response.
- /// </summary>
- global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options);
- /// <summary>
- /// One request followed by one response.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by one response.
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options);
- /// <summary>
- /// One request followed by a sequence of responses (streamed download).
- /// The server returns the payload with client desired type and sizes.
- /// </summary>
- AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// One request followed by a sequence of responses (streamed download).
- /// The server returns the payload with client desired type and sizes.
- /// </summary>
- AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, CallOptions options);
- /// <summary>
- /// A sequence of requests followed by one response (streamed upload).
- /// The server returns the aggregated size of client payload as the result.
- /// </summary>
- AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// A sequence of requests followed by one response (streamed upload).
- /// The server returns the aggregated size of client payload as the result.
- /// </summary>
- AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(CallOptions options);
- /// <summary>
- /// A sequence of requests with each request served by the server immediately.
- /// As one request could lead to multiple responses, this interface
- /// demonstrates the idea of full duplexing.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// A sequence of requests with each request served by the server immediately.
- /// As one request could lead to multiple responses, this interface
- /// demonstrates the idea of full duplexing.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(CallOptions options);
- /// <summary>
- /// A sequence of requests followed by a sequence of responses.
- /// The server buffers all the client requests and then serves them in order. A
- /// stream of responses are returned to the client when the server starts with
- /// first request.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// A sequence of requests followed by a sequence of responses.
- /// The server buffers all the client requests and then serves them in order. A
- /// stream of responses are returned to the client when the server starts with
- /// first request.
- /// </summary>
- AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of TestService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface ITestService
- {
- /// <summary>
- /// One empty request followed by one empty response.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> EmptyCall(global::Grpc.Testing.Empty request, ServerCallContext context);
- /// <summary>
- /// One request followed by one response.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context);
- /// <summary>
- /// One request followed by a sequence of responses (streamed download).
- /// The server returns the payload with client desired type and sizes.
- /// </summary>
- global::System.Threading.Tasks.Task StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context);
- /// <summary>
- /// A sequence of requests followed by one response (streamed upload).
- /// The server returns the aggregated size of client payload as the result.
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(IAsyncStreamReader<global::Grpc.Testing.StreamingInputCallRequest> requestStream, ServerCallContext context);
- /// <summary>
- /// A sequence of requests with each request served by the server immediately.
- /// As one request could lead to multiple responses, this interface
- /// demonstrates the idea of full duplexing.
- /// </summary>
- global::System.Threading.Tasks.Task FullDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context);
- /// <summary>
- /// A sequence of requests followed by a sequence of responses.
- /// The server buffers all the client requests and then serves them in order. A
- /// stream of responses are returned to the client when the server starts with
- /// first request.
- /// </summary>
- global::System.Threading.Tasks.Task HalfDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of TestService</summary>
public abstract class TestServiceBase
{
@@ -287,9 +166,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for TestService</summary>
- #pragma warning disable 0618
- public class TestServiceClient : ClientBase<TestServiceClient>, ITestServiceClient
- #pragma warning restore 0618
+ public class TestServiceClient : ClientBase<TestServiceClient>
{
public TestServiceClient(Channel channel) : base(channel)
{
@@ -445,25 +322,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(ITestService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_EmptyCall, serviceImpl.EmptyCall)
- .AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
- .AddMethod(__Method_StreamingOutputCall, serviceImpl.StreamingOutputCall)
- .AddMethod(__Method_StreamingInputCall, serviceImpl.StreamingInputCall)
- .AddMethod(__Method_FullDuplexCall, serviceImpl.FullDuplexCall)
- .AddMethod(__Method_HalfDuplexCall, serviceImpl.HalfDuplexCall).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(TestServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_EmptyCall, serviceImpl.EmptyCall)
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
.AddMethod(__Method_StreamingOutputCall, serviceImpl.StreamingOutputCall)
@@ -496,38 +357,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.TestReflection.Descriptor.Services[1]; }
}
- /// <summary>Client for UnimplementedService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IUnimplementedServiceClient
- {
- /// <summary>
- /// A call that no server should implement
- /// </summary>
- global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// A call that no server should implement
- /// </summary>
- global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options);
- /// <summary>
- /// A call that no server should implement
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- /// <summary>
- /// A call that no server should implement
- /// </summary>
- AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of UnimplementedService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IUnimplementedService
- {
- /// <summary>
- /// A call that no server should implement
- /// </summary>
- global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of UnimplementedService</summary>
public abstract class UnimplementedServiceBase
{
@@ -542,9 +371,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for UnimplementedService</summary>
- #pragma warning disable 0618
- public class UnimplementedServiceClient : ClientBase<UnimplementedServiceClient>, IUnimplementedServiceClient
- #pragma warning restore 0618
+ public class UnimplementedServiceClient : ClientBase<UnimplementedServiceClient>
{
public UnimplementedServiceClient(Channel channel) : base(channel)
{
@@ -602,20 +429,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IUnimplementedService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
}
@@ -651,28 +467,6 @@ namespace Grpc.Testing {
get { return global::Grpc.Testing.TestReflection.Descriptor.Services[2]; }
}
- /// <summary>Client for ReconnectService</summary>
- [System.Obsolete("Client side interfaced will be removed in the next release. Use client class directly.")]
- public interface IReconnectServiceClient
- {
- global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, CallOptions options);
- AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, CallOptions options);
- global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, CallOptions options);
- AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken));
- AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, CallOptions options);
- }
-
- /// <summary>Interface of server-side implementations of ReconnectService</summary>
- [System.Obsolete("Service implementations should inherit from the generated abstract base class instead.")]
- public interface IReconnectService
- {
- global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> Start(global::Grpc.Testing.ReconnectParams request, ServerCallContext context);
- global::System.Threading.Tasks.Task<global::Grpc.Testing.ReconnectInfo> Stop(global::Grpc.Testing.Empty request, ServerCallContext context);
- }
-
/// <summary>Base class for server-side implementations of ReconnectService</summary>
public abstract class ReconnectServiceBase
{
@@ -689,9 +483,7 @@ namespace Grpc.Testing {
}
/// <summary>Client for ReconnectService</summary>
- #pragma warning disable 0618
- public class ReconnectServiceClient : ClientBase<ReconnectServiceClient>, IReconnectServiceClient
- #pragma warning restore 0618
+ public class ReconnectServiceClient : ClientBase<ReconnectServiceClient>
{
public ReconnectServiceClient(Channel channel) : base(channel)
{
@@ -753,21 +545,9 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
- public static ServerServiceDefinition BindService(IReconnectService serviceImpl)
- #pragma warning restore 0618
- {
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
- .AddMethod(__Method_Start, serviceImpl.Start)
- .AddMethod(__Method_Stop, serviceImpl.Stop).Build();
- }
-
- /// <summary>Creates service definition that can be registered with a server</summary>
- #pragma warning disable 0618
public static ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl)
- #pragma warning restore 0618
{
- return ServerServiceDefinition.CreateBuilder(__ServiceName)
+ return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Start, serviceImpl.Start)
.AddMethod(__Method_Stop, serviceImpl.Stop).Build();
}
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index 5b8ff9b819..4782654250 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -45,7 +45,7 @@
#include <string.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#define GPR_EXPORT __declspec(dllexport)
#define GPR_CALLTYPE __stdcall
#endif
@@ -503,6 +503,7 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata, uint32_t write_flags) {
/* TODO: don't use magic number */
grpc_op ops[6];
+ memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@@ -555,6 +556,7 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[4];
+ memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@@ -596,6 +598,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
size_t send_buffer_len, grpc_metadata_array *initial_metadata, uint32_t write_flags) {
/* TODO: don't use magic number */
grpc_op ops[4];
+ memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@@ -638,6 +641,7 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[2];
+ memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@@ -684,6 +688,7 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
int32_t send_empty_initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[2];
+ memset(ops, 0, sizeof(ops));
size_t nops = send_empty_initial_metadata ? 2 : 1;
ops[0].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
@@ -691,8 +696,6 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
ops[0].flags = write_flags;
ops[0].reserved = NULL;
ops[1].op = GRPC_OP_SEND_INITIAL_METADATA;
- ops[1].data.send_initial_metadata.count = 0;
- ops[1].data.send_initial_metadata.metadata = NULL;
ops[1].flags = 0;
ops[1].reserved = NULL;
@@ -719,6 +722,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
size_t optional_send_buffer_len, uint32_t write_flags) {
/* TODO: don't use magic number */
grpc_op ops[3];
+ memset(ops, 0, sizeof(ops));
size_t nops = 1;
ops[0].op = GRPC_OP_SEND_STATUS_FROM_SERVER;
ops[0].data.send_status_from_server.status = status_code;
@@ -743,8 +747,6 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
}
if (send_empty_initial_metadata) {
ops[nops].op = GRPC_OP_SEND_INITIAL_METADATA;
- ops[nops].data.send_initial_metadata.count = 0;
- ops[nops].data.send_initial_metadata.metadata = NULL;
ops[nops].flags = 0;
ops[nops].reserved = NULL;
nops++;
@@ -784,6 +786,7 @@ grpcsharp_call_send_initial_metadata(grpc_call *call,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[1];
+ memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@@ -806,11 +809,14 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_set_credentials(
/* Server */
GPR_EXPORT grpc_server *GPR_CALLTYPE
-grpcsharp_server_create(grpc_completion_queue *cq,
- const grpc_channel_args *args) {
- grpc_server *server = grpc_server_create(args, NULL);
+grpcsharp_server_create(const grpc_channel_args *args) {
+ return grpc_server_create(args, NULL);
+}
+
+GPR_EXPORT void GPR_CALLTYPE
+grpcsharp_server_register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq) {
grpc_server_register_completion_queue(server, cq, NULL);
- return server;
}
GPR_EXPORT int32_t GPR_CALLTYPE
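Two themes run through the grpc_csharp_ext.c hunks above: every grpc_op array is now zeroed with memset before individual fields are filled in (which is why the explicit send_initial_metadata.count/metadata assignments disappear), and server creation is split from completion-queue registration. The managed wrapper therefore has to make two native calls where it previously made one. Below is a hedged sketch of the corresponding P/Invoke bindings; the IntPtr signatures and the NativeMethods class name are illustrative assumptions, not the actual Grpc.Core.Internal declarations.

using System;
using System.Runtime.InteropServices;

static class NativeMethods
{
    // Creates the server from channel args only; no completion queue is registered here.
    [DllImport("grpc_csharp_ext")]
    internal static extern IntPtr grpcsharp_server_create(IntPtr channelArgs);

    // Registers a completion queue on an already-created server.
    [DllImport("grpc_csharp_ext")]
    internal static extern void grpcsharp_server_register_completion_queue(
        IntPtr server, IntPtr completionQueue);
}

// Call sequence on the managed side (handles shown as raw IntPtrs for brevity):
//   IntPtr server = NativeMethods.grpcsharp_server_create(channelArgs);
//   NativeMethods.grpcsharp_server_register_completion_queue(server, cq);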
diff --git a/src/csharp/tests.json b/src/csharp/tests.json
index f6af3408d5..7e7aee1093 100644
--- a/src/csharp/tests.json
+++ b/src/csharp/tests.json
@@ -7,6 +7,7 @@
"Grpc.Core.Internal.Tests.CompletionQueueSafeHandleTest",
"Grpc.Core.Internal.Tests.MetadataArraySafeHandleTest",
"Grpc.Core.Internal.Tests.TimespecTest",
+ "Grpc.Core.Tests.AppDomainUnloadTest",
"Grpc.Core.Tests.CallCredentialsTest",
"Grpc.Core.Tests.CallOptionsTest",
"Grpc.Core.Tests.ChannelCredentialsTest",
@@ -25,6 +26,9 @@
"Grpc.Core.Tests.ResponseHeadersTest",
"Grpc.Core.Tests.SanityTest",
"Grpc.Core.Tests.ServerTest",
+ "Grpc.Core.Tests.ShutdownHookClientTest",
+ "Grpc.Core.Tests.ShutdownHookPendingCallTest",
+ "Grpc.Core.Tests.ShutdownHookServerTest",
"Grpc.Core.Tests.ShutdownTest",
"Grpc.Core.Tests.TimeoutsTest",
"Grpc.Core.Tests.UserAgentStringTest"
diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index 8e0b6916e9..3479a67702 100644
--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -72,17 +72,13 @@ Local<Value> ByteBufferToBuffer(grpc_byte_buffer *buffer) {
if (buffer == NULL) {
return scope.Escape(Nan::Null());
}
- size_t length = grpc_byte_buffer_length(buffer);
- char *result = new char[length];
- size_t offset = 0;
grpc_byte_buffer_reader reader;
grpc_byte_buffer_reader_init(&reader, buffer);
- gpr_slice next;
- while (grpc_byte_buffer_reader_next(&reader, &next) != 0) {
- memcpy(result + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next));
- offset += GPR_SLICE_LENGTH(next);
- gpr_slice_unref(next);
- }
+ gpr_slice slice = grpc_byte_buffer_reader_readall(&reader);
+ size_t length = GPR_SLICE_LENGTH(slice);
+ char *result = new char[length];
+ memcpy(result, GPR_SLICE_START_PTR(slice), length);
+ gpr_slice_unref(slice);
return scope.Escape(MakeFastBuffer(
Nan::NewBuffer(result, length, delete_buffer, NULL).ToLocalChecked()));
}
diff --git a/src/node/ext/node_grpc.cc b/src/node/ext/node_grpc.cc
index 6b6e42737b..f18ce01c6f 100644
--- a/src/node/ext/node_grpc.cc
+++ b/src/node/ext/node_grpc.cc
@@ -220,7 +220,7 @@ void InitConnectivityStateConstants(Local<Object> exports) {
Nan::Set(channel_state, Nan::New("TRANSIENT_FAILURE").ToLocalChecked(),
TRANSIENT_FAILURE);
Local<Value> FATAL_FAILURE(
- Nan::New<Uint32, uint32_t>(GRPC_CHANNEL_FATAL_FAILURE));
+ Nan::New<Uint32, uint32_t>(GRPC_CHANNEL_SHUTDOWN));
Nan::Set(channel_state, Nan::New("FATAL_FAILURE").ToLocalChecked(),
FATAL_FAILURE);
}
diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc
index b9e1fe9160..dd1b777ac8 100644
--- a/src/node/ext/server.cc
+++ b/src/node/ext/server.cc
@@ -35,15 +35,15 @@
#include "server.h"
-#include <node.h>
#include <nan.h>
+#include <node.h>
#include <vector>
+#include "call.h"
+#include "completion_queue_async_worker.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
#include "grpc/support/log.h"
-#include "call.h"
-#include "completion_queue_async_worker.h"
#include "server_credentials.h"
#include "timeval.h"
@@ -100,8 +100,8 @@ class NewCallOp : public Op {
Nan::Set(obj, Nan::New("host").ToLocalChecked(),
Nan::New(details.host).ToLocalChecked());
Nan::Set(obj, Nan::New("deadline").ToLocalChecked(),
- Nan::New<Date>(
- TimespecToMilliseconds(details.deadline)).ToLocalChecked());
+ Nan::New<Date>(TimespecToMilliseconds(details.deadline))
+ .ToLocalChecked());
Nan::Set(obj, Nan::New("metadata").ToLocalChecked(),
ParseMetadata(&request_metadata));
return scope.Escape(obj);
@@ -117,14 +117,13 @@ class NewCallOp : public Op {
grpc_metadata_array request_metadata;
protected:
- std::string GetTypeString() const {
- return "new_call";
- }
+ std::string GetTypeString() const { return "new_call"; }
};
Server::Server(grpc_server *server) : wrapped_server(server) {
shutdown_queue = grpc_completion_queue_create(NULL);
- grpc_server_register_completion_queue(server, shutdown_queue, NULL);
+ grpc_server_register_non_listening_completion_queue(server, shutdown_queue,
+ NULL);
}
Server::~Server() {
@@ -156,8 +155,7 @@ bool Server::HasInstance(Local<Value> val) {
}
void Server::ShutdownServer() {
- grpc_server_shutdown_and_notify(this->wrapped_server,
- this->shutdown_queue,
+ grpc_server_shutdown_and_notify(this->wrapped_server, this->shutdown_queue,
NULL);
grpc_server_cancel_all_calls(this->wrapped_server);
grpc_completion_queue_pluck(this->shutdown_queue, NULL,
@@ -170,8 +168,8 @@ NAN_METHOD(Server::New) {
if (!info.IsConstructCall()) {
const int argc = 1;
Local<Value> argv[argc] = {info[0]};
- MaybeLocal<Object> maybe_instance = constructor->GetFunction()->NewInstance(
- argc, argv);
+ MaybeLocal<Object> maybe_instance =
+ constructor->GetFunction()->NewInstance(argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@@ -185,8 +183,9 @@ NAN_METHOD(Server::New) {
grpc_channel_args *channel_args;
if (!ParseChannelArgs(info[0], &channel_args)) {
DeallocateChannelArgs(channel_args);
- return Nan::ThrowTypeError("Server options must be an object with "
- "string keys and integer or string values");
+ return Nan::ThrowTypeError(
+ "Server options must be an object with "
+ "string keys and integer or string values");
}
wrapped_server = grpc_server_create(channel_args, NULL);
DeallocateChannelArgs(channel_args);
@@ -218,8 +217,7 @@ NAN_METHOD(Server::RequestCall) {
NAN_METHOD(Server::AddHttp2Port) {
if (!HasInstance(info.This())) {
- return Nan::ThrowTypeError(
- "addHttp2Port can only be called on a Server");
+ return Nan::ThrowTypeError("addHttp2Port can only be called on a Server");
}
if (!info[0]->IsString()) {
return Nan::ThrowTypeError(
@@ -239,8 +237,7 @@ NAN_METHOD(Server::AddHttp2Port) {
*Utf8String(info[0]));
} else {
port = grpc_server_add_secure_http2_port(server->wrapped_server,
- *Utf8String(info[0]),
- creds);
+ *Utf8String(info[0]), creds);
}
info.GetReturnValue().Set(Nan::New<Number>(port));
}
@@ -262,8 +259,7 @@ NAN_METHOD(Server::TryShutdown) {
Server *server = ObjectWrap::Unwrap<Server>(info.This());
unique_ptr<OpVec> ops(new OpVec());
grpc_server_shutdown_and_notify(
- server->wrapped_server,
- CompletionQueueAsyncWorker::GetQueue(),
+ server->wrapped_server, CompletionQueueAsyncWorker::GetQueue(),
new struct tag(new Nan::Callback(info[0].As<Function>()), ops.release(),
shared_ptr<Resources>(nullptr)));
CompletionQueueAsyncWorker::Next();
diff --git a/src/node/performance/benchmark_client.js b/src/node/performance/benchmark_client.js
index 262aa33862..5ef5260a96 100644
--- a/src/node/performance/benchmark_client.js
+++ b/src/node/performance/benchmark_client.js
@@ -42,6 +42,8 @@ var fs = require('fs');
var path = require('path');
var util = require('util');
var EventEmitter = require('events');
+
+var async = require('async');
var _ = require('lodash');
var PoissonProcess = require('poisson-process');
var Histogram = require('./histogram');
@@ -128,6 +130,36 @@ function BenchmarkClient(server_targets, channels, histogram_params,
util.inherits(BenchmarkClient, EventEmitter);
/**
+ * Start every client in the list of clients by waiting for each to be ready,
+ * then starting outstanding_rpcs_per_channel calls on each of them
+ * @param {Array<grpc.Client>} client_list The list of clients
+ * @param {Number} outstanding_rpcs_per_channel The number of calls to start
+ * on each client
+ * @param {function(grpc.Client)} makeCall Function to make a single call on
+ * a single client
+ * @param {EventEmitter} emitter The event emitter to send errors on, if
+ * necessary
+ */
+function startAllClients(client_list, outstanding_rpcs_per_channel, makeCall,
+ emitter) {
+ var ready_wait_funcs = _.map(client_list, function(client) {
+ return _.partial(grpc.waitForClientReady, client, Infinity);
+ });
+ async.parallel(ready_wait_funcs, function(err) {
+ if (err) {
+ emitter.emit('error', err);
+ return;
+ }
+
+ _.each(client_list, function(client) {
+ _.times(outstanding_rpcs_per_channel, function() {
+ makeCall(client);
+ });
+ });
+ });
+}
+
+/**
* Start a closed-loop test. For each channel, start
* outstanding_rpcs_per_channel RPCs. Then, whenever an RPC finishes, start
* another one.
@@ -212,11 +244,7 @@ BenchmarkClient.prototype.startClosedLoop = function(
};
}
- _.each(client_list, function(client) {
- _.times(outstanding_rpcs_per_channel, function() {
- makeCall(client);
- });
- });
+ startAllClients(client_list, outstanding_rpcs_per_channel, makeCall, self);
};
/**
@@ -310,14 +338,12 @@ BenchmarkClient.prototype.startPoisson = function(
var averageIntervalMs = (1 / offered_load) * 1000;
- _.each(client_list, function(client) {
- _.times(outstanding_rpcs_per_channel, function() {
- var p = PoissonProcess.create(averageIntervalMs, function() {
- makeCall(client, p);
- });
- p.start();
+ startAllClients(client_list, outstanding_rpcs_per_channel, function(client){
+ var p = PoissonProcess.create(averageIntervalMs, function() {
+ makeCall(client, p);
});
- });
+ p.start();
+ }, self);
};
/**
diff --git a/src/node/test/math/math_grpc_pb.js b/src/node/test/math/math_grpc_pb.js
index 083ed66913..17a4bf7243 100644
--- a/src/node/test/math/math_grpc_pb.js
+++ b/src/node/test/math/math_grpc_pb.js
@@ -1,94 +1,135 @@
// GENERATED CODE -- DO NOT EDIT!
+// Original file comments:
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
'use strict';
var grpc = require('grpc');
-var math_pb = require('./math_pb.js');
+var math_math_pb = require('../math/math_pb.js');
function serialize_DivArgs(arg) {
- if (!(arg instanceof math_pb.DivArgs)) {
+ if (!(arg instanceof math_math_pb.DivArgs)) {
throw new Error('Expected argument of type DivArgs');
}
return new Buffer(arg.serializeBinary());
}
function deserialize_DivArgs(buffer_arg) {
- return math_pb.DivArgs.deserializeBinary(new Uint8Array(buffer_arg));
+ return math_math_pb.DivArgs.deserializeBinary(new Uint8Array(buffer_arg));
}
function serialize_DivReply(arg) {
- if (!(arg instanceof math_pb.DivReply)) {
+ if (!(arg instanceof math_math_pb.DivReply)) {
throw new Error('Expected argument of type DivReply');
}
return new Buffer(arg.serializeBinary());
}
function deserialize_DivReply(buffer_arg) {
- return math_pb.DivReply.deserializeBinary(new Uint8Array(buffer_arg));
+ return math_math_pb.DivReply.deserializeBinary(new Uint8Array(buffer_arg));
}
function serialize_FibArgs(arg) {
- if (!(arg instanceof math_pb.FibArgs)) {
+ if (!(arg instanceof math_math_pb.FibArgs)) {
throw new Error('Expected argument of type FibArgs');
}
return new Buffer(arg.serializeBinary());
}
function deserialize_FibArgs(buffer_arg) {
- return math_pb.FibArgs.deserializeBinary(new Uint8Array(buffer_arg));
+ return math_math_pb.FibArgs.deserializeBinary(new Uint8Array(buffer_arg));
}
function serialize_Num(arg) {
- if (!(arg instanceof math_pb.Num)) {
+ if (!(arg instanceof math_math_pb.Num)) {
throw new Error('Expected argument of type Num');
}
return new Buffer(arg.serializeBinary());
}
function deserialize_Num(buffer_arg) {
- return math_pb.Num.deserializeBinary(new Uint8Array(buffer_arg));
+ return math_math_pb.Num.deserializeBinary(new Uint8Array(buffer_arg));
}
var MathService = exports.MathService = {
+ // Div divides args.dividend by args.divisor and returns the quotient and
+ // remainder.
div: {
path: '/math.Math/Div',
requestStream: false,
responseStream: false,
- requestType: math_pb.DivArgs,
- responseType: math_pb.DivReply,
+ requestType: math_math_pb.DivArgs,
+ responseType: math_math_pb.DivReply,
requestSerialize: serialize_DivArgs,
requestDeserialize: deserialize_DivArgs,
responseSerialize: serialize_DivReply,
responseDeserialize: deserialize_DivReply,
},
+ // DivMany accepts an arbitrary number of division args from the client stream
+ // and sends back the results in the reply stream. The stream continues until
+ // the client closes its end; the server does the same after sending all the
+ // replies. The stream ends immediately if either end aborts.
divMany: {
path: '/math.Math/DivMany',
requestStream: true,
responseStream: true,
- requestType: math_pb.DivArgs,
- responseType: math_pb.DivReply,
+ requestType: math_math_pb.DivArgs,
+ responseType: math_math_pb.DivReply,
requestSerialize: serialize_DivArgs,
requestDeserialize: deserialize_DivArgs,
responseSerialize: serialize_DivReply,
responseDeserialize: deserialize_DivReply,
},
+ // Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ // generates up to limit numbers; otherwise it continues until the call is
+ // canceled. Unlike Fib above, Fib has no final FibReply.
fib: {
path: '/math.Math/Fib',
requestStream: false,
responseStream: true,
- requestType: math_pb.FibArgs,
- responseType: math_pb.Num,
+ requestType: math_math_pb.FibArgs,
+ responseType: math_math_pb.Num,
requestSerialize: serialize_FibArgs,
requestDeserialize: deserialize_FibArgs,
responseSerialize: serialize_Num,
responseDeserialize: deserialize_Num,
},
+ // Sum sums a stream of numbers, returning the final result once the stream
+ // is closed.
sum: {
path: '/math.Math/Sum',
requestStream: true,
responseStream: false,
- requestType: math_pb.Num,
- responseType: math_pb.Num,
+ requestType: math_math_pb.Num,
+ responseType: math_math_pb.Num,
requestSerialize: serialize_Num,
requestDeserialize: deserialize_Num,
responseSerialize: serialize_Num,
diff --git a/src/node/test/math/math_pb.js b/src/node/test/math/math_pb.js
index 3489143bec..ccc05c6e06 100644
--- a/src/node/test/math/math_pb.js
+++ b/src/node/test/math/math_pb.js
@@ -65,7 +65,7 @@ proto.math.DivArgs.toObject = function(includeInstance, msg) {
};
if (includeInstance) {
- obj.$jspbMessageInstance = msg
+ obj.$jspbMessageInstance = msg;
}
return obj;
};
@@ -251,7 +251,7 @@ proto.math.DivReply.toObject = function(includeInstance, msg) {
};
if (includeInstance) {
- obj.$jspbMessageInstance = msg
+ obj.$jspbMessageInstance = msg;
}
return obj;
};
@@ -436,7 +436,7 @@ proto.math.FibArgs.toObject = function(includeInstance, msg) {
};
if (includeInstance) {
- obj.$jspbMessageInstance = msg
+ obj.$jspbMessageInstance = msg;
}
return obj;
};
@@ -595,7 +595,7 @@ proto.math.Num.toObject = function(includeInstance, msg) {
};
if (includeInstance) {
- obj.$jspbMessageInstance = msg
+ obj.$jspbMessageInstance = msg;
}
return obj;
};
@@ -754,7 +754,7 @@ proto.math.FibReply.toObject = function(includeInstance, msg) {
};
if (includeInstance) {
- obj.$jspbMessageInstance = msg
+ obj.$jspbMessageInstance = msg;
}
return obj;
};
diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec
new file mode 100644
index 0000000000..20af7647f7
--- /dev/null
+++ b/src/objective-c/CronetFramework.podspec
@@ -0,0 +1,43 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Pod::Spec.new do |s|
+ s.name = "CronetFramework"
+ s.version = "0.0.2"
+ s.summary = "Cronet, precompiled and used as a framework."
+ s.homepage = "http://chromium.org"
+ s.license = { :type => 'BSD' }
+ s.vendored_framework = "Cronet.framework"
+ s.author = "The Chromium Authors"
+ s.ios.deployment_target = "8.0"
+ s.source = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' }
+ s.preserve_paths = "Cronet.framework"
+ s.public_header_files = "Cronet.framework/Headers/**/*{.h}"
+end
diff --git a/src/objective-c/GRPCClient/GRPCCall+Cronet.h b/src/objective-c/GRPCClient/GRPCCall+Cronet.h
new file mode 100644
index 0000000000..2d8f7ac8fb
--- /dev/null
+++ b/src/objective-c/GRPCClient/GRPCCall+Cronet.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifdef GRPC_COMPILE_WITH_CRONET
+#import <Cronet/Cronet.h>
+
+#import "GRPCCall.h"
+
+/**
+ * Methods for using cronet transport.
+ */
+@interface GRPCCall (Cronet)
+
+/**
+ * This method should be called before issuing the first RPC, and only once.
+ * Create an instance of the Cronet engine elsewhere in your app and pass the
+ * instance pointer in the engine parameter. Once set, all subsequent RPCs
+ * will use the Cronet transport. This method is not thread safe.
+ */
++(void)useCronetWithEngine:(cronet_engine *)engine;
+
++(cronet_engine *)cronetEngine;
+
++(BOOL)isUsingCronet;
+
+@end
+#endif
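A minimal usage sketch for this category, mirroring the Cronet setup added to InteropTests.m later in this change; it assumes the app is built with GRPC_COMPILE_WITH_CRONET defined and that the Cronet SDK exposes the setHttp2Enabled:, start, and getGlobalEngine calls used in that test. The helper name below is hypothetical.

  #import <Cronet/Cronet.h>
  #import <GRPCClient/GRPCCall+Cronet.h>

  // Call once, early during app startup, before the first RPC is issued.
  static void EnableCronetTransport(void) {
    [Cronet setHttp2Enabled:YES];           // gRPC requires HTTP/2.
    [Cronet start];                         // Starts the global Cronet engine.
    cronet_engine *engine = [Cronet getGlobalEngine];
    [GRPCCall useCronetWithEngine:engine];  // All later RPCs use the Cronet transport.
  }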
diff --git a/src/objective-c/GRPCClient/GRPCCall+Cronet.m b/src/objective-c/GRPCClient/GRPCCall+Cronet.m
new file mode 100644
index 0000000000..76ca1a2537
--- /dev/null
+++ b/src/objective-c/GRPCClient/GRPCCall+Cronet.m
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#import "GRPCCall+Cronet.h"
+
+#ifdef GRPC_COMPILE_WITH_CRONET
+static BOOL useCronet = NO;
+static cronet_engine *globalCronetEngine;
+
+@implementation GRPCCall (Cronet)
+
++ (void)useCronetWithEngine:(cronet_engine *)engine {
+ useCronet = YES;
+ globalCronetEngine = engine;
+}
+
++ (cronet_engine *)cronetEngine {
+ return globalCronetEngine;
+}
+
++ (BOOL)isUsingCronet {
+ return useCronet;
+}
+
+@end
+#endif
diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h
index 7a77ae60b6..b9e741dfa8 100644
--- a/src/objective-c/GRPCClient/GRPCCall.h
+++ b/src/objective-c/GRPCClient/GRPCCall.h
@@ -220,6 +220,8 @@ extern id const kGRPCTrailersKey;
* messages to the response side of the call indefinitely (depending on the semantics of the
* specific remote method called).
* To finish a call right away, invoke cancel.
+ * The host parameter should not contain the scheme (http:// or https://), only the host name or
+ * IP address and the port number, for example @"localhost:5050".
*/
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
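A hedged sketch of the host format described above. The requestsWriter: parameter is assumed from the designated initializer that this hunk truncates, and nil stands in for a real GRXWriter of request messages.

  // The host is a name or IP address plus a port; the scheme must be omitted.
  GRPCCall *call = [[GRPCCall alloc] initWithHost:@"localhost:5050"
                                             path:@"/math.Math/Div"
                                   requestsWriter:nil /* a GRXWriter of requests in real code */];
  // Passing @"https://localhost:5050" as the host would be incorrect.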
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index 0eb10656dd..e9678f38a9 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -76,7 +76,6 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
NSString *_host;
NSString *_path;
GRPCWrappedCall *_wrappedCall;
- dispatch_once_t _callAlreadyInvoked;
GRPCConnectivityMonitor *_connectivityMonitor;
// The C gRPC library has less guarantees on the ordering of events than we
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.h b/src/objective-c/GRPCClient/private/GRPCChannel.h
index 70d1a9bd2f..40e78a92d6 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.h
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.h
@@ -56,6 +56,13 @@ struct grpc_channel_credentials;
+ (nullable GRPCChannel *)secureChannelWithHost:(nonnull NSString *)host;
/**
+ * Creates a secure channel to the specified @c host using Cronet as a transport mechanism.
+ */
+#ifdef GRPC_COMPILE_WITH_CRONET
++ (nullable GRPCChannel *)secureCronetChannelWithHost:(NSString *)host
+ channelArgs:(NSDictionary *)channelArgs;
+#endif
+/**
* Creates a secure channel to the specified @c host using the specified @c credentials and
* @c channelArgs. Only in tests should @c GRPC_SSL_TARGET_NAME_OVERRIDE_ARG channel arg be set.
*/
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.m b/src/objective-c/GRPCClient/private/GRPCChannel.m
index 203ef58c0d..d3192c983d 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.m
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.m
@@ -34,10 +34,17 @@
#import "GRPCChannel.h"
#include <grpc/grpc_security.h>
+#ifdef GRPC_COMPILE_WITH_CRONET
+#include <grpc/grpc_cronet.h>
+#endif
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#ifdef GRPC_COMPILE_WITH_CRONET
+#import <Cronet/Cronet.h>
+#import <GRPCClient/GRPCCall+Cronet.h>
+#endif
#import "GRPCCompletionQueue.h"
void freeChannelArgs(grpc_channel_args *channel_args) {
@@ -99,6 +106,24 @@ grpc_channel_args * buildChannelArgs(NSDictionary *dictionary) {
grpc_channel_args *_channelArgs;
}
+#ifdef GRPC_COMPILE_WITH_CRONET
+- (instancetype)initWithHost:(NSString *)host
+ cronetEngine:(cronet_engine *)cronetEngine
+ channelArgs:(NSDictionary *)channelArgs {
+ if (!host) {
+ [NSException raise:NSInvalidArgumentException format:@"host argument missing"];
+ }
+
+ if (self = [super init]) {
+ _channelArgs = buildChannelArgs(channelArgs);
+ _host = [host copy];
+ _unmanagedChannel = grpc_cronet_secure_channel_create(cronetEngine, _host.UTF8String, _channelArgs,
+ NULL);
+ }
+
+ return self;
+}
+#endif
- (instancetype)initWithHost:(NSString *)host
secure:(BOOL)secure
@@ -133,6 +158,19 @@ grpc_channel_args * buildChannelArgs(NSDictionary *dictionary) {
freeChannelArgs(_channelArgs);
}
+#ifdef GRPC_COMPILE_WITH_CRONET
++ (GRPCChannel *)secureCronetChannelWithHost:(NSString *)host
+ channelArgs:(NSDictionary *)channelArgs {
+ cronet_engine *engine = [GRPCCall cronetEngine];
+ if (!engine) {
+ [NSException raise:NSInvalidArgumentException
+ format:@"cronet_engine is NULL. Set it first."];
+ return nil;
+ }
+ return [[GRPCChannel alloc] initWithHost:host cronetEngine:engine channelArgs:channelArgs];
+}
+#endif
+
+ (GRPCChannel *)secureChannelWithHost:(NSString *)host {
return [[GRPCChannel alloc] initWithHost:host secure:YES credentials:NULL channelArgs:NULL];
}
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index 43166cbb52..fef6385cea 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -36,7 +36,10 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#import <GRPCClient/GRPCCall.h>
+#ifdef GRPC_COMPILE_WITH_CRONET
#import <GRPCClient/GRPCCall+ChannelArg.h>
+#import <GRPCClient/GRPCCall+Cronet.h>
+#endif
#import "GRPCChannel.h"
#import "GRPCCompletionQueue.h"
@@ -200,15 +203,26 @@ NS_ASSUME_NONNULL_BEGIN
- (GRPCChannel *)newChannel {
NSDictionary *args = [self channelArgs];
+#ifdef GRPC_COMPILE_WITH_CRONET
+ BOOL useCronet = [GRPCCall isUsingCronet];
+#endif
if (_secure) {
GRPCChannel *channel;
@synchronized(self) {
if (_channelCreds == nil) {
[self setTLSPEMRootCerts:nil withPrivateKey:nil withCertChain:nil error:nil];
}
- channel = [GRPCChannel secureChannelWithHost:_address
- credentials:_channelCreds
- channelArgs:args];
+#ifdef GRPC_COMPILE_WITH_CRONET
+ if (useCronet) {
+ channel = [GRPCChannel secureCronetChannelWithHost:_address
+ channelArgs:args];
+ } else
+#endif
+ {
+ channel = [GRPCChannel secureChannelWithHost:_address
+ credentials:_channelCreds
+ channelArgs:args];
+ }
}
return channel;
} else {
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index 16e5bff7ff..a3fa5938cd 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -72,6 +72,8 @@
_op.op = GRPC_OP_SEND_INITIAL_METADATA;
_op.data.send_initial_metadata.count = metadata.count;
_op.data.send_initial_metadata.metadata = metadata.grpc_metadataArray;
+ _op.data.send_initial_metadata.maybe_compression_level.is_set = false;
+ _op.data.send_initial_metadata.maybe_compression_level.level = 0;
_handler = handler;
}
return self;
diff --git a/src/objective-c/ProtoRPC/ProtoMethod.h b/src/objective-c/ProtoRPC/ProtoMethod.h
index a0ed2cf98a..f9fdbb35ff 100644
--- a/src/objective-c/ProtoRPC/ProtoMethod.h
+++ b/src/objective-c/ProtoRPC/ProtoMethod.h
@@ -37,6 +37,7 @@
* A fully-qualified proto service method name. Full qualification is needed because a gRPC endpoint
* can implement multiple services.
*/
+__attribute__((deprecated("Please use GRPCProtoMethod.")))
@interface ProtoMethod : NSObject
@property(nonatomic, readonly) NSString *package;
@property(nonatomic, readonly) NSString *service;
@@ -48,3 +49,11 @@
service:(NSString *)service
method:(NSString *)method;
@end
+
+/**
+ * This subclass is currently empty. Eventually the ProtoMethod class will be
+ * removed to avoid a potential naming conflict.
+ */
+@interface GRPCProtoMethod : ProtoMethod
+
+@end
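A small migration sketch for the deprecation above, assuming the initWithPackage:service:method: initializer (only its service:/method: tail is visible in this hunk):

  // Before (now triggers a deprecation warning):
  // ProtoMethod *method = [[ProtoMethod alloc] initWithPackage:@"math"
  //                                                    service:@"Math"
  //                                                     method:@"Div"];

  // After: identical behavior, since GRPCProtoMethod is for now an empty subclass.
  GRPCProtoMethod *method = [[GRPCProtoMethod alloc] initWithPackage:@"math"
                                                             service:@"Math"
                                                              method:@"Div"];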
diff --git a/src/objective-c/ProtoRPC/ProtoMethod.m b/src/objective-c/ProtoRPC/ProtoMethod.m
index 4b7ed63123..e9978f38af 100644
--- a/src/objective-c/ProtoRPC/ProtoMethod.m
+++ b/src/objective-c/ProtoRPC/ProtoMethod.m
@@ -53,3 +53,7 @@
}
}
@end
+
+@implementation GRPCProtoMethod
+
+@end
diff --git a/src/objective-c/ProtoRPC/ProtoRPC.h b/src/objective-c/ProtoRPC/ProtoRPC.h
index bd926b7328..5f91f6bce1 100644
--- a/src/objective-c/ProtoRPC/ProtoRPC.h
+++ b/src/objective-c/ProtoRPC/ProtoRPC.h
@@ -36,13 +36,26 @@
#import "ProtoMethod.h"
+__attribute__((deprecated("Please use GRPCProtoCall.")))
@interface ProtoRPC : GRPCCall
+/**
+ * The host parameter should not contain the scheme (http:// or https://), only the host name or
+ * IP address and the port number, for example @"localhost:5050".
+ */
- (instancetype)initWithHost:(NSString *)host
- method:(ProtoMethod *)method
+ method:(GRPCProtoMethod *)method
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable NS_DESIGNATED_INITIALIZER;
- (void)start;
@end
+
+/**
+ * This subclass is currently empty. Eventually the ProtoRPC class will be
+ * removed to avoid a potential naming conflict.
+ */
+@interface GRPCProtoCall : ProtoRPC
+
+@end
diff --git a/src/objective-c/ProtoRPC/ProtoRPC.m b/src/objective-c/ProtoRPC/ProtoRPC.m
index 9bf66f347a..fb0b566f19 100644
--- a/src/objective-c/ProtoRPC/ProtoRPC.m
+++ b/src/objective-c/ProtoRPC/ProtoRPC.m
@@ -70,7 +70,7 @@ static NSError *ErrorForBadProto(id proto, Class expectedClass, NSError *parsing
// Designated initializer
- (instancetype)initWithHost:(NSString *)host
- method:(ProtoMethod *)method
+ method:(GRPCProtoMethod *)method
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable {
@@ -117,3 +117,7 @@ static NSError *ErrorForBadProto(id proto, Class expectedClass, NSError *parsing
_responseWriteable = nil;
}
@end
+
+@implementation GRPCProtoCall
+
+@end
diff --git a/src/objective-c/ProtoRPC/ProtoService.h b/src/objective-c/ProtoRPC/ProtoService.h
index 2e8cb33696..87d06e1ae5 100644
--- a/src/objective-c/ProtoRPC/ProtoService.h
+++ b/src/objective-c/ProtoRPC/ProtoService.h
@@ -33,17 +33,28 @@
#import <Foundation/Foundation.h>
-@class ProtoRPC;
+@class GRPCProtoCall;
@protocol GRXWriteable;
@class GRXWriter;
+
+__attribute__((deprecated("Please use GRPCProtoService.")))
@interface ProtoService : NSObject
- (instancetype)initWithHost:(NSString *)host
packageName:(NSString *)packageName
serviceName:(NSString *)serviceName NS_DESIGNATED_INITIALIZER;
-- (ProtoRPC *)RPCToMethod:(NSString *)method
+- (GRPCProtoCall *)RPCToMethod:(NSString *)method
requestsWriter:(GRXWriter *)requestsWriter
responseClass:(Class)responseClass
responsesWriteable:(id<GRXWriteable>)responsesWriteable;
@end
+
+
+/**
+ * This subclass is currently empty. Eventually the ProtoService class will be
+ * removed to avoid a potential naming conflict.
+ */
+@interface GRPCProtoService : ProtoService
+
+@end
diff --git a/src/objective-c/ProtoRPC/ProtoService.m b/src/objective-c/ProtoRPC/ProtoService.m
index fccc6aadc9..597c3cf0fe 100644
--- a/src/objective-c/ProtoRPC/ProtoService.m
+++ b/src/objective-c/ProtoRPC/ProtoService.m
@@ -79,3 +79,7 @@
responsesWriteable:responsesWriteable];
}
@end
+
+@implementation GRPCProtoService
+
+@end
diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m
index 4f096b9efa..781c500f73 100644
--- a/src/objective-c/tests/InteropTests.m
+++ b/src/objective-c/tests/InteropTests.m
@@ -35,7 +35,9 @@
#include <grpc/status.h>
+#import <Cronet/Cronet.h>
#import <GRPCClient/GRPCCall+Tests.h>
+#import <GRPCClient/GRPCCall+Cronet.h>
#import <ProtoRPC/ProtoRPC.h>
#import <RemoteTest/Empty.pbobjc.h>
#import <RemoteTest/Messages.pbobjc.h>
@@ -78,6 +80,8 @@
#pragma mark Tests
+static cronet_engine *cronetEngine = NULL;
+
@implementation InteropTests {
RMTTestService *_service;
}
@@ -88,6 +92,15 @@
- (void)setUp {
_service = self.class.host ? [RMTTestService serviceWithHost:self.class.host] : nil;
+#ifdef GRPC_COMPILE_WITH_CRONET
+ if (cronetEngine == NULL) {
+ // Cronet setup
+ [Cronet setHttp2Enabled:YES];
+ [Cronet start];
+ cronetEngine = [Cronet getGlobalEngine];
+ [GRPCCall useCronetWithEngine:cronetEngine];
+ }
+#endif
}
- (void)testEmptyUnaryRPC {
@@ -245,6 +258,8 @@
[self waitForExpectationsWithTimeout:4 handler:nil];
}
+#ifndef GRPC_COMPILE_WITH_CRONET
+// TODO(makdharma@): Fix this test
- (void)testEmptyStreamRPC {
XCTAssertNotNil(self.class.host);
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"EmptyStream"];
@@ -258,6 +273,7 @@
}];
[self waitForExpectationsWithTimeout:2 handler:nil];
}
+#endif
- (void)testCancelAfterBeginRPC {
XCTAssertNotNil(self.class.host);
diff --git a/src/objective-c/tests/Podfile b/src/objective-c/tests/Podfile
index 7ec7a25898..a7a88a3b9d 100644
--- a/src/objective-c/tests/Podfile
+++ b/src/objective-c/tests/Podfile
@@ -1,31 +1,36 @@
source 'https://github.com/CocoaPods/Specs.git'
platform :ios, '8.0'
-pod 'Protobuf', :path => "../../../third_party/protobuf"
-pod 'BoringSSL', :podspec => ".."
-pod 'gRPC', :path => "../../.."
-pod 'RemoteTest', :path => "RemoteTestClient"
-
-link_with 'AllTests',
- 'RxLibraryUnitTests',
- 'InteropTests',
- 'InteropTestsLocalSSL',
- 'InteropTestsLocalCleartext'
+install! 'cocoapods', :deterministic_uuids => false
+
+def shared_pods
+ pod 'Protobuf', :path => "../../../third_party/protobuf"
+ pod 'BoringSSL', :podspec => ".."
+ pod 'CronetFramework', :podspec => ".."
+ pod 'gRPC', :path => "../../.."
+ pod 'RemoteTest', :path => "RemoteTestClient"
+end
target 'Tests' do
+ shared_pods
end
target 'AllTests' do
+ shared_pods
end
target 'RxLibraryUnitTests' do
+ shared_pods
end
target 'InteropTestsRemote' do
+ shared_pods
end
target 'InteropTestsLocalSSL' do
+ shared_pods
end
target 'InteropTestsLocalCleartext' do
+ shared_pods
end
diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
index 6ecef0593b..e1fd991038 100644
--- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
@@ -2,6 +2,10 @@ Pod::Spec.new do |s|
s.name = "RemoteTest"
s.version = "0.0.1"
s.license = "New BSD"
+ s.authors = { 'gRPC contributors' => 'grpc-io@googlegroups.com' }
+ s.homepage = "http://www.grpc.io/"
+ s.summary = "RemoteTest example"
+ s.source = { :git => 'https://github.com/grpc/grpc.git' }
s.ios.deployment_target = '7.1'
s.osx.deployment_target = '10.9'
diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
index b0429617c0..89e0ea60b9 100644
--- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
+++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
@@ -7,33 +7,34 @@
objects = {
/* Begin PBXBuildFile section */
- 036D953EE34B1FD523647ACD /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */; };
- 08A8BB02D19A53D902B214B8 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */; };
- 50267643BA114A2A724D4FDF /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */; };
+ 0F9232F984C08643FD40C34F /* libPods-InteropTestsRemote.a in Frameworks */ = {isa = PBXBuildFile; fileRef = DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */; };
+ 16A9E77B6E336B3C0B9BA6E0 /* libPods-InteropTestsLocalSSL.a in Frameworks */ = {isa = PBXBuildFile; fileRef = DBEDE45BDA60DF1E1C8950C0 /* libPods-InteropTestsLocalSSL.a */; };
+ 20DFDF829DD993A4A00D5662 /* libPods-RxLibraryUnitTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */; };
+ 333E8FC01C8285B7C547D799 /* libPods-InteropTestsLocalCleartext.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FD346DB2C23F676C4842F3FF /* libPods-InteropTestsLocalCleartext.a */; };
+ 3D7C85F6AA68C4A205E3BA16 /* libPods-Tests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */; };
6312AE4E1B1BF49B00341DEE /* GRPCClientTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */; };
63423F4A1B150A5F006CF63C /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; };
635697CD1B14FC11007A7283 /* Tests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635697CC1B14FC11007A7283 /* Tests.m */; };
635ED2EC1B1A3BC400FDE5C3 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; };
63715F561B780C020029CB0B /* InteropTestsLocalCleartext.m in Sources */ = {isa = PBXBuildFile; fileRef = 63715F551B780C020029CB0B /* InteropTestsLocalCleartext.m */; };
- 6379CC4D1BE1662A001BC0A1 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; settings = {ASSET_TAGS = (); }; };
- 6379CC4E1BE1662B001BC0A1 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; settings = {ASSET_TAGS = (); }; };
- 6379CC501BE16703001BC0A1 /* InteropTestsRemote.m in Sources */ = {isa = PBXBuildFile; fileRef = 6379CC4F1BE16703001BC0A1 /* InteropTestsRemote.m */; settings = {ASSET_TAGS = (); }; };
- 6379CC511BE1683B001BC0A1 /* InteropTestsRemote.m in Sources */ = {isa = PBXBuildFile; fileRef = 6379CC4F1BE16703001BC0A1 /* InteropTestsRemote.m */; settings = {ASSET_TAGS = (); }; };
- 6379CC531BE17709001BC0A1 /* TestCertificates.bundle in Resources */ = {isa = PBXBuildFile; fileRef = 63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */; settings = {ASSET_TAGS = (); }; };
- 63DC84181BE15179000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; settings = {ASSET_TAGS = (); }; };
- 63DC841E1BE15180000708E8 /* RxLibraryUnitTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 63423F501B151B77006CF63C /* RxLibraryUnitTests.m */; settings = {ASSET_TAGS = (); }; };
- 63DC84281BE15267000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; settings = {ASSET_TAGS = (); }; };
- 63DC842E1BE15278000708E8 /* RxLibraryUnitTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 63423F501B151B77006CF63C /* RxLibraryUnitTests.m */; settings = {ASSET_TAGS = (); }; };
- 63DC842F1BE1527D000708E8 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; settings = {ASSET_TAGS = (); }; };
- 63DC84391BE15294000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; settings = {ASSET_TAGS = (); }; };
- 63DC84481BE152B5000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; settings = {ASSET_TAGS = (); }; };
- 63DC844E1BE15350000708E8 /* InteropTestsLocalCleartext.m in Sources */ = {isa = PBXBuildFile; fileRef = 63715F551B780C020029CB0B /* InteropTestsLocalCleartext.m */; settings = {ASSET_TAGS = (); }; };
- 63DC844F1BE15353000708E8 /* InteropTestsLocalSSL.m in Sources */ = {isa = PBXBuildFile; fileRef = 63E240CD1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m */; settings = {ASSET_TAGS = (); }; };
- 63DC84501BE153AA000708E8 /* GRPCClientTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */; settings = {ASSET_TAGS = (); }; };
+ 6379CC4D1BE1662A001BC0A1 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; };
+ 6379CC4E1BE1662B001BC0A1 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; };
+ 6379CC501BE16703001BC0A1 /* InteropTestsRemote.m in Sources */ = {isa = PBXBuildFile; fileRef = 6379CC4F1BE16703001BC0A1 /* InteropTestsRemote.m */; };
+ 6379CC511BE1683B001BC0A1 /* InteropTestsRemote.m in Sources */ = {isa = PBXBuildFile; fileRef = 6379CC4F1BE16703001BC0A1 /* InteropTestsRemote.m */; };
+ 6379CC531BE17709001BC0A1 /* TestCertificates.bundle in Resources */ = {isa = PBXBuildFile; fileRef = 63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */; };
+ 63DC84181BE15179000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; };
+ 63DC841E1BE15180000708E8 /* RxLibraryUnitTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 63423F501B151B77006CF63C /* RxLibraryUnitTests.m */; };
+ 63DC84281BE15267000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; };
+ 63DC842E1BE15278000708E8 /* RxLibraryUnitTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 63423F501B151B77006CF63C /* RxLibraryUnitTests.m */; };
+ 63DC842F1BE1527D000708E8 /* InteropTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */; };
+ 63DC84391BE15294000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; };
+ 63DC84481BE152B5000708E8 /* libTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 635697C71B14FC11007A7283 /* libTests.a */; };
+ 63DC844E1BE15350000708E8 /* InteropTestsLocalCleartext.m in Sources */ = {isa = PBXBuildFile; fileRef = 63715F551B780C020029CB0B /* InteropTestsLocalCleartext.m */; };
+ 63DC844F1BE15353000708E8 /* InteropTestsLocalSSL.m in Sources */ = {isa = PBXBuildFile; fileRef = 63E240CD1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m */; };
+ 63DC84501BE153AA000708E8 /* GRPCClientTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */; };
63E240CE1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m in Sources */ = {isa = PBXBuildFile; fileRef = 63E240CD1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m */; };
63E240D01B6C63DC005F3B0E /* TestCertificates.bundle in Resources */ = {isa = PBXBuildFile; fileRef = 63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */; };
- 7D8A186224D39101F90230F6 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */; };
- DCFAE001609CCBFE69DFA6A1 /* libPods.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */; };
+ F15EF7852DC70770EFDB1D2C /* libPods-AllTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = CAE086D5B470DA367D415AB0 /* libPods-AllTests.a */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@@ -87,8 +88,15 @@
/* End PBXCopyFilesBuildPhase section */
/* Begin PBXFileReference section */
+ 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Tests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-Tests/Pods-Tests.debug.xcconfig"; sourceTree = "<group>"; };
+ 07D10A965323BEA7FE59A74B /* Pods-RxLibraryUnitTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RxLibraryUnitTests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests.debug.xcconfig"; sourceTree = "<group>"; };
0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.release.xcconfig; path = "Pods/Target Support Files/Pods/Pods.release.xcconfig"; sourceTree = "<group>"; };
+ 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-Tests.a"; sourceTree = BUILT_PRODUCTS_DIR; };
35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libPods.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ 3B0861FC805389C52DB260D4 /* Pods-RxLibraryUnitTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RxLibraryUnitTests.release.xcconfig"; path = "Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests.release.xcconfig"; sourceTree = "<group>"; };
+ 51A275E86C141416ED63FF76 /* Pods-InteropTestsLocalCleartext.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalCleartext.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext.release.xcconfig"; sourceTree = "<group>"; };
+ 553BBBED24E4162D1F769D65 /* Pods-InteropTestsLocalSSL.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalSSL.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL.debug.xcconfig"; sourceTree = "<group>"; };
+ 5761E98978DDDF136A58CB7E /* Pods-AllTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-AllTests.release.xcconfig"; path = "Pods/Target Support Files/Pods-AllTests/Pods-AllTests.release.xcconfig"; sourceTree = "<group>"; };
6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GRPCClientTests.m; sourceTree = "<group>"; };
63423F441B150A5F006CF63C /* AllTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = AllTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
63423F501B151B77006CF63C /* RxLibraryUnitTests.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RxLibraryUnitTests.m; sourceTree = "<group>"; };
@@ -105,6 +113,17 @@
63E240CC1B6C4D3A005F3B0E /* InteropTests.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InteropTests.h; sourceTree = "<group>"; };
63E240CD1B6C4E2B005F3B0E /* InteropTestsLocalSSL.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = InteropTestsLocalSSL.m; sourceTree = "<group>"; };
63E240CF1B6C63DC005F3B0E /* TestCertificates.bundle */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.plug-in"; path = TestCertificates.bundle; sourceTree = "<group>"; };
+ 7A2E97E3F469CC2A758D77DE /* Pods-InteropTestsLocalSSL.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalSSL.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL.release.xcconfig"; sourceTree = "<group>"; };
+ A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-RxLibraryUnitTests.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+ B94C27C06733CF98CE1B2757 /* Pods-AllTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-AllTests.debug.xcconfig"; path = "Pods/Target Support Files/Pods-AllTests/Pods-AllTests.debug.xcconfig"; sourceTree = "<group>"; };
+ CAE086D5B470DA367D415AB0 /* libPods-AllTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-AllTests.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+ DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsRemote.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+ DBEDE45BDA60DF1E1C8950C0 /* libPods-InteropTestsLocalSSL.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsLocalSSL.a"; sourceTree = BUILT_PRODUCTS_DIR; };
+ DC3CA1D948F068E76957A861 /* Pods-InteropTestsRemote.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsRemote.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote.debug.xcconfig"; sourceTree = "<group>"; };
+ E1486220285AF123EB124008 /* Pods-InteropTestsLocalCleartext.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsLocalCleartext.debug.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext.debug.xcconfig"; sourceTree = "<group>"; };
+ E4275A759BDBDF143B9B438F /* Pods-InteropTestsRemote.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-InteropTestsRemote.release.xcconfig"; path = "Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote.release.xcconfig"; sourceTree = "<group>"; };
+ E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Tests.release.xcconfig"; path = "Pods/Target Support Files/Pods-Tests/Pods-Tests.release.xcconfig"; sourceTree = "<group>"; };
+ FD346DB2C23F676C4842F3FF /* libPods-InteropTestsLocalCleartext.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-InteropTestsLocalCleartext.a"; sourceTree = BUILT_PRODUCTS_DIR; };
FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = Pods.debug.xcconfig; path = "Pods/Target Support Files/Pods/Pods.debug.xcconfig"; sourceTree = "<group>"; };
/* End PBXFileReference section */
@@ -114,7 +133,7 @@
buildActionMask = 2147483647;
files = (
63423F4A1B150A5F006CF63C /* libTests.a in Frameworks */,
- 7D8A186224D39101F90230F6 /* libPods.a in Frameworks */,
+ F15EF7852DC70770EFDB1D2C /* libPods-AllTests.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -122,6 +141,7 @@
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
+ 3D7C85F6AA68C4A205E3BA16 /* libPods-Tests.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -130,7 +150,7 @@
buildActionMask = 2147483647;
files = (
63DC84181BE15179000708E8 /* libTests.a in Frameworks */,
- 036D953EE34B1FD523647ACD /* libPods.a in Frameworks */,
+ 20DFDF829DD993A4A00D5662 /* libPods-RxLibraryUnitTests.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -139,7 +159,7 @@
buildActionMask = 2147483647;
files = (
63DC84281BE15267000708E8 /* libTests.a in Frameworks */,
- DCFAE001609CCBFE69DFA6A1 /* libPods.a in Frameworks */,
+ 0F9232F984C08643FD40C34F /* libPods-InteropTestsRemote.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -148,7 +168,7 @@
buildActionMask = 2147483647;
files = (
63DC84391BE15294000708E8 /* libTests.a in Frameworks */,
- 08A8BB02D19A53D902B214B8 /* libPods.a in Frameworks */,
+ 16A9E77B6E336B3C0B9BA6E0 /* libPods-InteropTestsLocalSSL.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -157,7 +177,7 @@
buildActionMask = 2147483647;
files = (
63DC84481BE152B5000708E8 /* libTests.a in Frameworks */,
- 50267643BA114A2A724D4FDF /* libPods.a in Frameworks */,
+ 333E8FC01C8285B7C547D799 /* libPods-InteropTestsLocalCleartext.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -168,6 +188,12 @@
isa = PBXGroup;
children = (
35F2B6BF3BAE8F0DC4AFD76E /* libPods.a */,
+ CAE086D5B470DA367D415AB0 /* libPods-AllTests.a */,
+ FD346DB2C23F676C4842F3FF /* libPods-InteropTestsLocalCleartext.a */,
+ DBEDE45BDA60DF1E1C8950C0 /* libPods-InteropTestsLocalSSL.a */,
+ DBE059B4AC7A51919467EEC0 /* libPods-InteropTestsRemote.a */,
+ A58BE6DF1C62D1739EBB2C78 /* libPods-RxLibraryUnitTests.a */,
+ 20DFF2F3C97EF098FE5A3171 /* libPods-Tests.a */,
);
name = Frameworks;
sourceTree = "<group>";
@@ -177,6 +203,18 @@
children = (
FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */,
0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */,
+ B94C27C06733CF98CE1B2757 /* Pods-AllTests.debug.xcconfig */,
+ 5761E98978DDDF136A58CB7E /* Pods-AllTests.release.xcconfig */,
+ E1486220285AF123EB124008 /* Pods-InteropTestsLocalCleartext.debug.xcconfig */,
+ 51A275E86C141416ED63FF76 /* Pods-InteropTestsLocalCleartext.release.xcconfig */,
+ 553BBBED24E4162D1F769D65 /* Pods-InteropTestsLocalSSL.debug.xcconfig */,
+ 7A2E97E3F469CC2A758D77DE /* Pods-InteropTestsLocalSSL.release.xcconfig */,
+ DC3CA1D948F068E76957A861 /* Pods-InteropTestsRemote.debug.xcconfig */,
+ E4275A759BDBDF143B9B438F /* Pods-InteropTestsRemote.release.xcconfig */,
+ 07D10A965323BEA7FE59A74B /* Pods-RxLibraryUnitTests.debug.xcconfig */,
+ 3B0861FC805389C52DB260D4 /* Pods-RxLibraryUnitTests.release.xcconfig */,
+ 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */,
+ E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */,
);
name = Pods;
sourceTree = "<group>";
@@ -236,12 +274,12 @@
isa = PBXNativeTarget;
buildConfigurationList = 63423F4D1B150A5F006CF63C /* Build configuration list for PBXNativeTarget "AllTests" */;
buildPhases = (
- 914ADDD7106BA9BB8A7E569F /* Check Pods Manifest.lock */,
+ 914ADDD7106BA9BB8A7E569F /* 📦 Check Pods Manifest.lock */,
63423F401B150A5F006CF63C /* Sources */,
63423F411B150A5F006CF63C /* Frameworks */,
63423F421B150A5F006CF63C /* Resources */,
- A441F71824DCB9D0CA297748 /* Copy Pods Resources */,
- 5F14F59509E10C2852014F9E /* Embed Pods Frameworks */,
+ A441F71824DCB9D0CA297748 /* 📦 Copy Pods Resources */,
+ 5F14F59509E10C2852014F9E /* 📦 Embed Pods Frameworks */,
);
buildRules = (
);
@@ -257,9 +295,11 @@
isa = PBXNativeTarget;
buildConfigurationList = 635697DB1B14FC11007A7283 /* Build configuration list for PBXNativeTarget "Tests" */;
buildPhases = (
+ 796680C7599CB4ED736DD62A /* 📦 Check Pods Manifest.lock */,
635697C31B14FC11007A7283 /* Sources */,
635697C41B14FC11007A7283 /* Frameworks */,
635697C51B14FC11007A7283 /* CopyFiles */,
+ AEEBFC914CBAEE347382E8C4 /* 📦 Copy Pods Resources */,
);
buildRules = (
);
@@ -274,12 +314,12 @@
isa = PBXNativeTarget;
buildConfigurationList = 63DC841B1BE15179000708E8 /* Build configuration list for PBXNativeTarget "RxLibraryUnitTests" */;
buildPhases = (
- B2986CEEE8CDD4901C97598B /* Check Pods Manifest.lock */,
+ B2986CEEE8CDD4901C97598B /* 📦 Check Pods Manifest.lock */,
63DC840F1BE15179000708E8 /* Sources */,
63DC84101BE15179000708E8 /* Frameworks */,
63DC84111BE15179000708E8 /* Resources */,
- 4F5690DC0E6AD6663FE78B8B /* Embed Pods Frameworks */,
- C977426A8727267BBAC7D48E /* Copy Pods Resources */,
+ 4F5690DC0E6AD6663FE78B8B /* 📦 Embed Pods Frameworks */,
+ C977426A8727267BBAC7D48E /* 📦 Copy Pods Resources */,
);
buildRules = (
);
@@ -295,12 +335,12 @@
isa = PBXNativeTarget;
buildConfigurationList = 63DC842B1BE15267000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsRemote" */;
buildPhases = (
- 4C406327D3907A5E5FBA8AC9 /* Check Pods Manifest.lock */,
+ 4C406327D3907A5E5FBA8AC9 /* 📦 Check Pods Manifest.lock */,
63DC841F1BE15267000708E8 /* Sources */,
63DC84201BE15267000708E8 /* Frameworks */,
63DC84211BE15267000708E8 /* Resources */,
- 900B6EDD4D16BE7D765C3885 /* Embed Pods Frameworks */,
- C2E09DC4BD239F71160F0CC1 /* Copy Pods Resources */,
+ 900B6EDD4D16BE7D765C3885 /* 📦 Embed Pods Frameworks */,
+ C2E09DC4BD239F71160F0CC1 /* 📦 Copy Pods Resources */,
);
buildRules = (
);
@@ -316,12 +356,12 @@
isa = PBXNativeTarget;
buildConfigurationList = 63DC843C1BE15294000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsLocalSSL" */;
buildPhases = (
- 5C20DCCB71C3991E6FE78C22 /* Check Pods Manifest.lock */,
+ 5C20DCCB71C3991E6FE78C22 /* 📦 Check Pods Manifest.lock */,
63DC84301BE15294000708E8 /* Sources */,
63DC84311BE15294000708E8 /* Frameworks */,
63DC84321BE15294000708E8 /* Resources */,
- C591129ACE9F6CC5EE03FCDE /* Embed Pods Frameworks */,
- 693DD0B453431D64EA24FD66 /* Copy Pods Resources */,
+ C591129ACE9F6CC5EE03FCDE /* 📦 Embed Pods Frameworks */,
+ 693DD0B453431D64EA24FD66 /* 📦 Copy Pods Resources */,
);
buildRules = (
);
@@ -337,12 +377,12 @@
isa = PBXNativeTarget;
buildConfigurationList = 63DC844B1BE152B5000708E8 /* Build configuration list for PBXNativeTarget "InteropTestsLocalCleartext" */;
buildPhases = (
- 7418AC7B3844B29E48D24FC7 /* Check Pods Manifest.lock */,
+ 7418AC7B3844B29E48D24FC7 /* 📦 Check Pods Manifest.lock */,
63DC843F1BE152B5000708E8 /* Sources */,
63DC84401BE152B5000708E8 /* Frameworks */,
63DC84411BE152B5000708E8 /* Resources */,
- A8E3AC66DF770B774114A30E /* Embed Pods Frameworks */,
- 8AD3130D3C58A0FB32FF2A36 /* Copy Pods Resources */,
+ A8E3AC66DF770B774114A30E /* 📦 Embed Pods Frameworks */,
+ 8AD3130D3C58A0FB32FF2A36 /* 📦 Copy Pods Resources */,
);
buildRules = (
);
@@ -446,14 +486,14 @@
/* End PBXResourcesBuildPhase section */
/* Begin PBXShellScriptBuildPhase section */
- 4C406327D3907A5E5FBA8AC9 /* Check Pods Manifest.lock */ = {
+ 4C406327D3907A5E5FBA8AC9 /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Check Pods Manifest.lock";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
@@ -461,29 +501,29 @@
shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
- 4F5690DC0E6AD6663FE78B8B /* Embed Pods Frameworks */ = {
+ 4F5690DC0E6AD6663FE78B8B /* 📦 Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Embed Pods Frameworks";
+ name = "📦 Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
- 5C20DCCB71C3991E6FE78C22 /* Check Pods Manifest.lock */ = {
+ 5C20DCCB71C3991E6FE78C22 /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Check Pods Manifest.lock";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
@@ -491,44 +531,44 @@
shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
- 5F14F59509E10C2852014F9E /* Embed Pods Frameworks */ = {
+ 5F14F59509E10C2852014F9E /* 📦 Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Embed Pods Frameworks";
+ name = "📦 Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AllTests/Pods-AllTests-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
- 693DD0B453431D64EA24FD66 /* Copy Pods Resources */ = {
+ 693DD0B453431D64EA24FD66 /* 📦 Copy Pods Resources */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Copy Pods Resources";
+ name = "📦 Copy Pods Resources";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-resources.sh\"\n";
showEnvVarsInLog = 0;
};
- 7418AC7B3844B29E48D24FC7 /* Check Pods Manifest.lock */ = {
+ 7418AC7B3844B29E48D24FC7 /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Check Pods Manifest.lock";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
@@ -536,44 +576,59 @@
shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
- 8AD3130D3C58A0FB32FF2A36 /* Copy Pods Resources */ = {
+ 796680C7599CB4ED736DD62A /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Copy Pods Resources";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+ shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
+ showEnvVarsInLog = 0;
+ };
+ 8AD3130D3C58A0FB32FF2A36 /* 📦 Copy Pods Resources */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "📦 Copy Pods Resources";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext-resources.sh\"\n";
showEnvVarsInLog = 0;
};
- 900B6EDD4D16BE7D765C3885 /* Embed Pods Frameworks */ = {
+ 900B6EDD4D16BE7D765C3885 /* 📦 Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Embed Pods Frameworks";
+ name = "📦 Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
- 914ADDD7106BA9BB8A7E569F /* Check Pods Manifest.lock */ = {
+ 914ADDD7106BA9BB8A7E569F /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Check Pods Manifest.lock";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
@@ -581,44 +636,59 @@
shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
- A441F71824DCB9D0CA297748 /* Copy Pods Resources */ = {
+ A441F71824DCB9D0CA297748 /* 📦 Copy Pods Resources */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "📦 Copy Pods Resources";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-AllTests/Pods-AllTests-resources.sh\"\n";
+ showEnvVarsInLog = 0;
+ };
+ A8E3AC66DF770B774114A30E /* 📦 Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Copy Pods Resources";
+ name = "📦 Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
- A8E3AC66DF770B774114A30E /* Embed Pods Frameworks */ = {
+ AEEBFC914CBAEE347382E8C4 /* 📦 Copy Pods Resources */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Embed Pods Frameworks";
+ name = "📦 Copy Pods Resources";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-Tests/Pods-Tests-resources.sh\"\n";
showEnvVarsInLog = 0;
};
- B2986CEEE8CDD4901C97598B /* Check Pods Manifest.lock */ = {
+ B2986CEEE8CDD4901C97598B /* 📦 Check Pods Manifest.lock */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Check Pods Manifest.lock";
+ name = "📦 Check Pods Manifest.lock";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
@@ -626,49 +696,49 @@
shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
- C2E09DC4BD239F71160F0CC1 /* Copy Pods Resources */ = {
+ C2E09DC4BD239F71160F0CC1 /* 📦 Copy Pods Resources */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Copy Pods Resources";
+ name = "📦 Copy Pods Resources";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote-resources.sh\"\n";
showEnvVarsInLog = 0;
};
- C591129ACE9F6CC5EE03FCDE /* Embed Pods Frameworks */ = {
+ C591129ACE9F6CC5EE03FCDE /* 📦 Embed Pods Frameworks */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Embed Pods Frameworks";
+ name = "📦 Embed Pods Frameworks";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-frameworks.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-frameworks.sh\"\n";
showEnvVarsInLog = 0;
};
- C977426A8727267BBAC7D48E /* Copy Pods Resources */ = {
+ C977426A8727267BBAC7D48E /* 📦 Copy Pods Resources */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputPaths = (
);
- name = "Copy Pods Resources";
+ name = "📦 Copy Pods Resources";
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods/Pods-resources.sh\"\n";
+ shellScript = "\"${SRCROOT}/Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests-resources.sh\"\n";
showEnvVarsInLog = 0;
};
/* End PBXShellScriptBuildPhase section */
@@ -764,7 +834,7 @@
/* Begin XCBuildConfiguration section */
63423F4E1B150A5F006CF63C /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */;
+ baseConfigurationReference = B94C27C06733CF98CE1B2757 /* Pods-AllTests.debug.xcconfig */;
buildSettings = {
FRAMEWORK_SEARCH_PATHS = (
"$(SDKROOT)/Developer/Library/Frameworks",
@@ -782,7 +852,7 @@
};
63423F4F1B150A5F006CF63C /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */;
+ baseConfigurationReference = 5761E98978DDDF136A58CB7E /* Pods-AllTests.release.xcconfig */;
buildSettings = {
FRAMEWORK_SEARCH_PATHS = (
"$(SDKROOT)/Developer/Library/Frameworks",
@@ -874,6 +944,7 @@
};
635697DC1B14FC11007A7283 /* Debug */ = {
isa = XCBuildConfiguration;
+ baseConfigurationReference = 060EF32D7EC0DF67ED617507 /* Pods-Tests.debug.xcconfig */;
buildSettings = {
PRODUCT_NAME = "$(TARGET_NAME)";
SKIP_INSTALL = YES;
@@ -882,6 +953,7 @@
};
635697DD1B14FC11007A7283 /* Release */ = {
isa = XCBuildConfiguration;
+ baseConfigurationReference = E6733B838B28453434B556E2 /* Pods-Tests.release.xcconfig */;
buildSettings = {
PRODUCT_NAME = "$(TARGET_NAME)";
SKIP_INSTALL = YES;
@@ -890,7 +962,7 @@
};
63DC841C1BE15179000708E8 /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */;
+ baseConfigurationReference = 07D10A965323BEA7FE59A74B /* Pods-RxLibraryUnitTests.debug.xcconfig */;
buildSettings = {
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_TESTABILITY = YES;
@@ -904,7 +976,7 @@
};
63DC841D1BE15179000708E8 /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */;
+ baseConfigurationReference = 3B0861FC805389C52DB260D4 /* Pods-RxLibraryUnitTests.release.xcconfig */;
buildSettings = {
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 9.0;
@@ -916,7 +988,7 @@
};
63DC842C1BE15267000708E8 /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */;
+ baseConfigurationReference = DC3CA1D948F068E76957A861 /* Pods-InteropTestsRemote.debug.xcconfig */;
buildSettings = {
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_TESTABILITY = YES;
@@ -930,7 +1002,7 @@
};
63DC842D1BE15267000708E8 /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */;
+ baseConfigurationReference = E4275A759BDBDF143B9B438F /* Pods-InteropTestsRemote.release.xcconfig */;
buildSettings = {
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 9.0;
@@ -942,7 +1014,7 @@
};
63DC843D1BE15294000708E8 /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */;
+ baseConfigurationReference = 553BBBED24E4162D1F769D65 /* Pods-InteropTestsLocalSSL.debug.xcconfig */;
buildSettings = {
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_TESTABILITY = YES;
@@ -956,7 +1028,7 @@
};
63DC843E1BE15294000708E8 /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */;
+ baseConfigurationReference = 7A2E97E3F469CC2A758D77DE /* Pods-InteropTestsLocalSSL.release.xcconfig */;
buildSettings = {
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 9.0;
@@ -968,7 +1040,7 @@
};
63DC844C1BE152B5000708E8 /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = FF7B5489BCFE40111D768DD0 /* Pods.debug.xcconfig */;
+ baseConfigurationReference = E1486220285AF123EB124008 /* Pods-InteropTestsLocalCleartext.debug.xcconfig */;
buildSettings = {
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_TESTABILITY = YES;
@@ -982,7 +1054,7 @@
};
63DC844D1BE152B5000708E8 /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 0A4F89D9C90E9C30990218F0 /* Pods.release.xcconfig */;
+ baseConfigurationReference = 51A275E86C141416ED63FF76 /* Pods-InteropTestsLocalCleartext.release.xcconfig */;
buildSettings = {
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 9.0;
diff --git a/src/objective-c/tests/build_tests.sh b/src/objective-c/tests/build_tests.sh
index e7ad31e403..8547bfd3a8 100755
--- a/src/objective-c/tests/build_tests.sh
+++ b/src/objective-c/tests/build_tests.sh
@@ -33,6 +33,9 @@
set -e
+# CocoaPods requires the terminal to be using UTF-8 encoding.
+export LANG=en_US.UTF-8
+
cd $(dirname $0)
hash pod 2>/dev/null || { echo >&2 "Cocoapods needs to be installed."; exit 1; }
diff --git a/src/python/grpcio/grpc/_adapter/_implementations.py b/src/php/bin/stress_client.sh
index b85f228bf6..8c49b7aa4b 100644..100755
--- a/src/python/grpcio/grpc/_adapter/_implementations.py
+++ b/src/php/bin/stress_client.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
@@ -27,22 +28,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import collections
-
-from grpc.beta import interfaces
-
-class AuthMetadataContext(collections.namedtuple(
- 'AuthMetadataContext', [
- 'service_url',
- 'method_name'
- ]), interfaces.GRPCAuthMetadataContext):
- pass
-
-
-class AuthMetadataPluginCallback(interfaces.GRPCAuthMetadataContext):
-
- def __init__(self, callback):
- self._callback = callback
-
- def __call__(self, metadata, error):
- self._callback(metadata, error)
+set -e
+cd $(dirname $0)
+source ./determine_extension_dir.sh
+php $extension_dir -d max_execution_time=300 \
+ ../tests/interop/stress_client.php $@ 1>&2
diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c
index 884130e7d4..0ec502262d 100644
--- a/src/php/ext/grpc/call.c
+++ b/src/php/ext/grpc/call.c
@@ -292,6 +292,7 @@ PHP_METHOD(Call, startBatch) {
grpc_metadata_array_init(&recv_trailing_metadata);
MAKE_STD_ZVAL(result);
object_init(result);
+ memset(ops, 0, sizeof(ops));
/* "a" == 1 array */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) ==
FAILURE) {
diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c
index 762c01385c..f4cb5b28cc 100644
--- a/src/php/ext/grpc/php_grpc.c
+++ b/src/php/ext/grpc/php_grpc.c
@@ -227,7 +227,7 @@ PHP_MINIT_FUNCTION(grpc) {
GRPC_CHANNEL_TRANSIENT_FAILURE,
CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_FATAL_FAILURE",
- GRPC_CHANNEL_FATAL_FAILURE,
+ GRPC_CHANNEL_SHUTDOWN,
CONST_CS | CONST_PERSISTENT);
grpc_init_call(TSRMLS_C);
diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php
index 712af91eb2..c86d298805 100644
--- a/src/php/lib/Grpc/AbstractCall.php
+++ b/src/php/lib/Grpc/AbstractCall.php
@@ -39,6 +39,7 @@ abstract class AbstractCall
protected $call;
protected $deserialize;
protected $metadata;
+ protected $trailing_metadata;
/**
* Create a new Call wrapper object.
@@ -66,6 +67,7 @@ abstract class AbstractCall
$this->call = new Call($channel, $method, $deadline);
$this->deserialize = $deserialize;
$this->metadata = null;
+ $this->trailing_metadata = null;
if (isset($options['call_credentials_callback']) &&
is_callable($call_credentials_callback =
$options['call_credentials_callback'])) {
@@ -84,6 +86,14 @@ abstract class AbstractCall
}
/**
+ * @return array The trailing metadata sent by the server.
+ */
+ public function getTrailingMetadata()
+ {
+ return $this->trailing_metadata;
+ }
+
+ /**
* @return string The URI of the endpoint.
*/
public function getPeer()
diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php
index 2de1b337e5..70644fac87 100755
--- a/src/php/lib/Grpc/BaseStub.php
+++ b/src/php/lib/Grpc/BaseStub.php
@@ -52,8 +52,9 @@ class BaseStub
* - 'update_metadata': (optional) a callback function which takes in a
* metadata array, and returns an updated metadata array
* - 'grpc.primary_user_agent': (optional) a user-agent string
+ * @param Channel $channel An already created Channel object (optional)
*/
- public function __construct($hostname, $opts)
+ public function __construct($hostname, $opts, $channel = null)
{
$this->hostname = $hostname;
$this->update_metadata = null;
@@ -77,7 +78,15 @@ class BaseStub
'required. Please see one of the '.
'ChannelCredentials::create methods');
}
- $this->channel = new Channel($hostname, $opts);
+ if ($channel) {
+ if (!is_a($channel, 'Channel')) {
+ throw new \Exception("The channel argument is not a ".
+ "Channel object");
+ }
+ $this->channel = $channel;
+ } else {
+ $this->channel = new Channel($hostname, $opts);
+ }
}
/**
diff --git a/src/php/lib/Grpc/BidiStreamingCall.php b/src/php/lib/Grpc/BidiStreamingCall.php
index bf813c12e7..95e51c5088 100644
--- a/src/php/lib/Grpc/BidiStreamingCall.php
+++ b/src/php/lib/Grpc/BidiStreamingCall.php
@@ -112,6 +112,7 @@ class BidiStreamingCall extends AbstractCall
OP_RECV_STATUS_ON_CLIENT => true,
]);
+ $this->trailing_metadata = $status_event->status->metadata;
return $status_event->status;
}
}
diff --git a/src/php/lib/Grpc/ClientStreamingCall.php b/src/php/lib/Grpc/ClientStreamingCall.php
index 500cfe0d7a..315a406735 100644
--- a/src/php/lib/Grpc/ClientStreamingCall.php
+++ b/src/php/lib/Grpc/ClientStreamingCall.php
@@ -86,6 +86,8 @@ class ClientStreamingCall extends AbstractCall
]);
$this->metadata = $event->metadata;
- return [$this->deserializeResponse($event->message), $event->status];
+ $status = $event->status;
+ $this->trailing_metadata = $status->metadata;
+ return [$this->deserializeResponse($event->message), $status];
}
}
diff --git a/src/php/lib/Grpc/ServerStreamingCall.php b/src/php/lib/Grpc/ServerStreamingCall.php
index da48523717..53599fe4ae 100644
--- a/src/php/lib/Grpc/ServerStreamingCall.php
+++ b/src/php/lib/Grpc/ServerStreamingCall.php
@@ -91,6 +91,7 @@ class ServerStreamingCall extends AbstractCall
OP_RECV_STATUS_ON_CLIENT => true,
]);
+ $this->trailing_metadata = $status_event->status->metadata;
return $status_event->status;
}
}
diff --git a/src/php/lib/Grpc/UnaryCall.php b/src/php/lib/Grpc/UnaryCall.php
index b57903d6d0..b114b771b8 100644
--- a/src/php/lib/Grpc/UnaryCall.php
+++ b/src/php/lib/Grpc/UnaryCall.php
@@ -75,6 +75,8 @@ class UnaryCall extends AbstractCall
OP_RECV_STATUS_ON_CLIENT => true,
]);
- return [$this->deserializeResponse($event->message), $event->status];
+ $status = $event->status;
+ $this->trailing_metadata = $status->metadata;
+ return [$this->deserializeResponse($event->message), $status];
}
}
diff --git a/src/php/tests/interop/interop_client.php b/src/php/tests/interop/interop_client.php
index 565bfce74f..43b3199d92 100755
--- a/src/php/tests/interop/interop_client.php
+++ b/src/php/tests/interop/interop_client.php
@@ -388,6 +388,103 @@ function timeoutOnSleepingServer($stub)
'Call status was not DEADLINE_EXCEEDED');
}
+function customMetadata($stub)
+{
+ $ECHO_INITIAL_KEY = 'x-grpc-test-echo-initial';
+ $ECHO_INITIAL_VALUE = 'test_initial_metadata_value';
+ $ECHO_TRAILING_KEY = 'x-grpc-test-echo-trailing-bin';
+ $ECHO_TRAILING_VALUE = 'ababab';
+ $request_len = 271828;
+ $response_len = 314159;
+
+ $request = new grpc\testing\SimpleRequest();
+ $request->setResponseType(grpc\testing\PayloadType::COMPRESSABLE);
+ $request->setResponseSize($response_len);
+ $payload = new grpc\testing\Payload();
+ $payload->setType(grpc\testing\PayloadType::COMPRESSABLE);
+ $payload->setBody(str_repeat("\0", $request_len));
+ $request->setPayload($payload);
+
+ $metadata = [
+ $ECHO_INITIAL_KEY => [$ECHO_INITIAL_VALUE],
+ $ECHO_TRAILING_KEY => [$ECHO_TRAILING_VALUE],
+ ];
+ $call = $stub->UnaryCall($request, $metadata);
+
+ $initial_metadata = $call->getMetadata();
+ hardAssert(array_key_exists($ECHO_INITIAL_KEY, $initial_metadata),
+ 'Initial metadata does not contain expected key');
+ hardAssert($initial_metadata[$ECHO_INITIAL_KEY][0] ==
+ $ECHO_INITIAL_VALUE,
+ 'Incorrect initial metadata value');
+
+ list($result, $status) = $call->wait();
+ hardAssert($status->code === Grpc\STATUS_OK,
+ 'Call did not complete successfully');
+
+ $trailing_metadata = $call->getTrailingMetadata();
+ hardAssert(array_key_exists($ECHO_TRAILING_KEY, $trailing_metadata),
+ 'Trailing metadata does not contain expected key');
+ hardAssert($trailing_metadata[$ECHO_TRAILING_KEY][0] ==
+ $ECHO_TRAILING_VALUE, 'Incorrect trailing metadata value');
+
+ $streaming_call = $stub->FullDuplexCall($metadata);
+
+ $streaming_request = new grpc\testing\StreamingOutputCallRequest();
+ $streaming_request->setPayload($payload);
+ $streaming_call->write($streaming_request);
+ $streaming_call->writesDone();
+
+ hardAssert($streaming_call->getStatus()->code === Grpc\STATUS_OK,
+ 'Call did not complete successfully');
+
+ $streaming_trailing_metadata = $streaming_call->getTrailingMetadata();
+ hardAssert(array_key_exists($ECHO_TRAILING_KEY,
+ $streaming_trailing_metadata),
+ 'Trailing metadata does not contain expected key');
+ hardAssert($streaming_trailing_metadata[$ECHO_TRAILING_KEY][0] ==
+ $ECHO_TRAILING_VALUE, 'Incorrect trailing metadata value');
+}
+
+function statusCodeAndMessage($stub)
+{
+ $echo_status = new grpc\testing\EchoStatus();
+ $echo_status->setCode(2);
+ $echo_status->setMessage("test status message");
+
+ $request = new grpc\testing\SimpleRequest();
+ $request->setResponseStatus($echo_status);
+
+ $call = $stub->UnaryCall($request);
+ list($result, $status) = $call->wait();
+
+ hardAssert($status->code === 2,
+ 'Received unexpected status code');
+ hardAssert($status->details === "test status message",
+ 'Received unexpected status details');
+
+ $streaming_call = $stub->FullDuplexCall();
+
+ $streaming_request = new grpc\testing\StreamingOutputCallRequest();
+ $streaming_request->setResponseStatus($echo_status);
+ $streaming_call->write($streaming_request);
+ $streaming_call->writesDone();
+
+ $status = $streaming_call->getStatus();
+ hardAssert($status->code === 2,
+ 'Received unexpected status code');
+ hardAssert($status->details === "test status message",
+ 'Received unexpected status details');
+}
+
+function unimplementedMethod($stub)
+{
+ $call = $stub->UnimplementedCall(new grpc\testing\EmptyMessage());
+ list($result, $status) = $call->wait();
+ hardAssert($status->code === Grpc\STATUS_UNIMPLEMENTED,
+ 'Received unexpected status code');
+}
+
function _makeStub($args)
{
if (!array_key_exists('server_host', $args)) {
@@ -472,12 +569,17 @@ function _makeStub($args)
$opts['update_metadata'] = $update_metadata;
}
- $stub = new grpc\testing\TestServiceClient($server_address, $opts);
+ if ($test_case == 'unimplemented_method') {
+ $stub = new grpc\testing\UnimplementedServiceClient($server_address, $opts);
+ } else {
+ $stub = new grpc\testing\TestServiceClient($server_address, $opts);
+ }
return $stub;
}
-function interop_main($args, $stub = false) {
+function interop_main($args, $stub = false)
+{
if (!$stub) {
$stub = _makeStub($args);
}
@@ -513,6 +615,15 @@ function interop_main($args, $stub = false) {
case 'timeout_on_sleeping_server':
timeoutOnSleepingServer($stub);
break;
+ case 'custom_metadata':
+ customMetadata($stub);
+ break;
+ case 'status_code_and_message':
+ statusCodeAndMessage($stub);
+ break;
+ case 'unimplemented_method':
+ unimplementedMethod($stub);
+ break;
case 'service_account_creds':
serviceAccountCreds($stub, $args);
break;
diff --git a/src/php/tests/interop/messages.proto b/src/php/tests/interop/messages.proto
index de0b1a2320..44e3c3b8f9 100644
--- a/src/php/tests/interop/messages.proto
+++ b/src/php/tests/interop/messages.proto
@@ -1,5 +1,5 @@
-// Copyright 2015, Google Inc.
+// Copyright 2015-2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,6 @@ enum PayloadType {
// Uncompressable binary format.
UNCOMPRESSABLE = 1;
-
- // Randomly chosen from all other formats defined in this enum.
- RANDOM = 2;
}
// A block of data, to simply increase gRPC message size.
@@ -54,6 +51,13 @@ message Payload {
optional bytes body = 2;
}
+// A protobuf representation for grpc status. This is used by test
+// clients to specify a status that the server should attempt to return.
+message EchoStatus {
+ optional int32 code = 1;
+ optional string message = 2;
+}
+
// Unary request.
message SimpleRequest {
// Desired payload type in the response from the server.
@@ -72,6 +76,12 @@ message SimpleRequest {
// Whether SimpleResponse should include OAuth scope.
optional bool fill_oauth_scope = 5;
+
+ // Whether to request the server to compress the response.
+ optional bool request_compressed_response = 6;
+
+ // Whether server should return a given status
+ optional EchoStatus response_status = 7;
}
// Unary response, as configured by the request.
@@ -123,6 +133,12 @@ message StreamingOutputCallRequest {
// Optional input payload sent along with the request.
optional Payload payload = 3;
+
+ // Whether to request the server to compress the response.
+ optional bool request_compressed_response = 6;
+
+ // Whether server should return a given status
+ optional EchoStatus response_status = 7;
}
// Server-streaming response, as configured by the request and parameters.
@@ -130,3 +146,17 @@ message StreamingOutputCallResponse {
// Payload to increase response size.
optional Payload payload = 1;
}
+
+// For reconnect interop test only.
+// Client tells server what reconnection parameters it used.
+message ReconnectParams {
+ optional int32 max_reconnect_backoff_ms = 1;
+}
+
+// For reconnect interop test only.
+// Server tells client whether its reconnects are following the spec and the
+// reconnect backoffs it saw.
+message ReconnectInfo {
+ optional bool passed = 1;
+ repeated int32 backoff_ms = 2;
+}
diff --git a/src/php/tests/interop/metrics_client.php b/src/php/tests/interop/metrics_client.php
index 46f4212f77..19510dc5d8 100644
--- a/src/php/tests/interop/metrics_client.php
+++ b/src/php/tests/interop/metrics_client.php
@@ -39,11 +39,11 @@ $server_port = (count($parts) == 2) ? $parts[1] : '';
$socket = socket_create(AF_INET, SOCK_STREAM, 0);
if (@!socket_connect($socket, $server_host, $server_port)) {
- echo "Cannot connect to merics server...\n";
- exit(1);
+ echo "Cannot connect to merics server...\n";
+ exit(1);
}
socket_write($socket, 'qps');
while ($out = socket_read($socket, 1024)) {
- echo "$out\n";
+ echo "$out\n";
}
socket_close($socket);
diff --git a/src/php/tests/interop/stress_client.php b/src/php/tests/interop/stress_client.php
index 2339f0d105..f9cfe8aba5 100644
--- a/src/php/tests/interop/stress_client.php
+++ b/src/php/tests/interop/stress_client.php
@@ -32,50 +32,52 @@
*
*/
-include_once('interop_client.php');
+include_once 'interop_client.php';
-function stress_main($args) {
- mt_srand();
- set_time_limit(0);
+function stress_main($args)
+{
+ mt_srand();
+ set_time_limit(0);
- // open socket to listen as metrics server
- $socket = socket_create(AF_INET, SOCK_STREAM, 0);
- socket_set_option($socket, SOL_SOCKET, SO_REUSEADDR, 1);
- if (@!socket_bind($socket, 'localhost', $args['metrics_port'])) {
- echo "Cannot create socket for metrics server...\n";
- exit(1);
- }
- socket_listen($socket);
- socket_set_nonblock($socket);
+ // open socket to listen as metrics server
+ $socket = socket_create(AF_INET, SOCK_STREAM, 0);
+ socket_set_option($socket, SOL_SOCKET, SO_REUSEADDR, 1);
+ if (@!socket_bind($socket, 'localhost', $args['metrics_port'])) {
+ echo "Cannot create socket for metrics server...\n";
+ exit(1);
+ }
+ socket_listen($socket);
+ socket_set_nonblock($socket);
- $start_time = microtime(true);
- $count = 0;
- $deadline = $args['test_duration_secs'] ?
- ($start_time + $args['test_duration_secs']) : false;
- $num_test_cases = count($args['test_cases']);
- $stub = false;
+ $start_time = microtime(true);
+ $count = 0;
+ $deadline = $args['test_duration_secs'] ?
+ ($start_time + $args['test_duration_secs']) : false;
+ $num_test_cases = count($args['test_cases']);
+ $stub = false;
- while (true) {
- $current_time = microtime(true);
- if ($deadline && $current_time > $deadline) {
- break;
- }
- if ($client_connection = socket_accept($socket)) {
- // there is an incoming request, respond with qps metrics
- $input = socket_read($client_connection, 1024);
- $qps = round($count / ($current_time - $start_time));
- socket_write($client_connection, "qps: $qps");
- socket_close($client_connection);
- } else {
- // do actual work, run one interop test case
- $args['test_case'] =
- $args['test_cases'][mt_rand(0, $num_test_cases - 1)];
- $stub = @interop_main($args, $stub);
- $count++;
+ while (true) {
+ $current_time = microtime(true);
+ if ($deadline && $current_time > $deadline) {
+ break;
+ }
+ if ($client_connection = socket_accept($socket)) {
+ // there is an incoming request, respond with qps metrics
+ $input = socket_read($client_connection, 1024);
+ $qps = round($count / ($current_time - $start_time));
+ socket_write($client_connection, "qps: $qps");
+ socket_close($client_connection);
+ } else {
+ // do actual work, run one interop test case
+ $args['test_case'] =
+ $args['test_cases'][mt_rand(0, $num_test_cases - 1)];
+ $stub = @interop_main($args, $stub);
+ ++$count;
+ }
}
- }
- socket_close($socket);
- echo "Number of interop tests run in $args[test_duration_secs] seconds: $count.\n";
+ socket_close($socket);
+ echo "Number of interop tests run in $args[test_duration_secs] ".
+ "seconds: $count.\n";
}
// process command line arguments
@@ -85,31 +87,32 @@ $raw_args = getopt('',
'metrics_port::',
'test_duration_secs::',
'num_channels_per_server::',
- 'num_stubs_per_channel::']);
+ 'num_stubs_per_channel::',
+ ]);
$args = [];
if (empty($raw_args['server_addresses'])) {
- $args['server_host'] = 'localhost';
- $args['server_port'] = '8080';
+ $args['server_host'] = 'localhost';
+ $args['server_port'] = '8080';
} else {
- $parts = explode(':', $raw_args['server_addresses']);
- $args['server_host'] = $parts[0];
- $args['server_port'] = (count($parts) == 2) ? $parts[1] : '';
+ $parts = explode(':', $raw_args['server_addresses']);
+ $args['server_host'] = $parts[0];
+ $args['server_port'] = (count($parts) == 2) ? $parts[1] : '';
}
$args['metrics_port'] = empty($raw_args['metrics_port']) ?
- '8081' : $args['metrics_port'];
+ '8081' : $raw_args['metrics_port'];
$args['test_duration_secs'] = empty($raw_args['test_duration_secs']) ||
- $raw_args['test_duration_secs'] == -1 ?
- false : $raw_args['test_duration_secs'];
+ $raw_args['test_duration_secs'] == -1 ?
+ false : $raw_args['test_duration_secs'];
$test_cases = [];
$test_case_strs = explode(',', $raw_args['test_cases']);
foreach ($test_case_strs as $test_case_str) {
- $parts = explode(':', $test_case_str);
- $test_cases = array_merge($test_cases, array_fill(0, $parts[1], $parts[0]));
+ $parts = explode(':', $test_case_str);
+ $test_cases = array_merge($test_cases, array_fill(0, $parts[1], $parts[0]));
}
$args['test_cases'] = $test_cases;
diff --git a/src/php/tests/interop/test.proto b/src/php/tests/interop/test.proto
index 0d169e7f64..57ef30ee1c 100644
--- a/src/php/tests/interop/test.proto
+++ b/src/php/tests/interop/test.proto
@@ -1,5 +1,5 @@
-// Copyright 2015, Google Inc.
+// Copyright 2015-2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -69,3 +69,10 @@ service TestService {
rpc HalfDuplexCall(stream StreamingOutputCallRequest)
returns (stream StreamingOutputCallResponse);
}
+
+// A simple service NOT implemented at servers so clients can test for
+// that case.
+service UnimplementedService {
+ // A call that no server should implement
+ rpc UnimplementedCall(grpc.testing.EmptyMessage) returns (grpc.testing.EmptyMessage);
+}
diff --git a/src/proto/census/census.options b/src/proto/census/census.options
new file mode 100644
index 0000000000..a1f80395c7
--- /dev/null
+++ b/src/proto/census/census.options
@@ -0,0 +1,3 @@
+google.census.Tag.key max_size:255
+google.census.Tag.value max_size:255
+google.census.View.tag_key max_count:15
diff --git a/src/proto/census/census.proto b/src/proto/census/census.proto
new file mode 100644
index 0000000000..c869d851ff
--- /dev/null
+++ b/src/proto/census/census.proto
@@ -0,0 +1,313 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.census;
+
+// All the census protos.
+//
+// Nomenclature note: capitalized names below (like Metric) are protos.
+//
+// Census lets you define a Metric - something which can be measured, like the
+// latency of an RPC, the number of CPU cycles spent on an operation, or
+// anything else you care to measure. You can record individual instances of
+// measurements (a double value) for every metric of interest. These
+// individual measurements are aggregated together into an Aggregation. There
+// are two Aggregation types available: Distribution (describes the
+// distribution of all measurements, possibly with a histogram) and
+// IntervalStats (the count and mean of measurements across specified time
+// periods). An Aggregation is described by an AggregationDescriptor.
+//
+// You can define how your stats are broken down by Tag values and which
+// Aggregations to use through a View. The corresponding combination of
+// Metric/View/Aggregation which is available to census clients is called a
+// ViewAggregation.
+
+
+// The following two types are copied from
+// google/protobuf/{duration,timestamp}.proto. Ideally, we would be able to
+// import them, but this causes compilation issues on C-based systems
+// (e.g. https://koti.kapsi.fi/jpa/nanopb/), which cannot process the C++
+// headers generated from the standard protobuf distribution. See the relevant
+// proto files for full documentation of these types.
+
+message Duration {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
+
+message Timestamp {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
+
+// Describes a metric
+message Metric {
+ // name of metric, e.g. rpc_latency, cpu.
+ string name = 1;
+
+ // More detailed description of the metric, used in documentation.
+ string description = 2;
+
+ // Fundamental units of measurement supported by Census
+ // TODO(aveitch): expand this to include other S.I. units?
+ message BasicUnit {
+ enum Measure {
+ UNKNOWN = 0;
+ BITS = 1;
+ BYTES = 2;
+ SECS = 3;
+ CORES = 4;
+ MAX_UNITS = 5;
+ }
+ Measure type = 1;
+ }
+
+ // MeasurementUnit lets you build compound units of the form
+ // 10^n * (A * B * ...) / (X * Y * ...),
+ // where the elements in the numerator and denominator are all BasicUnits. A
+ // MeasurementUnit must have at least one BasicUnit in its numerator.
+ //
+ // To specify multiplication in the numerator or denominator, simply specify
+ // multiple numerator or denominator fields. For example:
+ //
+ // - byte-seconds (i.e. bytes * seconds):
+ // numerator: BYTES
+ // numerator: SECS
+ //
+ // - events/sec^2 (i.e. rate of change of events/sec):
+ // numerator: COUNT
+ // denominator: SECS
+ // denominator: SECS
+ //
+ // To specify multiples (in power of 10) units, specify a non-zero prefix
+ // value, for example:
+ //
+ // - MB/s (i.e. megabytes / s):
+ // prefix: 6
+ // numerator: BYTES
+ // denominator: SECS
+ //
+ // - nanoseconds
+ // prefix: -9
+ // numerator: SECS
+ message MeasurementUnit {
+ int32 prefix = 1;
+ repeated BasicUnit numerator = 2;
+ repeated BasicUnit denominator = 3;
+ }
+
+ // The units in which the Metric value is reported.
+ MeasurementUnit unit = 3;
+
+ // Metrics will be assigned an ID when registered. Invalid if <= 0.
+ int32 id = 4;
+}
+
+// An Aggregation summarizes a series of individual Metric measurements, an
+// AggregationDescriptor describes an Aggregation.
+message AggregationDescriptor {
+ // At most one set of options. If neither option is set, a default type
+ // of Distribution (without a histogram component) will be used.
+ oneof options {
+ // Defines the histogram bucket boundaries for Distributions.
+ BucketBoundaries bucket_boundaries = 1;
+ // Defines the time windows to record for IntervalStats.
+ IntervalBoundaries interval_boundaries = 2;
+ }
+
+ // A Distribution may optionally contain a histogram of the values in the
+ // population. The bucket boundaries for that histogram is described by
+ // `bucket_boundaries`.
+ //
+ // Describes histogram bucket boundaries. Defines `size(bounds) + 1` (= N)
+ // buckets (for size(bounds) >= 1; if size(bounds) == 0, then no histogram
+ // will be defined). The boundaries for bucket index i are:
+ //
+ // [-infinity, bounds[i]) for i == 0
+ // [bounds[i-1], bounds[i]) for 0 < i < N-1
+ // [bounds[i-1], +infinity) for i == N-1
+ //
+ // i.e. an underflow bucket (number 0), zero or more finite buckets (1
+ // through N - 2), and an overflow bucket (N - 1), with inclusive lower
+ // bounds and exclusive upper bounds.
+ //
+ // There must be at least one element in `bounds`. If `bounds` has only one
+ // element, there are no finite buckets, and that single element is the
+ // common boundary of the overflow and underflow buckets.
+ message BucketBoundaries {
+ // The values must be monotonically increasing.
+ repeated double bounds = 1;
+ }
+
+ // For Interval stats, describe the size of each window.
+ message IntervalBoundaries {
+ // For each time window, specify a duration in seconds.
+ repeated double window_size = 1;
+ }
+}
+
+// Distribution contains summary statistics for a population of values and,
+// optionally, a histogram representing the distribution of those values across
+// a specified set of histogram buckets, as defined in
+// Aggregation.bucket_options.
+//
+// The summary statistics are the count, mean, sum of the squared deviation from
+// the mean, the minimum, and the maximum of the set of population of values.
+//
+// Although it is not forbidden, it is generally a bad idea to include
+// non-finite values (infinities or NaNs) in the population of values, as this
+// will render the `mean` field meaningless.
+message Distribution {
+ // The number of values in the population. Must be non-negative.
+ int64 count = 1;
+
+ // The arithmetic mean of the values in the population. If `count` is zero
+ // then this field must be zero.
+ double mean = 2;
+
+ // Describes a range of population values.
+ message Range {
+ // The minimum of the population values.
+ double min = 1;
+ // The maximum of the population values.
+ double max = 2;
+ }
+
+ // The range of the population values. If `count` is zero, this field will not
+ // be defined.
+ Range range = 3;
+
+ // A Distribution may optionally contain a histogram of the values in the
+ // population. The histogram is given in `bucket_count` as counts of values
+ // that fall into one of a sequence of non-overlapping buckets, as described
+ // by `AggregationDescriptor.options.bucket_boundaries`.
+ // The sum of the values in `bucket_count` must equal the value in `count`.
+ //
+ // Bucket counts are given in order under the numbering scheme described
+ // above (the underflow bucket has number 0; the finite buckets, if any,
+ // have numbers 1 through N-2; the overflow bucket has number N-1).
+ //
+ // The size of `bucket_count` must be no greater than N as defined in
+ // `bucket_boundaries`.
+ //
+ // Any suffix of trailing zero bucket_count fields may be omitted.
+ repeated int64 bucket_count = 4;
+}
+
+// Record summary stats over various time windows.
+message IntervalStats {
+ // Summary statistic over a single time window.
+ message Window {
+ // The window duration.
+ Duration window_size = 1;
+ // The number of measurements in this window.
+ int64 count = 2;
+ // The arithmetic mean of all measurements in the window.
+ double mean = 3;
+ }
+
+ // Full set of windows for this metric.
+ repeated Window window = 1;
+}
+
+// A Tag: key-value pair.
+message Tag {
+ string key = 1;
+ string value = 2;
+}
+
+// A View specifies an Aggregation and a set of tag keys. The Aggregation will
+// be broken down by the unique set of matching tag values for each measurement.
+message View {
+ // Name of view.
+ string name = 1;
+
+ // More detailed description, for documentation purposes.
+ string description = 2;
+
+ // ID of Metric to associate with this View.
+ int32 metric_id = 3;
+
+ // Aggregation type to associate with this View.
+ AggregationDescriptor aggregation = 4;
+
+ // Tag keys to match with a given Metric. If no keys are specified, then all
+ // stats for the Metric are recorded. Keys must be unique.
+ repeated string tag_key = 5;
+}
+
+// An Aggregation summarizes a series of individual Metric measures.
+message Aggregation {
+ // Name of this aggregation.
+ string name = 1;
+
+ // More detailed description, for documentation purposes.
+ string description = 2;
+
+ // The data for this Aggregation.
+ oneof data {
+ Distribution distribution = 3;
+ IntervalStats interval_stats = 4;
+ }
+
+ // Tags associated with this Aggregation.
+ repeated Tag tag = 5;
+}
+
+// A ViewAggregations represents all the Aggregations for a particular view.
+message ViewAggregations {
+ // Aggregations - each will have a unique set of tag values for the tag_keys
+ // associated with the corresponding View.
+ repeated Aggregation aggregation = 1;
+
+ // Start and end timestamps over which the value was accumulated. These
+ // values are not relevant/defined for IntervalStats aggregations, which are
+ // always accumulated over a fixed time period.
+ Timestamp start = 2;
+ Timestamp end = 3;
+}
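The bucket numbering scheme documented in AggregationDescriptor.BucketBoundaries above maps each measurement into one of size(bounds) + 1 buckets. As a minimal illustration (not part of this patch; the helper below is hypothetical), the mapping reduces to a right-bisection over the sorted bounds:

    import bisect

    def bucket_index(bounds, value):
        """Returns the histogram bucket index for value.

        bounds must be monotonically increasing, as BucketBoundaries requires.
        Bucket 0 is the underflow bucket [-inf, bounds[0]); buckets 1..N-2 are
        [bounds[i-1], bounds[i]); bucket N-1 is the overflow bucket
        [bounds[N-2], +inf), where N = len(bounds) + 1.
        """
        return bisect.bisect_right(bounds, value)

    # bounds [0, 10, 100] define 4 buckets:
    assert bucket_index([0, 10, 100], -5) == 0    # underflow
    assert bucket_index([0, 10, 100], 10) == 2    # lower bounds are inclusive
    assert bucket_index([0, 10, 100], 250) == 3   # overflow
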
diff --git a/src/proto/grpc/testing/messages.proto b/src/proto/grpc/testing/messages.proto
index a063b470c7..e1090156ab 100644
--- a/src/proto/grpc/testing/messages.proto
+++ b/src/proto/grpc/testing/messages.proto
@@ -41,17 +41,6 @@ enum PayloadType {
// Uncompressable binary format.
UNCOMPRESSABLE = 1;
-
- // Randomly chosen from all other formats defined in this enum.
- RANDOM = 2;
-}
-
-// Compression algorithms
-enum CompressionType {
- // No compression
- NONE = 0;
- GZIP = 1;
- DEFLATE = 2;
}
// A block of data, to simply increase gRPC message size.
@@ -88,8 +77,8 @@ message SimpleRequest {
// Whether SimpleResponse should include OAuth scope.
bool fill_oauth_scope = 5;
- // Compression algorithm to be used by the server for the response (stream)
- CompressionType response_compression = 6;
+ // Whether to request the server to compress the response.
+ bool request_compressed_response = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
@@ -145,8 +134,8 @@ message StreamingOutputCallRequest {
// Optional input payload sent along with the request.
Payload payload = 3;
- // Compression algorithm to be used by the server for the response (stream)
- CompressionType response_compression = 6;
+ // Whether to request the server to compress the response.
+ bool request_compressed_response = 6;
// Whether server should return a given status
EchoStatus response_status = 7;
diff --git a/src/proto/math/math.proto b/src/proto/math/math.proto
index 311e148c02..269c60bde8 100644
--- a/src/proto/math/math.proto
+++ b/src/proto/math/math.proto
@@ -55,8 +55,8 @@ message FibReply {
}
service Math {
- // Div divides args.dividend by args.divisor and returns the quotient and
- // remainder.
+ // Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
+ // and remainder.
rpc Div (DivArgs) returns (DivReply) {
}
@@ -67,7 +67,7 @@ service Math {
rpc DivMany (stream DivArgs) returns (stream DivReply) {
}
- // Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ // Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
// generates up to limit numbers; otherwise it continues until the call is
// canceled. Unlike Fib above, Fib has no final FibReply.
rpc Fib (FibArgs) returns (stream Num) {
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 295dab2d27..3e974eba0a 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -191,7 +191,7 @@ class BuildProtoModules(setuptools.Command):
except subprocess.CalledProcessError as e:
sys.stderr.write(
'warning: Command:\n{}\nMessage:\n{}\nOutput:\n{}'.format(
- command, e.message, e.output))
+ command, str(e), e.output))
# Generated proto directories dont include __init__.py, but
# these are needed for python package resolution
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index b844a14c48..28adca3772 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -27,5 +27,1215 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""gRPC's Python API."""
+
__import__('pkg_resources').declare_namespace(__name__)
+import abc
+import enum
+
+import six
+
+from grpc._cython import cygrpc as _cygrpc
+
+
+############################## Future Interface ###############################
+
+
+class FutureTimeoutError(Exception):
+ """Indicates that a method call on a Future timed out."""
+
+
+class FutureCancelledError(Exception):
+ """Indicates that the computation underlying a Future was cancelled."""
+
+
+class Future(six.with_metaclass(abc.ABCMeta)):
+ """A representation of a computation in another control flow.
+
+ Computations represented by a Future may not yet have begun, may be ongoing,
+ or may have already completed.
+ """
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
+
+ This method does not block.
+
+ Returns:
+ True if the computation has not yet begun, will not be allowed to take
+ place, and determination of both was possible without blocking. False
+ under all other circumstances including but not limited to the
+ computation's already having begun, the computation's already having
+ finished, and the computation's having been scheduled for execution on a
+ remote system for which a determination of whether or not it commenced
+ before being cancelled cannot be made without blocking.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
+
+ This method does not block.
+
+ Returns:
+ True if the computation was cancelled any time before its result became
+ immediately available. False under all other circumstances including but
+ not limited to this object's cancel method not having been called and
+ the computation's result having become immediately available.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def running(self):
+ """Describes whether the computation is taking place.
+
+ This method does not block.
+
+ Returns:
+ True if the computation is scheduled to take place in the future or is
+ taking place now, or False if the computation took place in the past or
+ was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
+
+ This method does not block.
+
+ Returns:
+ True if the computation is known to have either completed or have been
+ unscheduled or interrupted. False if the computation may possibly be
+ executing or scheduled to execute later.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def result(self, timeout=None):
+ """Accesses the outcome of the computation or raises its exception.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ finish or be cancelled, or None if this method should block until the
+ computation has finished or is cancelled no matter how long that takes.
+
+ Returns:
+ The return value of the computation.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation does
+ not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ Exception: If the computation raised an exception, this call will raise
+ the same exception.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def exception(self, timeout=None):
+ """Return the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ terminate or be cancelled, or None if this method should block until
+ the computation is terminated or is cancelled no matter how long that
+ takes.
+
+ Returns:
+ The exception raised by the computation, or None if the computation did
+ not raise an exception.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation does
+ not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def traceback(self, timeout=None):
+ """Access the traceback of the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ terminate or be cancelled, or None if this method should block until
+ the computation is terminated or is cancelled no matter how long that
+ takes.
+
+ Returns:
+ The traceback of the exception raised by the computation, or None if the
+ computation did not raise an exception.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation does
+ not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_done_callback(self, fn):
+ """Adds a function to be called at completion of the computation.
+
+ The callback will be passed this Future object describing the outcome of
+ the computation.
+
+ If the computation has already completed, the callback will be called
+ immediately.
+
+ Args:
+ fn: A callable taking this Future object as its single parameter.
+ """
+ raise NotImplementedError()
+
+
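A brief usage sketch of the Future interface above (illustrative only, not part of this patch; response_future stands in for the object returned by a multi-callable's future method defined later in this module):

    def on_rpc_done(future):
        # The callback receives the Future itself once the computation ends.
        if future.cancelled():
            print('RPC cancelled')
        elif future.exception() is not None:
            print('RPC failed:', future.exception())
        else:
            print('RPC response:', future.result())

    response_future.add_done_callback(on_rpc_done)
    # Blocks up to 10 seconds; may raise FutureTimeoutError or the RPC's exception.
    response = response_future.result(timeout=10)
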
+################################ gRPC Enums ##################################
+
+
+@enum.unique
+class ChannelConnectivity(enum.Enum):
+ """Mirrors grpc_connectivity_state in the gRPC Core.
+
+ Attributes:
+ IDLE: The channel is idle.
+ CONNECTING: The channel is connecting.
+ READY: The channel is ready to conduct RPCs.
+ TRANSIENT_FAILURE: The channel has seen a failure from which it expects to
+ recover.
+ SHUTDOWN: The channel has seen a failure from which it cannot recover.
+ """
+ IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
+ CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
+ READY = (_cygrpc.ConnectivityState.ready, 'ready')
+ TRANSIENT_FAILURE = (
+ _cygrpc.ConnectivityState.transient_failure, 'transient failure')
+ SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
+
+
+@enum.unique
+class StatusCode(enum.Enum):
+ """Mirrors grpc_status_code in the gRPC Core."""
+ OK = (_cygrpc.StatusCode.ok, 'ok')
+ CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
+ UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
+ INVALID_ARGUMENT = (
+ _cygrpc.StatusCode.invalid_argument, 'invalid argument')
+ DEADLINE_EXCEEDED = (
+ _cygrpc.StatusCode.deadline_exceeded, 'deadline exceeded')
+ NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
+ ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
+ PERMISSION_DENIED = (
+ _cygrpc.StatusCode.permission_denied, 'permission denied')
+ RESOURCE_EXHAUSTED = (
+ _cygrpc.StatusCode.resource_exhausted, 'resource exhausted')
+ FAILED_PRECONDITION = (
+ _cygrpc.StatusCode.failed_precondition, 'failed precondition')
+ ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
+ OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
+ UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
+ INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
+ UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
+ DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
+ UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
+
+
+############################# gRPC Exceptions ################################
+
+
+class RpcError(Exception):
+ """Raised by the gRPC library to indicate non-OK-status RPC termination."""
+
+
+############################## Shared Context ################################
+
+
+class RpcContext(six.with_metaclass(abc.ABCMeta)):
+ """Provides RPC-related information and action."""
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have timed
+ out, or None if no deadline was specified for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC.
+
+ Idempotent and has no effect if the RPC has already terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_callback(self, callback):
+ """Registers a callback to be called on RPC termination.
+
+ Args:
+ callback: A no-parameter callable to be called on RPC termination.
+
+ Returns:
+ True if the callback was added and will be called later; False if the
+ callback was not added and will not later be called (because the RPC
+ already terminated or some other reason).
+ """
+ raise NotImplementedError()
+
+
+######################### Invocation-Side Context ############################
+
+
+class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
+ """Invocation-side utility object for an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata from the service-side of the RPC.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The initial metadata as a sequence of pairs of bytes.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def trailing_metadata(self):
+ """Accesses the trailing metadata from the service-side of the RPC.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The trailing metadata as a sequence of pairs of bytes.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the status code emitted by the service-side of the RPC.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details value emitted by the service-side of the RPC.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The bytes of the details of the RPC.
+ """
+ raise NotImplementedError()
+
+
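A short sketch of reading metadata and status through the Call interface above (illustrative only, not part of this patch; call is assumed to come from an invocation API defined later in this module, e.g. a multi-callable invoked with with_call=True, and StatusCode refers to the enum defined earlier in this module):

    # Each accessor blocks until the corresponding value is available.
    print('initial metadata:', call.initial_metadata())
    print('trailing metadata:', call.trailing_metadata())
    if call.code() is StatusCode.OK:
        print('RPC succeeded')
    else:
        print('RPC failed:', call.code(), call.details())
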
+############ Authentication & Authorization Interfaces & Classes #############
+
+
+class ChannelCredentials(object):
+ """A value encapsulating the data required to create a secure Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class CallCredentials(object):
+ """A value encapsulating data asserting an identity over a channel.
+
+ A CallCredentials may be composed with ChannelCredentials to always assert
+ identity for every call over that Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
+ """Provides information to call credentials metadata plugins.
+
+ Attributes:
+ service_url: A string URL of the service being called into.
+ method_name: A string of the fully qualified method name being called.
+ """
+
+
+class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
+ """Callback object received by a metadata plugin."""
+
+ def __call__(self, metadata, error):
+ """Inform the gRPC runtime of the metadata to construct a CallCredentials.
+
+ Args:
+ metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value
+ pairs.
+ error: An Exception to indicate error or None to indicate success.
+ """
+ raise NotImplementedError()
+
+
+class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
+ """A specification for custom authentication."""
+
+ def __call__(self, context, callback):
+ """Implements authentication by passing metadata to a callback.
+
+ Implementations of this method must not block.
+
+ Args:
+ context: An AuthMetadataContext providing information on the RPC that the
+ plugin is being called to authenticate.
+ callback: An AuthMetadataPluginCallback to be invoked either synchronously
+ or asynchronously.
+ """
+ raise NotImplementedError()
+
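A minimal sketch of a custom AuthMetadataPlugin that supplies a static token header (illustrative only, not part of this patch; the header name and token are placeholders, and the factory that turns a plugin into a CallCredentials is assumed and not shown in this excerpt):

    class StaticTokenAuthMetadataPlugin(AuthMetadataPlugin):
        """Attaches a fixed authorization header to every RPC."""

        def __init__(self, token):
            self._token = token

        def __call__(self, context, callback):
            # Must not block: hand the metadata to the runtime immediately.
            metadata = (('authorization', 'Bearer ' + self._token),)
            callback(metadata, None)  # error=None signals success
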
+
+class ServerCredentials(object):
+ """A value encapsulating the data required to open a secure port on a Server.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+######################## Multi-Callable Interfaces ###########################
+
+
+class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-unary RPC."""
+
+ @abc.abstractmethod
+ def __call__(
+ self, request, timeout=None, metadata=None, credentials=None,
+ with_call=False):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self, request, timeout=None, metadata=None, credentials=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future. In the event of
+ RPC completion, the returned Future's result value will be the response
+ message of the RPC. Should the event terminate with non-OK status, the
+ returned Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
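A sketch of invoking a unary-unary method through this interface, both blocking and via a Future (illustrative only, not part of this patch; the channel factory, method name, and request bytes are placeholders, and (de)serializers are omitted for brevity):

    # A Channel is assumed to come from a factory such as grpc.insecure_channel,
    # which is not shown in this excerpt of the patch (assumes `import grpc`).
    channel = grpc.insecure_channel('localhost:50051')
    echo = channel.unary_unary('/example.EchoService/Echo')

    # Blocking invocation; with_call=True additionally returns the Call.
    response, call = echo(b'ping', timeout=5, with_call=True)
    print(call.code(), call.trailing_metadata())

    # Asynchronous invocation: the returned object is both a Call and a Future.
    response_future = echo.future(b'ping', timeout=5)
    print(response_future.result())
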
+class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-stream RPC."""
+
+ @abc.abstractmethod
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ RpcError indicating termination of the RPC with non-OK status.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-unary RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(
+ self, request_iterator, timeout=None, metadata=None, credentials=None,
+ with_call=False):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(
+ self, request_iterator, timeout=None, metadata=None, credentials=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+
+ Returns:
+      An object that is both a Call for the RPC and a Future. In the event of
+      RPC completion, the returned Future's result value will be the response
+      message of the RPC. Should the RPC terminate with non-OK status, the
+      returned Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
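+
+# A minimal usage sketch for StreamUnaryMultiCallable, assuming a hypothetical
+# '/example.Math/Sum' method that consumes a stream of byte-string requests.
+def _example_stream_unary_usage(channel):
+  accumulate = channel.stream_unary('/example.Math/Sum')
+  requests = iter([b'1', b'2', b'3'])
+  # Blocking call; the request iterator is drained on a background thread.
+  total = accumulate(requests, timeout=10)
+  # Asynchronous call returning an object that is both a Call and a Future.
+  future = accumulate.future(iter([b'4', b'5']), timeout=10)
+  return total, future.result()
+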
+
+class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-stream RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(
+ self, request_iterator, timeout=None, metadata=None, credentials=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: An optional sequence of pairs of bytes to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ RpcError indicating termination of the RPC with non-OK status.
+ """
+ raise NotImplementedError()
+
+
+############################# Channel Interface ##############################
+
+
+class Channel(six.with_metaclass(abc.ABCMeta)):
+ """Affords RPC invocation via generic methods."""
+
+ @abc.abstractmethod
+ def subscribe(self, callback, try_to_connect=False):
+ """Subscribes to this Channel's connectivity.
+
+ Args:
+ callback: A callable to be invoked and passed a ChannelConnectivity value
+ describing this Channel's connectivity. The callable will be invoked
+ immediately upon subscription and again for every change to this
+ Channel's connectivity thereafter until it is unsubscribed or this
+ Channel object goes out of scope.
+ try_to_connect: A boolean indicating whether or not this Channel should
+ attempt to connect if it is not already connected and ready to conduct
+ RPCs.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unsubscribe(self, callback):
+ """Unsubscribes a callback from this Channel's connectivity.
+
+ Args:
+      callback: A callable previously registered with this Channel by having
+        been passed to its "subscribe" method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_unary(
+ self, method, request_serializer=None, response_deserializer=None):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+ Args:
+ method: The name of the RPC method.
+
+ Returns:
+ A UnaryUnaryMultiCallable value for the named unary-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_stream(
+ self, method, request_serializer=None, response_deserializer=None):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+ Args:
+ method: The name of the RPC method.
+
+ Returns:
+      A UnaryStreamMultiCallable value for the named unary-stream method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_unary(
+ self, method, request_serializer=None, response_deserializer=None):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+ Args:
+ method: The name of the RPC method.
+
+ Returns:
+ A StreamUnaryMultiCallable value for the named stream-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_stream(
+ self, method, request_serializer=None, response_deserializer=None):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+ Args:
+ method: The name of the RPC method.
+
+ Returns:
+ A StreamStreamMultiCallable value for the named stream-stream method.
+ """
+ raise NotImplementedError()
+
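+
+# A minimal usage sketch for Channel, assuming hypothetical method paths and
+# caller-supplied serialization behaviors: subscribing to connectivity and
+# constructing per-method multi-callables.
+def _example_channel_usage(channel, request_serializer, response_deserializer):
+  observed_connectivities = []
+  def on_connectivity_change(connectivity):
+    # Invoked immediately upon subscription and on every subsequent change.
+    observed_connectivities.append(connectivity)
+  channel.subscribe(on_connectivity_change, try_to_connect=True)
+  unary_echo = channel.unary_unary(
+      '/example.Echo/UnaryEcho', request_serializer, response_deserializer)
+  bidirectional_echo = channel.stream_stream('/example.Echo/BidiEcho')
+  channel.unsubscribe(on_connectivity_change)
+  return unary_echo, bidirectional_echo, observed_connectivities
+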
+
+########################## Service-Side Context ##############################
+
+
+class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
+ """A context object passed to method implementations."""
+
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the invocation-side of the RPC.
+
+ Returns:
+ The invocation metadata object as a sequence of pairs of bytes.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def peer(self):
+ """Identifies the peer that invoked the RPC being serviced.
+
+ Returns:
+ A string identifying the peer that invoked the RPC being serviced.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def send_initial_metadata(self, initial_metadata):
+ """Sends the initial metadata value to the invocation-side of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side initial metadata to transmit.
+
+ Args:
+ initial_metadata: The initial metadata of the RPC as a sequence of pairs
+ of bytes.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_trailing_metadata(self, trailing_metadata):
+ """Accepts the trailing metadata value of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side trailing metadata to transmit.
+
+ Args:
+ trailing_metadata: The trailing metadata of the RPC as a sequence of pairs
+ of bytes.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_code(self, code):
+ """Accepts the status code of the RPC.
+
+ This method need not be called by method implementations if they wish the
+ gRPC runtime to determine the status code of the RPC.
+
+ Args:
+ code: The integer status code of the RPC to be transmitted to the
+ invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_details(self, details):
+ """Accepts the service-side details of the RPC.
+
+ This method need not be called by method implementations if they have no
+ details to transmit.
+
+ Args:
+ details: The details bytes of the RPC to be transmitted to
+ the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
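+
+# A minimal sketch of a unary-unary method implementation using its
+# ServicerContext; the request/response shapes and metadata keys are
+# hypothetical and serve only to illustrate the context methods above.
+def _example_lookup_behavior(request, servicer_context):
+  import grpc  # deferred; application code would import grpc at module scope
+  servicer_context.send_initial_metadata((('lookup-version', '1'),))
+  if request == b'known-key':
+    return b'known-value'
+  else:
+    servicer_context.set_trailing_metadata((('lookup-miss', 'true'),))
+    servicer_context.set_code(grpc.StatusCode.NOT_FOUND)
+    servicer_context.set_details(
+        'No value found for the key requested by ' + servicer_context.peer())
+    return b''
+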
+
+##################### Service-Side Handler Interfaces ########################
+
+
+class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
+ """An implementation of a single RPC method.
+
+ Attributes:
+ request_streaming: Whether the RPC supports exactly one request message or
+ any arbitrary number of request messages.
+ response_streaming: Whether the RPC supports exactly one response message or
+ any arbitrary number of response messages.
+ request_deserializer: A callable behavior that accepts a byte string and
+ returns an object suitable to be passed to this object's business logic,
+ or None to indicate that this object's business logic should be passed the
+ raw request bytes.
+ response_serializer: A callable behavior that accepts an object produced by
+ this object's business logic and returns a byte string, or None to
+ indicate that the byte strings produced by this object's business logic
+ should be transmitted on the wire as they are.
+ unary_unary: This object's application-specific business logic as a callable
+ value that takes a request value and a ServicerContext object and returns
+ a response value. Only non-None if both request_streaming and
+ response_streaming are False.
+ unary_stream: This object's application-specific business logic as a
+ callable value that takes a request value and a ServicerContext object and
+ returns an iterator of response values. Only non-None if request_streaming
+ is False and response_streaming is True.
+ stream_unary: This object's application-specific business logic as a
+ callable value that takes an iterator of request values and a
+ ServicerContext object and returns a response value. Only non-None if
+ request_streaming is True and response_streaming is False.
+ stream_stream: This object's application-specific business logic as a
+ callable value that takes an iterator of request values and a
+ ServicerContext object and returns an iterator of response values. Only
+ non-None if request_streaming and response_streaming are both True.
+ """
+
+
+class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
+ """Describes an RPC that has just arrived for service.
+ Attributes:
+ method: The method name of the RPC.
+ invocation_metadata: The metadata from the invocation side of the RPC.
+ """
+
+
+class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
+ """An implementation of arbitrarily many RPC methods."""
+
+ @abc.abstractmethod
+ def service(self, handler_call_details):
+ """Services an RPC (or not).
+
+ Args:
+ handler_call_details: A HandlerCallDetails describing the RPC.
+
+ Returns:
+ An RpcMethodHandler with which the RPC may be serviced, or None to
+ indicate that this object will not be servicing the RPC.
+ """
+ raise NotImplementedError()
+
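+
+# A minimal sketch of a GenericRpcHandler that serves a single unary-unary
+# method by returning an RpcMethodHandler built with a helper defined later in
+# this module; the service and method names are hypothetical.
+class _ExampleGenericHandler(GenericRpcHandler):
+
+  def service(self, handler_call_details):
+    if handler_call_details.method == '/example.Echo/UnaryEcho':
+      # Echo the request bytes back unchanged.
+      return unary_unary_rpc_method_handler(lambda request, context: request)
+    else:
+      # Returning None lets some other registered handler service the RPC.
+      return None
+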
+
+############################# Server Interface ###############################
+
+
+class Server(six.with_metaclass(abc.ABCMeta)):
+ """Services RPCs."""
+
+ @abc.abstractmethod
+ def add_generic_rpc_handlers(self, generic_rpc_handlers):
+ """Registers GenericRpcHandlers with this Server.
+
+ This method is only safe to call before the server is started.
+
+ Args:
+ generic_rpc_handlers: An iterable of GenericRpcHandlers that will be used
+ to service RPCs after this Server is started.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_insecure_port(self, address):
+ """Reserves a port for insecure RPC service once this Server becomes active.
+
+    This method may only be called before this Server's start method has been
+    called.
+
+ Args:
+ address: The address for which to open a port.
+
+ Returns:
+      An integer port on which RPCs will be serviced after this Server has been
+ started. This is typically the same number as the port number contained
+ in the passed address, but will likely be different if the port number
+ contained in the passed address was zero.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_secure_port(self, address, server_credentials):
+ """Reserves a port for secure RPC service after this Server becomes active.
+
+    This method may only be called before this Server's start method has been
+    called.
+
+ Args:
+ address: The address for which to open a port.
+ server_credentials: A ServerCredentials.
+
+ Returns:
+      An integer port on which RPCs will be serviced after this Server has been
+ started. This is typically the same number as the port number contained
+ in the passed address, but will likely be different if the port number
+ contained in the passed address was zero.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def start(self):
+ """Starts this Server's service of RPCs.
+
+ This method may only be called while the server is not serving RPCs (i.e. it
+ is not idempotent).
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this Server's service of RPCs.
+
+    All calls to this method immediately stop service of new RPCs. The point at
+    which existing RPCs are aborted is controlled by the grace period parameter
+    passed to this method.
+
+ This method may be called at any time and is idempotent. Passing a smaller
+ grace value than has been passed in a previous call will have the effect of
+ stopping the Server sooner. Passing a larger grace value than has been
+ passed in a previous call will not have the effect of stopping the server
+ later.
+
+ Args:
+ grace: A duration of time in seconds to allow existing RPCs to complete
+ before being aborted by this Server's stopping. If None, this method
+ will block until the server is completely stopped.
+
+ Returns:
+ A threading.Event that will be set when this Server has completely
+ stopped. The returned event may not be set until after the full grace
+ period (if some ongoing RPC continues for the full length of the period)
+      or it may be set much sooner (such as if this Server had no RPCs underway
+ at the time it was stopped or if all RPCs that it had underway completed
+ very early in the grace period).
+ """
+ raise NotImplementedError()
+
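+
+# A minimal sketch of the Server lifecycle documented above; the address and
+# grace value are hypothetical, and handlers are assumed to have been built
+# elsewhere.
+def _example_server_lifecycle(a_server, generic_handlers):
+  a_server.add_generic_rpc_handlers(generic_handlers)
+  port = a_server.add_insecure_port('[::]:0')  # port zero picks an unused port
+  a_server.start()
+  # ... RPCs are serviced on the returned port while the application runs ...
+  stop_event = a_server.stop(grace=5)
+  stop_event.wait()  # a threading.Event set once the Server has fully stopped
+  return port
+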
+
+################################# Functions ################################
+
+
+def unary_unary_rpc_method_handler(
+ behavior, request_deserializer=None, response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-unary RPC method.
+
+ Args:
+ behavior: The implementation of an RPC method as a callable behavior taking
+ a single request value and returning a single response value.
+ request_deserializer: An optional request deserialization behavior.
+ response_serializer: An optional response serialization behavior.
+
+ Returns:
+ An RpcMethodHandler for a unary-unary RPC method constructed from the given
+ parameters.
+ """
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(
+ False, False, request_deserializer, response_serializer, behavior, None,
+ None, None)
+
+
+def unary_stream_rpc_method_handler(
+ behavior, request_deserializer=None, response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-stream RPC method.
+
+ Args:
+ behavior: The implementation of an RPC method as a callable behavior taking
+ a single request value and returning an iterator of response values.
+ request_deserializer: An optional request deserialization behavior.
+ response_serializer: An optional response serialization behavior.
+
+ Returns:
+ An RpcMethodHandler for a unary-stream RPC method constructed from the
+ given parameters.
+ """
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(
+ False, True, request_deserializer, response_serializer, None, behavior,
+ None, None)
+
+
+def stream_unary_rpc_method_handler(
+ behavior, request_deserializer=None, response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-unary RPC method.
+
+ Args:
+ behavior: The implementation of an RPC method as a callable behavior taking
+ an iterator of request values and returning a single response value.
+ request_deserializer: An optional request deserialization behavior.
+ response_serializer: An optional response serialization behavior.
+
+ Returns:
+ An RpcMethodHandler for a stream-unary RPC method constructed from the
+ given parameters.
+ """
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(
+ True, False, request_deserializer, response_serializer, None, None,
+ behavior, None)
+
+
+def stream_stream_rpc_method_handler(
+ behavior, request_deserializer=None, response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-stream RPC method.
+
+ Args:
+ behavior: The implementation of an RPC method as a callable behavior taking
+ an iterator of request values and returning an iterator of response
+ values.
+ request_deserializer: An optional request deserialization behavior.
+ response_serializer: An optional response serialization behavior.
+
+ Returns:
+ An RpcMethodHandler for a stream-stream RPC method constructed from the
+ given parameters.
+ """
+ from grpc import _utilities
+ return _utilities.RpcMethodHandler(
+ True, True, request_deserializer, response_serializer, None, None, None,
+ behavior)
+
+
+def method_handlers_generic_handler(service, method_handlers):
+ """Creates a grpc.GenericRpcHandler from RpcMethodHandlers.
+
+ Args:
+ service: A service name to be used for the given method handlers.
+ method_handlers: A dictionary from method name to RpcMethodHandler
+ implementing the named method.
+
+ Returns:
+ A GenericRpcHandler constructed from the given parameters.
+ """
+ from grpc import _utilities
+ return _utilities.DictionaryGenericHandler(service, method_handlers)
+
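+
+# A minimal sketch composing the helpers above into a GenericRpcHandler for a
+# hypothetical 'example.Echo' service with one unary-unary and one
+# server-streaming method.
+def _example_echo_generic_handler():
+  def unary_echo(request, servicer_context):
+    return request
+  def server_streaming_echo(request, servicer_context):
+    for _ in range(3):
+      yield request
+  method_handlers = {
+      'UnaryEcho': unary_unary_rpc_method_handler(unary_echo),
+      'ServerStreamingEcho': unary_stream_rpc_method_handler(
+          server_streaming_echo),
+  }
+  return method_handlers_generic_handler('example.Echo', method_handlers)
+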
+
+def ssl_channel_credentials(
+ root_certificates=None, private_key=None, certificate_chain=None):
+ """Creates a ChannelCredentials for use with an SSL-enabled Channel.
+
+ Args:
+ root_certificates: The PEM-encoded root certificates or unset to ask for
+ them to be retrieved from a default location.
+ private_key: The PEM-encoded private key to use or unset if no private key
+ should be used.
+ certificate_chain: The PEM-encoded certificate chain to use or unset if no
+ certificate chain should be used.
+
+ Returns:
+ A ChannelCredentials for use with an SSL-enabled Channel.
+ """
+ if private_key is not None or certificate_chain is not None:
+ pair = _cygrpc.SslPemKeyCertPair(private_key, certificate_chain)
+ else:
+ pair = None
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_ssl(root_certificates, pair))
+
+
+def metadata_call_credentials(metadata_plugin, name=None):
+ """Construct CallCredentials from an AuthMetadataPlugin.
+
+ Args:
+ metadata_plugin: An AuthMetadataPlugin to use as the authentication behavior
+ in the created CallCredentials.
+ name: A name for the plugin.
+
+ Returns:
+ A CallCredentials.
+ """
+ from grpc import _plugin_wrapping
+ if name is None:
+ try:
+ effective_name = metadata_plugin.__name__
+ except AttributeError:
+ effective_name = metadata_plugin.__class__.__name__
+ else:
+ effective_name = name
+ return CallCredentials(
+ _plugin_wrapping.call_credentials_metadata_plugin(
+ metadata_plugin, effective_name))
+
+
+def access_token_call_credentials(access_token):
+ """Construct CallCredentials from an access token.
+
+ Args:
+    access_token: A string to place directly in the HTTP request
+      authorization header, i.e. "Authorization: Bearer <access_token>".
+
+ Returns:
+ A CallCredentials.
+ """
+ from grpc import _auth
+ return metadata_call_credentials(
+ _auth.AccessTokenCallCredentials(access_token))
+
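+
+# A minimal sketch of building CallCredentials either from a custom
+# AuthMetadataPlugin-style callable or from a raw access token; the header
+# name and token values are hypothetical placeholders.
+def _example_call_credentials():
+  def session_token_plugin(context, callback):
+    # Must not block; report the metadata (or an error) via the callback.
+    callback(
+        (('session-token', 'fake-token-for-' + context.service_url),), None)
+  plugin_credentials = metadata_call_credentials(
+      session_token_plugin, name='session_token_plugin')
+  token_credentials = access_token_call_credentials('fake-oauth2-token')
+  return composite_call_credentials(plugin_credentials, token_credentials)
+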
+
+def composite_call_credentials(call_credentials, additional_call_credentials):
+ """Compose two CallCredentials to make a new one.
+
+ Args:
+ call_credentials: A CallCredentials object.
+ additional_call_credentials: Another CallCredentials object to compose on
+ top of call_credentials.
+
+ Returns:
+ A new CallCredentials composed of the two given CallCredentials.
+ """
+ return CallCredentials(
+ _cygrpc.call_credentials_composite(
+ call_credentials._credentials,
+ additional_call_credentials._credentials))
+
+
+def composite_channel_credentials(channel_credentials, call_credentials):
+ """Compose a ChannelCredentials and a CallCredentials.
+
+ Args:
+ channel_credentials: A ChannelCredentials.
+ call_credentials: A CallCredentials.
+
+ Returns:
+ A ChannelCredentials composed of the given ChannelCredentials and
+ CallCredentials.
+ """
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_composite(
+ channel_credentials._credentials, call_credentials._credentials))
+
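+
+# A minimal sketch of attaching per-call credentials to channel-level SSL
+# credentials and opening a secure channel; the target and token are
+# hypothetical.
+def _example_secure_channel_with_call_credentials():
+  channel_credentials = ssl_channel_credentials()
+  call_credentials = access_token_call_credentials('fake-oauth2-token')
+  composite = composite_channel_credentials(
+      channel_credentials, call_credentials)
+  return secure_channel('example.googleapis.com:443', composite)
+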
+
+def ssl_server_credentials(
+ private_key_certificate_chain_pairs, root_certificates=None,
+ require_client_auth=False):
+ """Creates a ServerCredentials for use with an SSL-enabled Server.
+
+ Args:
+ private_key_certificate_chain_pairs: A nonempty sequence each element of
+ which is a pair the first element of which is a PEM-encoded private key
+ and the second element of which is the corresponding PEM-encoded
+ certificate chain.
+ root_certificates: PEM-encoded client root certificates to be used for
+ verifying authenticated clients. If omitted, require_client_auth must also
+ be omitted or be False.
+ require_client_auth: A boolean indicating whether or not to require clients
+ to be authenticated. May only be True if root_certificates is not None.
+
+ Returns:
+ A ServerCredentials for use with an SSL-enabled Server.
+ """
+ if len(private_key_certificate_chain_pairs) == 0:
+ raise ValueError(
+ 'At least one private key-certificate chain pair is required!')
+ elif require_client_auth and root_certificates is None:
+ raise ValueError(
+ 'Illegal to require client auth without providing root certificates!')
+ else:
+ return ServerCredentials(
+ _cygrpc.server_credentials_ssl(
+ root_certificates,
+ [_cygrpc.SslPemKeyCertPair(key, pem)
+ for key, pem in private_key_certificate_chain_pairs],
+ require_client_auth))
+
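+
+# A minimal sketch of securing a Server with SSL credentials; the key and
+# certificate bytes are hypothetical placeholders that real code would read
+# from disk, and port zero asks for an unused port.
+def _example_secure_server(a_server, private_key_bytes, certificate_bytes):
+  server_credentials = ssl_server_credentials(
+      ((private_key_bytes, certificate_bytes),))
+  return a_server.add_secure_port('[::]:0', server_credentials)
+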
+
+def channel_ready_future(channel):
+ """Creates a Future tracking when a Channel is ready.
+
+ Cancelling the returned Future does not tell the given Channel to abandon
+ attempts it may have been making to connect; cancelling merely deactivates the
+ returned Future's subscription to the given Channel's connectivity.
+
+ Args:
+ channel: A Channel.
+
+ Returns:
+ A Future that matures when the given Channel has connectivity
+ ChannelConnectivity.READY.
+ """
+ from grpc import _utilities
+ return _utilities.channel_ready_future(channel)
+
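+
+# A minimal sketch of blocking until a channel is ready before issuing RPCs,
+# with a hypothetical ten-second timeout.
+def _example_wait_for_ready(channel):
+  import grpc  # deferred; application code would import grpc at module scope
+  ready_future = channel_ready_future(channel)
+  try:
+    ready_future.result(timeout=10)
+  except grpc.FutureTimeoutError:
+    return False  # the channel did not become ready within ten seconds
+  return True
+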
+
+def insecure_channel(target, options=None):
+ """Creates an insecure Channel to a server.
+
+ Args:
+ target: The target to which to connect.
+ options: A sequence of string-value pairs according to which to configure
+ the created channel.
+
+ Returns:
+ A Channel to the target through which RPCs may be conducted.
+ """
+ from grpc import _channel
+ return _channel.Channel(target, options, None)
+
+
+def secure_channel(target, credentials, options=None):
+ """Creates an insecure Channel to a server.
+
+ Args:
+ target: The target to which to connect.
+ credentials: A ChannelCredentials instance.
+ options: A sequence of string-value pairs according to which to configure
+ the created channel.
+
+ Returns:
+ A Channel to the target through which RPCs may be conducted.
+ """
+ from grpc import _channel
+ return _channel.Channel(target, options, credentials)
+
+
+def server(generic_rpc_handlers, thread_pool, options=None):
+ """Creates a Server with which RPCs can be serviced.
+
+ The GenericRpcHandlers passed to this function needn't be the only
+ GenericRpcHandlers that will be used to serve RPCs; others may be added later
+ by calling add_generic_rpc_handlers any time before the returned server is
+ started.
+
+ Args:
+    generic_rpc_handlers: An iterable of GenericRpcHandlers that will be used
+      to service RPCs after the returned Server is started.
+ thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server
+ to service RPCs.
+
+ Returns:
+ A Server with which RPCs can be serviced.
+ """
+ from grpc import _server
+ return _server.Server(generic_rpc_handlers, thread_pool)
+
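+
+# A minimal sketch of constructing a Server with a thread pool and previously
+# built handlers; the worker count is a hypothetical choice.
+def _example_create_server(generic_handlers):
+  from concurrent import futures  # deferred; normally imported at module scope
+  thread_pool = futures.ThreadPoolExecutor(max_workers=10)
+  return server(generic_handlers, thread_pool)
+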
+
+################################### __all__ #################################
+
+
+__all__ = (
+ 'FutureTimeoutError',
+ 'FutureCancelledError',
+ 'Future',
+ 'ChannelConnectivity',
+ 'StatusCode',
+ 'RpcError',
+ 'RpcContext',
+ 'Call',
+ 'ChannelCredentials',
+ 'CallCredentials',
+ 'AuthMetadataContext',
+ 'AuthMetadataPluginCallback',
+ 'AuthMetadataPlugin',
+ 'ServerCredentials',
+ 'UnaryUnaryMultiCallable',
+ 'UnaryStreamMultiCallable',
+ 'StreamUnaryMultiCallable',
+ 'StreamStreamMultiCallable',
+ 'Channel',
+ 'ServicerContext',
+ 'RpcMethodHandler',
+ 'HandlerCallDetails',
+ 'GenericRpcHandler',
+ 'Server',
+ 'unary_unary_rpc_method_handler',
+ 'unary_stream_rpc_method_handler',
+ 'stream_unary_rpc_method_handler',
+ 'stream_stream_rpc_method_handler',
+ 'method_handlers_generic_handler',
+ 'ssl_channel_credentials',
+ 'metadata_call_credentials',
+ 'access_token_call_credentials',
+ 'composite_call_credentials',
+ 'composite_channel_credentials',
+ 'ssl_server_credentials',
+ 'channel_ready_future',
+ 'insecure_channel',
+ 'secure_channel',
+ 'server',
+)
diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py
index 00788bd4cf..48410167a0 100644
--- a/src/python/grpcio/grpc/_adapter/_low.py
+++ b/src/python/grpcio/grpc/_adapter/_low.py
@@ -30,8 +30,8 @@
import threading
from grpc import _grpcio_metadata
+from grpc import _plugin_wrapping
from grpc._cython import cygrpc
-from grpc._adapter import _implementations
from grpc._adapter import _types
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
@@ -57,78 +57,8 @@ def channel_credentials_ssl(
return cygrpc.channel_credentials_ssl(root_certificates, pair)
-class _WrappedCygrpcCallback(object):
-
- def __init__(self, cygrpc_callback):
- self.is_called = False
- self.error = None
- self.is_called_lock = threading.Lock()
- self.cygrpc_callback = cygrpc_callback
-
- def _invoke_failure(self, error):
- # TODO(atash) translate different Exception superclasses into different
- # status codes.
- self.cygrpc_callback(
- cygrpc.Metadata([]), cygrpc.StatusCode.internal, error.message)
-
- def _invoke_success(self, metadata):
- try:
- cygrpc_metadata = cygrpc.Metadata(
- cygrpc.Metadatum(key, value)
- for key, value in metadata)
- except Exception as error:
- self._invoke_failure(error)
- return
- self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, '')
-
- def __call__(self, metadata, error):
- with self.is_called_lock:
- if self.is_called:
- raise RuntimeError('callback should only ever be invoked once')
- if self.error:
- self._invoke_failure(self.error)
- return
- self.is_called = True
- if error is None:
- self._invoke_success(metadata)
- else:
- self._invoke_failure(error)
-
- def notify_failure(self, error):
- with self.is_called_lock:
- if not self.is_called:
- self.error = error
-
-
-class _WrappedPlugin(object):
-
- def __init__(self, plugin):
- self.plugin = plugin
-
- def __call__(self, context, cygrpc_callback):
- wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
- wrapped_context = _implementations.AuthMetadataContext(context.service_url,
- context.method_name)
- try:
- self.plugin(
- wrapped_context,
- _implementations.AuthMetadataPluginCallback(wrapped_cygrpc_callback))
- except Exception as error:
- wrapped_cygrpc_callback.notify_failure(error)
- raise
-
-
-def call_credentials_metadata_plugin(plugin, name):
- """
- Args:
- plugin: A callable accepting a _types.AuthMetadataContext
- object and a callback (itself accepting a list of metadata key/value
- 2-tuples and a None-able exception value). The callback must be eventually
- called, but need not be called in plugin's invocation.
- plugin's invocation must be non-blocking.
- """
- return cygrpc.call_credentials_metadata_plugin(
- cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), name))
+call_credentials_metadata_plugin = (
+ _plugin_wrapping.call_credentials_metadata_plugin)
class CompletionQueue(_types.CompletionQueue):
diff --git a/src/python/grpcio/grpc/_adapter/_types.py b/src/python/grpcio/grpc/_adapter/_types.py
index f8405949d4..b7cc6fbbb5 100644
--- a/src/python/grpcio/grpc/_adapter/_types.py
+++ b/src/python/grpcio/grpc/_adapter/_types.py
@@ -114,7 +114,7 @@ class ConnectivityState(enum.IntEnum):
CONNECTING = cygrpc.ConnectivityState.connecting
READY = cygrpc.ConnectivityState.ready
TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure
- FATAL_FAILURE = cygrpc.ConnectivityState.fatal_failure
+ FATAL_FAILURE = cygrpc.ConnectivityState.shutdown
class Status(collections.namedtuple(
diff --git a/src/python/grpcio/grpc/_auth.py b/src/python/grpcio/grpc/_auth.py
new file mode 100644
index 0000000000..dea3221c9d
--- /dev/null
+++ b/src/python/grpcio/grpc/_auth.py
@@ -0,0 +1,86 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""GRPCAuthMetadataPlugins for standard authentication."""
+
+import inspect
+from concurrent import futures
+
+import grpc
+
+
+def _sign_request(callback, token, error):
+ metadata = (('authorization', 'Bearer {}'.format(token)),)
+ callback(metadata, error)
+
+
+class GoogleCallCredentials(grpc.AuthMetadataPlugin):
+ """Metadata wrapper for GoogleCredentials from the oauth2client library."""
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+ self._pool = futures.ThreadPoolExecutor(max_workers=1)
+
+ # Hack to determine if these are JWT creds and we need to pass
+ # additional_claims when getting a token
+ if 'additional_claims' in inspect.getargspec(
+ credentials.get_access_token).args:
+ self._is_jwt = True
+ else:
+ self._is_jwt = False
+
+ def __call__(self, context, callback):
+ # MetadataPlugins cannot block (see grpc.beta.interfaces.py)
+ if self._is_jwt:
+ future = self._pool.submit(self._credentials.get_access_token,
+ additional_claims={'aud': context.service_url})
+ else:
+ future = self._pool.submit(self._credentials.get_access_token)
+ future.add_done_callback(lambda x: self._get_token_callback(callback, x))
+
+ def _get_token_callback(self, callback, future):
+ try:
+ access_token = future.result().access_token
+ except Exception as e:
+ _sign_request(callback, None, e)
+ else:
+ _sign_request(callback, access_token, None)
+
+ def __del__(self):
+ self._pool.shutdown(wait=False)
+
+
+class AccessTokenCallCredentials(grpc.AuthMetadataPlugin):
+ """Metadata wrapper for raw access token credentials."""
+
+ def __init__(self, access_token):
+ self._access_token = access_token
+
+ def __call__(self, context, callback):
+ _sign_request(callback, self._access_token, None)
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
new file mode 100644
index 0000000000..d9315d2e6c
--- /dev/null
+++ b/src/python/grpcio/grpc/_channel.py
@@ -0,0 +1,852 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Invocation-side implementation of gRPC Python."""
+
+import sys
+import threading
+import time
+
+import grpc
+from grpc import _common
+from grpc import _grpcio_metadata
+from grpc.framework.foundation import callable_util
+from grpc._cython import cygrpc
+
+_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
+
+_EMPTY_FLAGS = 0
+_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
+_EMPTY_METADATA = cygrpc.Metadata(())
+
+_UNARY_UNARY_INITIAL_DUE = (
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.send_message,
+ cygrpc.OperationType.send_close_from_client,
+ cygrpc.OperationType.receive_initial_metadata,
+ cygrpc.OperationType.receive_message,
+ cygrpc.OperationType.receive_status_on_client,
+)
+_UNARY_STREAM_INITIAL_DUE = (
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.send_message,
+ cygrpc.OperationType.send_close_from_client,
+ cygrpc.OperationType.receive_initial_metadata,
+ cygrpc.OperationType.receive_status_on_client,
+)
+_STREAM_UNARY_INITIAL_DUE = (
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.receive_initial_metadata,
+ cygrpc.OperationType.receive_message,
+ cygrpc.OperationType.receive_status_on_client,
+)
+_STREAM_STREAM_INITIAL_DUE = (
+ cygrpc.OperationType.send_initial_metadata,
+ cygrpc.OperationType.receive_initial_metadata,
+ cygrpc.OperationType.receive_status_on_client,
+)
+
+_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
+ 'Exception calling channel subscription callback!')
+
+
+def _deadline(timeout):
+ if timeout is None:
+ return None, _INFINITE_FUTURE
+ else:
+ deadline = time.time() + timeout
+ return deadline, cygrpc.Timespec(deadline)
+
+
+def _unknown_code_details(unknown_cygrpc_code, details):
+ return 'Server sent unknown code {} and details "{}"'.format(
+ unknown_cygrpc_code, details)
+
+
+def _wait_once_until(condition, until):
+ if until is None:
+ condition.wait()
+ else:
+ remaining = until - time.time()
+ if remaining < 0:
+ raise grpc.FutureTimeoutError()
+ else:
+ condition.wait(timeout=remaining)
+
+
+class _RPCState(object):
+
+ def __init__(self, due, initial_metadata, trailing_metadata, code, details):
+ self.condition = threading.Condition()
+ # The cygrpc.OperationType objects representing events due from the RPC's
+ # completion queue.
+ self.due = set(due)
+ self.initial_metadata = initial_metadata
+ self.response = None
+ self.trailing_metadata = trailing_metadata
+ self.code = code
+ self.details = details
+ # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
+ # slightly wonky, so they have to be tracked separately from the rest of the
+ # result of the RPC. This field tracks whether cancellation was requested
+ # prior to termination of the RPC.
+ self.cancelled = False
+ self.callbacks = []
+
+
+def _abort(state, code, details):
+ if state.code is None:
+ state.code = code
+ state.details = details
+ if state.initial_metadata is None:
+ state.initial_metadata = _EMPTY_METADATA
+ state.trailing_metadata = _EMPTY_METADATA
+
+
+def _handle_event(event, state, response_deserializer):
+ callbacks = []
+ for batch_operation in event.batch_operations:
+ operation_type = batch_operation.type
+ state.due.remove(operation_type)
+ if operation_type == cygrpc.OperationType.receive_initial_metadata:
+ state.initial_metadata = batch_operation.received_metadata
+ elif operation_type == cygrpc.OperationType.receive_message:
+ serialized_response = batch_operation.received_message.bytes()
+ if serialized_response is not None:
+ response = _common.deserialize(
+ serialized_response, response_deserializer)
+ if response is None:
+ details = 'Exception deserializing response!'
+ _abort(state, grpc.StatusCode.INTERNAL, details)
+ else:
+ state.response = response
+ elif operation_type == cygrpc.OperationType.receive_status_on_client:
+ state.trailing_metadata = batch_operation.received_metadata
+ if state.code is None:
+ code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
+ batch_operation.received_status_code)
+ if code is None:
+ state.code = grpc.StatusCode.UNKNOWN
+ state.details = _unknown_code_details(
+ batch_operation.received_status_code,
+ batch_operation.received_status_details)
+ else:
+ state.code = code
+ state.details = batch_operation.received_status_details
+ callbacks.extend(state.callbacks)
+ state.callbacks = None
+ return callbacks
+
+
+def _event_handler(state, call, response_deserializer):
+ def handle_event(event):
+ with state.condition:
+ callbacks = _handle_event(event, state, response_deserializer)
+ state.condition.notify_all()
+ done = not state.due
+ for callback in callbacks:
+ callback()
+ return call if done else None
+ return handle_event
+
+
+def _consume_request_iterator(
+ request_iterator, state, call, request_serializer):
+ event_handler = _event_handler(state, call, None)
+ def consume_request_iterator():
+ for request in request_iterator:
+ serialized_request = _common.serialize(request, request_serializer)
+ with state.condition:
+ if state.code is None and not state.cancelled:
+ if serialized_request is None:
+ call.cancel()
+ details = 'Exception serializing request!'
+ _abort(state, grpc.StatusCode.INTERNAL, details)
+ return
+ else:
+ operations = (
+ cygrpc.operation_send_message(
+ serialized_request, _EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ state.due.add(cygrpc.OperationType.send_message)
+ while True:
+ state.condition.wait()
+ if state.code is None:
+ if cygrpc.OperationType.send_message not in state.due:
+ break
+ else:
+ return
+ else:
+ return
+ with state.condition:
+ if state.code is None:
+ operations = (
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ state.due.add(cygrpc.OperationType.send_close_from_client)
+ thread = threading.Thread(target=consume_request_iterator)
+ thread.start()
+
+
+class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
+
+ def __init__(self, state, call, response_deserializer, deadline):
+ super(_Rendezvous, self).__init__()
+ self._state = state
+ self._call = call
+ self._response_deserializer = response_deserializer
+ self._deadline = deadline
+
+ def cancel(self):
+ with self._state.condition:
+ if self._state.code is None:
+ self._call.cancel()
+ self._state.cancelled = True
+ _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
+ self._state.condition.notify_all()
+ return False
+
+ def cancelled(self):
+ with self._state.condition:
+ return self._state.cancelled
+
+ def running(self):
+ with self._state.condition:
+ return self._state.code is None
+
+ def done(self):
+ with self._state.condition:
+ return self._state.code is not None
+
+ def result(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return self._state.response
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ raise self
+
+ def exception(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return None
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ return self
+
+ def traceback(self, timeout=None):
+ until = None if timeout is None else time.time() + timeout
+ with self._state.condition:
+ while True:
+ if self._state.code is None:
+ _wait_once_until(self._state.condition, until)
+ elif self._state.code is grpc.StatusCode.OK:
+ return None
+ elif self._state.cancelled:
+ raise grpc.FutureCancelledError()
+ else:
+ try:
+ raise self
+ except grpc.RpcError:
+ return sys.exc_info()[2]
+
+ def add_done_callback(self, fn):
+ with self._state.condition:
+ if self._state.code is None:
+ self._state.callbacks.append(lambda: fn(self))
+ return
+
+ fn(self)
+
+ def _next(self):
+ with self._state.condition:
+ if self._state.code is None:
+ event_handler = _event_handler(
+ self._state, self._call, self._response_deserializer)
+ self._call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ event_handler)
+ self._state.due.add(cygrpc.OperationType.receive_message)
+ elif self._state.code is grpc.StatusCode.OK:
+ raise StopIteration()
+ else:
+ raise self
+ while True:
+ self._state.condition.wait()
+ if self._state.response is not None:
+ response = self._state.response
+ self._state.response = None
+ return response
+ elif cygrpc.OperationType.receive_message not in self._state.due:
+ if self._state.code is grpc.StatusCode.OK:
+ raise StopIteration()
+ elif self._state.code is not None:
+ raise self
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+ def is_active(self):
+ with self._state.condition:
+ return self._state.code is None
+
+ def time_remaining(self):
+ if self._deadline is None:
+ return None
+ else:
+ return max(self._deadline - time.time(), 0)
+
+ def add_cancellation_callback(self, callback):
+ with self._state.condition:
+ if self._state.callbacks is None:
+ return False
+ else:
+ self._state.callbacks.append(lambda unused_future: callback())
+ return True
+
+ def initial_metadata(self):
+ with self._state.condition:
+ while self._state.initial_metadata is None:
+ self._state.condition.wait()
+ return self._state.initial_metadata
+
+ def trailing_metadata(self):
+ with self._state.condition:
+ while self._state.trailing_metadata is None:
+ self._state.condition.wait()
+ return self._state.trailing_metadata
+
+ def code(self):
+ with self._state.condition:
+ while self._state.code is None:
+ self._state.condition.wait()
+ return self._state.code
+
+ def details(self):
+ with self._state.condition:
+ while self._state.details is None:
+ self._state.condition.wait()
+ return self._state.details
+
+ def _repr(self):
+ with self._state.condition:
+ if self._state.code is None:
+ return '<_Rendezvous object of in-flight RPC>'
+ else:
+ return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
+ self._state.code, self._state.details)
+
+ def __repr__(self):
+ return self._repr()
+
+ def __str__(self):
+ return self._repr()
+
+ def __del__(self):
+ with self._state.condition:
+ if self._state.code is None:
+ self._call.cancel()
+ self._state.cancelled = True
+ self._state.code = grpc.StatusCode.CANCELLED
+ self._state.condition.notify_all()
+
+
+def _start_unary_request(request, timeout, request_serializer):
+ deadline, deadline_timespec = _deadline(timeout)
+ serialized_request = _common.serialize(request, request_serializer)
+ if serialized_request is None:
+ state = _RPCState(
+ (), _EMPTY_METADATA, _EMPTY_METADATA, grpc.StatusCode.INTERNAL,
+ 'Exception serializing request!')
+ rendezvous = _Rendezvous(state, None, None, deadline)
+ return deadline, deadline_timespec, None, rendezvous
+ else:
+ return deadline, deadline_timespec, serialized_request, None
+
+
+def _end_unary_response_blocking(state, with_call, deadline):
+ if state.code is grpc.StatusCode.OK:
+ if with_call:
+ rendezvous = _Rendezvous(state, None, None, deadline)
+ return state.response, rendezvous
+ else:
+ return state.response
+ else:
+ raise _Rendezvous(state, None, None, deadline)
+
+
+class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
+
+ def __init__(
+ self, channel, create_managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._create_managed_call = create_managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def _prepare(self, request, timeout, metadata):
+ deadline, deadline_timespec, serialized_request, rendezvous = (
+ _start_unary_request(request, timeout, self._request_serializer))
+ if serialized_request is None:
+ return None, None, None, None, rendezvous
+ else:
+ state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ return state, operations, deadline, deadline_timespec, None
+
+ def __call__(
+ self, request, timeout=None, metadata=None, credentials=None,
+ with_call=False):
+ state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
+ request, timeout, metadata)
+ if rendezvous:
+ raise rendezvous
+ else:
+ completion_queue = cygrpc.CompletionQueue()
+ call = self._channel.create_call(
+ None, 0, completion_queue, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ call.start_batch(cygrpc.Operations(operations), None)
+ _handle_event(completion_queue.poll(), state, self._response_deserializer)
+ return _end_unary_response_blocking(state, with_call, deadline)
+
+ def future(self, request, timeout=None, metadata=None, credentials=None):
+ state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
+ request, timeout, metadata)
+ if rendezvous:
+ return rendezvous
+ else:
+ call = self._create_managed_call(
+ None, 0, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
+
+
+class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
+
+ def __init__(
+ self, channel, create_managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._create_managed_call = create_managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self, request, timeout=None, metadata=None, credentials=None):
+ deadline, deadline_timespec, serialized_request, rendezvous = (
+ _start_unary_request(request, timeout, self._request_serializer))
+ if serialized_request is None:
+ raise rendezvous
+ else:
+ state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
+ call = self._create_managed_call(
+ None, 0, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
+
+
+class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
+
+ def __init__(
+ self, channel, create_managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._create_managed_call = create_managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(
+ self, request_iterator, timeout=None, metadata=None, credentials=None,
+ with_call=False):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ completion_queue = cygrpc.CompletionQueue()
+ call = self._channel.create_call(
+ None, 0, completion_queue, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ with state.condition:
+ call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ None)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), None)
+ _consume_request_iterator(
+ request_iterator, state, call, self._request_serializer)
+ while True:
+ event = completion_queue.poll()
+ with state.condition:
+ _handle_event(event, state, self._response_deserializer)
+ state.condition.notify_all()
+ if not state.due:
+ break
+ return _end_unary_response_blocking(state, with_call, deadline)
+
+ def future(
+ self, request_iterator, timeout=None, metadata=None, credentials=None):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ call = self._create_managed_call(
+ None, 0, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ _consume_request_iterator(
+ request_iterator, state, call, self._request_serializer)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
+
+
+class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
+
+ def __init__(
+ self, channel, create_managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._create_managed_call = create_managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(
+ self, request_iterator, timeout=None, metadata=None, credentials=None):
+ deadline, deadline_timespec = _deadline(timeout)
+ state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
+ call = self._create_managed_call(
+ None, 0, self._method, None, deadline_timespec)
+ if credentials is not None:
+ call.set_credentials(credentials._credentials)
+ event_handler = _event_handler(state, call, self._response_deserializer)
+ with state.condition:
+ call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
+ event_handler)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _common.metadata(metadata), _EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ call.start_batch(cygrpc.Operations(operations), event_handler)
+ _consume_request_iterator(
+ request_iterator, state, call, self._request_serializer)
+ return _Rendezvous(state, call, self._response_deserializer, deadline)
+
+
+class _ChannelCallState(object):
+
+ def __init__(self, channel):
+ self.lock = threading.Lock()
+ self.channel = channel
+ self.completion_queue = cygrpc.CompletionQueue()
+ self.managed_calls = None
+
+
+def _call_spin(state):
+ while True:
+ event = state.completion_queue.poll()
+ completed_call = event.tag(event)
+ if completed_call is not None:
+ with state.lock:
+ state.managed_calls.remove(completed_call)
+ if not state.managed_calls:
+ state.managed_calls = None
+ return
+
+
+def _create_channel_managed_call(state):
+ def create_channel_managed_call(parent, flags, method, host, deadline):
+ """Creates a managed cygrpc.Call.
+
+    Callers of this function must conduct at least one operation on the returned
+    call. The tags associated with operations conducted on the returned call
+    must be callables accepting the completion-queue event and returning None to
+    indicate that this channel should continue polling for events associated
+    with the call, or returning the call itself to indicate that no more events
+    associated with the call will be generated.
+
+ Args:
+ parent: A cygrpc.Call to be used as the parent of the created call.
+ flags: An integer bitfield of call flags.
+ method: The RPC method.
+ host: A host string for the created call.
+ deadline: A cygrpc.Timespec to be the deadline of the created call.
+
+ Returns:
+ A cygrpc.Call with which to conduct an RPC.
+ """
+ with state.lock:
+ call = state.channel.create_call(
+ parent, flags, state.completion_queue, method, host, deadline)
+ if state.managed_calls is None:
+ state.managed_calls = set((call,))
+ spin_thread = threading.Thread(target=_call_spin, args=(state,))
+ spin_thread.start()
+ else:
+ state.managed_calls.add(call)
+ return call
+ return create_channel_managed_call
+
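+
+# A minimal sketch (editorial illustration) of a tag callable obeying the
+# contract documented in create_channel_managed_call above: it is passed the
+# completion-queue event for a batch and returns the call once no further
+# events are due, mirroring _event_handler.
+def _example_managed_call_tag(call, state):
+  def tag(event):
+    for batch_operation in event.batch_operations:
+      # Mark each completed operation as no longer due.
+      state.due.discard(batch_operation.type)
+    # Returning the call tells the channel's spin thread to stop tracking it;
+    # returning None keeps polling for more events.
+    return call if not state.due else None
+  return tag
+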
+
+class _ChannelConnectivityState(object):
+
+ def __init__(self, channel):
+ self.lock = threading.Lock()
+ self.channel = channel
+ self.polling = False
+ self.connectivity = None
+ self.try_to_connect = False
+ self.callbacks_and_connectivities = []
+ self.delivering = False
+
+
+def _deliveries(state):
+ callbacks_needing_update = []
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback, callback_connectivity, = callback_and_connectivity
+ if callback_connectivity is not state.connectivity:
+ callbacks_needing_update.append(callback)
+ callback_and_connectivity[1] = state.connectivity
+ return callbacks_needing_update
+
+
+def _deliver(state, initial_connectivity, initial_callbacks):
+ connectivity = initial_connectivity
+ callbacks = initial_callbacks
+ while True:
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
+ connectivity)
+ with state.lock:
+ callbacks = _deliveries(state)
+ if callbacks:
+ connectivity = state.connectivity
+ else:
+ state.delivering = False
+ return
+
+
+def _spawn_delivery(state, callbacks):
+ delivering_thread = threading.Thread(
+ target=_deliver, args=(state, state.connectivity, callbacks,))
+ delivering_thread.start()
+ state.delivering = True
+
+
+# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
+def _poll_connectivity(state, channel, initial_try_to_connect):
+ try_to_connect = initial_try_to_connect
+ connectivity = channel.check_connectivity_state(try_to_connect)
+ with state.lock:
+ state.connectivity = (
+ _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ connectivity])
+ callbacks = tuple(
+ callback for callback, unused_but_known_to_be_none_connectivity
+ in state.callbacks_and_connectivities)
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback_and_connectivity[1] = state.connectivity
+ if callbacks:
+ _spawn_delivery(state, callbacks)
+ completion_queue = cygrpc.CompletionQueue()
+ while True:
+ channel.watch_connectivity_state(
+ connectivity, cygrpc.Timespec(time.time() + 0.2),
+ completion_queue, None)
+ event = completion_queue.poll()
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.try_to_connect:
+ state.polling = False
+ state.connectivity = None
+ break
+ try_to_connect = state.try_to_connect
+ state.try_to_connect = False
+ if event.success or try_to_connect:
+ connectivity = channel.check_connectivity_state(try_to_connect)
+ with state.lock:
+ state.connectivity = (
+ _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ connectivity])
+ if not state.delivering:
+ callbacks = _deliveries(state)
+ if callbacks:
+ _spawn_delivery(state, callbacks)
+
+
+def _subscribe(state, callback, try_to_connect):
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.polling:
+ polling_thread = threading.Thread(
+ target=_poll_connectivity,
+ args=(state, state.channel, bool(try_to_connect)))
+ polling_thread.start()
+ state.polling = True
+ state.callbacks_and_connectivities.append([callback, None])
+ elif not state.delivering and state.connectivity is not None:
+ _spawn_delivery(state, (callback,))
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append(
+ [callback, state.connectivity])
+ else:
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append([callback, None])
+
+
+def _unsubscribe(state, callback):
+ with state.lock:
+ for index, (subscribed_callback, unused_connectivity) in enumerate(
+ state.callbacks_and_connectivities):
+ if callback == subscribed_callback:
+ state.callbacks_and_connectivities.pop(index)
+ break
+
+
+def _moot(state):
+ with state.lock:
+ del state.callbacks_and_connectivities[:]
+
+
+def _options(options):
+ if options is None:
+ pairs = ((cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT),)
+ else:
+ pairs = list(options) + [
+ (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)]
+ return cygrpc.ChannelArgs(
+ cygrpc.ChannelArg(arg_name, arg_value) for arg_name, arg_value in pairs)
+
+
+class Channel(grpc.Channel):
+
+ def __init__(self, target, options, credentials):
+ self._channel = cygrpc.Channel(target, _options(options), credentials)
+ self._call_state = _ChannelCallState(self._channel)
+ self._connectivity_state = _ChannelConnectivityState(self._channel)
+
+ def subscribe(self, callback, try_to_connect=None):
+ _subscribe(self._connectivity_state, callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ _unsubscribe(self._connectivity_state, callback)
+
+ def unary_unary(
+ self, method, request_serializer=None, response_deserializer=None):
+ return _UnaryUnaryMultiCallable(
+ self._channel, _create_channel_managed_call(self._call_state), method,
+ request_serializer, response_deserializer)
+
+ def unary_stream(
+ self, method, request_serializer=None, response_deserializer=None):
+ return _UnaryStreamMultiCallable(
+ self._channel, _create_channel_managed_call(self._call_state), method,
+ request_serializer, response_deserializer)
+
+ def stream_unary(
+ self, method, request_serializer=None, response_deserializer=None):
+ return _StreamUnaryMultiCallable(
+ self._channel, _create_channel_managed_call(self._call_state), method,
+ request_serializer, response_deserializer)
+
+ def stream_stream(
+ self, method, request_serializer=None, response_deserializer=None):
+ return _StreamStreamMultiCallable(
+ self._channel, _create_channel_managed_call(self._call_state), method,
+ request_serializer, response_deserializer)
+
+ def __del__(self):
+ _moot(self._connectivity_state)
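
Taken together, _subscribe, _poll_connectivity, and _spawn_delivery back the public subscribe/unsubscribe methods above: connectivity callbacks run on a delivery thread, and the polling thread exits once the last subscriber is gone. A minimal sketch of how this surface might be exercised, assuming a placeholder target and driving the private _channel module directly:

    from grpc import _channel

    def on_connectivity_change(connectivity):
        # Runs on a delivery thread spawned by _spawn_delivery.
        print('channel connectivity is now', connectivity)

    # 'localhost:50051' is a placeholder; applications would normally obtain
    # a channel through the public grpc API rather than this module.
    channel = _channel.Channel('localhost:50051', None, None)
    channel.subscribe(on_connectivity_change, try_to_connect=True)
    # ... issue RPCs ...
    channel.unsubscribe(on_connectivity_change)
    # With no subscribers left, _poll_connectivity notices on its next wakeup
    # and stops polling.
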
diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py
new file mode 100644
index 0000000000..f351bea9e3
--- /dev/null
+++ b/src/python/grpcio/grpc/_common.py
@@ -0,0 +1,154 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Shared implementation."""
+
+import logging
+import threading
+import time
+
+import six
+
+import grpc
+from grpc._cython import cygrpc
+
+_EMPTY_METADATA = cygrpc.Metadata(())
+
+CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
+ cygrpc.ConnectivityState.idle: grpc.ChannelConnectivity.IDLE,
+ cygrpc.ConnectivityState.connecting: grpc.ChannelConnectivity.CONNECTING,
+ cygrpc.ConnectivityState.ready: grpc.ChannelConnectivity.READY,
+ cygrpc.ConnectivityState.transient_failure:
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ cygrpc.ConnectivityState.shutdown:
+ grpc.ChannelConnectivity.SHUTDOWN,
+}
+
+CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
+ cygrpc.StatusCode.ok: grpc.StatusCode.OK,
+ cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
+ cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
+ cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
+ cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
+ cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
+ cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
+ cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
+ cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
+ cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
+ cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
+ cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
+ cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
+ cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
+ cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
+ cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
+ cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
+}
+STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
+ grpc_code: cygrpc_code
+ for cygrpc_code, grpc_code in six.iteritems(
+ CYGRPC_STATUS_CODE_TO_STATUS_CODE)
+}
+
+
+def metadata(application_metadata):
+ return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata(
+ cygrpc.Metadatum(key, value) for key, value in application_metadata)
+
+
+def _transform(message, transformer, exception_message):
+ if transformer is None:
+ return message
+ else:
+ try:
+ return transformer(message)
+ except Exception: # pylint: disable=broad-except
+ logging.exception(exception_message)
+ return None
+
+
+def serialize(message, serializer):
+ return _transform(message, serializer, 'Exception serializing message!')
+
+
+def deserialize(serialized_message, deserializer):
+ return _transform(serialized_message, deserializer,
+ 'Exception deserializing message!')
+
+
+def _encode(s):
+ if isinstance(s, bytes):
+ return s
+ else:
+ return s.encode('ascii')
+
+
+def fully_qualified_method(group, method):
+ group = _encode(group)
+ method = _encode(method)
+ return b'/' + group + b'/' + method
+
+
+class CleanupThread(threading.Thread):
+ """A threading.Thread subclass supporting custom behavior on join().
+
+  On Python interpreter exit, Python attempts to join outstanding threads
+  prior to garbage collection. We may need to do additional cleanup, and
+  we accomplish this by overriding the join() method.
+ """
+
+ def __init__(self, behavior, group=None, target=None, name=None,
+ args=(), kwargs={}):
+ """Constructor.
+
+ Args:
+ behavior (function): Function called on join() with a single
+ argument, timeout, indicating the maximum duration of
+ `behavior`, or None indicating `behavior` has no deadline.
+ `behavior` must be idempotent.
+      group (None): Should be None. Reserved for future extensions
+        when ThreadGroup is implemented.
+ target (function): The function to invoke when this thread is
+ run. Defaults to None.
+ name (str): The name of this thread. Defaults to None.
+ args (tuple[object]): A tuple of arguments to pass to `target`.
+ kwargs (dict[str,object]): A dictionary of keyword arguments to
+ pass to `target`.
+ """
+ super(CleanupThread, self).__init__(group=group, target=target,
+ name=name, args=args, kwargs=kwargs)
+ self._behavior = behavior
+
+ def join(self, timeout=None):
+ start_time = time.time()
+ self._behavior(timeout)
+ end_time = time.time()
+ if timeout is not None:
+ timeout -= end_time - start_time
+ timeout = max(timeout, 0)
+ super(CleanupThread, self).join(timeout)
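
CleanupThread.join runs the supplied behavior first and then joins with whatever remains of the caller's timeout. A small illustrative sketch; the stop_event and worker names are invented for the example:

    import threading

    from grpc import _common

    stop_event = threading.Event()

    def worker():
        stop_event.wait()  # stands in for real background work

    # The behavior receives the caller's timeout (or None) and must be idempotent.
    thread = _common.CleanupThread(lambda timeout: stop_event.set(), target=worker)
    thread.start()
    thread.join(timeout=1.0)  # sets the event, then joins with the remaining time
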
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
index 1bfe6344e0..a09bbc75fe 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -55,6 +55,7 @@ cdef class Call:
def cancel(
self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE,
details=None):
+ details = str_to_bytes(details)
if not self.is_valid:
raise ValueError("invalid call object cannot be used from Python")
if (details is None) != (error_code == GRPC_STATUS__DO_NOT_USE):
@@ -63,12 +64,6 @@ cdef class Call:
cdef grpc_call_error result
cdef char *c_details = NULL
if error_code != GRPC_STATUS__DO_NOT_USE:
- if isinstance(details, bytes):
- pass
- elif isinstance(details, basestring):
- details = details.encode()
- else:
- raise TypeError("expected details to be str or bytes")
self.references.append(details)
c_details = details
with nogil:
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index c26bc083cf..866cff0d01 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -34,18 +34,13 @@ cdef class Channel:
def __cinit__(self, target, ChannelArgs arguments=None,
ChannelCredentials channel_credentials=None):
+ target = str_to_bytes(target)
cdef grpc_channel_args *c_arguments = NULL
cdef char *c_target = NULL
self.c_channel = NULL
self.references = []
if arguments is not None:
c_arguments = &arguments.c_args
- if isinstance(target, bytes):
- pass
- elif isinstance(target, basestring):
- target = target.encode()
- else:
- raise TypeError("expected target to be str or bytes")
c_target = target
if channel_credentials is None:
with nogil:
@@ -62,25 +57,14 @@ cdef class Channel:
def create_call(self, Call parent, int flags,
CompletionQueue queue not None,
method, host, Timespec deadline not None):
+ method = str_to_bytes(method)
+ host = str_to_bytes(host)
if queue.is_shutting_down:
raise ValueError("queue must not be shutting down or shutdown")
- if isinstance(method, bytes):
- pass
- elif isinstance(method, basestring):
- method = method.encode()
- else:
- raise TypeError("expected method to be str or bytes")
cdef char *method_c_string = method
cdef char *host_c_string = NULL
- if host is None:
- pass
- elif isinstance(host, bytes):
+ if host is not None:
host_c_string = host
- elif isinstance(host, basestring):
- host = host.encode()
- host_c_string = host
- else:
- raise TypeError("expected host to be str, bytes, or None")
cdef Call operation_call = Call()
operation_call.references = [self, method, host, queue]
cdef grpc_call *parent_call = NULL
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
index a67c963684..01089c3dc0 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
@@ -31,9 +31,6 @@
cdef class CompletionQueue:
cdef grpc_completion_queue *c_completion_queue
- cdef object pluck_condition
- cdef int num_plucking
- cdef int num_polling
cdef bint is_shutting_down
cdef bint is_shutdown
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index cdae39d519..90266516fe 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -32,6 +32,8 @@ cimport cpython
import threading
import time
+cdef int _INTERRUPT_CHECK_PERIOD_MS = 200
+
cdef class CompletionQueue:
@@ -40,9 +42,6 @@ cdef class CompletionQueue:
self.c_completion_queue = grpc_completion_queue_create(NULL)
self.is_shutting_down = False
self.is_shutdown = False
- self.pluck_condition = threading.Condition()
- self.num_plucking = 0
- self.num_polling = 0
cdef _interpret_event(self, grpc_event event):
cdef OperationTag tag = None
@@ -83,45 +82,27 @@ cdef class CompletionQueue:
def poll(self, Timespec deadline=None):
# We name this 'poll' to avoid problems with CPython's expectations for
# 'special' methods (like next and __next__).
+ cdef gpr_timespec c_increment
+ cdef gpr_timespec c_timeout
cdef gpr_timespec c_deadline
with nogil:
+ c_increment = gpr_time_from_millis(_INTERRUPT_CHECK_PERIOD_MS, GPR_TIMESPAN)
c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
- if deadline is not None:
- c_deadline = deadline.c_time
- cdef grpc_event event
-
- # Poll within a critical section to detect contention
- with self.pluck_condition:
- assert self.num_plucking == 0, 'cannot simultaneously pluck and poll'
- self.num_polling += 1
- with nogil:
- event = grpc_completion_queue_next(
- self.c_completion_queue, c_deadline, NULL)
- with self.pluck_condition:
- self.num_polling -= 1
- return self._interpret_event(event)
-
- def pluck(self, OperationTag tag, Timespec deadline=None):
- # Plucking a 'None' tag is equivalent to passing control to GRPC core until
- # the deadline.
- cdef gpr_timespec c_deadline = gpr_inf_future(
- GPR_CLOCK_REALTIME)
- if deadline is not None:
- c_deadline = deadline.c_time
- cdef grpc_event event
-
- # Pluck within a critical section to detect contention
- with self.pluck_condition:
- assert self.num_polling == 0, 'cannot simultaneously pluck and poll'
- assert self.num_plucking < GRPC_MAX_COMPLETION_QUEUE_PLUCKERS, (
- 'cannot pluck more than {} times simultaneously'.format(
- GRPC_MAX_COMPLETION_QUEUE_PLUCKERS))
- self.num_plucking += 1
- with nogil:
- event = grpc_completion_queue_pluck(
- self.c_completion_queue, <cpython.PyObject *>tag, c_deadline, NULL)
- with self.pluck_condition:
- self.num_plucking -= 1
+ if deadline is not None:
+ c_deadline = deadline.c_time
+
+ while True:
+ c_timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c_increment)
+ if gpr_time_cmp(c_timeout, c_deadline) > 0:
+ c_timeout = c_deadline
+ event = grpc_completion_queue_next(
+ self.c_completion_queue, c_timeout, NULL)
+ if event.type != GRPC_QUEUE_TIMEOUT or gpr_time_cmp(c_timeout, c_deadline) == 0:
+        break
+
+ # Handle any signals
+ with gil:
+ cpython.PyErr_CheckSignals()
return self._interpret_event(event)
def shutdown(self):
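
The rewritten poll drops the pluck/poll bookkeeping in favor of a single loop that waits in _INTERRUPT_CHECK_PERIOD_MS slices, returning to Python between waits so that signal handlers (notably KeyboardInterrupt) get a chance to run. The same pattern restated in plain Python, with a hypothetical wait_for_event standing in for grpc_completion_queue_next:

    import time

    _INTERRUPT_CHECK_PERIOD_S = 0.2

    def poll(wait_for_event, deadline=None):
        # wait_for_event(timeout) is a placeholder for the underlying blocking
        # wait; it returns None on timeout.
        while True:
            timeout = _INTERRUPT_CHECK_PERIOD_S
            if deadline is not None:
                timeout = min(timeout, max(deadline - time.time(), 0))
            event = wait_for_event(timeout)
            # Control returns to Python here, so a pending KeyboardInterrupt
            # can propagate instead of being stuck behind a blocking C call.
            if event is not None or (
                deadline is not None and time.time() >= deadline):
                return event
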
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
index c793c8f5e5..d377a67520 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
@@ -54,7 +54,7 @@ cdef class ServerCredentials:
cdef class CredentialsMetadataPlugin:
cdef object plugin_callback
- cdef str plugin_name
+ cdef bytes plugin_name
cdef grpc_metadata_credentials_plugin make_c_plugin(self)
@@ -68,4 +68,4 @@ cdef void plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil
-cdef void plugin_destroy_c_plugin_state(void *state)
+cdef void plugin_destroy_c_plugin_state(void *state) with gil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 94d13b5999..470382d609 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -82,7 +82,7 @@ cdef class ServerCredentials:
cdef class CredentialsMetadataPlugin:
- def __cinit__(self, object plugin_callback, str name):
+ def __cinit__(self, object plugin_callback, name):
"""
Args:
plugin_callback (callable): Callback accepting a service URL (str/bytes)
@@ -93,6 +93,7 @@ cdef class CredentialsMetadataPlugin:
successful).
name (str): Plugin name.
"""
+ name = str_to_bytes(name)
if not callable(plugin_callback):
raise ValueError('expected callable plugin_callback')
self.plugin_callback = plugin_callback
@@ -129,7 +130,8 @@ cdef void plugin_get_metadata(
grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil:
def python_callback(
Metadata metadata, grpc_status_code status,
- const char *error_details):
+ error_details):
+ error_details = str_to_bytes(error_details)
cb(user_data, metadata.c_metadata_array.metadata,
metadata.c_metadata_array.count, status, error_details)
cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
@@ -137,7 +139,7 @@ cdef void plugin_get_metadata(
cy_context.context = context
self.plugin_callback(cy_context, python_callback)
-cdef void plugin_destroy_c_plugin_state(void *state):
+cdef void plugin_destroy_c_plugin_state(void *state) with gil:
cpython.Py_DECREF(<CredentialsMetadataPlugin>state)
def channel_credentials_google_default():
@@ -148,14 +150,7 @@ def channel_credentials_google_default():
def channel_credentials_ssl(pem_root_certificates,
SslPemKeyCertPair ssl_pem_key_cert_pair):
- if pem_root_certificates is None:
- pass
- elif isinstance(pem_root_certificates, bytes):
- pass
- elif isinstance(pem_root_certificates, basestring):
- pem_root_certificates = pem_root_certificates.encode()
- else:
- raise TypeError("expected str or bytes for pem_root_certificates")
+ pem_root_certificates = str_to_bytes(pem_root_certificates)
cdef ChannelCredentials credentials = ChannelCredentials()
cdef const char *c_pem_root_certificates = NULL
if pem_root_certificates is not None:
@@ -207,12 +202,7 @@ def call_credentials_google_compute_engine():
def call_credentials_service_account_jwt_access(
json_key, Timespec token_lifetime not None):
- if isinstance(json_key, bytes):
- pass
- elif isinstance(json_key, basestring):
- json_key = json_key.encode()
- else:
- raise TypeError("expected json_key to be str or bytes")
+ json_key = str_to_bytes(json_key)
cdef CallCredentials credentials = CallCredentials()
cdef char *json_key_c_string = json_key
with nogil:
@@ -223,12 +213,7 @@ def call_credentials_service_account_jwt_access(
return credentials
def call_credentials_google_refresh_token(json_refresh_token):
- if isinstance(json_refresh_token, bytes):
- pass
- elif isinstance(json_refresh_token, basestring):
- json_refresh_token = json_refresh_token.encode()
- else:
- raise TypeError("expected json_refresh_token to be str or bytes")
+ json_refresh_token = str_to_bytes(json_refresh_token)
cdef CallCredentials credentials = CallCredentials()
cdef char *json_refresh_token_c_string = json_refresh_token
with nogil:
@@ -238,18 +223,8 @@ def call_credentials_google_refresh_token(json_refresh_token):
return credentials
def call_credentials_google_iam(authorization_token, authority_selector):
- if isinstance(authorization_token, bytes):
- pass
- elif isinstance(authorization_token, basestring):
- authorization_token = authorization_token.encode()
- else:
- raise TypeError("expected authorization_token to be str or bytes")
- if isinstance(authority_selector, bytes):
- pass
- elif isinstance(authority_selector, basestring):
- authority_selector = authority_selector.encode()
- else:
- raise TypeError("expected authority_selector to be str or bytes")
+ authorization_token = str_to_bytes(authorization_token)
+ authority_selector = str_to_bytes(authority_selector)
cdef CallCredentials credentials = CallCredentials()
cdef char *authorization_token_c_string = authorization_token
cdef char *authority_selector_c_string = authority_selector
@@ -272,16 +247,10 @@ def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin):
def server_credentials_ssl(pem_root_certs, pem_key_cert_pairs,
bint force_client_auth):
+ pem_root_certs = str_to_bytes(pem_root_certs)
cdef char *c_pem_root_certs = NULL
- if pem_root_certs is None:
- pass
- elif isinstance(pem_root_certs, bytes):
- c_pem_root_certs = pem_root_certs
- elif isinstance(pem_root_certs, basestring):
- pem_root_certs = pem_root_certs.encode()
+ if pem_root_certs is not None:
c_pem_root_certs = pem_root_certs
- else:
- raise TypeError("expected pem_root_certs to be str or bytes")
pem_key_cert_pairs = list(pem_key_cert_pairs)
for pair in pem_key_cert_pairs:
if not isinstance(pair, SslPemKeyCertPair):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 66e6e6b549..168b9751aa 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -80,6 +80,12 @@ cdef extern from "grpc/_cython/loader.h":
gpr_timespec gpr_convert_clock_type(gpr_timespec t,
gpr_clock_type target_clock) nogil
+ gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) nogil
+
+ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) nogil
+
+ int gpr_time_cmp(gpr_timespec a, gpr_timespec b) nogil
+
ctypedef enum grpc_status_code:
GRPC_STATUS_OK
GRPC_STATUS_CANCELLED
@@ -208,7 +214,7 @@ cdef extern from "grpc/_cython/loader.h":
GRPC_CHANNEL_CONNECTING
GRPC_CHANNEL_READY
GRPC_CHANNEL_TRANSIENT_FAILURE
- GRPC_CHANNEL_FATAL_FAILURE
+ GRPC_CHANNEL_SHUTDOWN
ctypedef struct grpc_metadata:
const char *key
@@ -336,6 +342,8 @@ cdef extern from "grpc/_cython/loader.h":
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq,
void *reserved) nogil
+ void grpc_server_register_non_listening_completion_queue(
+ grpc_server *server, grpc_completion_queue *cq, void *reserved) nogil
int grpc_server_add_insecure_http2_port(
grpc_server *server, const char *addr) nogil
void grpc_server_start(grpc_server *server) nogil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
new file mode 100644
index 0000000000..274f038e0a
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
@@ -0,0 +1,39 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# This function ASCII-encodes unicode string inputs when necessary.
+# In Python 3, unicode strings are the default str type.
+cdef bytes str_to_bytes(object s):
+ if s is None or isinstance(s, bytes):
+ return s
+ elif isinstance(s, unicode):
+ return s.encode('ascii')
+ else:
+ raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s)))
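
str_to_bytes centralizes the str/bytes handling that the per-call-site isinstance chains used to do: None and bytes pass through unchanged, text is ASCII-encoded, and anything else raises TypeError. Restated in plain Python 3 for reference:

    def str_to_bytes(s):
        if s is None or isinstance(s, bytes):
            return s
        if isinstance(s, str):  # 'unicode' on Python 2
            return s.encode('ascii')
        raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s)))

    assert str_to_bytes('abc') == b'abc'
    assert str_to_bytes(b'abc') == b'abc'
    assert str_to_bytes(None) is None
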
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index c7539f0d49..0055d0d3a2 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -33,7 +33,7 @@ class ConnectivityState:
connecting = GRPC_CHANNEL_CONNECTING
ready = GRPC_CHANNEL_READY
transient_failure = GRPC_CHANNEL_TRANSIENT_FAILURE
- fatal_failure = GRPC_CHANNEL_FATAL_FAILURE
+ shutdown = GRPC_CHANNEL_SHUTDOWN
class ChannelArgKey:
@@ -235,18 +235,13 @@ cdef class ByteBuffer:
if data is None:
self.c_byte_buffer = NULL
return
- if isinstance(data, bytes):
- pass
- elif isinstance(data, basestring):
- data = data.encode()
- elif isinstance(data, ByteBuffer):
+ if isinstance(data, ByteBuffer):
data = (<ByteBuffer>data).bytes()
if data is None:
self.c_byte_buffer = NULL
return
else:
- raise TypeError("expected value to be of type str, bytes, or "
- "ByteBuffer, not {}".format(type(data)))
+ data = str_to_bytes(data)
cdef char *c_data = data
cdef gpr_slice data_slice
@@ -274,6 +269,7 @@ cdef class ByteBuffer:
data_slice_length = gpr_slice_length(data_slice)
with gil:
result += (<char *>data_slice_pointer)[:data_slice_length]
+ gpr_slice_unref(data_slice)
with nogil:
grpc_byte_buffer_reader_destroy(&reader)
return bytes(result)
@@ -301,19 +297,8 @@ cdef class ByteBuffer:
cdef class SslPemKeyCertPair:
def __cinit__(self, private_key, certificate_chain):
- if isinstance(private_key, bytes):
- self.private_key = private_key
- elif isinstance(private_key, basestring):
- self.private_key = private_key.encode()
- else:
- raise TypeError("expected private_key to be of type str or bytes")
- if isinstance(certificate_chain, bytes):
- self.certificate_chain = certificate_chain
- elif isinstance(certificate_chain, basestring):
- self.certificate_chain = certificate_chain.encode()
- else:
- raise TypeError("expected certificate_chain to be of type str or bytes "
- "or int")
+ self.private_key = str_to_bytes(private_key)
+ self.certificate_chain = str_to_bytes(certificate_chain)
self.c_pair.private_key = self.private_key
self.c_pair.certificate_chain = self.certificate_chain
@@ -321,27 +306,16 @@ cdef class SslPemKeyCertPair:
cdef class ChannelArg:
def __cinit__(self, key, value):
- if isinstance(key, bytes):
- self.key = key
- elif isinstance(key, basestring):
- self.key = key.encode()
- else:
- raise TypeError("expected key to be of type str or bytes")
- if isinstance(value, bytes):
- self.value = value
- self.c_arg.type = GRPC_ARG_STRING
- self.c_arg.value.string = self.value
- elif isinstance(value, basestring):
- self.value = value.encode()
- self.c_arg.type = GRPC_ARG_STRING
- self.c_arg.value.string = self.value
- elif isinstance(value, int):
+ self.key = str_to_bytes(key)
+ self.c_arg.key = self.key
+ if isinstance(value, int):
self.value = int(value)
self.c_arg.type = GRPC_ARG_INTEGER
self.c_arg.value.integer = self.value
else:
- raise TypeError("expected value to be of type str or bytes or int")
- self.c_arg.key = self.key
+ self.value = str_to_bytes(value)
+ self.c_arg.type = GRPC_ARG_STRING
+ self.c_arg.value.string = self.value
cdef class ChannelArgs:
@@ -374,18 +348,8 @@ cdef class ChannelArgs:
cdef class Metadatum:
def __cinit__(self, key, value):
- if isinstance(key, bytes):
- self._key = key
- elif isinstance(key, basestring):
- self._key = key.encode()
- else:
- raise TypeError("expected key to be of type str or bytes")
- if isinstance(value, bytes):
- self._value = value
- elif isinstance(value, basestring):
- self._value = value.encode()
- else:
- raise TypeError("expected value to be of type str or bytes")
+ self._key = str_to_bytes(key)
+ self._value = str_to_bytes(value)
self.c_metadata.key = self._key
self.c_metadata.value = self._value
self.c_metadata.value_length = len(self._value)
@@ -600,12 +564,7 @@ def operation_send_close_from_client(int flags):
def operation_send_status_from_server(
Metadata metadata, grpc_status_code code, details, int flags):
- if isinstance(details, bytes):
- pass
- elif isinstance(details, basestring):
- details = details.encode()
- else:
- raise TypeError("expected a str or bytes object for details")
+ details = str_to_bytes(details)
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
op.c_op.flags = flags
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index 8419a59068..42afeb8498 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -81,25 +81,29 @@ cdef class Server:
self.c_server, queue.c_completion_queue, NULL)
self.registered_completion_queues.append(queue)
+ def register_non_listening_completion_queue(
+ self, CompletionQueue queue not None):
+ if self.is_started:
+ raise ValueError("cannot register completion queues after start")
+ with nogil:
+ grpc_server_register_non_listening_completion_queue(
+ self.c_server, queue.c_completion_queue, NULL)
+ self.registered_completion_queues.append(queue)
+
def start(self):
if self.is_started:
raise ValueError("the server has already started")
self.backup_shutdown_queue = CompletionQueue()
- self.register_completion_queue(self.backup_shutdown_queue)
+ self.register_non_listening_completion_queue(self.backup_shutdown_queue)
self.is_started = True
with nogil:
grpc_server_start(self.c_server)
# Ensure the core has gotten a chance to do the start-up work
- self.backup_shutdown_queue.pluck(None, Timespec(None))
+ self.backup_shutdown_queue.poll(Timespec(None))
def add_http2_port(self, address,
ServerCredentials server_credentials=None):
- if isinstance(address, bytes):
- pass
- elif isinstance(address, basestring):
- address = address.encode()
- else:
- raise TypeError("expected address to be a str or bytes")
+ address = str_to_bytes(address)
self.references.append(address)
cdef int result
cdef char *address_c_string = address
@@ -169,4 +173,3 @@ cdef class Server:
time.sleep(0)
with nogil:
grpc_server_destroy(self.c_server)
-
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index 8823ea5ef5..cf146f5a04 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -35,6 +35,7 @@ import sys
# TODO(atash): figure out why the coverage tool gets confused about the Cython
# coverage plugin when the following files don't have a '.pxi' suffix.
+include "grpc/_cython/_cygrpc/grpc_string.pyx.pxi"
include "grpc/_cython/_cygrpc/call.pyx.pxi"
include "grpc/_cython/_cygrpc/channel.pyx.pxi"
include "grpc/_cython/_cygrpc/credentials.pyx.pxi"
diff --git a/src/python/grpcio/grpc/_cython/imports.generated.c b/src/python/grpcio/grpc/_cython/imports.generated.c
index 3cf4cb491a..8437e74ba0 100644
--- a/src/python/grpcio/grpc/_cython/imports.generated.c
+++ b/src/python/grpcio/grpc/_cython/imports.generated.c
@@ -35,7 +35,7 @@
#include "imports.generated.h"
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
census_initialize_type census_initialize_import;
census_shutdown_type census_shutdown_import;
@@ -126,7 +126,8 @@ grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
grpc_is_binary_header_type grpc_is_binary_header_import;
grpc_call_error_to_string_type grpc_call_error_to_string_import;
-grpc_cronet_secure_channel_create_type grpc_cronet_secure_channel_create_import;
+grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
+grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
@@ -400,7 +401,8 @@ void pygrpc_load_imports(HMODULE library) {
grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal");
grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header");
grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
- grpc_cronet_secure_channel_create_import = (grpc_cronet_secure_channel_create_type) GetProcAddress(library, "grpc_cronet_secure_channel_create");
+ grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
+ grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next");
grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator");
grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity");
@@ -585,4 +587,4 @@ void pygrpc_load_imports(HMODULE library) {
}
#endif /* __cpluslus */
-#endif /* !GPR_WIN32 */
+#endif /* !GPR_WINDOWS */
diff --git a/src/python/grpcio/grpc/_cython/imports.generated.h b/src/python/grpcio/grpc/_cython/imports.generated.h
index 8a3e4bf86e..d52e8591b3 100644
--- a/src/python/grpcio/grpc/_cython/imports.generated.h
+++ b/src/python/grpcio/grpc/_cython/imports.generated.h
@@ -36,14 +36,14 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include <windows.h>
#include <grpc/census.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
-#include <grpc/grpc_cronet.h>
+#include <grpc/grpc_posix.h>
#include <grpc/grpc_security.h>
#include <grpc/impl/codegen/alloc.h>
#include <grpc/impl/codegen/byte_buffer.h>
@@ -57,7 +57,7 @@
#include <grpc/support/cpu.h>
#include <grpc/support/histogram.h>
#include <grpc/support/host_port.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/thd.h>
@@ -329,9 +329,12 @@ extern grpc_is_binary_header_type grpc_is_binary_header_import;
typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
extern grpc_call_error_to_string_type grpc_call_error_to_string_import;
#define grpc_call_error_to_string grpc_call_error_to_string_import
-typedef grpc_channel *(*grpc_cronet_secure_channel_create_type)(void *engine, const char *target, const grpc_channel_args *args, void *reserved);
-extern grpc_cronet_secure_channel_create_type grpc_cronet_secure_channel_create_import;
-#define grpc_cronet_secure_channel_create grpc_cronet_secure_channel_create_import
+typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args);
+extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
+#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import
+typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd);
+extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
+#define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import
typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it);
extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
#define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import
@@ -479,7 +482,7 @@ extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_impo
typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader);
extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import
-typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...);
+typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(4, 5);
extern gpr_log_type gpr_log_import;
#define gpr_log gpr_log_import
typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message);
@@ -824,7 +827,7 @@ extern gpr_format_message_type gpr_format_message_import;
typedef char *(*gpr_strdup_type)(const char *src);
extern gpr_strdup_type gpr_strdup_import;
#define gpr_strdup gpr_strdup_import
-typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...);
+typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(2, 3);
extern gpr_asprintf_type gpr_asprintf_import;
#define gpr_asprintf gpr_asprintf_import
typedef const char *(*gpr_subprocess_binary_extension_type)();
@@ -877,7 +880,7 @@ void pygrpc_load_imports(HMODULE library);
}
#endif /* __cpluslus */
-#else /* !GPR_WIN32 */
+#else /* !GPR_WINDOWS */
#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
@@ -889,6 +892,6 @@ void pygrpc_load_imports(HMODULE library);
#include <grpc/support/time.h>
#include <grpc/status.h>
-#endif /* !GPR_WIN32 */
+#endif /* !GPR_WINDOWS */
#endif
diff --git a/src/python/grpcio/grpc/_cython/loader.c b/src/python/grpcio/grpc/_cython/loader.c
index 3b72806ea1..b909ad594e 100644
--- a/src/python/grpcio/grpc/_cython/loader.c
+++ b/src/python/grpcio/grpc/_cython/loader.c
@@ -37,7 +37,7 @@
extern "C" {
#endif /* __cpluslus */
-#if GPR_WIN32
+#if GPR_WINDOWS
int pygrpc_load_core(char *path) {
HMODULE grpc_c;
@@ -60,7 +60,7 @@ int pygrpc_load_core(char *path) {
int pygrpc_load_core(char *path) { return 1; }
-#endif /* !GPR_WIN32 */
+#endif /* !GPR_WINDOWS */
#ifdef __cplusplus
}
diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py
index 11310e2240..5fc4994ca0 100644
--- a/src/python/grpcio/grpc/_links/service.py
+++ b/src/python/grpcio/grpc/_links/service.py
@@ -33,6 +33,7 @@ import abc
import enum
import logging
import threading
+import six
import time
from grpc._adapter import _intermediary_low
@@ -177,7 +178,10 @@ class _Kernel(object):
call = service_acceptance.call
call.accept(self._completion_queue, call)
try:
- group, method = service_acceptance.method.split(b'/')[1:3]
+ service_method = service_acceptance.method
+ if six.PY3:
+ service_method = service_method.decode('latin1')
+ group, method = service_method.split('/')[1:3]
except ValueError:
logging.info('Illegal path "%s"!', service_acceptance.method)
return
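
Decoding before splitting keeps the group/method extraction working on Python 3, where the accepted method arrives as bytes. Illustratively, with an example path that is not taken from this diff:

    service_method = b'/helloworld.Greeter/SayHello'
    group, method = service_method.decode('latin1').split('/')[1:3]
    assert (group, method) == ('helloworld.Greeter', 'SayHello')
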
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
new file mode 100644
index 0000000000..4e9cfe710c
--- /dev/null
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -0,0 +1,123 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import threading
+
+import grpc
+from grpc._cython import cygrpc
+
+
+class AuthMetadataContext(
+ collections.namedtuple(
+ 'AuthMetadataContext', ('service_url', 'method_name',)),
+ grpc.AuthMetadataContext):
+ pass
+
+
+class AuthMetadataPluginCallback(grpc.AuthMetadataContext):
+
+ def __init__(self, callback):
+ self._callback = callback
+
+ def __call__(self, metadata, error):
+ self._callback(metadata, error)
+
+
+class _WrappedCygrpcCallback(object):
+
+ def __init__(self, cygrpc_callback):
+ self.is_called = False
+ self.error = None
+ self.is_called_lock = threading.Lock()
+ self.cygrpc_callback = cygrpc_callback
+
+ def _invoke_failure(self, error):
+ # TODO(atash) translate different Exception superclasses into different
+ # status codes.
+ self.cygrpc_callback(
+ cygrpc.Metadata([]), cygrpc.StatusCode.internal, error.message)
+
+ def _invoke_success(self, metadata):
+ try:
+ cygrpc_metadata = cygrpc.Metadata(
+ cygrpc.Metadatum(key, value)
+ for key, value in metadata)
+ except Exception as error:
+ self._invoke_failure(error)
+ return
+ self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, '')
+
+ def __call__(self, metadata, error):
+ with self.is_called_lock:
+ if self.is_called:
+ raise RuntimeError('callback should only ever be invoked once')
+ if self.error:
+ self._invoke_failure(self.error)
+ return
+ self.is_called = True
+ if error is None:
+ self._invoke_success(metadata)
+ else:
+ self._invoke_failure(error)
+
+ def notify_failure(self, error):
+ with self.is_called_lock:
+ if not self.is_called:
+ self.error = error
+
+
+class _WrappedPlugin(object):
+
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+ def __call__(self, context, cygrpc_callback):
+ wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
+ wrapped_context = AuthMetadataContext(
+ context.service_url, context.method_name)
+ try:
+ self.plugin(
+ wrapped_context, AuthMetadataPluginCallback(wrapped_cygrpc_callback))
+ except Exception as error:
+ wrapped_cygrpc_callback.notify_failure(error)
+ raise
+
+
+def call_credentials_metadata_plugin(plugin, name):
+ """
+ Args:
+ plugin: A callable accepting a grpc.AuthMetadataContext
+ object and a callback (itself accepting a list of metadata key/value
+ 2-tuples and a None-able exception value). The callback must be eventually
+ called, but need not be called in plugin's invocation.
+ plugin's invocation must be non-blocking.
+ """
+ return cygrpc.call_credentials_metadata_plugin(
+ cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), name))
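
A plugin handed to call_credentials_metadata_plugin is just a callable; _WrappedPlugin adapts it to the cygrpc signature and _WrappedCygrpcCallback enforces the call-once contract. A hedged sketch of a conforming plugin (the header name and token are invented, and the call requires the built cygrpc extension):

    from grpc import _plugin_wrapping

    def example_token_plugin(context, callback):
        # context carries service_url and method_name; this function must not
        # block, and callback must be invoked exactly once, eventually.
        callback([('authorization', 'Bearer example-token')], None)

    call_creds = _plugin_wrapping.call_credentials_metadata_plugin(
        example_token_plugin, 'example_plugin')
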
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
new file mode 100644
index 0000000000..bf20f15f72
--- /dev/null
+++ b/src/python/grpcio/grpc/_server.py
@@ -0,0 +1,755 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Service-side implementation of gRPC Python."""
+
+import collections
+import enum
+import logging
+import threading
+import time
+
+import grpc
+from grpc import _common
+from grpc._cython import cygrpc
+from grpc.framework.foundation import callable_util
+
+_SHUTDOWN_TAG = 'shutdown'
+_REQUEST_CALL_TAG = 'request_call'
+
+_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
+_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
+_RECEIVE_MESSAGE_TOKEN = 'receive_message'
+_SEND_MESSAGE_TOKEN = 'send_message'
+_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
+ 'send_initial_metadata * send_message')
+_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
+_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
+ 'send_initial_metadata * send_status_from_server')
+
+_OPEN = 'open'
+_CLOSED = 'closed'
+_CANCELLED = 'cancelled'
+
+_EMPTY_FLAGS = 0
+_EMPTY_METADATA = cygrpc.Metadata(())
+
+_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
+
+
+def _serialized_request(request_event):
+ return request_event.batch_operations[0].received_message.bytes()
+
+
+def _application_code(code):
+ cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
+ return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
+
+
+def _completion_code(state):
+ if state.code is None:
+ return cygrpc.StatusCode.ok
+ else:
+ return _application_code(state.code)
+
+
+def _abortion_code(state, code):
+ if state.code is None:
+ return code
+ else:
+ return _application_code(state.code)
+
+
+def _details(state):
+ return '' if state.details is None else state.details
+
+
+class _HandlerCallDetails(
+ collections.namedtuple(
+ '_HandlerCallDetails', ('method', 'invocation_metadata',)),
+ grpc.HandlerCallDetails):
+ pass
+
+
+class _RPCState(object):
+
+ def __init__(self):
+ self.condition = threading.Condition()
+ self.due = set()
+ self.request = None
+ self.client = _OPEN
+ self.initial_metadata_allowed = True
+ self.disable_next_compression = False
+ self.trailing_metadata = None
+ self.code = None
+ self.details = None
+ self.statused = False
+ self.rpc_errors = []
+ self.callbacks = []
+
+
+def _raise_rpc_error(state):
+ rpc_error = grpc.RpcError()
+ state.rpc_errors.append(rpc_error)
+ raise rpc_error
+
+
+def _possibly_finish_call(state, token):
+ state.due.remove(token)
+ if (state.client is _CANCELLED or state.statused) and not state.due:
+ callbacks = state.callbacks
+ state.callbacks = None
+ return state, callbacks
+ else:
+ return None, ()
+
+
+def _send_status_from_server(state, token):
+ def send_status_from_server(unused_send_status_from_server_event):
+ with state.condition:
+ return _possibly_finish_call(state, token)
+ return send_status_from_server
+
+
+def _abort(state, call, code, details):
+ if state.client is not _CANCELLED:
+ effective_code = _abortion_code(state, code)
+ effective_details = details if state.details is None else state.details
+ if state.initial_metadata_allowed:
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _common.metadata(state.trailing_metadata), effective_code,
+ effective_details, _EMPTY_FLAGS),
+ )
+ token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
+ else:
+ operations = (
+ cygrpc.operation_send_status_from_server(
+ _common.metadata(state.trailing_metadata), effective_code,
+ effective_details, _EMPTY_FLAGS),
+ )
+ token = _SEND_STATUS_FROM_SERVER_TOKEN
+ call.start_batch(
+ cygrpc.Operations(operations),
+ _send_status_from_server(state, token))
+ state.statused = True
+ state.due.add(token)
+
+
+def _receive_close_on_server(state):
+ def receive_close_on_server(receive_close_on_server_event):
+ with state.condition:
+ if receive_close_on_server_event.batch_operations[0].received_cancelled:
+ state.client = _CANCELLED
+ elif state.client is _OPEN:
+ state.client = _CLOSED
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
+ return receive_close_on_server
+
+
+def _receive_message(state, call, request_deserializer):
+ def receive_message(receive_message_event):
+ serialized_request = _serialized_request(receive_message_event)
+ if serialized_request is None:
+ with state.condition:
+ if state.client is _OPEN:
+ state.client = _CLOSED
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
+ else:
+ request = _common.deserialize(serialized_request, request_deserializer)
+ with state.condition:
+ if request is None:
+ _abort(
+ state, call, cygrpc.StatusCode.internal,
+ 'Exception deserializing request!')
+ else:
+ state.request = request
+ state.condition.notify_all()
+ return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
+ return receive_message
+
+
+def _send_initial_metadata(state):
+ def send_initial_metadata(unused_send_initial_metadata_event):
+ with state.condition:
+ return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
+ return send_initial_metadata
+
+
+def _send_message(state, token):
+ def send_message(unused_send_message_event):
+ with state.condition:
+ state.condition.notify_all()
+ return _possibly_finish_call(state, token)
+ return send_message
+
+
+class _Context(grpc.ServicerContext):
+
+ def __init__(self, rpc_event, state, request_deserializer):
+ self._rpc_event = rpc_event
+ self._state = state
+ self._request_deserializer = request_deserializer
+
+ def is_active(self):
+ with self._state.condition:
+ return self._state.client is not _CANCELLED and not self._state.statused
+
+ def time_remaining(self):
+ return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
+
+ def cancel(self):
+ self._rpc_event.operation_call.cancel()
+
+ def add_callback(self, callback):
+ with self._state.condition:
+ if self._state.callbacks is None:
+ return False
+ else:
+ self._state.callbacks.append(callback)
+ return True
+
+ def disable_next_message_compression(self):
+ with self._state.condition:
+ self._state.disable_next_compression = True
+
+ def invocation_metadata(self):
+ return self._rpc_event.request_metadata
+
+ def peer(self):
+ return self._rpc_event.operation_call.peer()
+
+ def send_initial_metadata(self, initial_metadata):
+ with self._state.condition:
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ else:
+ if self._state.initial_metadata_allowed:
+ operation = cygrpc.operation_send_initial_metadata(
+ _common.metadata(initial_metadata), _EMPTY_FLAGS)
+ self._rpc_event.operation_call.start_batch(
+ cygrpc.Operations((operation,)),
+ _send_initial_metadata(self._state))
+ self._state.initial_metadata_allowed = False
+ self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
+ else:
+ raise ValueError('Initial metadata no longer allowed!')
+
+ def set_trailing_metadata(self, trailing_metadata):
+ with self._state.condition:
+ self._state.trailing_metadata = trailing_metadata
+
+ def set_code(self, code):
+ with self._state.condition:
+ self._state.code = code
+
+ def set_details(self, details):
+ with self._state.condition:
+ self._state.details = details
+
+
+class _RequestIterator(object):
+
+ def __init__(self, state, call, request_deserializer):
+ self._state = state
+ self._call = call
+ self._request_deserializer = request_deserializer
+
+ def _raise_or_start_receive_message(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif self._state.client is _CLOSED or self._state.statused:
+ raise StopIteration()
+ else:
+ self._call.start_batch(
+ cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _receive_message(self._state, self._call, self._request_deserializer))
+ self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
+
+ def _look_for_request(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif (self._state.request is None and
+ _RECEIVE_MESSAGE_TOKEN not in self._state.due):
+ raise StopIteration()
+ else:
+ request = self._state.request
+ self._state.request = None
+ return request
+
+ def _next(self):
+ with self._state.condition:
+ self._raise_or_start_receive_message()
+ while True:
+ self._state.condition.wait()
+ request = self._look_for_request()
+ if request is not None:
+ return request
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+
+def _unary_request(rpc_event, state, request_deserializer):
+ def unary_request():
+ with state.condition:
+ if state.client is _CANCELLED or state.statused:
+ return None
+ else:
+ start_batch_result = rpc_event.operation_call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _receive_message(
+ state, rpc_event.operation_call, request_deserializer))
+ state.due.add(_RECEIVE_MESSAGE_TOKEN)
+ while True:
+ state.condition.wait()
+ if state.request is None:
+ if state.client is _CLOSED:
+ details = '"{}" requires exactly one request message.'.format(
+ rpc_event.request_call_details.method)
+ _abort(
+ state, rpc_event.operation_call,
+ cygrpc.StatusCode.unimplemented, details)
+ return None
+ elif state.client is _CANCELLED:
+ return None
+ else:
+ request = state.request
+ state.request = None
+ return request
+ return unary_request
+
+
+def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
+ context = _Context(rpc_event, state, request_deserializer)
+ try:
+ return behavior(argument, context), True
+ except Exception as e: # pylint: disable=broad-except
+ with state.condition:
+ if e not in state.rpc_errors:
+ details = 'Exception calling application: {}'.format(e)
+ logging.exception(details)
+ _abort(
+ state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details)
+ return None, False
+
+
+def _take_response_from_response_iterator(rpc_event, state, response_iterator):
+ try:
+ return next(response_iterator), True
+ except StopIteration:
+ return None, True
+ except Exception as e: # pylint: disable=broad-except
+ with state.condition:
+ if e not in state.rpc_errors:
+ details = 'Exception iterating responses: {}'.format(e)
+ logging.exception(details)
+ _abort(
+ state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details)
+ return None, False
+
+
+def _serialize_response(rpc_event, state, response, response_serializer):
+ serialized_response = _common.serialize(response, response_serializer)
+ if serialized_response is None:
+ with state.condition:
+ _abort(
+ state, rpc_event.operation_call, cygrpc.StatusCode.internal,
+ 'Failed to serialize response!')
+ return None
+ else:
+ return serialized_response
+
+
+def _send_response(rpc_event, state, serialized_response):
+ with state.condition:
+ if state.client is _CANCELLED or state.statused:
+ return False
+ else:
+ if state.initial_metadata_allowed:
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
+ )
+ state.initial_metadata_allowed = False
+ token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
+ else:
+ operations = (
+ cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
+ )
+ token = _SEND_MESSAGE_TOKEN
+ rpc_event.operation_call.start_batch(
+ cygrpc.Operations(operations), _send_message(state, token))
+ state.due.add(token)
+ while True:
+ state.condition.wait()
+ if token not in state.due:
+ return state.client is not _CANCELLED and not state.statused
+
+
+def _status(rpc_event, state, serialized_response):
+ with state.condition:
+ if state.client is not _CANCELLED:
+ trailing_metadata = _common.metadata(state.trailing_metadata)
+ code = _completion_code(state)
+ details = _details(state)
+ operations = [
+ cygrpc.operation_send_status_from_server(
+ trailing_metadata, code, details, _EMPTY_FLAGS),
+ ]
+ if state.initial_metadata_allowed:
+ operations.append(
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS))
+ if serialized_response is not None:
+ operations.append(cygrpc.operation_send_message(
+ serialized_response, _EMPTY_FLAGS))
+ rpc_event.operation_call.start_batch(
+ cygrpc.Operations(operations),
+ _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
+ state.statused = True
+ state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
+
+
+def _unary_response_in_pool(
+ rpc_event, state, behavior, argument_thunk, request_deserializer,
+ response_serializer):
+ argument = argument_thunk()
+ if argument is not None:
+ response, proceed = _call_behavior(
+ rpc_event, state, behavior, argument, request_deserializer)
+ if proceed:
+ serialized_response = _serialize_response(
+ rpc_event, state, response, response_serializer)
+ if serialized_response is not None:
+ _status(rpc_event, state, serialized_response)
+ return
+
+
+def _stream_response_in_pool(
+ rpc_event, state, behavior, argument_thunk, request_deserializer,
+ response_serializer):
+ argument = argument_thunk()
+ if argument is not None:
+ response_iterator, proceed = _call_behavior(
+ rpc_event, state, behavior, argument, request_deserializer)
+ if proceed:
+ while True:
+ response, proceed = _take_response_from_response_iterator(
+ rpc_event, state, response_iterator)
+ if proceed:
+ if response is None:
+ _status(rpc_event, state, None)
+ break
+ else:
+ serialized_response = _serialize_response(
+ rpc_event, state, response, response_serializer)
+ if serialized_response is not None:
+ proceed = _send_response(rpc_event, state, serialized_response)
+ if not proceed:
+ break
+ else:
+ break
+ else:
+ break
+
+
+def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
+ unary_request = _unary_request(
+ rpc_event, state, method_handler.request_deserializer)
+ thread_pool.submit(
+ _unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
+ unary_request, method_handler.request_deserializer,
+ method_handler.response_serializer)
+
+
+def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
+ unary_request = _unary_request(
+ rpc_event, state, method_handler.request_deserializer)
+ thread_pool.submit(
+ _stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
+ unary_request, method_handler.request_deserializer,
+ method_handler.response_serializer)
+
+
+def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
+ request_iterator = _RequestIterator(
+ state, rpc_event.operation_call, method_handler.request_deserializer)
+ thread_pool.submit(
+ _unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
+ lambda: request_iterator, method_handler.request_deserializer,
+ method_handler.response_serializer)
+
+
+def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
+ request_iterator = _RequestIterator(
+ state, rpc_event.operation_call, method_handler.request_deserializer)
+ thread_pool.submit(
+ _stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
+ lambda: request_iterator, method_handler.request_deserializer,
+ method_handler.response_serializer)
+
+
+def _find_method_handler(rpc_event, generic_handlers):
+ for generic_handler in generic_handlers:
+ method_handler = generic_handler.service(
+ _HandlerCallDetails(
+ rpc_event.request_call_details.method, rpc_event.request_metadata))
+ if method_handler is not None:
+ return method_handler
+ else:
+ return None
+
+
+def _handle_unrecognized_method(rpc_event):
+ operations = (
+ cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
+ 'Method not found!', _EMPTY_FLAGS),
+ )
+ rpc_state = _RPCState()
+ rpc_event.operation_call.start_batch(
+ cygrpc.Operations(operations), lambda ignored_event: (rpc_state, (),))
+ return rpc_state
+
+
+def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
+ state = _RPCState()
+ with state.condition:
+ rpc_event.operation_call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+ _receive_close_on_server(state))
+ state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
+ if method_handler.request_streaming:
+ if method_handler.response_streaming:
+ _handle_stream_stream(rpc_event, state, method_handler, thread_pool)
+ else:
+ _handle_stream_unary(rpc_event, state, method_handler, thread_pool)
+ else:
+ if method_handler.response_streaming:
+ _handle_unary_stream(rpc_event, state, method_handler, thread_pool)
+ else:
+ _handle_unary_unary(rpc_event, state, method_handler, thread_pool)
+ return state
+
+
+def _handle_call(rpc_event, generic_handlers, thread_pool):
+ if rpc_event.request_call_details.method is not None:
+ method_handler = _find_method_handler(rpc_event, generic_handlers)
+ if method_handler is None:
+ return _handle_unrecognized_method(rpc_event)
+ else:
+ return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
+ else:
+ return None
+
+
+@enum.unique
+class _ServerStage(enum.Enum):
+ STOPPED = 'stopped'
+ STARTED = 'started'
+ GRACE = 'grace'
+
+
+class _ServerState(object):
+
+ def __init__(self, completion_queue, server, generic_handlers, thread_pool):
+ self.lock = threading.Lock()
+ self.completion_queue = completion_queue
+ self.server = server
+ self.generic_handlers = list(generic_handlers)
+ self.thread_pool = thread_pool
+ self.stage = _ServerStage.STOPPED
+ self.shutdown_events = None
+
+ # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
+ self.rpc_states = set()
+ self.due = set()
+
+
+def _add_generic_handlers(state, generic_handlers):
+ with state.lock:
+ state.generic_handlers.extend(generic_handlers)
+
+
+def _add_insecure_port(state, address):
+ with state.lock:
+ return state.server.add_http2_port(address)
+
+
+def _add_secure_port(state, address, server_credentials):
+ with state.lock:
+ return state.server.add_http2_port(address, server_credentials._credentials)
+
+
+def _request_call(state):
+ state.server.request_call(
+ state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
+ state.due.add(_REQUEST_CALL_TAG)
+
+
+# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
+def _stop_serving(state):
+ if not state.rpc_states and not state.due:
+ for shutdown_event in state.shutdown_events:
+ shutdown_event.set()
+ state.stage = _ServerStage.STOPPED
+ return True
+ else:
+ return False
+
+
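+# _serve is the server's single event loop: it blocks on the completion
+# queue and dispatches each event by tag.  _SHUTDOWN_TAG and
+# _REQUEST_CALL_TAG are handled inline; any other tag is assumed to be a
+# callable (one of the batch callbacks installed above) that returns the
+# owning RPC state plus zero or more callbacks to run on this thread.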
+def _serve(state):
+ while True:
+ event = state.completion_queue.poll()
+ if event.tag is _SHUTDOWN_TAG:
+ with state.lock:
+ state.due.remove(_SHUTDOWN_TAG)
+ if _stop_serving(state):
+ return
+ elif event.tag is _REQUEST_CALL_TAG:
+ with state.lock:
+ state.due.remove(_REQUEST_CALL_TAG)
+ rpc_state = _handle_call(
+ event, state.generic_handlers, state.thread_pool)
+ if rpc_state is not None:
+ state.rpc_states.add(rpc_state)
+ if state.stage is _ServerStage.STARTED:
+ _request_call(state)
+ elif _stop_serving(state):
+ return
+ else:
+ rpc_state, callbacks = event.tag(event)
+ for callback in callbacks:
+ callable_util.call_logging_exceptions(
+ callback, 'Exception calling callback!')
+ if rpc_state is not None:
+ with state.lock:
+ state.rpc_states.remove(rpc_state)
+ if _stop_serving(state):
+ return
+
+
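+# _stop covers both shutdown modes: with grace=None every in-flight call is
+# cancelled immediately and the caller blocks until shutdown completes; with
+# a numeric grace a background thread waits up to that many seconds before
+# cancelling whatever is still running, and the returned threading.Event is
+# set once the server has fully stopped.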
+def _stop(state, grace):
+ with state.lock:
+ if state.stage is _ServerStage.STOPPED:
+ shutdown_event = threading.Event()
+ shutdown_event.set()
+ return shutdown_event
+ else:
+ if state.stage is _ServerStage.STARTED:
+ state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
+ state.stage = _ServerStage.GRACE
+ state.shutdown_events = []
+ state.due.add(_SHUTDOWN_TAG)
+ shutdown_event = threading.Event()
+ state.shutdown_events.append(shutdown_event)
+ if grace is None:
+ state.server.cancel_all_calls()
+ # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
+ for rpc_state in state.rpc_states:
+ with rpc_state.condition:
+ rpc_state.client = _CANCELLED
+ rpc_state.condition.notify_all()
+ else:
+ def cancel_all_calls_after_grace():
+ shutdown_event.wait(timeout=grace)
+ with state.lock:
+ state.server.cancel_all_calls()
+ # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
+ for rpc_state in state.rpc_states:
+ with rpc_state.condition:
+ rpc_state.client = _CANCELLED
+ rpc_state.condition.notify_all()
+ thread = threading.Thread(target=cancel_all_calls_after_grace)
+ thread.start()
+ return shutdown_event
+ shutdown_event.wait()
+ return shutdown_event
+
+
+def _start(state):
+ with state.lock:
+ if state.stage is not _ServerStage.STOPPED:
+ raise ValueError('Cannot start already-started server!')
+ state.server.start()
+ state.stage = _ServerStage.STARTED
+ _request_call(state)
+ def cleanup_server(timeout):
+ if timeout is None:
+ _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
+ else:
+ _stop(state, timeout).wait()
+
+ thread = _common.CleanupThread(
+ cleanup_server, target=_serve, args=(state,))
+ thread.start()
+
+
+class Server(grpc.Server):
+
+ def __init__(self, generic_handlers, thread_pool):
+ completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server()
+ server.register_completion_queue(completion_queue)
+ self._state = _ServerState(
+ completion_queue, server, generic_handlers, thread_pool)
+
+ def add_generic_rpc_handlers(self, generic_rpc_handlers):
+ _add_generic_handlers(self._state, generic_rpc_handlers)
+
+ def add_insecure_port(self, address):
+ return _add_insecure_port(self._state, address)
+
+ def add_secure_port(self, address, server_credentials):
+ return _add_secure_port(self._state, address, server_credentials)
+
+ def start(self):
+ _start(self._state)
+
+ def stop(self, grace):
+ return _stop(self._state, grace)
+
+ def __del__(self):
+ _stop(self._state, None)
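For orientation, a minimal sketch of driving this Server from application code, assuming only what the change shows: the constructor takes generic handlers followed by a thread pool exposing submit() (grpc.server() is wired that way by the Beta adaptation later in this diff), and stop() returns a threading.Event. The empty handler tuple and the port are placeholders.

    from concurrent import futures
    import grpc

    # Real code would pass GenericRpcHandler instances; an empty tuple and a
    # small pool are enough to exercise start/stop.
    server = grpc.server((), futures.ThreadPoolExecutor(max_workers=8))
    server.add_insecure_port('[::]:50051')
    server.start()
    # grace=None cancels in-flight RPCs and blocks until shutdown completes.
    server.stop(None)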
diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py
new file mode 100644
index 0000000000..4850967fbc
--- /dev/null
+++ b/src/python/grpcio/grpc/_utilities.py
@@ -0,0 +1,171 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Internal utilities for gRPC Python."""
+
+import collections
+import threading
+import time
+
+import six
+
+import grpc
+from grpc import _common
+from grpc.framework.foundation import callable_util
+
+_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
+ 'Exception calling connectivity future "done" callback!')
+
+
+class RpcMethodHandler(
+ collections.namedtuple(
+ '_RpcMethodHandler',
+ ('request_streaming', 'response_streaming', 'request_deserializer',
+ 'response_serializer', 'unary_unary', 'unary_stream', 'stream_unary',
+ 'stream_stream',)),
+ grpc.RpcMethodHandler):
+ pass
+
+
+class DictionaryGenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, service, method_handlers):
+ self._method_handlers = {
+ _common.fully_qualified_method(service, method): method_handler
+ for method, method_handler in six.iteritems(method_handlers)}
+
+ def service(self, handler_call_details):
+ return self._method_handlers.get(handler_call_details.method)
+
+
+class _ChannelReadyFuture(grpc.Future):
+
+ def __init__(self, channel):
+ self._condition = threading.Condition()
+ self._channel = channel
+
+ self._matured = False
+ self._cancelled = False
+ self._done_callbacks = []
+
+ def _block(self, timeout):
+ until = None if timeout is None else time.time() + timeout
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise grpc.FutureCancelledError()
+ elif self._matured:
+ return
+ else:
+ if until is None:
+ self._condition.wait()
+ else:
+ remaining = until - time.time()
+ if remaining < 0:
+ raise grpc.FutureTimeoutError()
+ else:
+ self._condition.wait(timeout=remaining)
+
+ def _update(self, connectivity):
+ with self._condition:
+ if (not self._cancelled and
+ connectivity is grpc.ChannelConnectivity.READY):
+ self._matured = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
+ else:
+ return
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+
+ def cancel(self):
+ with self._condition:
+ if not self._matured:
+ self._cancelled = True
+ self._channel.unsubscribe(self._update)
+ self._condition.notify_all()
+ done_callbacks = tuple(self._done_callbacks)
+ self._done_callbacks = None
+ else:
+ return False
+
+ for done_callback in done_callbacks:
+ callable_util.call_logging_exceptions(
+ done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)
+ return True
+
+ def cancelled(self):
+ with self._condition:
+ return self._cancelled
+
+ def running(self):
+ with self._condition:
+ return not self._cancelled and not self._matured
+
+ def done(self):
+ with self._condition:
+ return self._cancelled or self._matured
+
+ def result(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def exception(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def traceback(self, timeout=None):
+ self._block(timeout)
+ return None
+
+ def add_done_callback(self, fn):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._done_callbacks.append(fn)
+ return
+
+ fn(self)
+
+ def start(self):
+ with self._condition:
+ self._channel.subscribe(self._update, try_to_connect=True)
+
+ def __del__(self):
+ with self._condition:
+ if not self._cancelled and not self._matured:
+ self._channel.unsubscribe(self._update)
+
+
+def channel_ready_future(channel):
+ ready_future = _ChannelReadyFuture(channel)
+ ready_future.start()
+ return ready_future
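A minimal sketch of how channel_ready_future is meant to be consumed, assuming a channel created through the public grpc API; the target address is a placeholder and the helper is used from the internal grpc._utilities module defined here.

    import grpc
    from grpc import _utilities

    channel = grpc.insecure_channel('localhost:50051')
    ready_future = _utilities.channel_ready_future(channel)
    try:
        # Blocks until the channel reports READY or the timeout elapses.
        ready_future.result(timeout=5)
    except grpc.FutureTimeoutError:
        print('channel was not ready within 5 seconds')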
diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py
new file mode 100644
index 0000000000..024808c540
--- /dev/null
+++ b/src/python/grpcio/grpc/beta/_client_adaptations.py
@@ -0,0 +1,563 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
+
+import grpc
+from grpc import _common
+from grpc._cython import cygrpc
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.foundation import future
+from grpc.framework.interfaces.face import face
+
+_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
+ grpc.StatusCode.CANCELLED: (
+ face.Abortion.Kind.CANCELLED, face.CancellationError),
+ grpc.StatusCode.UNKNOWN: (
+ face.Abortion.Kind.REMOTE_FAILURE, face.RemoteError),
+ grpc.StatusCode.DEADLINE_EXCEEDED: (
+ face.Abortion.Kind.EXPIRED, face.ExpirationError),
+ grpc.StatusCode.UNIMPLEMENTED: (
+ face.Abortion.Kind.LOCAL_FAILURE, face.LocalError),
+}
+
+
+def _effective_metadata(metadata, metadata_transformer):
+ non_none_metadata = () if metadata is None else metadata
+ if metadata_transformer is None:
+ return non_none_metadata
+ else:
+ return metadata_transformer(non_none_metadata)
+
+
+def _credentials(grpc_call_options):
+ return None if grpc_call_options is None else grpc_call_options.credentials
+
+
+def _abortion(rpc_error_call):
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
+ return face.Abortion(
+ error_kind, rpc_error_call.initial_metadata(),
+ rpc_error_call.trailing_metadata(), code, rpc_error_call.details())
+
+
+def _abortion_error(rpc_error_call):
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ exception_class = face.AbortionError if pair is None else pair[1]
+ return exception_class(
+ rpc_error_call.initial_metadata(), rpc_error_call.trailing_metadata(),
+ code, rpc_error_call.details())
+
+
+class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
+
+ def disable_next_request_compression(self):
+ pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
+class _Rendezvous(future.Future, face.Call):
+
+ def __init__(self, response_future, response_iterator, call):
+ self._future = response_future
+ self._iterator = response_iterator
+ self._call = call
+
+ def cancel(self):
+ return self._call.cancel()
+
+ def cancelled(self):
+ return self._future.cancelled()
+
+ def running(self):
+ return self._future.running()
+
+ def done(self):
+ return self._future.done()
+
+ def result(self, timeout=None):
+ try:
+ return self._future.result(timeout=timeout)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def exception(self, timeout=None):
+ try:
+ rpc_error_call = self._future.exception(timeout=timeout)
+ if rpc_error_call is None:
+ return None
+ else:
+ return _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def traceback(self, timeout=None):
+ try:
+ return self._future.traceback(timeout=timeout)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def add_done_callback(self, fn):
+ self._future.add_done_callback(lambda ignored_callback: fn(self))
+
+ def __iter__(self):
+ return self
+
+ def _next(self):
+ try:
+ return next(self._iterator)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+ def is_active(self):
+ return self._call.is_active()
+
+ def time_remaining(self):
+ return self._call.time_remaining()
+
+ def add_abortion_callback(self, abortion_callback):
+ registered = self._call.add_callback(
+ lambda: abortion_callback(_abortion(self._call)))
+ return None if registered else _abortion(self._call)
+
+ def protocol_context(self):
+ return _InvocationProtocolContext()
+
+ def initial_metadata(self):
+ return self._call.initial_metadata()
+
+ def terminal_metadata(self):
+ return self._call.terminal_metadata()
+
+ def code(self):
+ return self._call.code()
+
+ def details(self):
+ return self._call.details()
+
+
+def _blocking_unary_unary(
+ channel, group, method, timeout, with_call, protocol_options, metadata,
+ metadata_transformer, request, request_serializer, response_deserializer):
+ try:
+ multi_callable = channel.unary_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable(
+ request, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options), with_call=True)
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(
+ request, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_unary_unary(
+ channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request, request_serializer, response_deserializer):
+ multi_callable = channel.unary_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_future = multi_callable.future(
+ request, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
+def _unary_stream(
+ channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request, request_serializer, response_deserializer):
+ multi_callable = channel.unary_stream(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_iterator = multi_callable(
+ request, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(None, response_iterator, response_iterator)
+
+
+def _blocking_stream_unary(
+ channel, group, method, timeout, with_call, protocol_options, metadata,
+ metadata_transformer, request_iterator, request_serializer,
+ response_deserializer):
+ try:
+ multi_callable = channel.stream_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable(
+ request_iterator, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options), with_call=True)
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(
+ request_iterator, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_stream_unary(
+ channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request_iterator, request_serializer,
+ response_deserializer):
+ multi_callable = channel.stream_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_future = multi_callable.future(
+ request_iterator, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
+def _stream_stream(
+ channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request_iterator, request_serializer,
+ response_deserializer):
+ multi_callable = channel.stream_stream(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_iterator = multi_callable(
+ request_iterator, timeout=timeout, metadata=effective_metadata,
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(None, response_iterator, response_iterator)
+
+
+class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
+
+ def __init__(
+ self, channel, group, method, metadata_transformer, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(
+ self, request, timeout, metadata=None, with_call=False,
+ protocol_options=None):
+ return _blocking_unary_unary(
+ self._channel, self._group, self._method, timeout, with_call,
+ protocol_options, metadata, self._metadata_transformer, request,
+ self._request_serializer, self._response_deserializer)
+
+ def future(self, request, timeout, metadata=None, protocol_options=None):
+ return _future_unary_unary(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request, self._request_serializer,
+ self._response_deserializer)
+
+ def event(
+ self, request, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+
+class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
+
+ def __init__(
+ self, channel, group, method, metadata_transformer, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(self, request, timeout, metadata=None, protocol_options=None):
+ return _unary_stream(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request, self._request_serializer,
+ self._response_deserializer)
+
+ def event(
+ self, request, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+
+class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
+
+ def __init__(
+ self, channel, group, method, metadata_transformer, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(
+ self, request_iterator, timeout, metadata=None, with_call=False,
+ protocol_options=None):
+ return _blocking_stream_unary(
+ self._channel, self._group, self._method, timeout, with_call,
+ protocol_options, metadata, self._metadata_transformer,
+ request_iterator, self._request_serializer, self._response_deserializer)
+
+ def future(
+ self, request_iterator, timeout, metadata=None, protocol_options=None):
+ return _future_stream_unary(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ self._request_serializer, self._response_deserializer)
+
+ def event(
+ self, receiver, abortion_callback, timeout, metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+
+class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
+
+ def __init__(
+ self, channel, group, method, metadata_transformer, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._group = group
+ self._method = method
+ self._metadata_transformer = metadata_transformer
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+
+ def __call__(
+ self, request_iterator, timeout, metadata=None, protocol_options=None):
+ return _stream_stream(
+ self._channel, self._group, self._method, timeout, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ self._request_serializer, self._response_deserializer)
+
+ def event(
+ self, receiver, abortion_callback, timeout, metadata=None,
+ protocol_options=None):
+ raise NotImplementedError()
+
+
+class _GenericStub(face.GenericStub):
+
+ def __init__(
+ self, channel, metadata_transformer, request_serializers,
+ response_deserializers):
+ self._channel = channel
+ self._metadata_transformer = metadata_transformer
+ self._request_serializers = request_serializers or {}
+ self._response_deserializers = response_deserializers or {}
+
+ def blocking_unary_unary(
+ self, group, method, request, timeout, metadata=None,
+ with_call=None, protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _blocking_unary_unary(
+ self._channel, group, method, timeout, with_call, protocol_options,
+ metadata, self._metadata_transformer, request, request_serializer,
+ response_deserializer)
+
+ def future_unary_unary(
+ self, group, method, request, timeout, metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _future_unary_unary(
+ self._channel, group, method, timeout, protocol_options, metadata,
+ self._metadata_transformer, request, request_serializer,
+ response_deserializer)
+
+ def inline_unary_stream(
+ self, group, method, request, timeout, metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _unary_stream(
+ self._channel, group, method, timeout, protocol_options, metadata,
+ self._metadata_transformer, request, request_serializer,
+ response_deserializer)
+
+ def blocking_stream_unary(
+ self, group, method, request_iterator, timeout, metadata=None,
+ with_call=None, protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _blocking_stream_unary(
+ self._channel, group, method, timeout, with_call, protocol_options,
+ metadata, self._metadata_transformer, request_iterator,
+ request_serializer, response_deserializer)
+
+ def future_stream_unary(
+ self, group, method, request_iterator, timeout, metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _future_stream_unary(
+ self._channel, group, method, timeout, protocol_options, metadata,
+ self._metadata_transformer, request_iterator, request_serializer,
+ response_deserializer)
+
+ def inline_stream_stream(
+ self, group, method, request_iterator, timeout, metadata=None,
+ protocol_options=None):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _stream_stream(
+ self._channel, group, method, timeout, protocol_options, metadata,
+ self._metadata_transformer, request_iterator, request_serializer,
+ response_deserializer)
+
+ def event_unary_unary(
+ self, group, method, request, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+ def event_unary_stream(
+ self, group, method, request, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+ def event_stream_unary(
+ self, group, method, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+ def event_stream_stream(
+ self, group, method, receiver, abortion_callback, timeout,
+ metadata=None, protocol_options=None):
+ raise NotImplementedError()
+
+ def unary_unary(self, group, method):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _UnaryUnaryMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def unary_stream(self, group, method):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _UnaryStreamMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def stream_unary(self, group, method):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _StreamUnaryMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def stream_stream(self, group, method):
+ request_serializer = self._request_serializers.get((group, method,))
+ response_deserializer = self._response_deserializers.get((group, method,))
+ return _StreamStreamMultiCallable(
+ self._channel, group, method, self._metadata_transformer,
+ request_serializer, response_deserializer)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
+
+
+class _DynamicStub(face.DynamicStub):
+
+ def __init__(self, generic_stub, group, cardinalities):
+ self._generic_stub = generic_stub
+ self._group = group
+ self._cardinalities = cardinalities
+
+ def __getattr__(self, attr):
+ method_cardinality = self._cardinalities.get(attr)
+ if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return self._generic_stub.unary_unary(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return self._generic_stub.unary_stream(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return self._generic_stub.stream_unary(self._group, attr)
+ elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return self._generic_stub.stream_stream(self._group, attr)
+ else:
+ raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
+
+
+def generic_stub(
+ channel, host, metadata_transformer, request_serializers,
+ response_deserializers):
+ return _GenericStub(
+ channel, metadata_transformer, request_serializers,
+ response_deserializers)
+
+
+def dynamic_stub(
+ channel, service, cardinalities, host, metadata_transformer,
+ request_serializers, response_deserializers):
+ return _DynamicStub(
+ _GenericStub(
+ channel, metadata_transformer, request_serializers,
+ response_deserializers),
+ service, cardinalities)
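A minimal sketch of the Beta client path these adaptations serve, with the service name, method name, cardinality map, and request message as placeholders (serializers are omitted for brevity, so StubOptions defaults apply):

    from grpc.beta import implementations
    from grpc.framework.common import cardinality

    channel = implementations.insecure_channel('localhost', 50051)
    stub = implementations.dynamic_stub(
        channel, 'my.package.MyService',
        {'MyMethod': cardinality.Cardinality.UNARY_UNARY})
    request = ...  # a request message for MyMethod (placeholder)
    # __getattr__ on the dynamic stub resolves MyMethod to a blocking
    # unary-unary callable; 10 is the timeout in seconds.
    response = stub.MyMethod(request, 10)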
diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py
new file mode 100644
index 0000000000..79e6ca87eb
--- /dev/null
+++ b/src/python/grpcio/grpc/beta/_server_adaptations.py
@@ -0,0 +1,367 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
+
+import collections
+import threading
+
+import grpc
+from grpc import _common
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import abandonment
+from grpc.framework.foundation import logging_pool
+from grpc.framework.foundation import stream
+from grpc.framework.interfaces.face import face
+
+_DEFAULT_POOL_SIZE = 8
+
+
+class _ServerProtocolContext(interfaces.GRPCServicerContext):
+
+ def __init__(self, servicer_context):
+ self._servicer_context = servicer_context
+
+ def peer(self):
+ return self._servicer_context.peer()
+
+ def disable_next_response_compression(self):
+ pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
+class _FaceServicerContext(face.ServicerContext):
+
+ def __init__(self, servicer_context):
+ self._servicer_context = servicer_context
+
+ def is_active(self):
+ return self._servicer_context.is_active()
+
+ def time_remaining(self):
+ return self._servicer_context.time_remaining()
+
+ def add_abortion_callback(self, abortion_callback):
+ raise NotImplementedError(
+ 'add_abortion_callback no longer supported server-side!')
+
+ def cancel(self):
+ self._servicer_context.cancel()
+
+ def protocol_context(self):
+ return _ServerProtocolContext(self._servicer_context)
+
+ def invocation_metadata(self):
+ return self._servicer_context.invocation_metadata()
+
+ def initial_metadata(self, initial_metadata):
+ self._servicer_context.send_initial_metadata(initial_metadata)
+
+ def terminal_metadata(self, terminal_metadata):
+ self._servicer_context.set_terminal_metadata(terminal_metadata)
+
+ def code(self, code):
+ self._servicer_context.set_code(code)
+
+ def details(self, details):
+ self._servicer_context.set_details(details)
+
+
+def _adapt_unary_request_inline(unary_request_inline):
+ def adaptation(request, servicer_context):
+ return unary_request_inline(request, _FaceServicerContext(servicer_context))
+ return adaptation
+
+
+def _adapt_stream_request_inline(stream_request_inline):
+ def adaptation(request_iterator, servicer_context):
+ return stream_request_inline(
+ request_iterator, _FaceServicerContext(servicer_context))
+ return adaptation
+
+
+class _Callback(stream.Consumer):
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._terminated = False
+ self._cancelled = False
+
+ def consume(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._condition.notify_all()
+
+ def terminate(self):
+ with self._condition:
+ self._terminated = True
+ self._condition.notify_all()
+
+ def consume_and_terminate(self, value):
+ with self._condition:
+ self._values.append(value)
+ self._terminated = True
+ self._condition.notify_all()
+
+ def cancel(self):
+ with self._condition:
+ self._cancelled = True
+ self._condition.notify_all()
+
+ def draw_one_value(self):
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise abandonment.Abandoned()
+ elif self._values:
+ return self._values.pop(0)
+ elif self._terminated:
+ return None
+ else:
+ self._condition.wait()
+
+ def draw_all_values(self):
+ with self._condition:
+ while True:
+ if self._cancelled:
+ raise abandonment.Abandoned()
+ elif self._terminated:
+ all_values = tuple(self._values)
+ self._values = None
+ return all_values
+ else:
+ self._condition.wait()
+
+
+def _pipe_requests(request_iterator, request_consumer, servicer_context):
+ for request in request_iterator:
+ if not servicer_context.is_active():
+ return
+ request_consumer.consume(request)
+ if not servicer_context.is_active():
+ return
+ request_consumer.terminate()
+
+
+def _adapt_unary_unary_event(unary_unary_event):
+ def adaptation(request, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ unary_unary_event(
+ request, callback.consume_and_terminate,
+ _FaceServicerContext(servicer_context))
+ return callback.draw_all_values()[0]
+ return adaptation
+
+
+def _adapt_unary_stream_event(unary_stream_event):
+ def adaptation(request, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ unary_stream_event(
+ request, callback, _FaceServicerContext(servicer_context))
+ while True:
+ response = callback.draw_one_value()
+ if response is None:
+ return
+ else:
+ yield response
+ return adaptation
+
+
+def _adapt_stream_unary_event(stream_unary_event):
+ def adaptation(request_iterator, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ request_consumer = stream_unary_event(
+ callback.consume_and_terminate, _FaceServicerContext(servicer_context))
+ request_pipe_thread = threading.Thread(
+ target=_pipe_requests,
+ args=(request_iterator, request_consumer, servicer_context,))
+ request_pipe_thread.start()
+ return callback.draw_all_values()[0]
+ return adaptation
+
+
+def _adapt_stream_stream_event(stream_stream_event):
+ def adaptation(request_iterator, servicer_context):
+ callback = _Callback()
+ if not servicer_context.add_callback(callback.cancel):
+ raise abandonment.Abandoned()
+ request_consumer = stream_stream_event(
+ callback, _FaceServicerContext(servicer_context))
+ request_pipe_thread = threading.Thread(
+ target=_pipe_requests,
+ args=(request_iterator, request_consumer, servicer_context,))
+ request_pipe_thread.start()
+ while True:
+ response = callback.draw_one_value()
+ if response is None:
+ return
+ else:
+ yield response
+ return adaptation
+
+
+class _SimpleMethodHandler(
+ collections.namedtuple(
+ '_MethodHandler',
+ ('request_streaming', 'response_streaming', 'request_deserializer',
+ 'response_serializer', 'unary_unary', 'unary_stream', 'stream_unary',
+ 'stream_stream',)),
+ grpc.RpcMethodHandler):
+ pass
+
+
+def _simple_method_handler(
+ implementation, request_deserializer, response_serializer):
+ if implementation.style is style.Service.INLINE:
+ if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return _SimpleMethodHandler(
+ False, False, request_deserializer, response_serializer,
+ _adapt_unary_request_inline(implementation.unary_unary_inline), None,
+ None, None)
+ elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return _SimpleMethodHandler(
+ False, True, request_deserializer, response_serializer, None,
+ _adapt_unary_request_inline(implementation.unary_stream_inline), None,
+ None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return _SimpleMethodHandler(
+ True, False, request_deserializer, response_serializer, None, None,
+ _adapt_stream_request_inline(implementation.stream_unary_inline),
+ None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return _SimpleMethodHandler(
+ True, True, request_deserializer, response_serializer, None, None,
+ None,
+ _adapt_stream_request_inline(implementation.stream_stream_inline))
+ elif implementation.style is style.Service.EVENT:
+ if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
+ return _SimpleMethodHandler(
+ False, False, request_deserializer, response_serializer,
+ _adapt_unary_unary_event(implementation.unary_unary_event), None,
+ None, None)
+ elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
+ return _SimpleMethodHandler(
+ False, True, request_deserializer, response_serializer, None,
+ _adapt_unary_stream_event(implementation.unary_stream_event), None,
+ None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
+ return _SimpleMethodHandler(
+ True, False, request_deserializer, response_serializer, None, None,
+ _adapt_stream_unary_event(implementation.stream_unary_event), None)
+ elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
+ return _SimpleMethodHandler(
+ True, True, request_deserializer, response_serializer, None, None,
+ None, _adapt_stream_stream_event(implementation.stream_stream_event))
+
+
+def _flatten_method_pair_map(method_pair_map):
+ method_pair_map = method_pair_map or {}
+ flat_map = {}
+ for method_pair in method_pair_map:
+ method = _common.fully_qualified_method(method_pair[0], method_pair[1])
+ flat_map[method] = method_pair_map[method_pair]
+ return flat_map
+
+
+class _GenericRpcHandler(grpc.GenericRpcHandler):
+
+ def __init__(
+ self, method_implementations, multi_method_implementation,
+ request_deserializers, response_serializers):
+ self._method_implementations = _flatten_method_pair_map(
+ method_implementations)
+ self._request_deserializers = _flatten_method_pair_map(
+ request_deserializers)
+ self._response_serializers = _flatten_method_pair_map(
+ response_serializers)
+ self._multi_method_implementation = multi_method_implementation
+
+ def service(self, handler_call_details):
+ method_implementation = self._method_implementations.get(
+ handler_call_details.method)
+ if method_implementation is not None:
+ return _simple_method_handler(
+ method_implementation,
+ self._request_deserializers.get(handler_call_details.method),
+ self._response_serializers.get(handler_call_details.method))
+ elif self._multi_method_implementation is None:
+ return None
+ else:
+ try:
+ return None #TODO(nathaniel): call the multimethod.
+ except face.NoSuchMethodError:
+ return None
+
+
+class _Server(interfaces.Server):
+
+ def __init__(self, server):
+ self._server = server
+
+ def add_insecure_port(self, address):
+ return self._server.add_insecure_port(address)
+
+ def add_secure_port(self, address, server_credentials):
+ return self._server.add_secure_port(address, server_credentials)
+
+ def start(self):
+ self._server.start()
+
+ def stop(self, grace):
+ return self._server.stop(grace)
+
+ def __enter__(self):
+ self._server.start()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._server.stop(None)
+ return False
+
+
+def server(
+ service_implementations, multi_method_implementation, request_deserializers,
+ response_serializers, thread_pool, thread_pool_size):
+ generic_rpc_handler = _GenericRpcHandler(
+ service_implementations, multi_method_implementation,
+ request_deserializers, response_serializers)
+ if thread_pool is None:
+ effective_thread_pool = logging_pool.pool(
+ _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size)
+ else:
+ effective_thread_pool = thread_pool
+ return _Server(grpc.server((generic_rpc_handler,), effective_thread_pool))
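And the corresponding Beta server path, sketched with a placeholder implementation map; keys are (group, method) pairs, the shape _flatten_method_pair_map expects, and my_method_implementation stands in for a face method implementation built elsewhere:

    from grpc.beta import implementations

    method_implementations = {
        ('my.package.MyService', 'MyMethod'): my_method_implementation,
    }
    server = implementations.server(method_implementations)
    port = server.add_insecure_port('[::]:0')  # 0 picks an unused port
    server.start()
    # ... serve ...
    server.stop(0)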
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index 822f593323..4ae6e7d675 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -35,112 +35,36 @@ import enum
import threading # pylint: disable=unused-import
# cardinality and face are referenced from specification in this module.
-from grpc._adapter import _intermediary_low
-from grpc._adapter import _low
+import grpc
+from grpc import _auth
from grpc._adapter import _types
-from grpc.beta import _connectivity_channel
-from grpc.beta import _server
-from grpc.beta import _stub
+from grpc.beta import _client_adaptations
+from grpc.beta import _server_adaptations
from grpc.beta import interfaces
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.interfaces.face import face # pylint: disable=unused-import
-_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
- 'Exception calling channel subscription callback!')
+ChannelCredentials = grpc.ChannelCredentials
+ssl_channel_credentials = grpc.ssl_channel_credentials
+CallCredentials = grpc.CallCredentials
+metadata_call_credentials = grpc.metadata_call_credentials
-class ChannelCredentials(object):
- """A value encapsulating the data required to create a secure Channel.
- This class and its instances have no supported interface - it exists to define
- the type of its instances and its instances exist to be passed to other
- functions.
- """
-
- def __init__(self, low_credentials):
- self._low_credentials = low_credentials
-
-
-def ssl_channel_credentials(root_certificates=None, private_key=None,
- certificate_chain=None):
- """Creates a ChannelCredentials for use with an SSL-enabled Channel.
-
- Args:
- root_certificates: The PEM-encoded root certificates or unset to ask for
- them to be retrieved from a default location.
- private_key: The PEM-encoded private key to use or unset if no private key
- should be used.
- certificate_chain: The PEM-encoded certificate chain to use or unset if no
- certificate chain should be used.
-
- Returns:
- A ChannelCredentials for use with an SSL-enabled Channel.
- """
- return ChannelCredentials(_low.channel_credentials_ssl(
- root_certificates, private_key, certificate_chain))
-
-
-class CallCredentials(object):
- """A value encapsulating data asserting an identity over an *established*
- channel. May be composed with ChannelCredentials to always assert identity for
- every call over that channel.
-
- This class and its instances have no supported interface - it exists to define
- the type of its instances and its instances exist to be passed to other
- functions.
- """
-
- def __init__(self, low_credentials):
- self._low_credentials = low_credentials
-
-
-def metadata_call_credentials(metadata_plugin, name=None):
- """Construct CallCredentials from an interfaces.GRPCAuthMetadataPlugin.
-
- Args:
- metadata_plugin: An interfaces.GRPCAuthMetadataPlugin to use in constructing
- the CallCredentials object.
-
- Returns:
- A CallCredentials object for use in a GRPCCallOptions object.
- """
- if name is None:
- name = metadata_plugin.__name__
- return CallCredentials(
- _low.call_credentials_metadata_plugin(metadata_plugin, name))
-
-def composite_call_credentials(call_credentials, additional_call_credentials):
- """Compose two CallCredentials to make a new one.
+def google_call_credentials(credentials):
+ """Construct CallCredentials from GoogleCredentials.
Args:
- call_credentials: A CallCredentials object.
- additional_call_credentials: Another CallCredentials object to compose on
- top of call_credentials.
+ credentials: A GoogleCredentials object from the oauth2client library.
Returns:
A CallCredentials object for use in a GRPCCallOptions object.
"""
- return CallCredentials(
- _low.call_credentials_composite(
- call_credentials._low_credentials,
- additional_call_credentials._low_credentials))
+ return metadata_call_credentials(_auth.GoogleCallCredentials(credentials))
-def composite_channel_credentials(channel_credentials,
- additional_call_credentials):
- """Compose ChannelCredentials on top of client credentials to make a new one.
-
- Args:
- channel_credentials: A ChannelCredentials object.
- additional_call_credentials: A CallCredentials object to compose on
- top of channel_credentials.
-
- Returns:
- A ChannelCredentials object for use in a GRPCCallOptions object.
- """
- return ChannelCredentials(
- _low.channel_credentials_composite(
- channel_credentials._low_credentials,
- additional_call_credentials._low_credentials))
+access_token_call_credentials = grpc.access_token_call_credentials
+composite_call_credentials = grpc.composite_call_credentials
+composite_channel_credentials = grpc.composite_channel_credentials
class Channel(object):
@@ -151,11 +75,8 @@ class Channel(object):
unsupported.
"""
- def __init__(self, low_channel, intermediary_low_channel):
- self._low_channel = low_channel
- self._intermediary_low_channel = intermediary_low_channel
- self._connectivity_channel = _connectivity_channel.ConnectivityChannel(
- low_channel)
+ def __init__(self, channel):
+ self._channel = channel
def subscribe(self, callback, try_to_connect=None):
"""Subscribes to this Channel's connectivity.
@@ -170,7 +91,7 @@ class Channel(object):
attempt to connect if it is not already connected and ready to conduct
RPCs.
"""
- self._connectivity_channel.subscribe(callback, try_to_connect)
+ self._channel.subscribe(callback, try_to_connect=try_to_connect)
def unsubscribe(self, callback):
"""Unsubscribes a callback from this Channel's connectivity.
@@ -179,7 +100,7 @@ class Channel(object):
callback: A callable previously registered with this Channel from having
been passed to its "subscribe" method.
"""
- self._connectivity_channel.unsubscribe(callback)
+ self._channel.unsubscribe(callback)
def insecure_channel(host, port):
@@ -193,9 +114,9 @@ def insecure_channel(host, port):
Returns:
A Channel to the remote host through which RPCs may be conducted.
"""
- intermediary_low_channel = _intermediary_low.Channel(
- '%s:%d' % (host, port) if port else host, None)
- return Channel(intermediary_low_channel._internal, intermediary_low_channel) # pylint: disable=protected-access
+ channel = grpc.insecure_channel(
+ host if port is None else '%s:%d' % (host, port))
+ return Channel(channel)
def secure_channel(host, port, channel_credentials):
@@ -210,10 +131,9 @@ def secure_channel(host, port, channel_credentials):
Returns:
A secure Channel to the remote host through which RPCs may be conducted.
"""
- intermediary_low_channel = _intermediary_low.Channel(
- '%s:%d' % (host, port) if port else host,
- channel_credentials._low_credentials)
- return Channel(intermediary_low_channel._internal, intermediary_low_channel) # pylint: disable=protected-access
+ channel = grpc.secure_channel(
+ host if port is None else '%s:%d' % (host, port), channel_credentials)
+ return Channel(channel)
class StubOptions(object):
@@ -277,12 +197,11 @@ def generic_stub(channel, options=None):
A face.GenericStub on which RPCs can be made.
"""
effective_options = _EMPTY_STUB_OPTIONS if options is None else options
- return _stub.generic_stub(
- channel._intermediary_low_channel, effective_options.host, # pylint: disable=protected-access
- effective_options.metadata_transformer,
+ return _client_adaptations.generic_stub(
+ channel._channel, # pylint: disable=protected-access
+ effective_options.host, effective_options.metadata_transformer,
effective_options.request_serializers,
- effective_options.response_deserializers, effective_options.thread_pool,
- effective_options.thread_pool_size)
+ effective_options.response_deserializers)
def dynamic_stub(channel, service, cardinalities, options=None):
@@ -300,55 +219,16 @@ def dynamic_stub(channel, service, cardinalities, options=None):
A face.DynamicStub with which RPCs can be invoked.
"""
effective_options = StubOptions() if options is None else options
- return _stub.dynamic_stub(
- channel._intermediary_low_channel, effective_options.host, service, # pylint: disable=protected-access
- cardinalities, effective_options.metadata_transformer,
+ return _client_adaptations.dynamic_stub(
+ channel._channel, # pylint: disable=protected-access
+ service, cardinalities, effective_options.host,
+ effective_options.metadata_transformer,
effective_options.request_serializers,
- effective_options.response_deserializers, effective_options.thread_pool,
- effective_options.thread_pool_size)
-
+ effective_options.response_deserializers)
-class ServerCredentials(object):
- """A value encapsulating the data required to open a secure port on a Server.
- This class and its instances have no supported interface - it exists to define
- the type of its instances and its instances exist to be passed to other
- functions.
- """
-
- def __init__(self, low_credentials):
- self._low_credentials = low_credentials
-
-
-def ssl_server_credentials(
- private_key_certificate_chain_pairs, root_certificates=None,
- require_client_auth=False):
- """Creates a ServerCredentials for use with an SSL-enabled Server.
-
- Args:
- private_key_certificate_chain_pairs: A nonempty sequence each element of
- which is a pair the first element of which is a PEM-encoded private key
- and the second element of which is the corresponding PEM-encoded
- certificate chain.
- root_certificates: PEM-encoded client root certificates to be used for
- verifying authenticated clients. If omitted, require_client_auth must also
- be omitted or be False.
- require_client_auth: A boolean indicating whether or not to require clients
- to be authenticated. May only be True if root_certificates is not None.
-
- Returns:
- A ServerCredentials for use with an SSL-enabled Server.
- """
- if len(private_key_certificate_chain_pairs) == 0:
- raise ValueError(
- 'At least one private key-certificate chain pairis required!')
- elif require_client_auth and root_certificates is None:
- raise ValueError(
- 'Illegal to require client auth without providing root certificates!')
- else:
- return ServerCredentials(_low.server_credentials_ssl(
- root_certificates, private_key_certificate_chain_pairs,
- require_client_auth))
+ServerCredentials = grpc.ServerCredentials
+ssl_server_credentials = grpc.ssl_server_credentials
class ServerOptions(object):
@@ -421,9 +301,8 @@ def server(service_implementations, options=None):
An interfaces.Server with which RPCs can be serviced.
"""
effective_options = _EMPTY_SERVER_OPTIONS if options is None else options
- return _server.server(
+ return _server_adaptations.server(
service_implementations, effective_options.multi_method_implementation,
effective_options.request_deserializers,
effective_options.response_serializers, effective_options.thread_pool,
- effective_options.thread_pool_size, effective_options.default_timeout,
- effective_options.maximum_timeout)
+ effective_options.thread_pool_size)
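The net effect of the implementations.py hunk above is that the Beta surface becomes a thin shim over the top-level grpc package: channels wrap a grpc.Channel, stubs delegate to _client_adaptations, and servers to _server_adaptations. A minimal sketch of the resulting call path (the address and callback name are illustrative only):

    from grpc.beta import implementations

    # After this patch this wraps grpc.insecure_channel('localhost:50051')...
    channel = implementations.insecure_channel('localhost', 50051)

    def on_state_change(connectivity):
      pass  # receives grpc.ChannelConnectivity values

    # ...and subscribe/unsubscribe are forwarded to the wrapped grpc.Channel.
    channel.subscribe(on_state_change, try_to_connect=True)
    channel.unsubscribe(on_state_change)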
diff --git a/src/python/grpcio/grpc/beta/interfaces.py b/src/python/grpcio/grpc/beta/interfaces.py
index 24de9ad1a8..90f6bbbfcc 100644
--- a/src/python/grpcio/grpc/beta/interfaces.py
+++ b/src/python/grpcio/grpc/beta/interfaces.py
@@ -30,53 +30,16 @@
"""Constants and interfaces of the Beta API of gRPC Python."""
import abc
-import enum
import six
-from grpc._adapter import _types
+import grpc
+ChannelConnectivity = grpc.ChannelConnectivity
+# FATAL_FAILURE was a Beta-API name for SHUTDOWN
+ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN
-@enum.unique
-class ChannelConnectivity(enum.Enum):
- """Mirrors grpc_connectivity_state in the gRPC Core.
-
- Attributes:
- IDLE: The channel is idle.
- CONNECTING: The channel is connecting.
- READY: The channel is ready to conduct RPCs.
- TRANSIENT_FAILURE: The channel has seen a failure from which it expects to
- recover.
- FATAL_FAILURE: The channel has seen a failure from which it cannot recover.
- """
- IDLE = (_types.ConnectivityState.IDLE, 'idle',)
- CONNECTING = (_types.ConnectivityState.CONNECTING, 'connecting',)
- READY = (_types.ConnectivityState.READY, 'ready',)
- TRANSIENT_FAILURE = (
- _types.ConnectivityState.TRANSIENT_FAILURE, 'transient failure',)
- FATAL_FAILURE = (_types.ConnectivityState.FATAL_FAILURE, 'fatal failure',)
-
-
-@enum.unique
-class StatusCode(enum.Enum):
- """Mirrors grpc_status_code in the C core."""
- OK = 0
- CANCELLED = 1
- UNKNOWN = 2
- INVALID_ARGUMENT = 3
- DEADLINE_EXCEEDED = 4
- NOT_FOUND = 5
- ALREADY_EXISTS = 6
- PERMISSION_DENIED = 7
- RESOURCE_EXHAUSTED = 8
- FAILED_PRECONDITION = 9
- ABORTED = 10
- OUT_OF_RANGE = 11
- UNIMPLEMENTED = 12
- INTERNAL = 13
- UNAVAILABLE = 14
- DATA_LOSS = 15
- UNAUTHENTICATED = 16
+StatusCode = grpc.StatusCode
class GRPCCallOptions(object):
@@ -106,46 +69,9 @@ def grpc_call_options(disable_compression=False, credentials=None):
"""
return GRPCCallOptions(disable_compression, None, credentials)
-
-class GRPCAuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
- """Provides information to call credentials metadata plugins.
-
- Attributes:
- service_url: A string URL of the service being called into.
- method_name: A string of the fully qualified method name being called.
- """
-
-
-class GRPCAuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
- """Callback object received by a metadata plugin."""
-
- def __call__(self, metadata, error):
- """Inform the gRPC runtime of the metadata to construct a CallCredentials.
-
- Args:
- metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value
- pairs.
- error: An Exception to indicate error or None to indicate success.
- """
- raise NotImplementedError()
-
-
-class GRPCAuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
- """
- """
-
- def __call__(self, context, callback):
- """Invoke the plugin.
-
- Must not block. Need only be called by the gRPC runtime.
-
- Args:
- context: A GRPCAuthMetadataContext providing information on what the
- plugin is being used for.
- callback: A GRPCAuthMetadataPluginCallback to be invoked either
- synchronously or asynchronously.
- """
- raise NotImplementedError()
+GRPCAuthMetadataContext = grpc.AuthMetadataContext
+GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback
+GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin
class GRPCServicerContext(six.with_metaclass(abc.ABCMeta)):
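With the Beta enum and auth-plugin interfaces above replaced by aliases, code written against interfaces.ChannelConnectivity or interfaces.StatusCode now receives the corresponding grpc objects, and FATAL_FAILURE resolves to SHUTDOWN. A hedged sketch of what the aliasing implies for callers (these checks should hold after this patch, but they are illustrative, not part of it):

    import grpc
    from grpc.beta import interfaces

    assert interfaces.ChannelConnectivity is grpc.ChannelConnectivity
    assert (interfaces.ChannelConnectivity.FATAL_FAILURE
            is grpc.ChannelConnectivity.SHUTDOWN)
    assert interfaces.StatusCode is grpc.StatusCode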
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 33636db29e..839c555f05 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -42,38 +42,38 @@ CORE_SOURCE_FILES = [
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
- 'src/core/lib/support/env_win32.c',
+ 'src/core/lib/support/env_windows.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
- 'src/core/lib/support/log_win32.c',
+ 'src/core/lib/support/log_windows.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/slice.c',
'src/core/lib/support/slice_buffer.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
- 'src/core/lib/support/string_util_win32.c',
- 'src/core/lib/support/string_win32.c',
+ 'src/core/lib/support/string_util_windows.c',
+ 'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
- 'src/core/lib/support/sync_win32.c',
+ 'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
- 'src/core/lib/support/thd_win32.c',
+ 'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
- 'src/core/lib/support/time_win32.c',
+ 'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
- 'src/core/lib/support/tmpfile_win32.c',
+ 'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
@@ -83,7 +83,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/http_client_filter.c',
'src/core/lib/channel/http_server_filter.c',
- 'src/core/lib/compression/compression_algorithm.c',
+ 'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
@@ -94,6 +94,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
+ 'src/core/lib/iomgr/ev_poll_and_epoll_posix.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
@@ -103,6 +104,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/load_file.c',
+ 'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
@@ -160,6 +162,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
+ 'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
@@ -189,7 +192,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/security/credentials/credentials_metadata.c',
'src/core/lib/security/credentials/fake/fake_credentials.c',
'src/core/lib/security/credentials/google_default/credentials_posix.c',
- 'src/core/lib/security/credentials/google_default/credentials_win32.c',
+ 'src/core/lib/security/credentials/google_default/credentials_windows.c',
'src/core/lib/security/credentials/google_default/google_default_credentials.c',
'src/core/lib/security/credentials/iam/iam_credentials.c',
'src/core/lib/security/credentials/jwt/json_token.c',
@@ -231,10 +234,9 @@ CORE_SOURCE_FILES = [
'src/core/ext/client_config/subchannel_index.c',
'src/core/ext/client_config/uri_parser.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
+ 'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
- 'src/core/ext/transport/cronet/client/secure/cronet_channel_create.c',
- 'src/core/ext/transport/cronet/transport/cronet_api_dummy.c',
- 'src/core/ext/transport/cronet/transport/cronet_transport.c',
+ 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
@@ -244,7 +246,10 @@ CORE_SOURCE_FILES = [
'src/core/ext/lb_policy/round_robin/round_robin.c',
'src/core/ext/resolver/dns/native/dns_resolver.c',
'src/core/ext/resolver/sockaddr/sockaddr_resolver.c',
+ 'src/core/ext/load_reporting/load_reporting.c',
+ 'src/core/ext/load_reporting/load_reporting_filter.c',
'src/core/ext/census/context.c',
+ 'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',
diff --git a/src/python/grpcio/tests/interop/client.py b/src/python/grpcio/tests/interop/client.py
index db29eb4aa7..8aa1ce30c1 100644
--- a/src/python/grpcio/tests/interop/client.py
+++ b/src/python/grpcio/tests/interop/client.py
@@ -65,39 +65,37 @@ def _args():
help='email address of the default service account', type=str)
return parser.parse_args()
-def _oauth_access_token(args):
- credentials = oauth2client_client.GoogleCredentials.get_application_default()
- scoped_credentials = credentials.create_scoped([args.oauth_scope])
- return scoped_credentials.get_access_token().access_token
def _stub(args):
- if args.oauth_scope:
- if args.test_case == 'oauth2_auth_token':
- # TODO(jtattermusch): This testcase sets the auth metadata key-value
- # manually, which also means that the user would need to do the same
- # thing every time he/she would like to use and out of band oauth token.
- # The transformer function that produces the metadata key-value from
- # the access token should be provided by gRPC auth library.
- access_token = _oauth_access_token(args)
- metadata_transformer = lambda x: [
- ('authorization', 'Bearer %s' % access_token)]
- else:
- metadata_transformer = lambda x: [
- ('authorization', 'Bearer %s' % _oauth_access_token(args))]
+ if args.test_case == 'oauth2_auth_token':
+ creds = oauth2client_client.GoogleCredentials.get_application_default()
+ scoped_creds = creds.create_scoped([args.oauth_scope])
+ access_token = scoped_creds.get_access_token().access_token
+ call_creds = implementations.access_token_call_credentials(access_token)
+ elif args.test_case == 'compute_engine_creds':
+ creds = oauth2client_client.GoogleCredentials.get_application_default()
+ scoped_creds = creds.create_scoped([args.oauth_scope])
+ call_creds = implementations.google_call_credentials(scoped_creds)
+ elif args.test_case == 'jwt_token_creds':
+ creds = oauth2client_client.GoogleCredentials.get_application_default()
+ call_creds = implementations.google_call_credentials(creds)
else:
- metadata_transformer = lambda x: []
+ call_creds = None
if args.use_tls:
if args.use_test_ca:
root_certificates = resources.test_root_certificates()
else:
root_certificates = None # will load default roots.
+ channel_creds = implementations.ssl_channel_credentials(root_certificates)
+ if call_creds is not None:
+ channel_creds = implementations.composite_channel_credentials(
+ channel_creds, call_creds)
+
channel = test_utilities.not_really_secure_channel(
- args.server_host, args.server_port,
- implementations.ssl_channel_credentials(root_certificates),
+ args.server_host, args.server_port, channel_creds,
args.server_host_override)
- stub = test_pb2.beta_create_TestService_stub(
- channel, metadata_transformer=metadata_transformer)
+ stub = test_pb2.beta_create_TestService_stub(channel)
else:
channel = implementations.insecure_channel(
args.server_host, args.server_port)
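The interop client hunk above replaces the hand-rolled metadata transformer with credentials objects: channel credentials composed with call credentials. A short sketch of that composition, mirroring the calls used above (host, port, and token are placeholders):

    from grpc.beta import implementations

    channel_creds = implementations.ssl_channel_credentials(None)  # default roots
    call_creds = implementations.access_token_call_credentials('example-access-token')
    composite_creds = implementations.composite_channel_credentials(
        channel_creds, call_creds)
    channel = implementations.secure_channel('localhost', 50051, composite_creds)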
diff --git a/src/python/grpcio/tests/interop/methods.py b/src/python/grpcio/tests/interop/methods.py
index 67862ed7d3..7eac511525 100644
--- a/src/python/grpcio/tests/interop/methods.py
+++ b/src/python/grpcio/tests/interop/methods.py
@@ -39,6 +39,8 @@ import time
from oauth2client import client as oauth2client_client
+from grpc.beta import implementations
+from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import face
@@ -88,13 +90,15 @@ class TestService(test_pb2.BetaTestServiceServicer):
return self.FullDuplexCall(request_iterator, context)
-def _large_unary_common_behavior(stub, fill_username, fill_oauth_scope):
+def _large_unary_common_behavior(stub, fill_username, fill_oauth_scope,
+ protocol_options=None):
with stub:
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE, response_size=314159,
payload=messages_pb2.Payload(body=b'\x00' * 271828),
fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
- response_future = stub.UnaryCall.future(request, _TIMEOUT)
+ response_future = stub.UnaryCall.future(request, _TIMEOUT,
+ protocol_options=protocol_options)
response = response_future.result()
if response.payload.type is not messages_pb2.COMPRESSABLE:
raise ValueError(
@@ -303,7 +307,34 @@ def _oauth2_auth_token(stub, args):
if args.oauth_scope.find(response.oauth_scope) == -1:
raise ValueError(
'expected to find oauth scope "%s" in received "%s"' %
- (response.oauth_scope, args.oauth_scope))
+ (response.oauth_scope, args.oauth_scope))
+
+
+def _jwt_token_creds(stub, args):
+ json_key_filename = os.environ[
+ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
+ wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ response = _large_unary_common_behavior(stub, True, False)
+ if wanted_email != response.username:
+ raise ValueError(
+ 'expected username %s, got %s' % (wanted_email, response.username))
+
+
+def _per_rpc_creds(stub, args):
+ json_key_filename = os.environ[
+ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
+ wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
+ credentials = oauth2client_client.GoogleCredentials.get_application_default()
+ scoped_credentials = credentials.create_scoped([args.oauth_scope])
+ call_creds = implementations.google_call_credentials(scoped_credentials)
+ options = interfaces.grpc_call_options(disable_compression=False,
+ credentials=call_creds)
+ response = _large_unary_common_behavior(stub, True, False,
+ protocol_options=options)
+ if wanted_email != response.username:
+ raise ValueError(
+ 'expected username %s, got %s' % (wanted_email, response.username))
+
@enum.unique
class TestCase(enum.Enum):
@@ -317,6 +348,8 @@ class TestCase(enum.Enum):
EMPTY_STREAM = 'empty_stream'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
+ JWT_TOKEN_CREDS = 'jwt_token_creds'
+ PER_RPC_CREDS = 'per_rpc_creds'
TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server'
def test_interoperability(self, stub, args):
@@ -342,5 +375,9 @@ class TestCase(enum.Enum):
_compute_engine_creds(stub, args)
elif self is TestCase.OAUTH2_AUTH_TOKEN:
_oauth2_auth_token(stub, args)
+ elif self is TestCase.JWT_TOKEN_CREDS:
+ _jwt_token_creds(stub, args)
+ elif self is TestCase.PER_RPC_CREDS:
+ _per_rpc_creds(stub, args)
else:
raise NotImplementedError('Test case "%s" not implemented!' % self.name)
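The new _jwt_token_creds and _per_rpc_creds cases exercise call credentials attached per RPC rather than per channel. A hedged sketch of the per-RPC path _per_rpc_creds takes, assuming the generated interop modules and application-default credentials are available (the address, scope, and timeout are placeholders):

    from oauth2client import client as oauth2client_client

    from grpc.beta import implementations
    from grpc.beta import interfaces
    from src.proto.grpc.testing import messages_pb2
    from src.proto.grpc.testing import test_pb2

    creds = oauth2client_client.GoogleCredentials.get_application_default()
    call_creds = implementations.google_call_credentials(
        creds.create_scoped(['example-oauth-scope']))
    options = interfaces.grpc_call_options(credentials=call_creds)

    channel = implementations.secure_channel(
        'localhost', 50051, implementations.ssl_channel_credentials(None))
    stub = test_pb2.beta_create_TestService_stub(channel)
    request = messages_pb2.SimpleRequest(fill_username=True, fill_oauth_scope=True)
    response = stub.UnaryCall.future(request, 10, protocol_options=options).result()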
diff --git a/src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py
new file mode 100644
index 0000000000..1c9cbb0d0c
--- /dev/null
+++ b/src/python/grpcio/tests/protoc_plugin/_python_plugin_test.py
@@ -0,0 +1,583 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+from concurrent import futures
+import contextlib
+import distutils.spawn
+import errno
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import threading
+import unittest
+
+from six import moves
+
+import grpc
+from tests.unit.framework.common import test_constants
+
+# Identifiers of entities we expect to find in the generated module.
+STUB_IDENTIFIER = 'TestServiceStub'
+SERVICER_IDENTIFIER = 'TestServiceServicer'
+ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
+
+
+class _ServicerMethods(object):
+
+ def __init__(self, response_pb2, payload_pb2):
+ self._condition = threading.Condition()
+ self._paused = False
+ self._fail = False
+ self._response_pb2 = response_pb2
+ self._payload_pb2 = payload_pb2
+
+ @contextlib.contextmanager
+ def pause(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._paused = True
+ yield
+ with self._condition:
+ self._paused = False
+ self._condition.notify_all()
+
+ @contextlib.contextmanager
+ def fail(self): # pylint: disable=invalid-name
+ with self._condition:
+ self._fail = True
+ yield
+ with self._condition:
+ self._fail = False
+
+ def _control(self): # pylint: disable=invalid-name
+ with self._condition:
+ if self._fail:
+ raise ValueError()
+ while self._paused:
+ self._condition.wait()
+
+ def UnaryCall(self, request, unused_rpc_context):
+ response = self._response_pb2.SimpleResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * request.response_size
+ self._control()
+ return response
+
+ def StreamingOutputCall(self, request, unused_rpc_context):
+ for parameter in request.response_parameters:
+ response = self._response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def StreamingInputCall(self, request_iter, unused_rpc_context):
+ response = self._response_pb2.StreamingInputCallResponse()
+ aggregated_payload_size = 0
+ for request in request_iter:
+ aggregated_payload_size += len(request.payload.payload_compressable)
+ response.aggregated_payload_size = aggregated_payload_size
+ self._control()
+ return response
+
+ def FullDuplexCall(self, request_iter, unused_rpc_context):
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = self._response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ yield response
+
+ def HalfDuplexCall(self, request_iter, unused_rpc_context):
+ responses = []
+ for request in request_iter:
+ for parameter in request.response_parameters:
+ response = self._response_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
+ response.payload.payload_compressable = 'a' * parameter.size
+ self._control()
+ responses.append(response)
+ for response in responses:
+ yield response
+
+
+class _Service(
+ collections.namedtuple(
+ '_Service', ('servicer_methods', 'server', 'stub',))):
+ """A live and running service.
+
+ Attributes:
+ servicer_methods: The _ServicerMethods servicing RPCs.
+ server: The grpc.Server servicing RPCs.
+ stub: A stub on which to invoke RPCs.
+ """
+
+
+def _CreateService(service_pb2, response_pb2, payload_pb2):
+ """Provides a servicer backend and a stub.
+
+ Args:
+ service_pb2: The service_pb2 module generated by this test.
+ response_pb2: The response_pb2 module generated by this test.
+ payload_pb2: The payload_pb2 module generated by this test.
+
+ Returns:
+ A _Service with which to test RPCs.
+ """
+ servicer_methods = _ServicerMethods(response_pb2, payload_pb2)
+
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+
+ def UnaryCall(self, request, context):
+ return servicer_methods.UnaryCall(request, context)
+
+ def StreamingOutputCall(self, request, context):
+ return servicer_methods.StreamingOutputCall(request, context)
+
+ def StreamingInputCall(self, request_iter, context):
+ return servicer_methods.StreamingInputCall(request_iter, context)
+
+ def FullDuplexCall(self, request_iter, context):
+ return servicer_methods.FullDuplexCall(request_iter, context)
+
+ def HalfDuplexCall(self, request_iter, context):
+ return servicer_methods.HalfDuplexCall(request_iter, context)
+
+ server = grpc.server(
+ (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ return _Service(servicer_methods, server, stub)
+
+
+def _CreateIncompleteService(service_pb2):
+ """Provides a servicer backend that fails to implement methods and its stub.
+
+ Args:
+ service_pb2: The service_pb2 module generated by this test.
+
+ Returns:
+ A _Service with which to test RPCs. The returned _Service's
+ servicer_methods implements none of the methods required of it.
+ """
+
+ class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ pass
+
+ server = grpc.server(
+ (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+ getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ return _Service(None, server, stub)
+
+
+def _streaming_input_request_iterator(request_pb2, payload_pb2):
+ for _ in range(3):
+ request = request_pb2.StreamingInputCallRequest()
+ request.payload.payload_type = payload_pb2.COMPRESSABLE
+ request.payload.payload_compressable = 'a'
+ yield request
+
+
+def _streaming_output_request(request_pb2):
+ request = request_pb2.StreamingOutputCallRequest()
+ sizes = [1, 2, 3]
+ request.response_parameters.add(size=sizes[0], interval_us=0)
+ request.response_parameters.add(size=sizes[1], interval_us=0)
+ request.response_parameters.add(size=sizes[2], interval_us=0)
+ return request
+
+
+def _full_duplex_request_iterator(request_pb2):
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
+
+
+class PythonPluginTest(unittest.TestCase):
+ """Test case for the gRPC Python protoc-plugin.
+
+ While reading these tests, remember that the futures API
+ (`stub.method.future()`) only gives futures for the *response-unary*
+ methods and does not exist for response-streaming methods.
+ """
+
+ def setUp(self):
+ # Assume that the appropriate protoc and grpc_python_plugins are on the
+ # path.
+ protoc_command = 'protoc'
+ protoc_plugin_filename = distutils.spawn.find_executable(
+ 'grpc_python_plugin')
+ if not os.path.isfile(protoc_command):
+ # Assume that if we haven't built protoc that it's on the system.
+ protoc_command = 'protoc'
+
+ # Ensure that the output directory exists.
+ self.outdir = tempfile.mkdtemp()
+
+ # Find all proto files
+ paths = []
+ root_dir = os.path.dirname(os.path.realpath(__file__))
+ proto_dir = os.path.join(root_dir, 'protos')
+ for walk_root, _, filenames in os.walk(proto_dir):
+ for filename in filenames:
+ if filename.endswith('.proto'):
+ path = os.path.join(walk_root, filename)
+ paths.append(path)
+
+ # Invoke protoc with the plugin.
+ cmd = [
+ protoc_command,
+ '--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
+ '-I %s' % root_dir,
+ '--python_out=%s' % self.outdir,
+ '--python-grpc_out=%s' % self.outdir
+ ] + paths
+ subprocess.check_call(' '.join(cmd), shell=True, env=os.environ,
+ cwd=os.path.dirname(os.path.realpath(__file__)))
+
+    # Generated proto directories don't include __init__.py, but
+    # these are needed for Python package resolution.
+    # Generated proto directories don't include __init__.py, but
+    # these are needed for Python package resolution.
+ for walk_root, _, _ in os.walk(os.path.join(self.outdir, 'protos')):
+ path = os.path.join(walk_root, '__init__.py')
+ open(path, 'a').close()
+
+ sys.path.insert(0, self.outdir)
+
+ import protos.payload.test_payload_pb2 as payload_pb2
+ import protos.requests.r.test_requests_pb2 as request_pb2
+ import protos.responses.test_responses_pb2 as response_pb2
+ import protos.service.test_service_pb2 as service_pb2
+ self._payload_pb2 = payload_pb2
+ self._request_pb2 = request_pb2
+ self._response_pb2 = response_pb2
+ self._service_pb2 = service_pb2
+
+ def tearDown(self):
+ try:
+ shutil.rmtree(self.outdir)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ sys.path.remove(self.outdir)
+
+ def testImportAttributes(self):
+ # check that we can access the generated module and its members.
+ self.assertIsNotNone(
+ getattr(self._service_pb2, STUB_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(self._service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
+
+ def testUpDown(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ self.assertIsNotNone(service.servicer_methods)
+ self.assertIsNotNone(service.server)
+ self.assertIsNotNone(service.stub)
+
+ def testIncompleteServicer(self):
+ service = _CreateIncompleteService(self._service_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ service.stub.UnaryCall(request)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.UNIMPLEMENTED)
+
+ def testUnaryCall(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ response = service.stub.UnaryCall(request)
+ expected_response = service.servicer_methods.UnaryCall(
+ request, 'not a real context!')
+ self.assertEqual(expected_response, response)
+
+ def testUnaryCallFuture(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ # Check that the call does not block waiting for the server to respond.
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(request)
+ response = response_future.result()
+ expected_response = service.servicer_methods.UnaryCall(
+ request, 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testUnaryCallFutureExpired(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(
+ request, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+ self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testUnaryCallFutureCancelled(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.pause():
+ response_future = service.stub.UnaryCall.future(request)
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+ self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
+
+ def testUnaryCallFutureFailed(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = self._request_pb2.SimpleRequest(response_size=13)
+ with service.servicer_methods.fail():
+ response_future = service.stub.UnaryCall.future(request)
+ self.assertIsNotNone(response_future.exception())
+ self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
+
+ def testStreamingOutputCall(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = _streaming_output_request(self._request_pb2)
+ responses = service.stub.StreamingOutputCall(request)
+ expected_responses = service.servicer_methods.StreamingOutputCall(
+ request, 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(
+ expected_responses, responses):
+ self.assertEqual(expected_response, response)
+
+ def testStreamingOutputCallExpired(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = _streaming_output_request(self._request_pb2)
+ with service.servicer_methods.pause():
+ responses = service.stub.StreamingOutputCall(
+ request, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ list(responses)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testStreamingOutputCallCancelled(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = _streaming_output_request(self._request_pb2)
+ responses = service.stub.StreamingOutputCall(request)
+ next(responses)
+ responses.cancel()
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
+
+ def testStreamingOutputCallFailed(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request = _streaming_output_request(self._request_pb2)
+ with service.servicer_methods.fail():
+ responses = service.stub.StreamingOutputCall(request)
+ self.assertIsNotNone(responses)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
+
+ def testStreamingInputCall(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ response = service.stub.StreamingInputCall(
+ _streaming_input_request_iterator(
+ self._request_pb2, self._payload_pb2))
+ expected_response = service.servicer_methods.StreamingInputCall(
+ _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+ 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testStreamingInputCallFuture(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(
+ self._request_pb2, self._payload_pb2))
+ response = response_future.result()
+ expected_response = service.servicer_methods.StreamingInputCall(
+ _streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
+ 'not a real RpcContext!')
+ self.assertEqual(expected_response, response)
+
+ def testStreamingInputCallFutureExpired(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(
+ self._request_pb2, self._payload_pb2),
+ timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(
+ response_future.exception().code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testStreamingInputCallFutureCancelled(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.pause():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(
+ self._request_pb2, self._payload_pb2))
+ response_future.cancel()
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+
+ def testStreamingInputCallFutureFailed(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.fail():
+ response_future = service.stub.StreamingInputCall.future(
+ _streaming_input_request_iterator(
+ self._request_pb2, self._payload_pb2))
+ self.assertIsNotNone(response_future.exception())
+ self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
+
+ def testFullDuplexCall(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ responses = service.stub.FullDuplexCall(
+ _full_duplex_request_iterator(self._request_pb2))
+ expected_responses = service.servicer_methods.FullDuplexCall(
+ _full_duplex_request_iterator(self._request_pb2),
+ 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(
+ expected_responses, responses):
+ self.assertEqual(expected_response, response)
+
+ def testFullDuplexCallExpired(self):
+ request_iterator = _full_duplex_request_iterator(self._request_pb2)
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.pause():
+ responses = service.stub.FullDuplexCall(
+ request_iterator, timeout=test_constants.SHORT_TIMEOUT)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ list(responses)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+ def testFullDuplexCallCancelled(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ request_iterator = _full_duplex_request_iterator(self._request_pb2)
+ responses = service.stub.FullDuplexCall(request_iterator)
+ next(responses)
+ responses.cancel()
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.CANCELLED)
+
+ def testFullDuplexCallFailed(self):
+ request_iterator = _full_duplex_request_iterator(self._request_pb2)
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with service.servicer_methods.fail():
+ responses = service.stub.FullDuplexCall(request_iterator)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
+
+ def testHalfDuplexCall(self):
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ def half_duplex_request_iterator():
+ request = self._request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ request = self._request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=2, interval_us=0)
+ request.response_parameters.add(size=3, interval_us=0)
+ yield request
+ responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
+ expected_responses = service.servicer_methods.HalfDuplexCall(
+ half_duplex_request_iterator(), 'not a real RpcContext!')
+ for expected_response, response in moves.zip_longest(
+ expected_responses, responses):
+ self.assertEqual(expected_response, response)
+
+ def testHalfDuplexCallWedged(self):
+ condition = threading.Condition()
+ wait_cell = [False]
+ @contextlib.contextmanager
+ def wait(): # pylint: disable=invalid-name
+ # Where's Python 3's 'nonlocal' statement when you need it?
+ with condition:
+ wait_cell[0] = True
+ yield
+ with condition:
+ wait_cell[0] = False
+ condition.notify_all()
+ def half_duplex_request_iterator():
+ request = self._request_pb2.StreamingOutputCallRequest()
+ request.response_parameters.add(size=1, interval_us=0)
+ yield request
+ with condition:
+ while wait_cell[0]:
+ condition.wait()
+ service = _CreateService(
+ self._service_pb2, self._response_pb2, self._payload_pb2)
+ with wait():
+ responses = service.stub.HalfDuplexCall(
+ half_duplex_request_iterator(), timeout=test_constants.SHORT_TIMEOUT)
+ # half-duplex waits for the client to send all info
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(responses)
+ self.assertIs(
+ exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
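_CreateService above shows the bring-up pattern the new plugin output expects: construct a grpc.server, register the servicer through the generated add_*_to_server helper, then build the generated stub over an insecure channel. A condensed sketch of the same pattern, assuming service_pb2 is the module emitted by grpc_python_plugin for TestService (servicer body omitted):

    from concurrent import futures

    import grpc

    class Servicer(service_pb2.TestServiceServicer):
      pass  # implement UnaryCall, StreamingOutputCall, ... here

    server = grpc.server((), futures.ThreadPoolExecutor(max_workers=8))
    service_pb2.add_TestServiceServicer_to_server(Servicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()

    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = service_pb2.TestServiceStub(channel)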
diff --git a/src/python/grpcio/tests/qps/benchmark_client.py b/src/python/grpcio/tests/qps/benchmark_client.py
index b372ea01ad..080281415d 100644
--- a/src/python/grpcio/tests/qps/benchmark_client.py
+++ b/src/python/grpcio/tests/qps/benchmark_client.py
@@ -30,14 +30,13 @@
"""Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC)."""
import abc
+import threading
import time
-try:
- import Queue as queue # Python 2.x
-except ImportError:
- import queue # Python 3
from concurrent import futures
+from six.moves import queue
+import grpc
from grpc.beta import implementations
from grpc.framework.interfaces.face import face
from src.proto.grpc.testing import messages_pb2
@@ -65,6 +64,13 @@ class BenchmarkClient:
else:
channel = implementations.insecure_channel(host, port)
+ connected_event = threading.Event()
+ def wait_for_ready(connectivity):
+ if connectivity == grpc.ChannelConnectivity.READY:
+ connected_event.set()
+ channel.subscribe(wait_for_ready, try_to_connect=True)
+ connected_event.wait()
+
if config.payload_config.WhichOneof('payload') == 'simple_params':
self._generic = False
self._stub = services_pb2.beta_create_BenchmarkService_stub(channel)
@@ -82,6 +88,7 @@ class BenchmarkClient:
self._response_callbacks = []
def add_response_callback(self, callback):
+ """callback will be invoked as callback(client, query_time)"""
self._response_callbacks.append(callback)
@abc.abstractmethod
@@ -95,10 +102,10 @@ class BenchmarkClient:
def stop(self):
pass
- def _handle_response(self, query_time):
+ def _handle_response(self, client, query_time):
self._hist.add(query_time * 1e9) # Report times in nanoseconds
for callback in self._response_callbacks:
- callback(query_time)
+ callback(client, query_time)
class UnarySyncBenchmarkClient(BenchmarkClient):
@@ -121,7 +128,7 @@ class UnarySyncBenchmarkClient(BenchmarkClient):
start_time = time.time()
self._stub.UnaryCall(self._request, _TIMEOUT)
end_time = time.time()
- self._handle_response(end_time - start_time)
+ self._handle_response(self, end_time - start_time)
class UnaryAsyncBenchmarkClient(BenchmarkClient):
@@ -136,19 +143,20 @@ class UnaryAsyncBenchmarkClient(BenchmarkClient):
def _response_received(self, start_time, resp):
resp.result()
end_time = time.time()
- self._handle_response(end_time - start_time)
+ self._handle_response(self, end_time - start_time)
def stop(self):
self._stub = None
-class StreamingSyncBenchmarkClient(BenchmarkClient):
+class _SyncStream(object):
- def __init__(self, server, config, hist):
- super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
+ def __init__(self, stub, generic, request, handle_response):
+ self._stub = stub
+ self._generic = generic
+ self._request = request
+ self._handle_response = handle_response
self._is_streaming = False
- self._pool = futures.ThreadPoolExecutor(max_workers=1)
- # Use a thread-safe queue to put requests on the stream
self._request_queue = queue.Queue()
self._send_time_queue = queue.Queue()
@@ -158,15 +166,6 @@ class StreamingSyncBenchmarkClient(BenchmarkClient):
def start(self):
self._is_streaming = True
- self._pool.submit(self._request_stream)
-
- def stop(self):
- self._is_streaming = False
- self._pool.shutdown(wait=True)
- self._stub = None
-
- def _request_stream(self):
- self._is_streaming = True
if self._generic:
stream_callable = self._stub.stream_stream(
'grpc.testing.BenchmarkService', 'StreamingCall')
@@ -175,8 +174,11 @@ class StreamingSyncBenchmarkClient(BenchmarkClient):
response_stream = stream_callable(self._request_generator(), _TIMEOUT)
for _ in response_stream:
- end_time = time.time()
- self._handle_response(end_time - self._send_time_queue.get_nowait())
+ self._handle_response(
+ self, time.time() - self._send_time_queue.get_nowait())
+
+ def stop(self):
+ self._is_streaming = False
def _request_generator(self):
while self._is_streaming:
@@ -187,46 +189,28 @@ class StreamingSyncBenchmarkClient(BenchmarkClient):
pass
-class AsyncReceiver(face.ResponseReceiver):
- """Receiver for async stream responses."""
-
- def __init__(self, send_time_queue, response_handler):
- self._send_time_queue = send_time_queue
- self._response_handler = response_handler
-
- def initial_metadata(self, initial_mdetadata):
- pass
-
- def response(self, response):
- end_time = time.time()
- self._response_handler(end_time - self._send_time_queue.get_nowait())
-
- def complete(self, terminal_metadata, code, details):
- pass
-
-
-class StreamingAsyncBenchmarkClient(BenchmarkClient):
+class StreamingSyncBenchmarkClient(BenchmarkClient):
def __init__(self, server, config, hist):
- super(StreamingAsyncBenchmarkClient, self).__init__(server, config, hist)
- self._send_time_queue = queue.Queue()
- self._receiver = AsyncReceiver(self._send_time_queue, self._handle_response)
- self._rendezvous = None
+ super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
+ self._pool = futures.ThreadPoolExecutor(
+ max_workers=config.outstanding_rpcs_per_channel)
+ self._streams = [_SyncStream(self._stub, self._generic,
+ self._request, self._handle_response)
+ for _ in xrange(config.outstanding_rpcs_per_channel)]
+ self._curr_stream = 0
def send_request(self):
- if self._rendezvous is not None:
- self._send_time_queue.put(time.time())
- self._rendezvous.consume(self._request)
+ # Use a round_robin scheduler to determine what stream to send on
+ self._streams[self._curr_stream].send_request()
+ self._curr_stream = (self._curr_stream + 1) % len(self._streams)
def start(self):
- if self._generic:
- stream_callable = self._stub.stream_stream(
- 'grpc.testing.BenchmarkService', 'StreamingCall')
- else:
- stream_callable = self._stub.StreamingCall
- self._rendezvous = stream_callable.event(
- self._receiver, lambda *args: None, _TIMEOUT)
+ for stream in self._streams:
+ self._pool.submit(stream.start)
def stop(self):
- self._rendezvous.terminate()
- self._rendezvous = None
+ for stream in self._streams:
+ stream.stop()
+ self._pool.shutdown(wait=True)
+ self._stub = None
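The benchmark client above now blocks until the channel is actually connected before it starts timing RPCs. The subscription pattern it uses, in isolation (the address is a placeholder; the wait blocks until a server is listening there):

    import threading

    import grpc
    from grpc.beta import implementations

    channel = implementations.insecure_channel('localhost', 50051)
    connected_event = threading.Event()

    def wait_for_ready(connectivity):
      if connectivity == grpc.ChannelConnectivity.READY:
        connected_event.set()

    channel.subscribe(wait_for_ready, try_to_connect=True)
    connected_event.wait()
    channel.unsubscribe(wait_for_ready)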
diff --git a/src/python/grpcio/tests/qps/client_runner.py b/src/python/grpcio/tests/qps/client_runner.py
index 1ede7d2af1..1fd58687ad 100644
--- a/src/python/grpcio/tests/qps/client_runner.py
+++ b/src/python/grpcio/tests/qps/client_runner.py
@@ -34,7 +34,7 @@ ClientRunner invokes either periodically or in response to some event.
"""
import abc
-import thread
+import threading
import time
@@ -61,15 +61,18 @@ class OpenLoopClientRunner(ClientRunner):
super(OpenLoopClientRunner, self).__init__(client)
self._is_running = False
self._interval_generator = interval_generator
+ self._dispatch_thread = threading.Thread(
+ target=self._dispatch_requests, args=())
def start(self):
self._is_running = True
self._client.start()
- thread.start_new_thread(self._dispatch_requests, ())
-
+ self._dispatch_thread.start()
+
def stop(self):
self._is_running = False
self._client.stop()
+ self._dispatch_thread.join()
self._client = None
def _dispatch_requests(self):
@@ -98,7 +101,6 @@ class ClosedLoopClientRunner(ClientRunner):
self._client.stop()
self._client = None
- def _send_request(self, response_time):
+ def _send_request(self, client, response_time):
if self._is_running:
- self._client.send_request()
-
+ client.send_request()
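client_runner.py drops the Python-2-only thread module in favor of a joinable threading.Thread, which lets stop() wait for the dispatcher to finish. The shape of that change, reduced to its essentials (the target function is a stand-in for OpenLoopClientRunner._dispatch_requests):

    import threading
    import time

    def dispatch_requests():
      time.sleep(0.1)  # stand-in for the real request-dispatch loop

    dispatch_thread = threading.Thread(target=dispatch_requests, args=())
    dispatch_thread.start()   # previously thread.start_new_thread(...)
    dispatch_thread.join()    # stop() can now wait for a clean shutdown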
diff --git a/src/python/grpcio/tests/qps/qps_worker.py b/src/python/grpcio/tests/qps/qps_worker.py
index 3dda718638..16926379a5 100644
--- a/src/python/grpcio/tests/qps/qps_worker.py
+++ b/src/python/grpcio/tests/qps/qps_worker.py
@@ -43,9 +43,7 @@ def run_worker_server(port):
server.add_insecure_port('[::]:{}'.format(port))
server.start()
servicer.wait_for_quit()
- # Drain outstanding requests for clean exit
- time.sleep(2)
- server.stop(0)
+ server.stop(2)
if __name__ == '__main__':
diff --git a/src/python/grpcio/tests/qps/worker_server.py b/src/python/grpcio/tests/qps/worker_server.py
index 1f9af5482c..d41f8377c2 100644
--- a/src/python/grpcio/tests/qps/worker_server.py
+++ b/src/python/grpcio/tests/qps/worker_server.py
@@ -153,9 +153,8 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer):
if config.rpc_type == control_pb2.UNARY:
client = benchmark_client.UnaryAsyncBenchmarkClient(
server, config, qps_data)
- elif config.rpc_type == control_pb2.STREAMING:
- client = benchmark_client.StreamingAsyncBenchmarkClient(
- server, config, qps_data)
+ else:
+ raise Exception('Async streaming client not supported')
else:
raise Exception('Unsupported client type {}'.format(config.client_type))
diff --git a/src/python/grpcio/tests/stress/client.py b/src/python/grpcio/tests/stress/client.py
index e2e016760c..0de2532cd8 100644
--- a/src/python/grpcio/tests/stress/client.py
+++ b/src/python/grpcio/tests/stress/client.py
@@ -30,10 +30,10 @@
"""Entry point for running stress tests."""
import argparse
-import Queue
import threading
from grpc.beta import implementations
+from six.moves import queue
from src.proto.grpc.testing import metrics_pb2
from src.proto.grpc.testing import test_pb2
@@ -94,7 +94,7 @@ def run_test(args):
test_cases = _parse_weighted_test_cases(args.test_cases)
test_servers = args.server_addresses.split(',')
# Propagate any client exceptions with a queue
- exception_queue = Queue.Queue()
+ exception_queue = queue.Queue()
stop_event = threading.Event()
hist = histogram.Histogram(1, 1)
runners = []
@@ -121,7 +121,7 @@ def run_test(args):
if timeout_secs < 0:
timeout_secs = None
raise exception_queue.get(block=True, timeout=timeout_secs)
- except Queue.Empty:
+ except queue.Empty:
# No exceptions thrown, success
pass
finally:
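The stress client switches from the Python-2-only Queue module to six.moves.queue so the same code runs under Python 2 and 3. The portable idiom it now relies on, in isolation:

    from six.moves import queue

    exception_queue = queue.Queue()
    try:
      # raises queue.Empty if no worker thread reported an exception in time
      exception_queue.get(block=True, timeout=0.1)
    except queue.Empty:
      pass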
diff --git a/src/python/grpcio/tests/tests.json b/src/python/grpcio/tests/tests.json
index 691062f25a..20d11b20b5 100644
--- a/src/python/grpcio/tests/tests.json
+++ b/src/python/grpcio/tests/tests.json
@@ -1,12 +1,20 @@
[
+ "_api_test.AllTest",
+ "_api_test.ChannelConnectivityTest",
+ "_auth_test.AccessTokenCallCredentialsTest",
+ "_auth_test.GoogleCallCredentialsTest",
"_base_interface_test.AsyncEasyTest",
"_base_interface_test.AsyncPeasyTest",
"_base_interface_test.SyncEasyTest",
"_base_interface_test.SyncPeasyTest",
"_beta_features_test.BetaFeaturesTest",
"_beta_features_test.ContextManagementAndLifecycleTest",
+ "_cancel_many_calls_test.CancelManyCallsTest",
+ "_channel_connectivity_test.ChannelConnectivityTest",
+ "_channel_ready_future_test.ChannelReadyFutureTest",
"_channel_test.ChannelTest",
"_connectivity_channel_test.ChannelConnectivityTest",
+ "_connectivity_channel_test.ConnectivityStatesTest",
"_core_over_links_base_interface_test.AsyncEasyTest",
"_core_over_links_base_interface_test.AsyncPeasyTest",
"_core_over_links_base_interface_test.SyncEasyTest",
@@ -23,6 +31,7 @@
"_crust_over_core_over_links_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest",
"_crust_over_core_over_links_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest",
"_crust_over_core_over_links_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest",
+ "_empty_message_test.EmptyMessageTest",
"_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest",
"_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest",
"_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest",
@@ -30,6 +39,7 @@
"_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest",
"_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest",
"_health_servicer_test.HealthServicerTest",
+ "_implementations_test.CallCredentialsTest",
"_implementations_test.ChannelCredentialsTest",
"_insecure_interop_test.InsecureInteropTest",
"_intermediary_low_test.CancellationTest",
@@ -41,9 +51,14 @@
"_lonely_invocation_link_test.LonelyInvocationLinkTest",
"_low_test.HangingServerShutdown",
"_low_test.InsecureServerInsecureClient",
+ "_metadata_test.MetadataTest",
"_not_found_test.NotFoundTest",
+ "_python_plugin_test.PythonPluginTest",
+ "_read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest",
+ "_rpc_test.RPCTest",
"_sanity_test.Sanity",
"_secure_interop_test.SecureInteropTest",
+ "_thread_cleanup_test.CleanupThreadTest",
"_transmission_test.RoundTripTest",
"_transmission_test.TransmissionTest",
"_utilities_test.ChannelConnectivityTest",
diff --git a/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py b/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py
index 22d4b019c7..09ebdeff33 100644
--- a/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py
+++ b/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py
@@ -164,15 +164,15 @@ class EchoTest(unittest.TestCase):
self.assertIsNotNone(service_accepted)
self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
self.assertIs(service_accepted.tag, service_tag)
- self.assertEqual(method, service_accepted.service_acceptance.method)
- self.assertEqual(self.host, service_accepted.service_acceptance.host)
+ self.assertEqual(method.encode(), service_accepted.service_acceptance.method)
+ self.assertEqual(self.host.encode(), service_accepted.service_acceptance.host)
self.assertIsNotNone(service_accepted.service_acceptance.call)
metadata = dict(service_accepted.metadata)
- self.assertIn(client_metadata_key, metadata)
- self.assertEqual(client_metadata_value, metadata[client_metadata_key])
- self.assertIn(client_binary_metadata_key, metadata)
+ self.assertIn(client_metadata_key.encode(), metadata)
+ self.assertEqual(client_metadata_value.encode(), metadata[client_metadata_key.encode()])
+ self.assertIn(client_binary_metadata_key.encode(), metadata)
self.assertEqual(client_binary_metadata_value,
- metadata[client_binary_metadata_key])
+ metadata[client_binary_metadata_key.encode()])
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.add_metadata(server_leading_metadata_key,
@@ -186,12 +186,12 @@ class EchoTest(unittest.TestCase):
self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
self.assertEqual(metadata_tag, metadata_accepted.tag)
metadata = dict(metadata_accepted.metadata)
- self.assertIn(server_leading_metadata_key, metadata)
- self.assertEqual(server_leading_metadata_value,
- metadata[server_leading_metadata_key])
- self.assertIn(server_leading_binary_metadata_key, metadata)
+ self.assertIn(server_leading_metadata_key.encode(), metadata)
+ self.assertEqual(server_leading_metadata_value.encode(),
+ metadata[server_leading_metadata_key.encode()])
+ self.assertIn(server_leading_binary_metadata_key.encode(), metadata)
self.assertEqual(server_leading_binary_metadata_value,
- metadata[server_leading_binary_metadata_key])
+ metadata[server_leading_binary_metadata_key.encode()])
for datum in test_data:
client_call.write(datum, write_tag, _low.WriteFlags.WRITE_NO_COMPRESS)
@@ -277,17 +277,17 @@ class EchoTest(unittest.TestCase):
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
self.assertEqual(finish_tag, finish_accepted.tag)
- self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
+ self.assertEqual(_low.Status(_low.Code.OK, details.encode()), finish_accepted.status)
metadata = dict(finish_accepted.metadata)
- self.assertIn(server_trailing_metadata_key, metadata)
- self.assertEqual(server_trailing_metadata_value,
- metadata[server_trailing_metadata_key])
- self.assertIn(server_trailing_binary_metadata_key, metadata)
+ self.assertIn(server_trailing_metadata_key.encode(), metadata)
+ self.assertEqual(server_trailing_metadata_value.encode(),
+ metadata[server_trailing_metadata_key.encode()])
+ self.assertIn(server_trailing_binary_metadata_key.encode(), metadata)
self.assertEqual(server_trailing_binary_metadata_value,
- metadata[server_trailing_binary_metadata_key])
+ metadata[server_trailing_binary_metadata_key.encode()])
self.assertSetEqual(set(key for key, _ in finish_accepted.metadata),
- set((server_trailing_metadata_key,
- server_trailing_binary_metadata_key,)))
+ set((server_trailing_metadata_key.encode(),
+ server_trailing_binary_metadata_key.encode(),)))
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
@@ -302,7 +302,8 @@ class EchoTest(unittest.TestCase):
self._perform_echo_test([_BYTE_SEQUENCE])
def testManyOneByteEchoes(self):
- self._perform_echo_test(_BYTE_SEQUENCE)
+ self._perform_echo_test(
+ [_BYTE_SEQUENCE[i:i+1] for i in range(len(_BYTE_SEQUENCE))])
def testManyManyByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
@@ -409,7 +410,7 @@ class CancellationTest(unittest.TestCase):
finish_event = self.client_events.get()
self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
- self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'),
+ self.assertEqual(_low.Status(_low.Code.CANCELLED, b'Cancelled'),
finish_event.status)
self.assertSequenceEqual(test_data, server_data)
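The _intermediary_low_test changes above reflect that the low-level layer now surfaces metadata keys, values, and status details as bytes, so expected text values are .encode()d before comparison. The pattern, in miniature (keys and values are illustrative):

    key, value = 'test-key', 'test-value'
    received_metadata = {key.encode(): value.encode()}  # as the core hands it back
    assert key.encode() in received_metadata
    assert received_metadata[key.encode()] == value.encode()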
diff --git a/src/python/grpcio/tests/unit/_adapter/_low_test.py b/src/python/grpcio/tests/unit/_adapter/_low_test.py
index ec46617996..e09a1f2564 100644
--- a/src/python/grpcio/tests/unit/_adapter/_low_test.py
+++ b/src/python/grpcio/tests/unit/_adapter/_low_test.py
@@ -148,11 +148,11 @@ class InsecureServerInsecureClient(unittest.TestCase):
# Check that Python's user agent string is a part of the full user agent
# string
received_initial_metadata_dict = dict(received_initial_metadata)
- self.assertIn('user-agent', received_initial_metadata_dict)
- self.assertIn('Python-gRPC-{}'.format(_grpcio_metadata.__version__),
- received_initial_metadata_dict['user-agent'])
- self.assertEqual(method, request_event.call_details.method)
- self.assertEqual(host, request_event.call_details.host)
+ self.assertIn(b'user-agent', received_initial_metadata_dict)
+ self.assertIn('Python-gRPC-{}'.format(_grpcio_metadata.__version__).encode(),
+ received_initial_metadata_dict[b'user-agent'])
+ self.assertEqual(method.encode(), request_event.call_details.method)
+ self.assertEqual(host.encode(), request_event.call_details.host)
self.assertLess(abs(deadline - request_event.call_details.deadline),
deadline_tolerance)
@@ -198,12 +198,12 @@ class InsecureServerInsecureClient(unittest.TestCase):
test_common.metadata_transmitted(server_initial_metadata,
client_result.initial_metadata))
elif client_result.type == _types.OpType.RECV_MESSAGE:
- self.assertEqual(response, client_result.message)
+ self.assertEqual(response.encode(), client_result.message)
elif client_result.type == _types.OpType.RECV_STATUS_ON_CLIENT:
self.assertTrue(
test_common.metadata_transmitted(server_trailing_metadata,
client_result.trailing_metadata))
- self.assertEqual(server_status_details, client_result.status.details)
+ self.assertEqual(server_status_details.encode(), client_result.status.details)
self.assertEqual(server_status_code, client_result.status.code)
self.assertEqual(set([
_types.OpType.SEND_INITIAL_METADATA,
@@ -220,7 +220,7 @@ class InsecureServerInsecureClient(unittest.TestCase):
self.assertNotIn(client_result.type, found_server_op_types)
found_server_op_types.add(server_result.type)
if server_result.type == _types.OpType.RECV_MESSAGE:
- self.assertEqual(request, server_result.message)
+ self.assertEqual(request.encode(), server_result.message)
elif server_result.type == _types.OpType.RECV_CLOSE_ON_SERVER:
self.assertFalse(server_result.cancelled)
self.assertEqual(set([
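The user-agent check in this hunk can be factored into a small helper; a hedged sketch, assuming the metadata has already been normalized to a bytes-keyed dict as in the test above (the helper name and arguments are illustrative):

def assert_python_user_agent(metadata_dict, grpcio_version):
    # The server-received initial metadata should advertise the Python stack.
    user_agent = metadata_dict[b'user-agent']
    expected_fragment = 'Python-gRPC-{}'.format(grpcio_version).encode()
    assert expected_fragment in user_agent, user_agent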
diff --git a/src/python/grpcio/tests/unit/_api_test.py b/src/python/grpcio/tests/unit/_api_test.py
new file mode 100644
index 0000000000..1501ec2a04
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_api_test.py
@@ -0,0 +1,104 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test of gRPC Python's application-layer API."""
+
+import unittest
+
+import six
+
+import grpc
+
+from tests.unit import _from_grpc_import_star
+
+
+class AllTest(unittest.TestCase):
+
+ def testAll(self):
+ expected_grpc_code_elements = (
+ 'FutureTimeoutError',
+ 'FutureCancelledError',
+ 'Future',
+ 'ChannelConnectivity',
+ 'StatusCode',
+ 'RpcError',
+ 'RpcContext',
+ 'Call',
+ 'ChannelCredentials',
+ 'CallCredentials',
+ 'AuthMetadataContext',
+ 'AuthMetadataPluginCallback',
+ 'AuthMetadataPlugin',
+ 'ServerCredentials',
+ 'UnaryUnaryMultiCallable',
+ 'UnaryStreamMultiCallable',
+ 'StreamUnaryMultiCallable',
+ 'StreamStreamMultiCallable',
+ 'Channel',
+ 'ServicerContext',
+ 'RpcMethodHandler',
+ 'HandlerCallDetails',
+ 'GenericRpcHandler',
+ 'Server',
+ 'unary_unary_rpc_method_handler',
+ 'unary_stream_rpc_method_handler',
+ 'stream_unary_rpc_method_handler',
+ 'stream_stream_rpc_method_handler',
+ 'method_handlers_generic_handler',
+ 'ssl_channel_credentials',
+ 'metadata_call_credentials',
+ 'access_token_call_credentials',
+ 'composite_call_credentials',
+ 'composite_channel_credentials',
+ 'ssl_server_credentials',
+ 'channel_ready_future',
+ 'insecure_channel',
+ 'secure_channel',
+ 'server',
+ )
+
+ six.assertCountEqual(
+ self, expected_grpc_code_elements,
+ _from_grpc_import_star.GRPC_ELEMENTS)
+
+
+class ChannelConnectivityTest(unittest.TestCase):
+
+ def testChannelConnectivity(self):
+ self.assertSequenceEqual(
+ (grpc.ChannelConnectivity.IDLE,
+ grpc.ChannelConnectivity.CONNECTING,
+ grpc.ChannelConnectivity.READY,
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ grpc.ChannelConnectivity.SHUTDOWN,),
+ tuple(grpc.ChannelConnectivity))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
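On Python 3 the six dependency is not strictly required for this comparison, since unittest.TestCase provides assertCountEqual directly; a reduced sketch that checks only a representative subset of the public surface (subset chosen here for brevity, not part of this commit):

import unittest

import grpc


class PublicSurfaceSubsetTest(unittest.TestCase):

    def test_subset_of_public_surface(self):
        expected = {'insecure_channel', 'secure_channel', 'server', 'StatusCode'}
        exported = {name for name in dir(grpc) if not name.startswith('_')}
        # Every representative name must be publicly importable from grpc.
        self.assertTrue(expected.issubset(exported), expected - exported)


if __name__ == '__main__':
    unittest.main(verbosity=2)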
diff --git a/src/python/grpcio/tests/unit/_auth_test.py b/src/python/grpcio/tests/unit/_auth_test.py
new file mode 100644
index 0000000000..c31f7b06f7
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_auth_test.py
@@ -0,0 +1,96 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of standard AuthMetadataPlugins."""
+
+import collections
+import threading
+import unittest
+
+from grpc import _auth
+
+
+class MockGoogleCreds(object):
+
+ def get_access_token(self):
+ token = collections.namedtuple('MockAccessTokenInfo',
+ ('access_token', 'expires_in'))
+ token.access_token = 'token'
+ return token
+
+
+class MockExceptionGoogleCreds(object):
+
+ def get_access_token(self):
+ raise Exception()
+
+
+class GoogleCallCredentialsTest(unittest.TestCase):
+
+ def test_google_call_credentials_success(self):
+ callback_event = threading.Event()
+
+ def mock_callback(metadata, error):
+ self.assertEqual(metadata, (('authorization', 'Bearer token'),))
+ self.assertIsNone(error)
+ callback_event.set()
+
+ call_creds = _auth.GoogleCallCredentials(MockGoogleCreds())
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
+
+ def test_google_call_credentials_error(self):
+ callback_event = threading.Event()
+
+ def mock_callback(metadata, error):
+ self.assertIsNotNone(error)
+ callback_event.set()
+
+ call_creds = _auth.GoogleCallCredentials(MockExceptionGoogleCreds())
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
+
+
+class AccessTokenCallCredentialsTest(unittest.TestCase):
+
+ def test_google_call_credentials_success(self):
+ callback_event = threading.Event()
+
+ def mock_callback(metadata, error):
+ self.assertEqual(metadata, (('authorization', 'Bearer token'),))
+ self.assertIsNone(error)
+ callback_event.set()
+
+ call_creds = _auth.AccessTokenCallCredentials('token')
+ call_creds(None, mock_callback)
+ self.assertTrue(callback_event.wait(1.0))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
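These tests drive grpc._auth directly; the equivalent public-API route is grpc.metadata_call_credentials with an AuthMetadataPlugin-style callable. A hedged sketch (the class name and token value are illustrative):

import grpc


class StaticTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin):
    """Supplies a fixed bearer token, mirroring the mock credentials above."""

    def __init__(self, token):
        self._token = token

    def __call__(self, context, callback):
        # Report metadata (or an error) exactly once via the callback.
        callback((('authorization', 'Bearer {}'.format(self._token)),), None)


call_credentials = grpc.metadata_call_credentials(
    StaticTokenAuthMetadataPlugin('token'), name='static-token-plugin')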
diff --git a/src/python/grpcio/tests/unit/_channel_connectivity_test.py b/src/python/grpcio/tests/unit/_channel_connectivity_test.py
new file mode 100644
index 0000000000..a1575efada
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_channel_connectivity_test.py
@@ -0,0 +1,161 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of grpc._channel.Channel connectivity."""
+
+import threading
+import time
+import unittest
+from concurrent import futures
+
+import grpc
+from grpc import _channel
+from grpc import _server
+from tests.unit.framework.common import test_constants
+
+
+def _ready_in_connectivities(connectivities):
+ return grpc.ChannelConnectivity.READY in connectivities
+
+
+def _last_connectivity_is_not_ready(connectivities):
+ return connectivities[-1] is not grpc.ChannelConnectivity.READY
+
+
+class _Callback(object):
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._connectivities = []
+
+ def update(self, connectivity):
+ with self._condition:
+ self._connectivities.append(connectivity)
+ self._condition.notify()
+
+ def connectivities(self):
+ with self._condition:
+ return tuple(self._connectivities)
+
+ def block_until_connectivities_satisfy(self, predicate):
+ with self._condition:
+ while True:
+ connectivities = tuple(self._connectivities)
+ if predicate(connectivities):
+ return connectivities
+ else:
+ self._condition.wait()
+
+
+class ChannelConnectivityTest(unittest.TestCase):
+
+ def test_lonely_channel_connectivity(self):
+ callback = _Callback()
+
+ channel = _channel.Channel('localhost:12345', None, None)
+ channel.subscribe(callback.update, try_to_connect=False)
+ first_connectivities = callback.block_until_connectivities_satisfy(bool)
+ channel.subscribe(callback.update, try_to_connect=True)
+ second_connectivities = callback.block_until_connectivities_satisfy(
+ lambda connectivities: 2 <= len(connectivities))
+ # Wait for a connection that will never happen.
+ time.sleep(test_constants.SHORT_TIMEOUT)
+ third_connectivities = callback.connectivities()
+ channel.unsubscribe(callback.update)
+ fourth_connectivities = callback.connectivities()
+ channel.unsubscribe(callback.update)
+ fifth_connectivities = callback.connectivities()
+
+ self.assertSequenceEqual(
+ (grpc.ChannelConnectivity.IDLE,), first_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.READY, second_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.READY, third_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.READY, fourth_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.READY, fifth_connectivities)
+
+ def test_immediately_connectable_channel_connectivity(self):
+ server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ first_callback = _Callback()
+ second_callback = _Callback()
+
+ channel = _channel.Channel('localhost:{}'.format(port), None, None)
+ channel.subscribe(first_callback.update, try_to_connect=False)
+ first_connectivities = first_callback.block_until_connectivities_satisfy(
+ bool)
+ # Wait for a connection that will never happen because try_to_connect=True
+ # has not yet been passed.
+ time.sleep(test_constants.SHORT_TIMEOUT)
+ second_connectivities = first_callback.connectivities()
+ channel.subscribe(second_callback.update, try_to_connect=True)
+ third_connectivities = first_callback.block_until_connectivities_satisfy(
+ lambda connectivities: 2 <= len(connectivities))
+ fourth_connectivities = second_callback.block_until_connectivities_satisfy(
+ bool)
+ # Wait for a connection that will happen (or may already have happened).
+ first_callback.block_until_connectivities_satisfy(_ready_in_connectivities)
+ second_callback.block_until_connectivities_satisfy(_ready_in_connectivities)
+ del channel
+
+ self.assertSequenceEqual(
+ (grpc.ChannelConnectivity.IDLE,), first_connectivities)
+ self.assertSequenceEqual(
+ (grpc.ChannelConnectivity.IDLE,), second_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.FATAL_FAILURE, third_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ fourth_connectivities)
+ self.assertNotIn(
+ grpc.ChannelConnectivity.FATAL_FAILURE, fourth_connectivities)
+
+ def test_reachable_then_unreachable_channel_connectivity(self):
+ server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ callback = _Callback()
+
+ channel = _channel.Channel('localhost:{}'.format(port), None, None)
+ channel.subscribe(callback.update, try_to_connect=True)
+ callback.block_until_connectivities_satisfy(_ready_in_connectivities)
+ # Now take down the server and confirm that channel readiness is repudiated.
+ server.stop(None)
+ callback.block_until_connectivities_satisfy(_last_connectivity_is_not_ready)
+ channel.unsubscribe(callback.update)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
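Outside the test harness, the same subscribe/unsubscribe machinery can be wrapped into a simple readiness wait; a minimal sketch against the public grpc.Channel API (the example address is a placeholder and may never become READY):

import threading

import grpc


def wait_for_ready(channel, timeout):
    """Block until the channel reports READY or the timeout elapses."""
    ready = threading.Event()

    def on_connectivity(connectivity):
        if connectivity is grpc.ChannelConnectivity.READY:
            ready.set()

    channel.subscribe(on_connectivity, try_to_connect=True)
    try:
        return ready.wait(timeout)
    finally:
        channel.unsubscribe(on_connectivity)


# Example: wait_for_ready(grpc.insecure_channel('localhost:50051'), 5.0)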
diff --git a/src/python/grpcio/tests/unit/_channel_ready_future_test.py b/src/python/grpcio/tests/unit/_channel_ready_future_test.py
new file mode 100644
index 0000000000..b84bc0197a
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_channel_ready_future_test.py
@@ -0,0 +1,103 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of grpc.channel_ready_future."""
+
+import threading
+import unittest
+from concurrent import futures
+
+import grpc
+from grpc import _channel
+from grpc import _server
+from tests.unit.framework.common import test_constants
+
+
+class _Callback(object):
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
+
+ def accept_value(self, value):
+ with self._condition:
+ self._value = value
+ self._condition.notify_all()
+
+ def block_until_called(self):
+ with self._condition:
+ while self._value is None:
+ self._condition.wait()
+ return self._value
+
+
+class ChannelReadyFutureTest(unittest.TestCase):
+
+ def test_lonely_channel_connectivity(self):
+ channel = grpc.insecure_channel('localhost:12345')
+ callback = _Callback()
+
+ ready_future = grpc.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ with self.assertRaises(grpc.FutureTimeoutError):
+ ready_future.result(test_constants.SHORT_TIMEOUT)
+ self.assertFalse(ready_future.cancelled())
+ self.assertFalse(ready_future.done())
+ self.assertTrue(ready_future.running())
+ ready_future.cancel()
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertTrue(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+
+ def test_immediately_connectable_channel_connectivity(self):
+ server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0))
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ callback = _Callback()
+
+ ready_future = grpc.channel_ready_future(channel)
+ ready_future.add_done_callback(callback.accept_value)
+ self.assertIsNone(ready_future.result(test_constants.SHORT_TIMEOUT))
+ value_passed_to_callback = callback.block_until_called()
+ self.assertIs(ready_future, value_passed_to_callback)
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+ # Cancellation after maturity has no effect.
+ ready_future.cancel()
+ self.assertFalse(ready_future.cancelled())
+ self.assertTrue(ready_future.done())
+ self.assertFalse(ready_future.running())
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
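The same future also supports a straightforward connect-with-deadline idiom; a usage sketch (the target address and timeout are placeholders):

import grpc


def connect_or_give_up(target, timeout_seconds):
    channel = grpc.insecure_channel(target)
    ready_future = grpc.channel_ready_future(channel)
    try:
        # result() returns None once the channel is READY.
        ready_future.result(timeout=timeout_seconds)
        return channel
    except grpc.FutureTimeoutError:
        ready_future.cancel()
        return None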
diff --git a/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py
new file mode 100644
index 0000000000..c1de779014
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py
@@ -0,0 +1,222 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test making many calls and immediately cancelling most of them."""
+
+import threading
+import unittest
+
+from grpc._cython import cygrpc
+from grpc.framework.foundation import logging_pool
+from tests.unit.framework.common import test_constants
+
+_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
+_EMPTY_FLAGS = 0
+_EMPTY_METADATA = cygrpc.Metadata(())
+
+_SERVER_SHUTDOWN_TAG = 'server_shutdown'
+_REQUEST_CALL_TAG = 'request_call'
+_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
+_RECEIVE_MESSAGE_TAG = 'receive_message'
+_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
+
+_SUCCESS_CALL_FRACTION = 1.0 / 8.0
+
+
+class _State(object):
+
+ def __init__(self):
+ self.condition = threading.Condition()
+ self.handlers_released = False
+ self.parked_handlers = 0
+ self.handled_rpcs = 0
+
+
+def _is_cancellation_event(event):
+ return (
+ event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
+ event.batch_operations[0].received_cancelled)
+
+
+class _Handler(object):
+
+ def __init__(self, state, completion_queue, rpc_event):
+ self._state = state
+ self._lock = threading.Lock()
+ self._completion_queue = completion_queue
+ self._call = rpc_event.operation_call
+
+ def __call__(self):
+ with self._state.condition:
+ self._state.parked_handlers += 1
+ if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
+ self._state.condition.notify_all()
+ while not self._state.handlers_released:
+ self._state.condition.wait()
+
+ with self._lock:
+ self._call.start_batch(
+ cygrpc.Operations(
+ (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
+ _RECEIVE_CLOSE_ON_SERVER_TAG)
+ self._call.start_batch(
+ cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
+ _RECEIVE_MESSAGE_TAG)
+ first_event = self._completion_queue.poll()
+ if _is_cancellation_event(first_event):
+ self._completion_queue.poll()
+ else:
+ with self._lock:
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_send_message(b'\x79\x57', _EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
+ _EMPTY_FLAGS),
+ )
+ self._call.start_batch(
+ cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
+ self._completion_queue.poll()
+ self._completion_queue.poll()
+
+
+def _serve(state, server, server_completion_queue, thread_pool):
+ for _ in range(test_constants.RPC_CONCURRENCY):
+ call_completion_queue = cygrpc.CompletionQueue()
+ server.request_call(
+ call_completion_queue, server_completion_queue, _REQUEST_CALL_TAG)
+ rpc_event = server_completion_queue.poll()
+ thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
+ with state.condition:
+ state.handled_rpcs += 1
+ if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
+ state.condition.notify_all()
+ server_completion_queue.poll()
+
+
+class _QueueDriver(object):
+
+ def __init__(self, condition, completion_queue, due):
+ self._condition = condition
+ self._completion_queue = completion_queue
+ self._due = due
+ self._events = []
+ self._returned = False
+
+ def start(self):
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._due.remove(event.tag)
+ self._condition.notify_all()
+ if not self._due:
+ self._returned = True
+ return
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def events(self, at_least):
+ with self._condition:
+ while len(self._events) < at_least:
+ self._condition.wait()
+ return tuple(self._events)
+
+
+class CancelManyCallsTest(unittest.TestCase):
+
+ def testCancelManyCalls(self):
+ server_thread_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+
+ server_completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server()
+ server.register_completion_queue(server_completion_queue)
+ port = server.add_http2_port('[::]:0')
+ server.start()
+ channel = cygrpc.Channel('localhost:{}'.format(port))
+
+ state = _State()
+
+ server_thread_args = (
+ state, server, server_completion_queue, server_thread_pool,)
+ server_thread = threading.Thread(target=_serve, args=server_thread_args)
+ server_thread.start()
+
+ client_condition = threading.Condition()
+ client_due = set()
+ client_completion_queue = cygrpc.CompletionQueue()
+ client_driver = _QueueDriver(
+ client_condition, client_completion_queue, client_due)
+ client_driver.start()
+
+ with client_condition:
+ client_calls = []
+ for index in range(test_constants.RPC_CONCURRENCY):
+ client_call = channel.create_call(
+ None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
+ _INFINITE_FUTURE)
+ operations = (
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_send_message(b'\x45\x56', _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ )
+ tag = 'client_complete_call_{0:04d}_tag'.format(index)
+ client_call.start_batch(cygrpc.Operations(operations), tag)
+ client_due.add(tag)
+ client_calls.append(client_call)
+
+ with state.condition:
+ while True:
+ if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
+ state.condition.wait()
+ elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
+ state.condition.wait()
+ else:
+ state.handlers_released = True
+ state.condition.notify_all()
+ break
+
+ client_driver.events(
+ test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
+ with client_condition:
+ for client_call in client_calls:
+ client_call.cancel()
+
+ with state.condition:
+ server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
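The handler "parking" in this test is a hand-rolled barrier: each handler increments a counter and waits on a condition, and the test body releases them all once both concurrency targets are met. A condensed, standalone sketch of that synchronization (the class name ReleaseGate is made up for illustration):

import threading


class ReleaseGate(object):
    """Park worker threads until a coordinator releases them together."""

    def __init__(self, expected):
        self._condition = threading.Condition()
        self._expected = expected
        self._parked = 0
        self._released = False

    def park(self):
        with self._condition:
            self._parked += 1
            self._condition.notify_all()
            while not self._released:
                self._condition.wait()

    def release_when_all_parked(self):
        with self._condition:
            while self._parked < self._expected:
                self._condition.wait()
            self._released = True
            self._condition.notify_all()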
diff --git a/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py
new file mode 100644
index 0000000000..6ae7a90fbe
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py
@@ -0,0 +1,251 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test a corner-case at the level of the Cython API."""
+
+import threading
+import unittest
+
+from grpc._cython import cygrpc
+
+_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
+_EMPTY_FLAGS = 0
+_EMPTY_METADATA = cygrpc.Metadata(())
+
+
+class _ServerDriver(object):
+
+ def __init__(self, completion_queue, shutdown_tag):
+ self._condition = threading.Condition()
+ self._completion_queue = completion_queue
+ self._shutdown_tag = shutdown_tag
+ self._events = []
+ self._saw_shutdown_tag = False
+
+ def start(self):
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._condition.notify()
+ if event.tag is self._shutdown_tag:
+ self._saw_shutdown_tag = True
+ break
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def done(self):
+ with self._condition:
+ return self._saw_shutdown_tag
+
+ def first_event(self):
+ with self._condition:
+ while not self._events:
+ self._condition.wait()
+ return self._events[0]
+
+ def events(self):
+ with self._condition:
+ while not self._saw_shutdown_tag:
+ self._condition.wait()
+ return tuple(self._events)
+
+
+class _QueueDriver(object):
+
+ def __init__(self, condition, completion_queue, due):
+ self._condition = condition
+ self._completion_queue = completion_queue
+ self._due = due
+ self._events = []
+ self._returned = False
+
+ def start(self):
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events.append(event)
+ self._due.remove(event.tag)
+ self._condition.notify_all()
+ if not self._due:
+ self._returned = True
+ return
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+
+ def done(self):
+ with self._condition:
+ return self._returned
+
+ def event_with_tag(self, tag):
+ with self._condition:
+ while True:
+ for event in self._events:
+ if event.tag is tag:
+ return event
+ self._condition.wait()
+
+ def events(self):
+ with self._condition:
+ while not self._returned:
+ self._condition.wait()
+ return tuple(self._events)
+
+
+class ReadSomeButNotAllResponsesTest(unittest.TestCase):
+
+ def testReadSomeButNotAllResponses(self):
+ server_completion_queue = cygrpc.CompletionQueue()
+ server = cygrpc.Server()
+ server.register_completion_queue(server_completion_queue)
+ port = server.add_http2_port('[::]:0')
+ server.start()
+ channel = cygrpc.Channel('localhost:{}'.format(port))
+
+ server_shutdown_tag = 'server_shutdown_tag'
+ server_driver = _ServerDriver(server_completion_queue, server_shutdown_tag)
+ server_driver.start()
+
+ client_condition = threading.Condition()
+ client_due = set()
+ client_completion_queue = cygrpc.CompletionQueue()
+ client_driver = _QueueDriver(
+ client_condition, client_completion_queue, client_due)
+ client_driver.start()
+
+ server_call_condition = threading.Condition()
+ server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
+ server_send_first_message_tag = 'server_send_first_message_tag'
+ server_send_second_message_tag = 'server_send_second_message_tag'
+ server_complete_rpc_tag = 'server_complete_rpc_tag'
+ server_call_due = set((
+ server_send_initial_metadata_tag,
+ server_send_first_message_tag,
+ server_send_second_message_tag,
+ server_complete_rpc_tag,
+ ))
+ server_call_completion_queue = cygrpc.CompletionQueue()
+ server_call_driver = _QueueDriver(
+ server_call_condition, server_call_completion_queue, server_call_due)
+ server_call_driver.start()
+
+ server_rpc_tag = 'server_rpc_tag'
+ request_call_result = server.request_call(
+ server_call_completion_queue, server_completion_queue, server_rpc_tag)
+
+ client_call = channel.create_call(
+ None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
+ _INFINITE_FUTURE)
+ client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
+ client_complete_rpc_tag = 'client_complete_rpc_tag'
+ with client_condition:
+ client_receive_initial_metadata_start_batch_result = (
+ client_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ ]), client_receive_initial_metadata_tag))
+ client_due.add(client_receive_initial_metadata_tag)
+ client_complete_rpc_start_batch_result = (
+ client_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
+ ]), client_complete_rpc_tag))
+ client_due.add(client_complete_rpc_tag)
+
+ server_rpc_event = server_driver.first_event()
+
+ with server_call_condition:
+ server_send_initial_metadata_start_batch_result = (
+ server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_send_initial_metadata(
+ _EMPTY_METADATA, _EMPTY_FLAGS),
+ ]), server_send_initial_metadata_tag))
+ server_send_first_message_start_batch_result = (
+ server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
+ ]), server_send_first_message_tag))
+ server_send_initial_metadata_event = server_call_driver.event_with_tag(
+ server_send_initial_metadata_tag)
+ server_send_first_message_event = server_call_driver.event_with_tag(
+ server_send_first_message_tag)
+ with server_call_condition:
+ server_send_second_message_start_batch_result = (
+ server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
+ ]), server_send_second_message_tag))
+ server_complete_rpc_start_batch_result = (
+ server_rpc_event.operation_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details',
+ _EMPTY_FLAGS),
+ ]), server_complete_rpc_tag))
+ server_send_second_message_event = server_call_driver.event_with_tag(
+ server_send_second_message_tag)
+ server_complete_rpc_event = server_call_driver.event_with_tag(
+ server_complete_rpc_tag)
+ server_call_driver.events()
+
+ with client_condition:
+ client_receive_first_message_tag = 'client_receive_first_message_tag'
+ client_receive_first_message_start_batch_result = (
+ client_call.start_batch(cygrpc.Operations([
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ]), client_receive_first_message_tag))
+ client_due.add(client_receive_first_message_tag)
+ client_receive_first_message_event = client_driver.event_with_tag(
+ client_receive_first_message_tag)
+
+ client_call_cancel_result = client_call.cancel()
+ client_driver.events()
+
+ server.shutdown(server_completion_queue, server_shutdown_tag)
+ server.cancel_all_calls()
+ server_driver.events()
+
+ self.assertEqual(cygrpc.CallError.ok, request_call_result)
+ self.assertEqual(
+ cygrpc.CallError.ok, server_send_initial_metadata_start_batch_result)
+ self.assertEqual(
+ cygrpc.CallError.ok, client_receive_initial_metadata_start_batch_result)
+ self.assertEqual(
+ cygrpc.CallError.ok, client_complete_rpc_start_batch_result)
+ self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
+ self.assertIs(server_rpc_tag, server_rpc_event.tag)
+ self.assertEqual(
+ cygrpc.CompletionType.operation_complete, server_rpc_event.type)
+ self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
+ self.assertEqual(0, len(server_rpc_event.batch_operations))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
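At the application level, the corner case exercised here corresponds to consuming only part of a response stream and then cancelling the call so the server stops producing. A hedged sketch against the public API (the method name, request payload, and channel are placeholders):

def take_first_n(channel, n):
    stream = channel.unary_stream(b'/example/StreamingMethod')(b'')
    received = []
    for response in stream:
        received.append(response)
        if len(received) == n:
            break
    # Cancelling releases the call even though responses remain unread.
    stream.cancel()
    return received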
diff --git a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio/tests/unit/_cython/cygrpc_test.py
index 0a511101f0..a006a20ce3 100644
--- a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio/tests/unit/_cython/cygrpc_test.py
@@ -37,7 +37,7 @@ from tests.unit import test_common
from tests.unit import resources
-_SSL_HOST_OVERRIDE = 'foo.test.google.fr'
+_SSL_HOST_OVERRIDE = b'foo.test.google.fr'
_CALL_CREDENTIALS_METADATA_KEY = 'call-creds-key'
_CALL_CREDENTIALS_METADATA_VALUE = 'call-creds-value'
_EMPTY_FLAGS = 0
@@ -143,22 +143,60 @@ class TypeSmokeTest(unittest.TestCase):
del completion_queue
-class InsecureServerInsecureClient(unittest.TestCase):
+class ServerClientMixin(object):
- def setUp(self):
+ def setUpMixin(self, server_credentials, client_credentials, host_override):
self.server_completion_queue = cygrpc.CompletionQueue()
self.server = cygrpc.Server()
self.server.register_completion_queue(self.server_completion_queue)
- self.port = self.server.add_http2_port('[::]:0')
+ if server_credentials:
+ self.port = self.server.add_http2_port('[::]:0', server_credentials)
+ else:
+ self.port = self.server.add_http2_port('[::]:0')
self.server.start()
self.client_completion_queue = cygrpc.CompletionQueue()
- self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port))
-
- def tearDown(self):
+ if client_credentials:
+ client_channel_arguments = cygrpc.ChannelArgs([
+ cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
+ host_override)])
+ self.client_channel = cygrpc.Channel(
+ 'localhost:{}'.format(self.port), client_channel_arguments,
+ client_credentials)
+ else:
+ self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port))
+ if host_override:
+ self.host_argument = None # default host
+ self.expected_host = host_override
+ else:
+ # arbitrary host name necessitating no further identification
+ self.host_argument = b'hostess'
+ self.expected_host = self.host_argument
+
+ def tearDownMixin(self):
del self.server
del self.client_completion_queue
del self.server_completion_queue
+ def _perform_operations(self, operations, call, queue, deadline, description):
+ """Perform the list of operations with given call, queue, and deadline.
+
+ Invocation errors are reported as an exception with `description` in
+ the message. Performs the operations asynchronously, returning a future.
+ """
+ def performer():
+ tag = object()
+ try:
+ call_result = call.start_batch(cygrpc.Operations(operations), tag)
+ self.assertEqual(cygrpc.CallError.ok, call_result)
+ event = queue.poll(deadline)
+ self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
+ self.assertTrue(event.success)
+ self.assertIs(tag, event.tag)
+ except Exception as error:
+ raise Exception("Error in '{}': {}".format(description, error))
+ return event
+ return test_utilities.SimpleFuture(performer)
+
def testEcho(self):
DEADLINE = time.time()+5
DEADLINE_TOLERANCE = 0.25
@@ -175,7 +213,6 @@ class InsecureServerInsecureClient(unittest.TestCase):
REQUEST = b'in death a member of project mayhem has a name'
RESPONSE = b'his name is robert paulson'
METHOD = b'twinkies'
- HOST = b'hostess'
cygrpc_deadline = cygrpc.Timespec(DEADLINE)
@@ -188,7 +225,8 @@ class InsecureServerInsecureClient(unittest.TestCase):
client_call_tag = object()
client_call = self.client_channel.create_call(
- None, 0, self.client_completion_queue, METHOD, HOST, cygrpc_deadline)
+ None, 0, self.client_completion_queue, METHOD, self.host_argument,
+ cygrpc_deadline)
client_initial_metadata = cygrpc.Metadata([
cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
CLIENT_METADATA_ASCII_VALUE),
@@ -216,7 +254,8 @@ class InsecureServerInsecureClient(unittest.TestCase):
test_common.metadata_transmitted(client_initial_metadata,
request_event.request_metadata))
self.assertEqual(METHOD, request_event.request_call_details.method)
- self.assertEqual(HOST, request_event.request_call_details.host)
+ self.assertEqual(self.expected_host,
+ request_event.request_call_details.host)
self.assertLess(
abs(DEADLINE - float(request_event.request_call_details.deadline)),
DEADLINE_TOLERANCE)
@@ -292,172 +331,101 @@ class InsecureServerInsecureClient(unittest.TestCase):
del client_call
del server_call
-
-class SecureServerSecureClient(unittest.TestCase):
-
- def setUp(self):
- server_credentials = cygrpc.server_credentials_ssl(
- None, [cygrpc.SslPemKeyCertPair(resources.private_key(),
- resources.certificate_chain())], False)
- channel_credentials = cygrpc.channel_credentials_ssl(
- resources.test_root_certificates(), None)
- self.server_completion_queue = cygrpc.CompletionQueue()
- self.server = cygrpc.Server()
- self.server.register_completion_queue(self.server_completion_queue)
- self.port = self.server.add_http2_port('[::]:0', server_credentials)
- self.server.start()
- self.client_completion_queue = cygrpc.CompletionQueue()
- client_channel_arguments = cygrpc.ChannelArgs([
- cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
- _SSL_HOST_OVERRIDE)])
- self.client_channel = cygrpc.Channel(
- 'localhost:{}'.format(self.port), client_channel_arguments,
- channel_credentials)
-
- def tearDown(self):
- del self.server
- del self.client_completion_queue
- del self.server_completion_queue
-
- def testEcho(self):
+ def test6522(self):
DEADLINE = time.time()+5
DEADLINE_TOLERANCE = 0.25
- CLIENT_METADATA_ASCII_KEY = b'key'
- CLIENT_METADATA_ASCII_VALUE = b'val'
- CLIENT_METADATA_BIN_KEY = b'key-bin'
- CLIENT_METADATA_BIN_VALUE = b'\0'*1000
- SERVER_INITIAL_METADATA_KEY = b'init_me_me_me'
- SERVER_INITIAL_METADATA_VALUE = b'whodawha?'
- SERVER_TRAILING_METADATA_KEY = b'california_is_in_a_drought'
- SERVER_TRAILING_METADATA_VALUE = b'zomg it is'
- SERVER_STATUS_CODE = cygrpc.StatusCode.ok
- SERVER_STATUS_DETAILS = b'our work is never over'
- REQUEST = b'in death a member of project mayhem has a name'
- RESPONSE = b'his name is robert paulson'
- METHOD = b'/twinkies'
- HOST = None # Default host
+ METHOD = b'twinkies'
cygrpc_deadline = cygrpc.Timespec(DEADLINE)
+ empty_metadata = cygrpc.Metadata([])
server_request_tag = object()
- request_call_result = self.server.request_call(
+ self.server.request_call(
self.server_completion_queue, self.server_completion_queue,
server_request_tag)
+ client_call = self.client_channel.create_call(
+ None, 0, self.client_completion_queue, METHOD, self.host_argument,
+ cygrpc_deadline)
- self.assertEqual(cygrpc.CallError.ok, request_call_result)
-
- plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, '')
- call_credentials = cygrpc.call_credentials_metadata_plugin(plugin)
+ # Prologue
+ def perform_client_operations(operations, description):
+ return self._perform_operations(
+ operations, client_call,
+ self.client_completion_queue, cygrpc_deadline, description)
- client_call_tag = object()
- client_call = self.client_channel.create_call(
- None, 0, self.client_completion_queue, METHOD, HOST, cygrpc_deadline)
- client_call.set_credentials(call_credentials)
- client_initial_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY,
- CLIENT_METADATA_ASCII_VALUE),
- cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)])
- client_start_batch_result = client_call.start_batch(cygrpc.Operations([
- cygrpc.operation_send_initial_metadata(client_initial_metadata,
- _EMPTY_FLAGS),
- cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS),
- cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
- cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
- ]), client_call_tag)
- self.assertEqual(cygrpc.CallError.ok, client_start_batch_result)
- client_event_future = test_utilities.CompletionQueuePollFuture(
- self.client_completion_queue, cygrpc_deadline)
+ client_event_future = perform_client_operations([
+ cygrpc.operation_send_initial_metadata(empty_metadata,
+ _EMPTY_FLAGS),
+ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
+ ], "Client prologue")
request_event = self.server_completion_queue.poll(cygrpc_deadline)
- self.assertEqual(cygrpc.CompletionType.operation_complete,
- request_event.type)
- self.assertIsInstance(request_event.operation_call, cygrpc.Call)
- self.assertIs(server_request_tag, request_event.tag)
- self.assertEqual(0, len(request_event.batch_operations))
- client_metadata_with_credentials = list(client_initial_metadata) + [
- (_CALL_CREDENTIALS_METADATA_KEY, _CALL_CREDENTIALS_METADATA_VALUE)]
- self.assertTrue(
- test_common.metadata_transmitted(client_metadata_with_credentials,
- request_event.request_metadata))
- self.assertEqual(METHOD, request_event.request_call_details.method)
- self.assertEqual(_SSL_HOST_OVERRIDE,
- request_event.request_call_details.host)
- self.assertLess(
- abs(DEADLINE - float(request_event.request_call_details.deadline)),
- DEADLINE_TOLERANCE)
-
- server_call_tag = object()
server_call = request_event.operation_call
- server_initial_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(SERVER_INITIAL_METADATA_KEY,
- SERVER_INITIAL_METADATA_VALUE)])
- server_trailing_metadata = cygrpc.Metadata([
- cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY,
- SERVER_TRAILING_METADATA_VALUE)])
- server_start_batch_result = server_call.start_batch([
- cygrpc.operation_send_initial_metadata(server_initial_metadata,
- _EMPTY_FLAGS),
- cygrpc.operation_receive_message(_EMPTY_FLAGS),
- cygrpc.operation_send_message(RESPONSE, _EMPTY_FLAGS),
- cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
- cygrpc.operation_send_status_from_server(
- server_trailing_metadata, SERVER_STATUS_CODE,
- SERVER_STATUS_DETAILS, _EMPTY_FLAGS)
- ], server_call_tag)
- self.assertEqual(cygrpc.CallError.ok, server_start_batch_result)
- client_event = client_event_future.result()
- server_event = self.server_completion_queue.poll(cygrpc_deadline)
+ def perform_server_operations(operations, description):
+ return self._perform_operations(
+ operations, server_call,
+ self.server_completion_queue, cygrpc_deadline, description)
- self.assertEqual(6, len(client_event.batch_operations))
- found_client_op_types = set()
- for client_result in client_event.batch_operations:
- # we expect each op type to be unique
- self.assertNotIn(client_result.type, found_client_op_types)
- found_client_op_types.add(client_result.type)
- if client_result.type == cygrpc.OperationType.receive_initial_metadata:
- self.assertTrue(
- test_common.metadata_transmitted(server_initial_metadata,
- client_result.received_metadata))
- elif client_result.type == cygrpc.OperationType.receive_message:
- self.assertEqual(RESPONSE, client_result.received_message.bytes())
- elif client_result.type == cygrpc.OperationType.receive_status_on_client:
- self.assertTrue(
- test_common.metadata_transmitted(server_trailing_metadata,
- client_result.received_metadata))
- self.assertEqual(SERVER_STATUS_DETAILS,
- client_result.received_status_details)
- self.assertEqual(SERVER_STATUS_CODE, client_result.received_status_code)
- self.assertEqual(set([
- cygrpc.OperationType.send_initial_metadata,
- cygrpc.OperationType.send_message,
- cygrpc.OperationType.send_close_from_client,
- cygrpc.OperationType.receive_initial_metadata,
- cygrpc.OperationType.receive_message,
- cygrpc.OperationType.receive_status_on_client
- ]), found_client_op_types)
+ server_event_future = perform_server_operations([
+ cygrpc.operation_send_initial_metadata(empty_metadata,
+ _EMPTY_FLAGS),
+ ], "Server prologue")
- self.assertEqual(5, len(server_event.batch_operations))
- found_server_op_types = set()
- for server_result in server_event.batch_operations:
- self.assertNotIn(client_result.type, found_server_op_types)
- found_server_op_types.add(server_result.type)
- if server_result.type == cygrpc.OperationType.receive_message:
- self.assertEqual(REQUEST, server_result.received_message.bytes())
- elif server_result.type == cygrpc.OperationType.receive_close_on_server:
- self.assertFalse(server_result.received_cancelled)
- self.assertEqual(set([
- cygrpc.OperationType.send_initial_metadata,
- cygrpc.OperationType.receive_message,
- cygrpc.OperationType.send_message,
- cygrpc.OperationType.receive_close_on_server,
- cygrpc.OperationType.send_status_from_server
- ]), found_server_op_types)
+ client_event_future.result() # force completion
+ server_event_future.result()
- del client_call
- del server_call
+ # Messaging
+ for _ in range(10):
+ client_event_future = perform_client_operations([
+ cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ], "Client message")
+ server_event_future = perform_server_operations([
+ cygrpc.operation_send_message(b'', _EMPTY_FLAGS),
+ cygrpc.operation_receive_message(_EMPTY_FLAGS),
+ ], "Server receive")
+
+ client_event_future.result() # force completion
+ server_event_future.result()
+
+ # Epilogue
+ client_event_future = perform_client_operations([
+ cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS)
+ ], "Client epilogue")
+
+ server_event_future = perform_server_operations([
+ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ empty_metadata, cygrpc.StatusCode.ok, b'', _EMPTY_FLAGS)
+ ], "Server epilogue")
+
+ client_event_future.result() # force completion
+ server_event_future.result()
+
+
+class InsecureServerInsecureClient(unittest.TestCase, ServerClientMixin):
+
+ def setUp(self):
+ self.setUpMixin(None, None, None)
+
+ def tearDown(self):
+ self.tearDownMixin()
+
+
+class SecureServerSecureClient(unittest.TestCase, ServerClientMixin):
+
+ def setUp(self):
+ server_credentials = cygrpc.server_credentials_ssl(
+ None, [cygrpc.SslPemKeyCertPair(resources.private_key(),
+ resources.certificate_chain())], False)
+ client_credentials = cygrpc.channel_credentials_ssl(
+ resources.test_root_certificates(), None)
+ self.setUpMixin(server_credentials, client_credentials, _SSL_HOST_OVERRIDE)
+
+ def tearDown(self):
+ self.tearDownMixin()
if __name__ == '__main__':
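The refactoring above is the standard unittest mixin pattern: shared test bodies live on a plain object rather than a TestCase, and each concrete TestCase mixes them in and supplies its own fixture. A condensed illustration with placeholder names (unittest.TestCase is listed first, matching the classes above):

import unittest


class _TransportMixin(object):
    """Shared test bodies; concrete subclasses provide make_channel()."""

    def test_round_trip(self):
        channel = self.make_channel()
        self.assertIsNotNone(channel)


class InsecureTransportTest(unittest.TestCase, _TransportMixin):

    def make_channel(self):
        return 'insecure-channel-placeholder'


class SecureTransportTest(unittest.TestCase, _TransportMixin):

    def make_channel(self):
        return 'secure-channel-placeholder'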
diff --git a/src/python/grpcio/tests/unit/_cython/test_utilities.py b/src/python/grpcio/tests/unit/_cython/test_utilities.py
index 6a09739643..6280ce74c4 100644
--- a/src/python/grpcio/tests/unit/_cython/test_utilities.py
+++ b/src/python/grpcio/tests/unit/_cython/test_utilities.py
@@ -32,15 +32,35 @@ import threading
from grpc._cython import cygrpc
-class CompletionQueuePollFuture:
+class SimpleFuture(object):
+ """A simple future mechanism."""
- def __init__(self, completion_queue, deadline):
- def poller_function():
- self._event_result = completion_queue.poll(deadline)
- self._event_result = None
- self._thread = threading.Thread(target=poller_function)
+ def __init__(self, function, *args, **kwargs):
+ def wrapped_function():
+ try:
+ self._result = function(*args, **kwargs)
+ except Exception as error:
+ self._error = error
+ self._result = None
+ self._error = None
+ self._thread = threading.Thread(target=wrapped_function)
self._thread.start()
def result(self):
+ """The resulting value of this future.
+
+ Re-raises any exceptions.
+ """
self._thread.join()
- return self._event_result
+ if self._error:
+ # TODO(atash): re-raise exceptions in a way that preserves tracebacks
+ raise self._error
+ return self._result
+
+
+class CompletionQueuePollFuture(SimpleFuture):
+
+ def __init__(self, completion_queue, deadline):
+ super(CompletionQueuePollFuture, self).__init__(
+ lambda: completion_queue.poll(deadline))
+
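A brief usage sketch for the SimpleFuture helper introduced above: run a blocking callable off-thread and join on its result later (the import path assumes the tests package is importable; the sleep is only illustrative):

import time

from tests.unit._cython import test_utilities


def slow_double(value):
    time.sleep(0.1)
    return value * 2


future = test_utilities.SimpleFuture(slow_double, 21)
# ... other work on the calling thread ...
assert future.result() == 42  # result() re-raises if slow_double threw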
diff --git a/src/python/grpcio/tests/unit/_empty_message_test.py b/src/python/grpcio/tests/unit/_empty_message_test.py
new file mode 100644
index 0000000000..f324f6216b
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_empty_message_test.py
@@ -0,0 +1,137 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+import grpc
+from grpc.framework.foundation import logging_pool
+
+from tests.unit.framework.common import test_constants
+
+_REQUEST = b''
+_RESPONSE = b''
+
+_UNARY_UNARY = b'/test/UnaryUnary'
+_UNARY_STREAM = b'/test/UnaryStream'
+_STREAM_UNARY = b'/test/StreamUnary'
+_STREAM_STREAM = b'/test/StreamStream'
+
+
+def handle_unary_unary(request, servicer_context):
+ return _RESPONSE
+
+
+def handle_unary_stream(request, servicer_context):
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
+
+
+def handle_stream_unary(request_iterator, servicer_context):
+ for request in request_iterator:
+ pass
+ return _RESPONSE
+
+
+def handle_stream_stream(request_iterator, servicer_context):
+ for request in request_iterator:
+ yield _RESPONSE
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = handle_stream_stream
+ elif self.request_streaming:
+ self.stream_unary = handle_stream_unary
+ elif self.response_streaming:
+ self.unary_stream = handle_unary_stream
+ else:
+ self.unary_unary = handle_unary_unary
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(True, True)
+ else:
+ return None
+
+
+class EmptyMessageTest(unittest.TestCase):
+
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server((_GenericHandler(),), self._server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testUnaryUnary(self):
+ response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
+ self.assertEqual(_RESPONSE, response)
+
+ def testUnaryStream(self):
+ response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
+ self.assertSequenceEqual(
+ [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
+
+ def testStreamUnary(self):
+ response = self._channel.stream_unary(_STREAM_UNARY)(
+ [_REQUEST] * test_constants.STREAM_LENGTH)
+ self.assertEqual(_RESPONSE, response)
+
+ def testStreamStream(self):
+ response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
+ [_REQUEST] * test_constants.STREAM_LENGTH)
+ self.assertSequenceEqual(
+ [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
+
diff --git a/src/python/grpcio/tests/unit/_from_grpc_import_star.py b/src/python/grpcio/tests/unit/_from_grpc_import_star.py
new file mode 100644
index 0000000000..78d2fb7dc5
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_from_grpc_import_star.py
@@ -0,0 +1,38 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+_BEFORE_IMPORT = tuple(globals())
+
+from grpc import *
+
+_AFTER_IMPORT = tuple(globals())
+
+GRPC_ELEMENTS = tuple(
+ element for element in _AFTER_IMPORT
+ if element not in _BEFORE_IMPORT and element != '_BEFORE_IMPORT')
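# The module above snapshots globals() before and after a star-import to
# discover exactly what `from grpc import *` exposes. A minimal standalone
# sketch of the same technique, using the standard-library json module so it
# runs anywhere; the JSON_ELEMENTS name is illustrative only.
_BEFORE = tuple(globals())

from json import *

_AFTER = tuple(globals())

JSON_ELEMENTS = tuple(
    element for element in _AFTER
    if element not in _BEFORE and element != '_BEFORE')

if __name__ == '__main__':
    print(sorted(JSON_ELEMENTS))  # e.g. ['JSONDecoder', 'JSONEncoder', 'dump', ...]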
diff --git a/src/python/grpcio/tests/unit/_links/_transmission_test.py b/src/python/grpcio/tests/unit/_links/_transmission_test.py
index 888684d197..1f6edd18ca 100644
--- a/src/python/grpcio/tests/unit/_links/_transmission_test.py
+++ b/src/python/grpcio/tests/unit/_links/_transmission_test.py
@@ -153,7 +153,7 @@ class RoundTripTest(unittest.TestCase):
invocation_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
self.assertIs(invocation_mate.tickets()[-1].code, test_code)
- self.assertEqual(invocation_mate.tickets()[-1].message, test_message)
+ self.assertEqual(invocation_mate.tickets()[-1].message, test_message.encode())
def _perform_scenario_test(self, scenario):
test_operation_id = object()
diff --git a/src/python/grpcio/tests/unit/_metadata_test.py b/src/python/grpcio/tests/unit/_metadata_test.py
new file mode 100644
index 0000000000..77b3901261
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_metadata_test.py
@@ -0,0 +1,216 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Tests server and client side metadata API."""
+
+import unittest
+import weakref
+
+import grpc
+from grpc import _grpcio_metadata
+from grpc.framework.foundation import logging_pool
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_CHANNEL_ARGS = (('grpc.primary_user_agent', 'primary-agent'),
+ ('grpc.secondary_user_agent', 'secondary-agent'))
+
+_REQUEST = b'\x00\x00\x00'
+_RESPONSE = b'\x00\x00\x00'
+
+_UNARY_UNARY = b'/test/UnaryUnary'
+_UNARY_STREAM = b'/test/UnaryStream'
+_STREAM_UNARY = b'/test/StreamUnary'
+_STREAM_STREAM = b'/test/StreamStream'
+
+_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
+
+_CLIENT_METADATA = (
+ (b'client-md-key', b'client-md-key'),
+ (b'client-md-key-bin', b'\x00\x01')
+)
+
+_SERVER_INITIAL_METADATA = (
+ (b'server-initial-md-key', b'server-initial-md-value'),
+ (b'server-initial-md-key-bin', b'\x00\x02')
+)
+
+_SERVER_TRAILING_METADATA = (
+ (b'server-trailing-md-key', b'server-trailing-md-value'),
+ (b'server-trailing-md-key-bin', b'\x00\x03')
+)
+
+
+def user_agent(metadata):
+ for key, val in metadata:
+ if key == b'user-agent':
+ return val.decode('ascii')
+ raise KeyError('No user agent!')
+
+
+def validate_client_metadata(test, servicer_context):
+ test.assertTrue(test_common.metadata_transmitted(
+ _CLIENT_METADATA, servicer_context.invocation_metadata()))
+ test.assertTrue(user_agent(servicer_context.invocation_metadata())
+ .startswith('primary-agent ' + _USER_AGENT))
+ test.assertTrue(user_agent(servicer_context.invocation_metadata())
+ .endswith('secondary-agent'))
+
+
+def handle_unary_unary(test, request, servicer_context):
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ return _RESPONSE
+
+
+def handle_unary_stream(test, request, servicer_context):
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
+
+
+def handle_stream_unary(test, request_iterator, servicer_context):
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ # TODO(issue:#6891) We should be able to remove this loop
+ for request in request_iterator:
+ pass
+ return _RESPONSE
+
+
+def handle_stream_stream(test, request_iterator, servicer_context):
+ validate_client_metadata(test, servicer_context)
+ servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA)
+ servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
+ # TODO(issue:#6891) We should be able to remove this loop,
+ # and replace with return; yield
+ for request in request_iterator:
+ yield _RESPONSE
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, test, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = lambda x, y: handle_stream_stream(test, x, y)
+ elif self.request_streaming:
+ self.stream_unary = lambda x, y: handle_stream_unary(test, x, y)
+ elif self.response_streaming:
+ self.unary_stream = lambda x, y: handle_unary_stream(test, x, y)
+ else:
+ self.unary_unary = lambda x, y: handle_unary_unary(test, x, y)
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, test):
+ self._test = test
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(self._test, False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(self._test, False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(self._test, True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(self._test, True, True)
+ else:
+ return None
+
+
+class MetadataTest(unittest.TestCase):
+
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server((_GenericHandler(weakref.proxy(self)),),
+ self._server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel('localhost:%d' % port,
+ options=_CHANNEL_ARGS)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testUnaryUnary(self):
+ multi_callable = self._channel.unary_unary(_UNARY_UNARY)
+ unused_response, call = multi_callable(
+ _REQUEST, metadata=_CLIENT_METADATA, with_call=True)
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA, call.initial_metadata()))
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+
+ def testUnaryStream(self):
+ multi_callable = self._channel.unary_stream(_UNARY_STREAM)
+ call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA)
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA, call.initial_metadata()))
+ for _ in call:
+ pass
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+
+ def testStreamUnary(self):
+ multi_callable = self._channel.stream_unary(_STREAM_UNARY)
+ unused_response, call = multi_callable(
+ [_REQUEST] * test_constants.STREAM_LENGTH,
+ metadata=_CLIENT_METADATA, with_call=True)
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA, call.initial_metadata()))
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+
+ def testStreamStream(self):
+ multi_callable = self._channel.stream_stream(_STREAM_STREAM)
+ call = multi_callable([_REQUEST] * test_constants.STREAM_LENGTH,
+ metadata=_CLIENT_METADATA)
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_INITIAL_METADATA, call.initial_metadata()))
+ for _ in call:
+ pass
+ self.assertTrue(test_common.metadata_transmitted(
+ _SERVER_TRAILING_METADATA, call.trailing_metadata()))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
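# The metadata tuples above mix text entries with binary entries whose keys
# end in '-bin', the gRPC convention for binary-valued metadata. A small
# standalone sketch of splitting metadata on that convention; split_metadata
# is an illustrative helper, not part of the gRPC API.
def split_metadata(metadata):
    """Partitions (key, value) pairs into (text_entries, binary_entries)."""
    text_entries, binary_entries = [], []
    for key, value in metadata:
        key_name = key.decode('ascii') if isinstance(key, bytes) else key
        if key_name.endswith('-bin'):
            binary_entries.append((key, value))
        else:
            text_entries.append((key, value))
    return tuple(text_entries), tuple(binary_entries)

if __name__ == '__main__':
    text, binary = split_metadata(
        ((b'client-md-key', b'client-md-key'), (b'client-md-key-bin', b'\x00\x01')))
    assert binary == ((b'client-md-key-bin', b'\x00\x01'),)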
diff --git a/src/python/grpcio/tests/unit/_rpc_test.py b/src/python/grpcio/tests/unit/_rpc_test.py
new file mode 100644
index 0000000000..8407593c86
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_rpc_test.py
@@ -0,0 +1,768 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test of gRPC Python's application-layer API."""
+
+import itertools
+import threading
+import unittest
+from concurrent import futures
+
+import grpc
+from grpc.framework.foundation import logging_pool
+
+from tests.unit.framework.common import test_constants
+from tests.unit.framework.common import test_control
+
+_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
+_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
+_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
+_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
+
+_UNARY_UNARY = b'/test/UnaryUnary'
+_UNARY_STREAM = b'/test/UnaryStream'
+_STREAM_UNARY = b'/test/StreamUnary'
+_STREAM_STREAM = b'/test/StreamStream'
+
+
+class _Callback(object):
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._value = None
+ self._called = False
+
+ def __call__(self, value):
+ with self._condition:
+ self._value = value
+ self._called = True
+ self._condition.notify_all()
+
+ def value(self):
+ with self._condition:
+ while not self._called:
+ self._condition.wait()
+ return self._value
+
+
+class _Handler(object):
+
+ def __init__(self, control):
+ self._control = control
+
+ def handle_unary_unary(self, request, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+ return request
+
+ def handle_unary_stream(self, request, servicer_context):
+ for _ in range(test_constants.STREAM_LENGTH):
+ self._control.control()
+ yield request
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+
+ def handle_stream_unary(self, request_iterator, servicer_context):
+ if servicer_context is not None:
+ servicer_context.invocation_metadata()
+ self._control.control()
+ response_elements = []
+ for request in request_iterator:
+ self._control.control()
+ response_elements.append(request)
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+ return b''.join(response_elements)
+
+ def handle_stream_stream(self, request_iterator, servicer_context):
+ self._control.control()
+ if servicer_context is not None:
+ servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),))
+ for request in request_iterator:
+ self._control.control()
+ yield request
+ self._control.control()
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(
+ self, request_streaming, response_streaming, request_deserializer,
+ response_serializer, unary_unary, unary_stream, stream_unary,
+ stream_stream):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = request_deserializer
+ self.response_serializer = response_serializer
+ self.unary_unary = unary_unary
+ self.unary_stream = unary_stream
+ self.stream_unary = stream_unary
+ self.stream_stream = stream_stream
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, handler):
+ self._handler = handler
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(
+ False, False, None, None, self._handler.handle_unary_unary, None,
+ None, None)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(
+ False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None,
+ self._handler.handle_unary_stream, None, None)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(
+ True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, None,
+ self._handler.handle_stream_unary, None)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(
+ True, True, None, None, None, None, None,
+ self._handler.handle_stream_stream)
+ else:
+ return None
+
+
+def _unary_unary_multi_callable(channel):
+ return channel.unary_unary(_UNARY_UNARY)
+
+
+def _unary_stream_multi_callable(channel):
+ return channel.unary_stream(
+ _UNARY_STREAM,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
+
+
+def _stream_unary_multi_callable(channel):
+ return channel.stream_unary(
+ _STREAM_UNARY,
+ request_serializer=_SERIALIZE_REQUEST,
+ response_deserializer=_DESERIALIZE_RESPONSE)
+
+
+def _stream_stream_multi_callable(channel):
+ return channel.stream_stream(_STREAM_STREAM)
+
+
+class RPCTest(unittest.TestCase):
+
+ def setUp(self):
+ self._control = test_control.PauseFailControl()
+ self._handler = _Handler(self._control)
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+
+ self._server = grpc.server((), self._server_pool)
+ port = self._server.add_insecure_port(b'[::]:0')
+ self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
+ self._server.start()
+
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def testUnrecognizedMethod(self):
+ request = b'abc'
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ self._channel.unary_unary(b'NoSuchMethod')(request)
+
+ self.assertEqual(
+ grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code())
+
+ def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response = multi_callable(
+ request, metadata=(
+ (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponse'),))
+
+ self.assertEqual(expected_response, response)
+
+ def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response, call = multi_callable(
+ request, metadata=(
+ (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),),
+ with_call=True)
+
+ self.assertEqual(expected_response, response)
+ self.assertIs(grpc.StatusCode.OK, call.code())
+
+ def testSuccessfulUnaryRequestFutureUnaryResponse(self):
+ request = b'\x07\x08'
+ expected_response = self._handler.handle_unary_unary(request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ response_future = multi_callable.future(
+ request, metadata=(
+ (b'test', b'SuccessfulUnaryRequestFutureUnaryResponse'),))
+ response = response_future.result()
+
+ self.assertEqual(expected_response, response)
+
+ def testSuccessfulUnaryRequestStreamResponse(self):
+ request = b'\x37\x58'
+ expected_responses = tuple(self._handler.handle_unary_stream(request, None))
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request,
+ metadata=((b'test', b'SuccessfulUnaryRequestStreamResponse'),))
+ responses = tuple(response_iterator)
+
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testSuccessfulStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(iter(requests), None)
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response = multi_callable(
+ request_iterator,
+ metadata=((b'test', b'SuccessfulStreamRequestBlockingUnaryResponse'),))
+
+ self.assertEqual(expected_response, response)
+
+ def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(iter(requests), None)
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response, call = multi_callable(
+ request_iterator,
+ metadata=(
+ (b'test', b'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),
+ ), with_call=True)
+
+ self.assertEqual(expected_response, response)
+ self.assertIs(grpc.StatusCode.OK, call.code())
+
+ def testSuccessfulStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(iter(requests), None)
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=(
+ (b'test', b'SuccessfulStreamRequestFutureUnaryResponse'),))
+ response = response_future.result()
+
+ self.assertEqual(expected_response, response)
+
+ def testSuccessfulStreamRequestStreamResponse(self):
+ requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
+ expected_responses = tuple(
+ self._handler.handle_stream_stream(iter(requests), None))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=((b'test', b'SuccessfulStreamRequestStreamResponse'),))
+ responses = tuple(response_iterator)
+
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testSequentialInvocations(self):
+ first_request = b'\x07\x08'
+ second_request = b'\x0809'
+ expected_first_response = self._handler.handle_unary_unary(
+ first_request, None)
+ expected_second_response = self._handler.handle_unary_unary(
+ second_request, None)
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ first_response = multi_callable(
+ first_request, metadata=((b'test', b'SequentialInvocations'),))
+ second_response = multi_callable(
+ second_request, metadata=((b'test', b'SequentialInvocations'),))
+
+ self.assertEqual(expected_first_response, first_response)
+ self.assertEqual(expected_second_response, second_response)
+
+ def testConcurrentBlockingInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(iter(requests), None)
+ expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request_iterator = iter(requests)
+ response_future = pool.submit(
+ multi_callable, request_iterator,
+ metadata=((b'test', b'ConcurrentBlockingInvocations'),))
+ response_futures[index] = response_future
+ responses = tuple(
+ response_future.result() for response_future in response_futures)
+
+ pool.shutdown(wait=True)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testConcurrentFutureInvocations(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ expected_response = self._handler.handle_stream_unary(iter(requests), None)
+ expected_responses = [expected_response] * test_constants.THREAD_CONCURRENCY
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ request_iterator = iter(requests)
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=((b'test', b'ConcurrentFutureInvocations'),))
+ response_futures[index] = response_future
+ responses = tuple(
+ response_future.result() for response_future in response_futures)
+
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
+ pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ request = b'\x67\x68'
+ expected_response = self._handler.handle_unary_unary(request, None)
+ response_futures = [None] * test_constants.THREAD_CONCURRENCY
+ lock = threading.Lock()
+ test_is_running_cell = [True]
+ def wrap_future(future):
+ def wrap():
+ try:
+ return future.result()
+ except grpc.RpcError:
+ with lock:
+ if test_is_running_cell[0]:
+ raise
+ return None
+ return wrap
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ for index in range(test_constants.THREAD_CONCURRENCY):
+ inner_response_future = multi_callable.future(
+ request,
+ metadata=(
+ (b'test',
+ b'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
+ outer_response_future = pool.submit(wrap_future(inner_response_future))
+ response_futures[index] = outer_response_future
+
+ some_completed_response_futures_iterator = itertools.islice(
+ futures.as_completed(response_futures),
+ test_constants.THREAD_CONCURRENCY // 2)
+ for response_future in some_completed_response_futures_iterator:
+ self.assertEqual(expected_response, response_future.result())
+ with lock:
+ test_is_running_cell[0] = False
+
+ def testConsumingOneStreamResponseUnaryRequest(self):
+ request = b'\x57\x38'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request,
+ metadata=(
+ (b'test', b'ConsumingOneStreamResponseUnaryRequest'),))
+ next(response_iterator)
+
+ def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
+ request = b'\x57\x38'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request,
+ metadata=(
+ (b'test', b'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ next(response_iterator)
+
+ def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
+ requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(
+ (b'test', b'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH // 2):
+ next(response_iterator)
+
+ def testConsumingTooManyStreamResponsesStreamRequest(self):
+ requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=(
+ (b'test', b'ConsumingTooManyStreamResponsesStreamRequest'),))
+ for _ in range(test_constants.STREAM_LENGTH):
+ next(response_iterator)
+ for _ in range(test_constants.STREAM_LENGTH):
+ with self.assertRaises(StopIteration):
+ next(response_iterator)
+
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.OK, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testCancelledUnaryRequestUnaryResponse(self):
+ request = b'\x07\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request,
+ metadata=((b'test', b'CancelledUnaryRequestUnaryResponse'),))
+ response_future.cancel()
+
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+ self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
+
+ def testCancelledUnaryRequestStreamResponse(self):
+ request = b'\x07\x19'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self._control.pause():
+ response_iterator = multi_callable(
+ request,
+ metadata=((b'test', b'CancelledUnaryRequestStreamResponse'),))
+ self._control.block_until_paused()
+ response_iterator.cancel()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(response_iterator)
+ self.assertIs(grpc.StatusCode.CANCELLED, exception_context.exception.code())
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testCancelledStreamRequestUnaryResponse(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=((b'test', b'CancelledStreamRequestUnaryResponse'),))
+ self._control.block_until_paused()
+ response_future.cancel()
+
+ self.assertTrue(response_future.cancelled())
+ with self.assertRaises(grpc.FutureCancelledError):
+ response_future.result()
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+
+ def testCancelledStreamRequestStreamResponse(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.pause():
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=((b'test', b'CancelledStreamRequestStreamResponse'),))
+ response_iterator.cancel()
+
+ with self.assertRaises(grpc.RpcError):
+ next(response_iterator)
+ self.assertIsNotNone(response_iterator.initial_metadata())
+ self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
+ self.assertIsNotNone(response_iterator.details())
+ self.assertIsNotNone(response_iterator.trailing_metadata())
+
+ def testExpiredUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x07\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredUnaryRequestBlockingUnaryResponse'),),
+ with_call=True)
+
+ self.assertIsNotNone(exception_context.exception.initial_metadata())
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIsNotNone(exception_context.exception.details())
+ self.assertIsNotNone(exception_context.exception.trailing_metadata())
+
+ def testExpiredUnaryRequestFutureUnaryResponse(self):
+ request = b'\x07\x17'
+ callback = _Callback()
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredUnaryRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ self.assertIs(response_future, value_passed_to_callback)
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code())
+
+ def testExpiredUnaryRequestStreamResponse(self):
+ request = b'\x07\x19'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredUnaryRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code())
+
+ def testExpiredStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request_iterator, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredStreamRequestBlockingUnaryResponse'),))
+
+ self.assertIsNotNone(exception_context.exception.initial_metadata())
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIsNotNone(exception_context.exception.details())
+ self.assertIsNotNone(exception_context.exception.trailing_metadata())
+
+ def testExpiredStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+ callback = _Callback()
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.pause():
+ response_future = multi_callable.future(
+ request_iterator, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredStreamRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(response_future, value_passed_to_callback)
+ self.assertIsNotNone(response_future.initial_metadata())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
+ self.assertIsNotNone(response_future.details())
+ self.assertIsNotNone(response_future.trailing_metadata())
+
+ def testExpiredStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.pause():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request_iterator, timeout=test_constants.SHORT_TIMEOUT,
+ metadata=((b'test', b'ExpiredStreamRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(
+ grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_iterator.code())
+
+ def testFailedUnaryRequestBlockingUnaryResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request,
+ metadata=((b'test', b'FailedUnaryRequestBlockingUnaryResponse'),),
+ with_call=True)
+
+ self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+
+ def testFailedUnaryRequestFutureUnaryResponse(self):
+ request = b'\x37\x17'
+ callback = _Callback()
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ with self._control.fail():
+ response_future = multi_callable.future(
+ request,
+ metadata=((b'test', b'FailedUnaryRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(
+ grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code())
+ self.assertIs(response_future, value_passed_to_callback)
+
+ def testFailedUnaryRequestStreamResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ with self._control.fail():
+ response_iterator = multi_callable(
+ request,
+ metadata=((b'test', b'FailedUnaryRequestStreamResponse'),))
+ next(response_iterator)
+
+ self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+
+ def testFailedStreamRequestBlockingUnaryResponse(self):
+ requests = tuple(b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(
+ request_iterator,
+ metadata=((b'test', b'FailedStreamRequestBlockingUnaryResponse'),))
+
+ self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+
+ def testFailedStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+ callback = _Callback()
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ with self._control.fail():
+ response_future = multi_callable.future(
+ request_iterator,
+ metadata=((b'test', b'FailedStreamRequestFutureUnaryResponse'),))
+ response_future.add_done_callback(callback)
+ value_passed_to_callback = callback.value()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_future.result()
+ self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
+ self.assertIs(
+ grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+ self.assertIsInstance(response_future.exception(), grpc.RpcError)
+ self.assertIs(response_future, value_passed_to_callback)
+
+ def testFailedStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ with self._control.fail():
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ response_iterator = multi_callable(
+ request_iterator,
+ metadata=((b'test', b'FailedStreamRequestStreamResponse'),))
+ tuple(response_iterator)
+
+ self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code())
+ self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
+
+ def testIgnoredUnaryRequestFutureUnaryResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_unary_multi_callable(self._channel)
+ multi_callable.future(
+ request,
+ metadata=((b'test', b'IgnoredUnaryRequestFutureUnaryResponse'),))
+
+ def testIgnoredUnaryRequestStreamResponse(self):
+ request = b'\x37\x17'
+
+ multi_callable = _unary_stream_multi_callable(self._channel)
+ multi_callable(
+ request,
+ metadata=((b'test', b'IgnoredUnaryRequestStreamResponse'),))
+
+ def testIgnoredStreamRequestFutureUnaryResponse(self):
+ requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_unary_multi_callable(self._channel)
+ multi_callable.future(
+ request_iterator,
+ metadata=((b'test', b'IgnoredStreamRequestFutureUnaryResponse'),))
+
+ def testIgnoredStreamRequestStreamResponse(self):
+ requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
+ request_iterator = iter(requests)
+
+ multi_callable = _stream_stream_multi_callable(self._channel)
+ multi_callable(
+ request_iterator,
+ metadata=((b'test', b'IgnoredStreamRequestStreamResponse'),))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
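# The custom (de)serializers at the top of _rpc_test.py stretch the payload
# (x * 2 on requests, x * 3 on responses) and slice it back down on the other
# side. A quick standalone check of that round-trip property, restating the
# same lambdas from the file above:
serialize_request = lambda bytestring: bytestring * 2
deserialize_request = lambda bytestring: bytestring[len(bytestring) // 2:]
serialize_response = lambda bytestring: bytestring * 3
deserialize_response = lambda bytestring: bytestring[:len(bytestring) // 3]

payload = b'\x07\x08'
assert deserialize_request(serialize_request(payload)) == payload
assert deserialize_response(serialize_response(payload)) == payload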
diff --git a/src/python/grpcio/tests/unit/_thread_cleanup_test.py b/src/python/grpcio/tests/unit/_thread_cleanup_test.py
new file mode 100644
index 0000000000..3e4f317edc
--- /dev/null
+++ b/src/python/grpcio/tests/unit/_thread_cleanup_test.py
@@ -0,0 +1,117 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Tests for CleanupThread."""
+
+import threading
+import time
+import unittest
+
+from grpc import _common
+
+_SHORT_TIME = 0.5
+_LONG_TIME = 2.0
+_EPSILON = 0.1
+
+
+def cleanup(timeout):
+ if timeout is not None:
+ time.sleep(timeout)
+ else:
+ time.sleep(_LONG_TIME)
+
+
+def slow_cleanup(timeout):
+ # Don't respect timeout
+ time.sleep(_LONG_TIME)
+
+
+class CleanupThreadTest(unittest.TestCase):
+
+ def testTargetInvocation(self):
+ event = threading.Event()
+ def target(arg1, arg2, arg3=None):
+ self.assertEqual('arg1', arg1)
+ self.assertEqual('arg2', arg2)
+ self.assertEqual('arg3', arg3)
+ event.set()
+
+ cleanup_thread = _common.CleanupThread(behavior=lambda x: None,
+ target=target, name='test-name',
+ args=('arg1', 'arg2'), kwargs={'arg3': 'arg3'})
+ cleanup_thread.start()
+ cleanup_thread.join()
+ self.assertEqual(cleanup_thread.name, 'test-name')
+ self.assertTrue(event.is_set())
+
+ def testJoinNoTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join()
+ end_time = time.time()
+ self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeoutSlowBehavior(self):
+ cleanup_thread = _common.CleanupThread(behavior=slow_cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON)
+
+ def testJoinTimeoutSlowTarget(self):
+ event = threading.Event()
+ def target():
+ event.wait(_LONG_TIME)
+ cleanup_thread = _common.CleanupThread(behavior=cleanup, target=target)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(_SHORT_TIME)
+ end_time = time.time()
+ self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON)
+ event.set()
+
+ def testJoinZeroTimeout(self):
+ cleanup_thread = _common.CleanupThread(behavior=cleanup)
+ cleanup_thread.start()
+ start_time = time.time()
+ cleanup_thread.join(0)
+ end_time = time.time()
+ self.assertAlmostEqual(0, end_time - start_time, delta=_EPSILON)
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
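# The timing assertions above constrain grpc._common.CleanupThread: join(timeout)
# must spend at most the caller's budget across the cleanup behavior and the
# underlying join. One plausible wrapper consistent with those assertions (a
# hypothetical sketch, not the actual grpc._common implementation, which may
# order the two steps differently):
import threading
import time

class SketchCleanupThread(threading.Thread):

    def __init__(self, behavior, **kwargs):
        super(SketchCleanupThread, self).__init__(**kwargs)
        self._behavior = behavior

    def join(self, timeout=None):
        start = time.time()
        # Run the cleanup behavior first, bounded by the caller's timeout...
        self._behavior(timeout)
        if timeout is not None:
            timeout = max(timeout - (time.time() - start), 0)
        # ...then spend whatever remains waiting for the target to finish.
        super(SketchCleanupThread, self).join(timeout)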
diff --git a/src/python/grpcio/tests/unit/beta/_beta_features_test.py b/src/python/grpcio/tests/unit/beta/_beta_features_test.py
index bb2893a21b..3a9701b8eb 100644
--- a/src/python/grpcio/tests/unit/beta/_beta_features_test.py
+++ b/src/python/grpcio/tests/unit/beta/_beta_features_test.py
@@ -42,8 +42,8 @@ from tests.unit.framework.common import test_constants
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
-_PER_RPC_CREDENTIALS_METADATA_KEY = 'my-call-credentials-metadata-key'
-_PER_RPC_CREDENTIALS_METADATA_VALUE = 'my-call-credentials-metadata-value'
+_PER_RPC_CREDENTIALS_METADATA_KEY = b'my-call-credentials-metadata-key'
+_PER_RPC_CREDENTIALS_METADATA_VALUE = b'my-call-credentials-metadata-value'
_GROUP = 'group'
_UNARY_UNARY = 'unary-unary'
diff --git a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py b/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py
index 5dc8720639..488f7d7141 100644
--- a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py
+++ b/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py
@@ -187,5 +187,15 @@ class ChannelConnectivityTest(unittest.TestCase):
server_completion_queue_thread.join()
+class ConnectivityStatesTest(unittest.TestCase):
+
+ def testBetaConnectivityStates(self):
+ self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
+ self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
+
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/beta/_implementations_test.py b/src/python/grpcio/tests/unit/beta/_implementations_test.py
index 26be670c45..127f93e9bb 100644
--- a/src/python/grpcio/tests/unit/beta/_implementations_test.py
+++ b/src/python/grpcio/tests/unit/beta/_implementations_test.py
@@ -29,8 +29,11 @@
"""Tests the implementations module of the gRPC Python Beta API."""
+import datetime
import unittest
+from oauth2client import client as oauth2client_client
+
from grpc.beta import implementations
from tests.unit import resources
@@ -49,5 +52,19 @@ class ChannelCredentialsTest(unittest.TestCase):
channel_credentials, implementations.ChannelCredentials)
+class CallCredentialsTest(unittest.TestCase):
+
+ def test_google_call_credentials(self):
+ creds = oauth2client_client.GoogleCredentials(
+ 'token', 'client_id', 'secret', 'refresh_token',
+ datetime.datetime(2008, 6, 24), 'https://refresh.uri.com/',
+ 'user_agent')
+ call_creds = implementations.google_call_credentials(creds)
+ self.assertIsInstance(call_creds, implementations.CallCredentials)
+
+ def test_access_token_call_credentials(self):
+ call_creds = implementations.access_token_call_credentials('token')
+ self.assertIsInstance(call_creds, implementations.CallCredentials)
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/src/python/grpcio/tests/unit/beta/_not_found_test.py b/src/python/grpcio/tests/unit/beta/_not_found_test.py
index 44fcd1e13c..37b8c49120 100644
--- a/src/python/grpcio/tests/unit/beta/_not_found_test.py
+++ b/src/python/grpcio/tests/unit/beta/_not_found_test.py
@@ -61,7 +61,7 @@ class NotFoundTest(unittest.TestCase):
def test_future_stream_unary_not_found(self):
rpc_future = self._generic_stub.future_stream_unary(
- 'grupe', 'mevvod', b'def', test_constants.LONG_TIMEOUT)
+ 'grupe', 'mevvod', [b'def'], test_constants.LONG_TIMEOUT)
with self.assertRaises(face.LocalError) as exception_assertion_context:
rpc_future.result()
self.assertIs(
diff --git a/src/python/grpcio/tests/unit/beta/test_utilities.py b/src/python/grpcio/tests/unit/beta/test_utilities.py
index 0313e06a93..e374b203ce 100644
--- a/src/python/grpcio/tests/unit/beta/test_utilities.py
+++ b/src/python/grpcio/tests/unit/beta/test_utilities.py
@@ -29,7 +29,7 @@
"""Test-appropriate entry points into the gRPC Python Beta API."""
-from grpc._adapter import _intermediary_low
+import grpc
from grpc.beta import implementations
@@ -48,9 +48,8 @@ def not_really_secure_channel(
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
- hostport = '%s:%d' % (host, port)
- intermediary_low_channel = _intermediary_low.Channel(
- hostport, channel_credentials._low_credentials,
- server_host_override=server_host_override)
- return implementations.Channel(
- intermediary_low_channel._internal, intermediary_low_channel)
+ target = '%s:%d' % (host, port)
+ channel = grpc.secure_channel(
+ target, channel_credentials._credentials,
+ ((b'grpc.ssl_target_name_override', server_host_override,),))
+ return implementations.Channel(channel)
diff --git a/src/python/grpcio/tests/unit/framework/common/test_control.py b/src/python/grpcio/tests/unit/framework/common/test_control.py
index ca5ba3a854..088e2f8b88 100644
--- a/src/python/grpcio/tests/unit/framework/common/test_control.py
+++ b/src/python/grpcio/tests/unit/framework/common/test_control.py
@@ -60,10 +60,16 @@ class Control(six.with_metaclass(abc.ABCMeta)):
class PauseFailControl(Control):
- """A Control that can be used to pause or fail code under control."""
+ """A Control that can be used to pause or fail code under control.
+
+ This object is only safe for use from two threads: one from the system
+ under test calling control, and the other from the test system calling
+ pause, block_until_paused, and fail.
+ """
def __init__(self):
self._condition = threading.Condition()
+ self._pause = False
self._paused = False
self._fail = False
@@ -72,19 +78,31 @@ class PauseFailControl(Control):
if self._fail:
raise Defect()
- while self._paused:
+ while self._pause:
+ self._paused = True
+ self._condition.notify_all()
self._condition.wait()
+ self._paused = False
@contextlib.contextmanager
def pause(self):
"""Pauses code under control while controlling code is in context."""
with self._condition:
- self._paused = True
+ self._pause = True
yield
with self._condition:
- self._paused = False
+ self._pause = False
self._condition.notify_all()
+ def block_until_paused(self):
+ """Blocks controlling code until code under control is paused.
+
+ May only be called within the context of a pause call.
+ """
+ with self._condition:
+ while not self._paused:
+ self._condition.wait()
+
@contextlib.contextmanager
def fail(self):
"""Fails code under control while controlling code is in context."""
diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py b/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py
index 4f8e26c9a2..5d16bf98be 100644
--- a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py
+++ b/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py
@@ -29,6 +29,8 @@
"""Tests of the base interface of RPC Framework."""
+from __future__ import division
+
import logging
import random
import threading
@@ -54,13 +56,13 @@ class _Serialization(test_interfaces.Serialization):
return request + request
def deserialize_request(self, serialized_request):
- return serialized_request[:len(serialized_request) / 2]
+ return serialized_request[:len(serialized_request) // 2]
def serialize_response(self, response):
return response * 3
def deserialize_response(self, serialized_response):
- return serialized_response[2 * len(serialized_response) / 3:]
+ return serialized_response[2 * len(serialized_response) // 3:]
def _advance(quadruples, operator, controller):
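# The / -> // switch above matters because the added "from __future__ import
# division" makes / true division on Python 2 as well (and it always is on
# Python 3), and a float is not a valid slice index. A worked illustration,
# standalone and standard library only:
serialized = b'abcdef'
assert serialized[:len(serialized) // 2] == b'abc'   # integer index: fine
# serialized[:len(serialized) / 2] would raise TypeError under true division,
# since 3.0 is not a valid slice index.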
diff --git a/src/python/grpcio/tests/unit/test_common.py b/src/python/grpcio/tests/unit/test_common.py
index 7b4d20dccb..b779f65e7e 100644
--- a/src/python/grpcio/tests/unit/test_common.py
+++ b/src/python/grpcio/tests/unit/test_common.py
@@ -61,6 +61,10 @@ def metadata_transmitted(original_metadata, transmitted_metadata):
original = collections.defaultdict(list)
for key_value_pair in original_metadata:
key, value = tuple(key_value_pair)
+ if not isinstance(key, bytes):
+ key = key.encode()
+ if not isinstance(value, bytes):
+ value = value.encode()
original[key].append(value)
transmitted = collections.defaultdict(list)
for key_value_pair in transmitted_metadata:
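# The hunk above normalizes str keys and values to bytes before the multimap
# comparison, so tests may pass metadata as either type. A standalone sketch of
# that normalization step; normalize_pair is an illustrative helper, not part
# of tests.unit.test_common:
def normalize_pair(key, value):
    if not isinstance(key, bytes):
        key = key.encode()
    if not isinstance(value, bytes):
        value = value.encode()
    return key, value

assert normalize_pair('md-key', 'md-value') == (b'md-key', b'md-value')
assert normalize_pair(b'md-key-bin', b'\x00\x01') == (b'md-key-bin', b'\x00\x01')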
diff --git a/src/ruby/bin/math_services.rb b/src/ruby/bin/math_services.rb
index 2d482129c2..34c36abdda 100755
--- a/src/ruby/bin/math_services.rb
+++ b/src/ruby/bin/math_services.rb
@@ -1,13 +1,41 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: math.proto for package 'math'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
require 'grpc'
require 'math'
module Math
module Math
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
@@ -16,9 +44,20 @@ module Math
self.unmarshal_class_method = :decode
self.service_name = 'math.Math'
+ # Div divides args.dividend by args.divisor and returns the quotient and
+ # remainder.
rpc :Div, DivArgs, DivReply
+ # DivMany accepts an arbitrary number of division args from the client stream
+ # and sends back the results in the reply stream. The stream continues until
+ # the client closes its end; the server does the same after sending all the
+ # replies. The stream ends immediately if either end aborts.
rpc :DivMany, stream(DivArgs), stream(DivReply)
+ # Fib generates numbers in the Fibonacci sequence. If args.limit > 0, Fib
+ # generates up to limit numbers; otherwise it continues until the call is
+ # canceled. Unlike Fib above, Fib has no final FibReply.
rpc :Fib, FibArgs, stream(Num)
+ # Sum sums a stream of numbers, returning the final result once the stream
+ # is closed.
rpc :Sum, stream(Num), Num
end
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 1b06273af9..b436057c16 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -101,30 +101,14 @@ static VALUE sym_message;
static VALUE sym_status;
static VALUE sym_cancelled;
-/* hash_all_calls is a hash of Call address -> reference count that is used to
- * track the creation and destruction of rb_call instances.
- */
-static VALUE hash_all_calls;
-
/* Destroys a Call. */
static void grpc_rb_call_destroy(void *p) {
- grpc_call *call = NULL;
- VALUE ref_count = Qnil;
+ grpc_call* call = NULL;
if (p == NULL) {
return;
- };
- call = (grpc_call *)p;
-
- ref_count = rb_hash_aref(hash_all_calls, OFFT2NUM((VALUE)call));
- if (ref_count == Qnil) {
- return; /* No longer in the hash, so already deleted */
- } else if (NUM2UINT(ref_count) == 1) {
- rb_hash_delete(hash_all_calls, OFFT2NUM((VALUE)call));
- grpc_call_destroy(call);
- } else {
- rb_hash_aset(hash_all_calls, OFFT2NUM((VALUE)call),
- UINT2NUM(NUM2UINT(ref_count) - 1));
}
+ call = (grpc_call *)p;
+ grpc_call_destroy(call);
}
static size_t md_ary_datasize(const void *p) {
@@ -151,7 +135,7 @@ static const rb_data_type_t grpc_rb_md_ary_data_type = {
* touches a hash object.
* TODO(yugui) Directly use st_table and call the free function earlier?
*/
- 0,
+ 0,
#endif
};
@@ -163,12 +147,7 @@ static const rb_data_type_t grpc_call_data_type = {
NULL,
NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
- /* it is unsafe to specify RUBY_TYPED_FREE_IMMEDIATELY because
- * grpc_rb_call_destroy
- * touches a hash object.
- * TODO(yugui) Directly use st_table and call the free function earlier?
- */
- 0,
+ RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
@@ -190,6 +169,11 @@ const char *grpc_call_error_detail_of(grpc_call_error err) {
static VALUE grpc_rb_call_cancel(VALUE self) {
grpc_call *call = NULL;
grpc_call_error err;
+ if (RTYPEDDATA_DATA(self) == NULL) {
+ //This call has been closed
+ return Qnil;
+ }
+
TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
err = grpc_call_cancel(call, NULL);
if (err != GRPC_CALL_OK) {
@@ -200,11 +184,29 @@ static VALUE grpc_rb_call_cancel(VALUE self) {
return Qnil;
}
+/* Releases the c-level resources associated with a call
+ Once a call has been closed, no further requests can be
+ processed.
+*/
+static VALUE grpc_rb_call_close(VALUE self) {
+ grpc_call *call = NULL;
+ TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
+ if(call != NULL) {
+ grpc_call_destroy(call);
+ RTYPEDDATA_DATA(self) = NULL;
+ }
+ return Qnil;
+}
+
/* Called to obtain the peer that this call is connected to. */
static VALUE grpc_rb_call_get_peer(VALUE self) {
VALUE res = Qnil;
grpc_call *call = NULL;
char *peer = NULL;
+ if (RTYPEDDATA_DATA(self) == NULL) {
+ rb_raise(grpc_rb_eCallError, "Cannot get peer value on closed call");
+ return Qnil;
+ }
TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
peer = grpc_call_get_peer(call);
res = rb_str_new2(peer);
@@ -218,6 +220,10 @@ static VALUE grpc_rb_call_get_peer_cert(VALUE self) {
grpc_call *call = NULL;
VALUE res = Qnil;
grpc_auth_context *ctx = NULL;
+ if (RTYPEDDATA_DATA(self) == NULL) {
+ rb_raise(grpc_rb_eCallError, "Cannot get peer cert on closed call");
+ return Qnil;
+ }
TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
ctx = grpc_call_auth_context(call);
@@ -323,6 +329,10 @@ static VALUE grpc_rb_call_set_credentials(VALUE self, VALUE credentials) {
grpc_call *call = NULL;
grpc_call_credentials *creds;
grpc_call_error err;
+ if (RTYPEDDATA_DATA(self) == NULL) {
+ rb_raise(grpc_rb_eCallError, "Cannot set credentials of closed call");
+ return Qnil;
+ }
TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
creds = grpc_rb_get_wrapped_call_credentials(credentials);
err = grpc_call_set_credentials(call, creds);
@@ -731,7 +741,7 @@ static VALUE grpc_run_batch_stack_build_result(run_batch_stack *st) {
}
tag = Object.new
timeout = 10
- call.start_batch(cqueue, tag, timeout, ops)
+ call.start_batch(cq, tag, timeout, ops)
Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
@@ -749,6 +759,10 @@ static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
VALUE result = Qnil;
VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
unsigned write_flag = 0;
+ if (RTYPEDDATA_DATA(self) == NULL) {
+ rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
+ return Qnil;
+ }
TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);
/* Validate the ops args, adding them to a ruby array */
@@ -888,6 +902,7 @@ void Init_grpc_call() {
/* Add ruby analogues of the Call methods. */
rb_define_method(grpc_rb_cCall, "run_batch", grpc_rb_call_run_batch, 4);
rb_define_method(grpc_rb_cCall, "cancel", grpc_rb_call_cancel, 0);
+ rb_define_method(grpc_rb_cCall, "close", grpc_rb_call_close, 0);
rb_define_method(grpc_rb_cCall, "peer", grpc_rb_call_get_peer, 0);
rb_define_method(grpc_rb_cCall, "peer_cert", grpc_rb_call_get_peer_cert, 0);
rb_define_method(grpc_rb_cCall, "status", grpc_rb_call_get_status, 0);
@@ -925,11 +940,6 @@ void Init_grpc_call() {
"BatchResult", "send_message", "send_metadata", "send_close",
"send_status", "message", "metadata", "status", "cancelled", NULL);
- /* The hash for reference counting calls, to ensure they can't be destroyed
- * more than once */
- hash_all_calls = rb_hash_new();
- rb_define_const(grpc_rb_cCall, "INTERNAL_ALL_CALLs", hash_all_calls);
-
Init_grpc_error_codes();
Init_grpc_op_codes();
Init_grpc_write_flags();
@@ -944,16 +954,8 @@ grpc_call *grpc_rb_get_wrapped_call(VALUE v) {
/* Obtains the wrapped object for a given call */
VALUE grpc_rb_wrap_call(grpc_call *c) {
- VALUE obj = Qnil;
if (c == NULL) {
return Qnil;
}
- obj = rb_hash_aref(hash_all_calls, OFFT2NUM((VALUE)c));
- if (obj == Qnil) { /* Not in the hash add it */
- rb_hash_aset(hash_all_calls, OFFT2NUM((VALUE)c), UINT2NUM(1));
- } else {
- rb_hash_aset(hash_all_calls, OFFT2NUM((VALUE)c),
- UINT2NUM(NUM2UINT(obj) + 1));
- }
return TypedData_Wrap_Struct(grpc_rb_cCall, &grpc_call_data_type, c);
}
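
The rb_call.c changes above replace the hash_all_calls reference-counting table with an explicit close method: grpc_call_destroy now runs either from close or from ordinary GC, RTYPEDDATA_DATA is nulled on close, and every other call method checks for the nulled pointer and raises. A hedged Ruby-level sketch of the resulting behavior (the error message is from the diff; GRPC::Core::CallError as the Ruby class behind grpc_rb_eCallError, and the way the call is obtained, are assumptions):

    # Hypothetical sketch of the new Call#close behavior (not part of the diff).
    call = obtain_call_somehow   # assume a GRPC::Core::Call from a channel or server

    call.close    # releases the C-level grpc_call immediately
    call.close    # a second close is a no-op: the wrapped pointer is already NULL

    begin
      call.peer   # further use raises, e.g. "Cannot get peer value on closed call"
    rescue GRPC::Core::CallError => e
      puts e.message
    end
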
diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c
index 013321ffc8..6943c93d4a 100644
--- a/src/ruby/ext/grpc/rb_channel.c
+++ b/src/ruby/ext/grpc/rb_channel.c
@@ -373,7 +373,7 @@ static void Init_grpc_connectivity_states() {
rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
- LONG2NUM(GRPC_CHANNEL_FATAL_FAILURE));
+ LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
}
void Init_grpc_channel() {
diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c
index b6ddbe88dc..9466402db0 100644
--- a/src/ruby/ext/grpc/rb_completion_queue.c
+++ b/src/ruby/ext/grpc/rb_completion_queue.c
@@ -150,6 +150,14 @@ static rb_data_type_t grpc_rb_completion_queue_data_type = {
#endif
};
+/* Releases the c-level resources associated with a completion queue */
+static VALUE grpc_rb_completion_queue_close(VALUE self) {
+ grpc_completion_queue* cq = grpc_rb_get_wrapped_completion_queue(self);
+ grpc_rb_completion_queue_destroy(cq);
+ RTYPEDDATA_DATA(self) = NULL;
+ return Qnil;
+}
+
/* Allocates a completion queue. */
static VALUE grpc_rb_completion_queue_alloc(VALUE cls) {
grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
@@ -212,6 +220,11 @@ void Init_grpc_completion_queue() {
this func, so no separate initialization step is necessary. */
rb_define_alloc_func(grpc_rb_cCompletionQueue,
grpc_rb_completion_queue_alloc);
+
+ /* close: Provides a way to close the underlying file descriptor without
+ waiting for ruby garbage collection. */
+ rb_define_method(grpc_rb_cCompletionQueue, "close",
+ grpc_rb_completion_queue_close, 0);
}
/* Gets the wrapped completion queue from the ruby wrapper */
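
The completion queue wrapper gains the matching explicit release: close tears down the underlying grpc_completion_queue (and its file descriptor) without waiting for Ruby garbage collection. A minimal sketch, assuming GRPC::Core::CompletionQueue as the Ruby-level class:

    # Minimal sketch of CompletionQueue#close (not part of the diff).
    cq = GRPC::Core::CompletionQueue.new
    # ... drive operations that use this queue ...
    cq.close   # frees the C-level queue immediately rather than at GC time
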
diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c
index 06a07ac646..9246893f9f 100644
--- a/src/ruby/ext/grpc/rb_grpc.c
+++ b/src/ruby/ext/grpc/rb_grpc.c
@@ -318,7 +318,7 @@ void Init_grpc_c() {
grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");
grpc_rb_sNewServerRpc =
rb_struct_define("NewServerRpc", "method", "host",
- "deadline", "metadata", "call", NULL);
+ "deadline", "metadata", "call", "cq", NULL);
grpc_rb_sStatus =
rb_struct_define("Status", "code", "details", "metadata", NULL);
sym_code = ID2SYM(rb_intern("code"));
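
NewServerRpc gains a sixth member, cq, so the completion queue used to accept a call now travels back to Ruby alongside the call itself; rb_server.c fills it and rpc_server.rb consumes it in the hunks further down. A hedged sketch of the struct as seen from Ruby (argument names and example values are illustrative only):

    # Hypothetical view of the extended NewServerRpc struct (not part of the diff).
    an_rpc = server.request_call(per_call_cq, tag, deadline)
    an_rpc.method     # e.g. "/math.Math/Div"
    an_rpc.metadata   # request metadata hash
    an_rpc.call       # the GRPC::Core::Call to serve
    an_rpc.cq         # new: the completion queue that accepted this call
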
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 44d7bded23..4d9707638f 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -33,7 +33,7 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include "rb_grpc_imports.generated.h"
@@ -126,7 +126,8 @@ grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
grpc_is_binary_header_type grpc_is_binary_header_import;
grpc_call_error_to_string_type grpc_call_error_to_string_import;
-grpc_cronet_secure_channel_create_type grpc_cronet_secure_channel_create_import;
+grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
+grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import;
grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import;
@@ -396,7 +397,8 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal");
grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header");
grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
- grpc_cronet_secure_channel_create_import = (grpc_cronet_secure_channel_create_type) GetProcAddress(library, "grpc_cronet_secure_channel_create");
+ grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
+ grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next");
grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator");
grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity");
@@ -577,4 +579,4 @@ void grpc_rb_load_imports(HMODULE library) {
gpr_thd_join_import = (gpr_thd_join_type) GetProcAddress(library, "gpr_thd_join");
}
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index 814506920e..072e29c232 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -36,14 +36,14 @@
#include <grpc/support/port_platform.h>
-#ifdef GPR_WIN32
+#ifdef GPR_WINDOWS
#include <windows.h>
#include <grpc/census.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
-#include <grpc/grpc_cronet.h>
+#include <grpc/grpc_posix.h>
#include <grpc/grpc_security.h>
#include <grpc/impl/codegen/alloc.h>
#include <grpc/impl/codegen/byte_buffer.h>
@@ -57,7 +57,7 @@
#include <grpc/support/cpu.h>
#include <grpc/support/histogram.h>
#include <grpc/support/host_port.h>
-#include <grpc/support/log_win32.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/thd.h>
@@ -329,9 +329,12 @@ extern grpc_is_binary_header_type grpc_is_binary_header_import;
typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
extern grpc_call_error_to_string_type grpc_call_error_to_string_import;
#define grpc_call_error_to_string grpc_call_error_to_string_import
-typedef grpc_channel *(*grpc_cronet_secure_channel_create_type)(void *engine, const char *target, const grpc_channel_args *args, void *reserved);
-extern grpc_cronet_secure_channel_create_type grpc_cronet_secure_channel_create_import;
-#define grpc_cronet_secure_channel_create grpc_cronet_secure_channel_create_import
+typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args);
+extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
+#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import
+typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd);
+extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
+#define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import
typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it);
extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import;
#define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import
@@ -479,7 +482,7 @@ extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_impo
typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader);
extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import
-typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...);
+typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(4, 5);
extern gpr_log_type gpr_log_import;
#define gpr_log gpr_log_import
typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message);
@@ -824,7 +827,7 @@ extern gpr_format_message_type gpr_format_message_import;
typedef char *(*gpr_strdup_type)(const char *src);
extern gpr_strdup_type gpr_strdup_import;
#define gpr_strdup gpr_strdup_import
-typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...);
+typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...) GPRC_PRINT_FORMAT_CHECK(2, 3);
extern gpr_asprintf_type gpr_asprintf_import;
#define gpr_asprintf gpr_asprintf_import
typedef const char *(*gpr_subprocess_binary_extension_type)();
@@ -869,6 +872,6 @@ extern gpr_thd_join_type gpr_thd_join_import;
void grpc_rb_load_imports(HMODULE library);
-#endif /* GPR_WIN32 */
+#endif /* GPR_WINDOWS */
#endif
diff --git a/src/ruby/ext/grpc/rb_loader.c b/src/ruby/ext/grpc/rb_loader.c
index 242535f164..19a6b33c29 100644
--- a/src/ruby/ext/grpc/rb_loader.c
+++ b/src/ruby/ext/grpc/rb_loader.c
@@ -33,7 +33,7 @@
#include "rb_grpc_imports.generated.h"
-#if GPR_WIN32
+#if GPR_WINDOWS
#include <tchar.h>
int grpc_rb_load_core() {
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index 0899feb685..f108b8acfc 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -234,7 +234,7 @@ static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
err = grpc_server_request_call(
s->wrapped, &call, &st.details, &st.md_ary,
grpc_rb_get_wrapped_completion_queue(cqueue),
- grpc_rb_get_wrapped_completion_queue(cqueue),
+ grpc_rb_get_wrapped_completion_queue(s->mark),
ROBJECT(tag_new));
if (err != GRPC_CALL_OK) {
grpc_request_call_stack_cleanup(&st);
@@ -244,7 +244,7 @@ static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
return Qnil;
}
- ev = grpc_rb_completion_queue_pluck_event(cqueue, tag_new, timeout);
+ ev = grpc_rb_completion_queue_pluck_event(s->mark, tag_new, timeout);
if (ev.type == GRPC_QUEUE_TIMEOUT) {
grpc_request_call_stack_cleanup(&st);
return Qnil;
@@ -262,7 +262,7 @@ static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
rb_str_new2(st.details.host),
rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
INT2NUM(deadline.tv_nsec)),
- grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), NULL);
+ grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), cqueue, NULL);
grpc_request_call_stack_cleanup(&st);
return result;
}
diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb
index e449e89176..b03ddbc193 100644
--- a/src/ruby/lib/grpc/generic/active_call.rb
+++ b/src/ruby/lib/grpc/generic/active_call.rb
@@ -103,7 +103,7 @@ module GRPC
#
# @param call [Call] the call used by the ActiveCall
# @param q [CompletionQueue] the completion queue used to accept
- # the call
+ # the call. This queue will be closed on call completion.
# @param marshal [Function] f(obj)->string that marshal requests
# @param unmarshal [Function] f(string)->obj that unmarshals responses
# @param deadline [Fixnum] the deadline for the call to complete
@@ -191,6 +191,8 @@ module GRPC
@call.status = batch_result.status
op_is_done
batch_result.check_status
+ @call.close
+ @cq.close
end
# remote_send sends a request to the remote endpoint.
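
In active_call.rb the call and its completion queue are now released as soon as the final status has been handled, rather than lingering until garbage collection; the updated @param comment makes the new ownership explicit. The practical consequence for callers, sketched below with placeholder call/marshal/unmarshal/deadline values, is that each ActiveCall should own a dedicated queue that is not reused afterwards:

    # Hypothetical sketch of the ownership implied by this change (not part of the diff).
    cq = GRPC::Core::CompletionQueue.new            # dedicated to this one call
    active_call = GRPC::ActiveCall.new(call, cq, marshal, unmarshal, deadline)
    # ... run the RPC to completion ...
    # ActiveCall closes both the call and cq once the final status is processed,
    # so neither should be touched again here.
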
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 1f6d5f365d..238f409a1d 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -69,6 +69,10 @@ module GRPC
@readq = Queue.new
@unmarshal = unmarshal
@metadata_tag = metadata_tag
+ @reads_complete = false
+ @writes_complete = false
+ @complete = false
+ @done_mutex = Mutex.new
end
# Begins orchestration of the Bidi stream for a client sending requests.
@@ -115,6 +119,16 @@ module GRPC
@op_notifier.notify(self)
end
+ # signals that a bidi operation is complete (read + write)
+ def finished
+ @done_mutex.synchronize do
+ return unless @reads_complete && @writes_complete && !@complete
+ @call.close
+ @cq.close
+ @complete = true
+ end
+ end
+
# performs a read using @call.run_batch, ensures metadata is set up
def read_using_run_batch
ops = { RECV_MESSAGE => nil }
@@ -163,12 +177,16 @@ module GRPC
SEND_CLOSE_FROM_CLIENT => nil)
GRPC.logger.debug('bidi-write-loop: done')
notify_done
+ @writes_complete = true
+ finished
end
GRPC.logger.debug('bidi-write-loop: finished')
rescue StandardError => e
GRPC.logger.warn('bidi-write-loop: failed')
GRPC.logger.warn(e)
notify_done
+ @writes_complete = true
+ finished
raise e
end
@@ -212,6 +230,8 @@ module GRPC
@readq.push(e) # let each_queued_msg terminate with this error
end
GRPC.logger.debug('bidi-read-loop: finished')
+ @reads_complete = true
+ finished
end
end
end
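
bidi_call.rb now tracks completion of the read loop and the write loop separately and releases the call and its queue exactly once, from whichever loop finishes last. The synchronization idea in isolation, as a generic runnable sketch (release_resources is a placeholder standing in for the @call.close / @cq.close pair in the real code):

    # Generic sketch of the "release once, after both loops finish" pattern
    # (not the BidiCall code itself).
    require 'thread'

    done_mutex = Mutex.new
    state = { reads_complete: false, writes_complete: false, complete: false }

    def release_resources
      # placeholder for @call.close and @cq.close in bidi_call.rb
    end

    finished = proc do
      done_mutex.synchronize do
        if state[:reads_complete] && state[:writes_complete] && !state[:complete]
          release_resources
          state[:complete] = true
        end
      end
    end

    # the read loop sets state[:reads_complete] = true and calls finished.call when it
    # drains; the write loop sets state[:writes_complete] = true and calls finished.call
    # after SEND_CLOSE_FROM_CLIENT; only the second caller actually releases anything.
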
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 6b0b4ce557..ab7333d133 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -355,7 +355,7 @@ module GRPC
return an_rpc if @pool.jobs_waiting <= @max_waiting_requests
GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
noop = proc { |x| x }
- c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
+ c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline)
c.send_status(GRPC::Core::StatusCodes::RESOURCE_EXHAUSTED, '')
nil
end
@@ -366,7 +366,7 @@ module GRPC
return an_rpc if rpc_descs.key?(mth)
GRPC.logger.warn("UNIMPLEMENTED: #{an_rpc}")
noop = proc { |x| x }
- c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
+ c = ActiveCall.new(an_rpc.call, an_rpc.cq, noop, noop, an_rpc.deadline)
c.send_status(GRPC::Core::StatusCodes::UNIMPLEMENTED, '')
nil
end
@@ -377,7 +377,8 @@ module GRPC
loop_tag = Object.new
while running_state == :running
begin
- an_rpc = @server.request_call(@cq, loop_tag, INFINITE_FUTURE)
+ comp_queue = Core::CompletionQueue.new
+ an_rpc = @server.request_call(comp_queue, loop_tag, INFINITE_FUTURE)
break if (!an_rpc.nil?) && an_rpc.call.nil?
active_call = new_active_server_call(an_rpc)
unless active_call.nil?
@@ -416,15 +417,16 @@ module GRPC
unless @connect_md_proc.nil?
connect_md = @connect_md_proc.call(an_rpc.method, an_rpc.metadata)
end
- an_rpc.call.run_batch(@cq, handle_call_tag, INFINITE_FUTURE,
+ an_rpc.call.run_batch(an_rpc.cq, handle_call_tag, INFINITE_FUTURE,
SEND_INITIAL_METADATA => connect_md)
+
return nil unless available?(an_rpc)
return nil unless implemented?(an_rpc)
# Create the ActiveCall
GRPC.logger.info("deadline is #{an_rpc.deadline}; (now=#{Time.now})")
rpc_desc = rpc_descs[an_rpc.method.to_sym]
- c = ActiveCall.new(an_rpc.call, @cq,
+ c = ActiveCall.new(an_rpc.call, an_rpc.cq,
rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
an_rpc.deadline)
mth = an_rpc.method.to_sym
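
The rpc_server.rb hunks tie the pieces together: the accept loop builds a fresh CompletionQueue for each request_call instead of reusing the server-wide @cq, and the queue then follows the call through NewServerRpc#cq into the ActiveCall that serves the RPC (which, per the active_call.rb change above, closes it on completion). A condensed sketch of that flow, with error handling, pool dispatch and the available?/implemented? checks elided:

    # Condensed sketch of the per-call completion queue flow (details elided).
    while running_state == :running
      comp_queue = GRPC::Core::CompletionQueue.new      # one queue per accepted call
      an_rpc = @server.request_call(comp_queue, loop_tag, INFINITE_FUTURE)
      break if !an_rpc.nil? && an_rpc.call.nil?

      # the queue rides along inside the NewServerRpc struct ...
      an_rpc.call.run_batch(an_rpc.cq, handle_call_tag, INFINITE_FUTURE,
                            SEND_INITIAL_METADATA => connect_md)

      # ... and is owned by the ActiveCall, which closes it when the RPC completes
      c = GRPC::ActiveCall.new(an_rpc.call, an_rpc.cq,
                               rpc_desc.marshal_proc, rpc_desc.unmarshal_proc(:input),
                               an_rpc.deadline)
    end
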
diff --git a/src/ruby/pb/grpc/health/v1/health_services.rb b/src/ruby/pb/grpc/health/v1/health_services.rb
index cb79b20437..68a3956f54 100644
--- a/src/ruby/pb/grpc/health/v1/health_services.rb
+++ b/src/ruby/pb/grpc/health/v1/health_services.rb
@@ -1,5 +1,35 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: grpc/health/v1/health.proto for package 'grpc.health.v1'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
require 'grpc'
require 'grpc/health/v1/health'
@@ -8,8 +38,6 @@ module Grpc
module Health
module V1
module Health
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
diff --git a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb
index 9f6e7e0e42..eb523ffa6f 100644
--- a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb
+++ b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services.rb
@@ -1,15 +1,45 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
-# Source: src/proto/grpc/testing/duplicate/echo_duplicate.proto for package 'grpc.testing.duplicate'
+# Source: grpc/testing/duplicate/echo_duplicate.proto for package 'grpc.testing.duplicate'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This is a partial copy of echo.proto with a different package name.
+#
require 'grpc'
-require 'src/proto/grpc/testing/duplicate/echo_duplicate'
+require 'grpc/testing/duplicate/echo_duplicate'
module Grpc
module Testing
module Duplicate
module EchoTestService
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
diff --git a/src/ruby/pb/grpc/testing/metrics_services.rb b/src/ruby/pb/grpc/testing/metrics_services.rb
index f5778bbbb1..467b7b3ee5 100644
--- a/src/ruby/pb/grpc/testing/metrics_services.rb
+++ b/src/ruby/pb/grpc/testing/metrics_services.rb
@@ -1,5 +1,41 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: grpc/testing/metrics.proto for package 'grpc.testing'
+# Original file comments:
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Contains the definitions for a metrics service and the type of metrics
+# exposed by the service.
+#
+# Currently, 'Gauge' (i.e a metric that represents the measured value of
+# something at an instant of time) is the only metric type supported by the
+# service.
require 'grpc'
require 'grpc/testing/metrics'
@@ -7,8 +43,6 @@ require 'grpc/testing/metrics'
module Grpc
module Testing
module MetricsService
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
@@ -17,7 +51,10 @@ module Grpc
self.unmarshal_class_method = :decode
self.service_name = 'grpc.testing.MetricsService'
+ # Returns the values of all the gauges that are currently being maintained by
+ # the service
rpc :GetAllGauges, EmptyMessage, stream(GaugeResponse)
+ # Returns the value of one gauge
rpc :GetGauge, GaugeRequest, GaugeResponse
end
diff --git a/src/ruby/pb/src/proto/grpc/testing/empty.rb b/src/ruby/pb/src/proto/grpc/testing/empty.rb
new file mode 100644
index 0000000000..9c2568d605
--- /dev/null
+++ b/src/ruby/pb/src/proto/grpc/testing/empty.rb
@@ -0,0 +1,15 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/testing/empty.proto
+
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "grpc.testing.Empty" do
+ end
+end
+
+module Grpc
+ module Testing
+ Empty = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Empty").msgclass
+ end
+end
diff --git a/src/ruby/pb/src/proto/grpc/testing/messages.rb b/src/ruby/pb/src/proto/grpc/testing/messages.rb
new file mode 100644
index 0000000000..2bdfe0eade
--- /dev/null
+++ b/src/ruby/pb/src/proto/grpc/testing/messages.rb
@@ -0,0 +1,84 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/testing/messages.proto
+
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "grpc.testing.Payload" do
+ optional :type, :enum, 1, "grpc.testing.PayloadType"
+ optional :body, :bytes, 2
+ end
+ add_message "grpc.testing.EchoStatus" do
+ optional :code, :int32, 1
+ optional :message, :string, 2
+ end
+ add_message "grpc.testing.SimpleRequest" do
+ optional :response_type, :enum, 1, "grpc.testing.PayloadType"
+ optional :response_size, :int32, 2
+ optional :payload, :message, 3, "grpc.testing.Payload"
+ optional :fill_username, :bool, 4
+ optional :fill_oauth_scope, :bool, 5
+ optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
+ optional :response_status, :message, 7, "grpc.testing.EchoStatus"
+ end
+ add_message "grpc.testing.SimpleResponse" do
+ optional :payload, :message, 1, "grpc.testing.Payload"
+ optional :username, :string, 2
+ optional :oauth_scope, :string, 3
+ end
+ add_message "grpc.testing.StreamingInputCallRequest" do
+ optional :payload, :message, 1, "grpc.testing.Payload"
+ end
+ add_message "grpc.testing.StreamingInputCallResponse" do
+ optional :aggregated_payload_size, :int32, 1
+ end
+ add_message "grpc.testing.ResponseParameters" do
+ optional :size, :int32, 1
+ optional :interval_us, :int32, 2
+ end
+ add_message "grpc.testing.StreamingOutputCallRequest" do
+ optional :response_type, :enum, 1, "grpc.testing.PayloadType"
+ repeated :response_parameters, :message, 2, "grpc.testing.ResponseParameters"
+ optional :payload, :message, 3, "grpc.testing.Payload"
+ optional :response_compression, :enum, 6, "grpc.testing.CompressionType"
+ optional :response_status, :message, 7, "grpc.testing.EchoStatus"
+ end
+ add_message "grpc.testing.StreamingOutputCallResponse" do
+ optional :payload, :message, 1, "grpc.testing.Payload"
+ end
+ add_message "grpc.testing.ReconnectParams" do
+ optional :max_reconnect_backoff_ms, :int32, 1
+ end
+ add_message "grpc.testing.ReconnectInfo" do
+ optional :passed, :bool, 1
+ repeated :backoff_ms, :int32, 2
+ end
+ add_enum "grpc.testing.PayloadType" do
+ value :COMPRESSABLE, 0
+ value :UNCOMPRESSABLE, 1
+ value :RANDOM, 2
+ end
+ add_enum "grpc.testing.CompressionType" do
+ value :NONE, 0
+ value :GZIP, 1
+ value :DEFLATE, 2
+ end
+end
+
+module Grpc
+ module Testing
+ Payload = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.Payload").msgclass
+ EchoStatus = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.EchoStatus").msgclass
+ SimpleRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleRequest").msgclass
+ SimpleResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.SimpleResponse").msgclass
+ StreamingInputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallRequest").msgclass
+ StreamingInputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingInputCallResponse").msgclass
+ ResponseParameters = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ResponseParameters").msgclass
+ StreamingOutputCallRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallRequest").msgclass
+ StreamingOutputCallResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.StreamingOutputCallResponse").msgclass
+ ReconnectParams = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectParams").msgclass
+ ReconnectInfo = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.ReconnectInfo").msgclass
+ PayloadType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.PayloadType").enummodule
+ CompressionType = Google::Protobuf::DescriptorPool.generated_pool.lookup("grpc.testing.CompressionType").enummodule
+ end
+end
diff --git a/src/ruby/pb/src/proto/grpc/testing/test.rb b/src/ruby/pb/src/proto/grpc/testing/test.rb
new file mode 100644
index 0000000000..245b5ce00c
--- /dev/null
+++ b/src/ruby/pb/src/proto/grpc/testing/test.rb
@@ -0,0 +1,14 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/testing/test.proto
+
+require 'google/protobuf'
+
+require 'src/proto/grpc/testing/empty'
+require 'src/proto/grpc/testing/messages'
+Google::Protobuf::DescriptorPool.generated_pool.build do
+end
+
+module Grpc
+ module Testing
+ end
+end
diff --git a/src/ruby/pb/src/proto/grpc/testing/test_services.rb b/src/ruby/pb/src/proto/grpc/testing/test_services.rb
new file mode 100644
index 0000000000..2652de5e6d
--- /dev/null
+++ b/src/ruby/pb/src/proto/grpc/testing/test_services.rb
@@ -0,0 +1,110 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# Source: src/proto/grpc/testing/test.proto for package 'grpc.testing'
+# Original file comments:
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# An integration test service that covers all the method signature permutations
+# of unary/streaming requests/responses.
+#
+
+require 'grpc'
+require 'src/proto/grpc/testing/test'
+
+module Grpc
+ module Testing
+ module TestService
+ # A simple service to test the various types of RPCs and experiment with
+ # performance with various types of payload.
+ class Service
+
+ include GRPC::GenericService
+
+ self.marshal_class_method = :encode
+ self.unmarshal_class_method = :decode
+ self.service_name = 'grpc.testing.TestService'
+
+ # One empty request followed by one empty response.
+ rpc :EmptyCall, Empty, Empty
+ # One request followed by one response.
+ rpc :UnaryCall, SimpleRequest, SimpleResponse
+ # One request followed by a sequence of responses (streamed download).
+ # The server returns the payload with client desired type and sizes.
+ rpc :StreamingOutputCall, StreamingOutputCallRequest, stream(StreamingOutputCallResponse)
+ # A sequence of requests followed by one response (streamed upload).
+ # The server returns the aggregated size of client payload as the result.
+ rpc :StreamingInputCall, stream(StreamingInputCallRequest), StreamingInputCallResponse
+ # A sequence of requests with each request served by the server immediately.
+ # As one request could lead to multiple responses, this interface
+ # demonstrates the idea of full duplexing.
+ rpc :FullDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse)
+ # A sequence of requests followed by a sequence of responses.
+ # The server buffers all the client requests and then serves them in order. A
+ # stream of responses are returned to the client when the server starts with
+ # first request.
+ rpc :HalfDuplexCall, stream(StreamingOutputCallRequest), stream(StreamingOutputCallResponse)
+ end
+
+ Stub = Service.rpc_stub_class
+ end
+ module UnimplementedService
+ # A simple service NOT implemented at servers so clients can test for
+ # that case.
+ class Service
+
+ include GRPC::GenericService
+
+ self.marshal_class_method = :encode
+ self.unmarshal_class_method = :decode
+ self.service_name = 'grpc.testing.UnimplementedService'
+
+ # A call that no server should implement
+ rpc :UnimplementedCall, Empty, Empty
+ end
+
+ Stub = Service.rpc_stub_class
+ end
+ module ReconnectService
+ # A service used to control reconnect server.
+ class Service
+
+ include GRPC::GenericService
+
+ self.marshal_class_method = :encode
+ self.unmarshal_class_method = :decode
+ self.service_name = 'grpc.testing.ReconnectService'
+
+ rpc :Start, ReconnectParams, Empty
+ rpc :Stop, Empty, ReconnectInfo
+ end
+
+ Stub = Service.rpc_stub_class
+ end
+ end
+end
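
The newly generated test_services.rb gives the Ruby interop code stubs for TestService, UnimplementedService and ReconnectService. For orientation, a hedged sketch of exercising the unary and server-streaming methods through the TestService stub; the address, the :this_channel_is_insecure credential symbol, and the snake_case method names are assumptions, not part of the generated file:

    # Hypothetical interop-style client sketch (not part of the generated file).
    require 'grpc'
    require 'src/proto/grpc/testing/test_services'

    stub = Grpc::Testing::TestService::Stub.new('localhost:8080',
                                                :this_channel_is_insecure)

    # One empty request followed by one empty response.
    stub.empty_call(Grpc::Testing::Empty.new)

    # One request followed by a stream of responses of the requested sizes.
    req = Grpc::Testing::StreamingOutputCallRequest.new(
      response_parameters: [Grpc::Testing::ResponseParameters.new(size: 10)])
    stub.streaming_output_call(req).each { |resp| puts resp.payload.body.length }
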
diff --git a/src/ruby/qps/src/proto/grpc/testing/services_services.rb b/src/ruby/qps/src/proto/grpc/testing/services_services.rb
index 3fd9f20f47..94b9a1e164 100644
--- a/src/ruby/qps/src/proto/grpc/testing/services_services.rb
+++ b/src/ruby/qps/src/proto/grpc/testing/services_services.rb
@@ -1,5 +1,37 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: src/proto/grpc/testing/services.proto for package 'grpc.testing'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# An integration test service that covers all the method signature permutations
+# of unary/streaming requests/responses.
require 'grpc'
require 'src/proto/grpc/testing/services'
@@ -7,8 +39,6 @@ require 'src/proto/grpc/testing/services'
module Grpc
module Testing
module BenchmarkService
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
@@ -17,15 +47,17 @@ module Grpc
self.unmarshal_class_method = :decode
self.service_name = 'grpc.testing.BenchmarkService'
+ # One request followed by one response.
+ # The server returns the client payload as-is.
rpc :UnaryCall, SimpleRequest, SimpleResponse
+ # One request followed by one response.
+ # The server returns the client payload as-is.
rpc :StreamingCall, stream(SimpleRequest), stream(SimpleResponse)
end
Stub = Service.rpc_stub_class
end
module WorkerService
-
- # TODO: add proto service documentation here
class Service
include GRPC::GenericService
@@ -34,9 +66,23 @@ module Grpc
self.unmarshal_class_method = :decode
self.service_name = 'grpc.testing.WorkerService'
+ # Start server with specified workload.
+ # First request sent specifies the ServerConfig followed by ServerStatus
+ # response. After that, a "Mark" can be sent anytime to request the latest
+ # stats. Closing the stream will initiate shutdown of the test server
+ # and once the shutdown has finished, the OK status is sent to terminate
+ # this RPC.
rpc :RunServer, stream(ServerArgs), stream(ServerStatus)
+ # Start client with specified workload.
+ # First request sent specifies the ClientConfig followed by ClientStatus
+ # response. After that, a "Mark" can be sent anytime to request the latest
+ # stats. Closing the stream will initiate shutdown of the test client
+ # and once the shutdown has finished, the OK status is sent to terminate
+ # this RPC.
rpc :RunClient, stream(ClientArgs), stream(ClientStatus)
+ # Just return the core count - unary call
rpc :CoreCount, CoreRequest, CoreResponse
+ # Quit this worker
rpc :QuitWorker, Void, Void
end