Diffstat (limited to 'src')
-rwxr-xr-xsrc/boringssl/gen_build_yaml.py2
-rw-r--r--src/compiler/cpp_generator.cc96
-rw-r--r--src/compiler/ruby_generator.cc39
-rw-r--r--src/core/ext/census/grpc_filter.c4
-rw-r--r--src/core/ext/client_channel/README.md (renamed from src/core/ext/client_config/README.md)21
-rw-r--r--src/core/ext/client_channel/channel_connectivity.c (renamed from src/core/ext/client_config/channel_connectivity.c)2
-rw-r--r--src/core/ext/client_channel/client_channel.c (renamed from src/core/ext/client_config/client_channel.c)319
-rw-r--r--src/core/ext/client_channel/client_channel.h (renamed from src/core/ext/client_config/client_channel.h)10
-rw-r--r--src/core/ext/client_channel/client_channel_factory.c (renamed from src/core/ext/client_config/client_channel_factory.c)6
-rw-r--r--src/core/ext/client_channel/client_channel_factory.h (renamed from src/core/ext/client_config/client_channel_factory.h)17
-rw-r--r--src/core/ext/client_channel/client_channel_plugin.c (renamed from src/core/ext/client_config/client_config_plugin.c)12
-rw-r--r--src/core/ext/client_channel/connector.c (renamed from src/core/ext/client_config/connector.c)2
-rw-r--r--src/core/ext/client_channel/connector.h (renamed from src/core/ext/client_config/connector.h)10
-rw-r--r--src/core/ext/client_channel/default_initial_connect_string.c (renamed from src/core/ext/client_config/default_initial_connect_string.c)5
-rw-r--r--src/core/ext/client_channel/http_connect_handshaker.c (renamed from src/core/ext/client_config/http_connect_handshaker.c)4
-rw-r--r--src/core/ext/client_channel/http_connect_handshaker.h (renamed from src/core/ext/client_config/http_connect_handshaker.h)6
-rw-r--r--src/core/ext/client_channel/initial_connect_string.c (renamed from src/core/ext/client_config/initial_connect_string.c)11
-rw-r--r--src/core/ext/client_channel/initial_connect_string.h (renamed from src/core/ext/client_config/initial_connect_string.h)16
-rw-r--r--src/core/ext/client_channel/lb_policy.c (renamed from src/core/ext/client_config/lb_policy.c)2
-rw-r--r--src/core/ext/client_channel/lb_policy.h (renamed from src/core/ext/client_config/lb_policy.h)13
-rw-r--r--src/core/ext/client_channel/lb_policy_factory.c (renamed from src/core/ext/client_config/lb_policy_factory.c)80
-rw-r--r--src/core/ext/client_channel/lb_policy_factory.h (renamed from src/core/ext/client_config/lb_policy_factory.h)50
-rw-r--r--src/core/ext/client_channel/lb_policy_registry.c (renamed from src/core/ext/client_config/lb_policy_registry.c)2
-rw-r--r--src/core/ext/client_channel/lb_policy_registry.h (renamed from src/core/ext/client_config/lb_policy_registry.h)8
-rw-r--r--src/core/ext/client_channel/parse_address.c (renamed from src/core/ext/client_config/parse_address.c)35
-rw-r--r--src/core/ext/client_channel/parse_address.h (renamed from src/core/ext/client_config/parse_address.h)18
-rw-r--r--src/core/ext/client_channel/resolver.c (renamed from src/core/ext/client_config/resolver.c)5
-rw-r--r--src/core/ext/client_channel/resolver.h (renamed from src/core/ext/client_config/resolver.h)26
-rw-r--r--src/core/ext/client_channel/resolver_factory.c (renamed from src/core/ext/client_config/resolver_factory.c)2
-rw-r--r--src/core/ext/client_channel/resolver_factory.h (renamed from src/core/ext/client_config/resolver_factory.h)19
-rw-r--r--src/core/ext/client_channel/resolver_registry.c (renamed from src/core/ext/client_config/resolver_registry.c)16
-rw-r--r--src/core/ext/client_channel/resolver_registry.h (renamed from src/core/ext/client_config/resolver_registry.h)15
-rw-r--r--src/core/ext/client_channel/subchannel.c (renamed from src/core/ext/client_config/subchannel.c)34
-rw-r--r--src/core/ext/client_channel/subchannel.h (renamed from src/core/ext/client_config/subchannel.h)18
-rw-r--r--src/core/ext/client_channel/subchannel_index.c (renamed from src/core/ext/client_config/subchannel_index.c)22
-rw-r--r--src/core/ext/client_channel/subchannel_index.h (renamed from src/core/ext/client_config/subchannel_index.h)14
-rw-r--r--src/core/ext/client_channel/uri_parser.c (renamed from src/core/ext/client_config/uri_parser.c)2
-rw-r--r--src/core/ext/client_channel/uri_parser.h (renamed from src/core/ext/client_config/uri_parser.h)6
-rw-r--r--src/core/ext/client_config/resolver_result.c94
-rw-r--r--src/core/ext/client_config/resolver_result.h74
-rw-r--r--src/core/ext/lb_policy/grpclb/grpclb.c311
-rw-r--r--src/core/ext/lb_policy/grpclb/grpclb.h2
-rw-r--r--src/core/ext/lb_policy/grpclb/load_balancer_api.h2
-rw-r--r--src/core/ext/lb_policy/pick_first/pick_first.c51
-rw-r--r--src/core/ext/lb_policy/round_robin/round_robin.c59
-rw-r--r--src/core/ext/load_reporting/load_reporting.h20
-rw-r--r--src/core/ext/load_reporting/load_reporting_filter.c4
-rw-r--r--src/core/ext/resolver/dns/native/dns_resolver.c56
-rw-r--r--src/core/ext/resolver/sockaddr/sockaddr_resolver.c56
-rw-r--r--src/core/ext/transport/chttp2/alpn/alpn.c2
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create.c73
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c6
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.c84
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2.c8
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c8
-rw-r--r--src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c7
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_plugin.c3
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.c2356
-rw-r--r--src/core/ext/transport/chttp2/transport/frame.h4
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.c68
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.h9
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.c18
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.h9
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.c13
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.c38
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.h9
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.c23
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.h9
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.c34
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.h5
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.c492
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.h17
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h674
-rw-r--r--src/core/ext/transport/chttp2/transport/parsing.c910
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_lists.c351
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.c47
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.h7
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.c466
-rw-r--r--src/core/lib/channel/channel_args.c67
-rw-r--r--src/core/lib/channel/channel_args.h15
-rw-r--r--src/core/lib/channel/channel_stack.c4
-rw-r--r--src/core/lib/channel/channel_stack.h4
-rw-r--r--src/core/lib/channel/deadline_filter.c76
-rw-r--r--src/core/lib/channel/deadline_filter.h33
-rw-r--r--src/core/lib/channel/handshaker.c2
-rw-r--r--src/core/lib/channel/http_server_filter.c7
-rw-r--r--src/core/lib/channel/message_size_filter.c94
-rw-r--r--src/core/lib/http/httpcli.c28
-rw-r--r--src/core/lib/http/httpcli.h2
-rw-r--r--src/core/lib/iomgr/closure.c18
-rw-r--r--src/core/lib/iomgr/closure.h11
-rw-r--r--src/core/lib/iomgr/combiner.c372
-rw-r--r--src/core/lib/iomgr/combiner.h15
-rw-r--r--src/core/lib/iomgr/endpoint.c4
-rw-r--r--src/core/lib/iomgr/endpoint.h4
-rw-r--r--src/core/lib/iomgr/endpoint_pair.h5
-rw-r--r--src/core/lib/iomgr/endpoint_pair_posix.c17
-rw-r--r--src/core/lib/iomgr/endpoint_pair_uv.c (renamed from src/core/lib/security/credentials/google_default/credentials_windows.c)34
-rw-r--r--src/core/lib/iomgr/endpoint_pair_windows.c13
-rw-r--r--src/core/lib/iomgr/error.c18
-rw-r--r--src/core/lib/iomgr/error.h12
-rw-r--r--src/core/lib/iomgr/ev_epoll_linux.c251
-rw-r--r--src/core/lib/iomgr/ev_epoll_linux.h5
-rw-r--r--src/core/lib/iomgr/ev_poll_and_epoll_posix.c46
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.c284
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.h1
-rw-r--r--src/core/lib/iomgr/ev_posix.c30
-rw-r--r--src/core/lib/iomgr/ev_posix.h13
-rw-r--r--src/core/lib/iomgr/exec_ctx.c61
-rw-r--r--src/core/lib/iomgr/exec_ctx.h24
-rw-r--r--src/core/lib/iomgr/iocp_windows.c6
-rw-r--r--src/core/lib/iomgr/iomgr.c11
-rw-r--r--src/core/lib/iomgr/iomgr.h2
-rw-r--r--src/core/lib/iomgr/iomgr_posix.c4
-rw-r--r--src/core/lib/iomgr/iomgr_uv.c49
-rw-r--r--src/core/lib/iomgr/iomgr_windows.c4
-rw-r--r--src/core/lib/iomgr/pollset_set_uv.c62
-rw-r--r--src/core/lib/iomgr/pollset_set_windows.c6
-rw-r--r--src/core/lib/iomgr/pollset_uv.c142
-rw-r--r--src/core/lib/iomgr/pollset_uv.h42
-rw-r--r--src/core/lib/iomgr/pollset_windows.c6
-rw-r--r--src/core/lib/iomgr/port.h129
-rw-r--r--src/core/lib/iomgr/resolve_address.h1
-rw-r--r--src/core/lib/iomgr/resolve_address_posix.c8
-rw-r--r--src/core/lib/iomgr/resolve_address_uv.c231
-rw-r--r--src/core/lib/iomgr/resolve_address_windows.c10
-rw-r--r--src/core/lib/iomgr/resource_quota.c717
-rw-r--r--src/core/lib/iomgr/resource_quota.h224
-rw-r--r--src/core/lib/iomgr/sockaddr.h12
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.c99
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.h28
-rw-r--r--src/core/lib/iomgr/socket_utils.h (renamed from src/core/lib/iomgr/workqueue_posix.h)31
-rw-r--r--src/core/lib/iomgr/socket_utils_common_posix.c32
-rw-r--r--src/core/lib/iomgr/socket_utils_linux.c16
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.c17
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.h10
-rw-r--r--src/core/lib/iomgr/socket_utils_uv.c49
-rw-r--r--src/core/lib/iomgr/socket_utils_windows.c48
-rw-r--r--src/core/lib/iomgr/socket_windows.c6
-rw-r--r--src/core/lib/iomgr/tcp_client.h9
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.c79
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.h45
-rw-r--r--src/core/lib/iomgr/tcp_client_uv.c153
-rw-r--r--src/core/lib/iomgr/tcp_client_windows.c48
-rw-r--r--src/core/lib/iomgr/tcp_posix.c101
-rw-r--r--src/core/lib/iomgr/tcp_posix.h4
-rw-r--r--src/core/lib/iomgr/tcp_server.h9
-rw-r--r--src/core/lib/iomgr/tcp_server_posix.c142
-rw-r--r--src/core/lib/iomgr/tcp_server_uv.c365
-rw-r--r--src/core/lib/iomgr/tcp_server_windows.c107
-rw-r--r--src/core/lib/iomgr/tcp_uv.c338
-rw-r--r--src/core/lib/iomgr/tcp_uv.h57
-rw-r--r--src/core/lib/iomgr/tcp_windows.c41
-rw-r--r--src/core/lib/iomgr/tcp_windows.h4
-rw-r--r--src/core/lib/iomgr/timer.h34
-rw-r--r--src/core/lib/iomgr/timer_generic.c (renamed from src/core/lib/iomgr/timer.c)6
-rw-r--r--src/core/lib/iomgr/timer_generic.h49
-rw-r--r--src/core/lib/iomgr/timer_heap.c6
-rw-r--r--src/core/lib/iomgr/timer_uv.c99
-rw-r--r--src/core/lib/iomgr/timer_uv.h47
-rw-r--r--src/core/lib/iomgr/udp_server.c202
-rw-r--r--src/core/lib/iomgr/udp_server.h8
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.c20
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.h11
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix_noop.c8
-rw-r--r--src/core/lib/iomgr/wakeup_fd_cv.c118
-rw-r--r--src/core/lib/iomgr/wakeup_fd_cv.h80
-rw-r--r--src/core/lib/iomgr/wakeup_fd_eventfd.c6
-rw-r--r--src/core/lib/iomgr/wakeup_fd_nospecial.c6
-rw-r--r--src/core/lib/iomgr/wakeup_fd_pipe.c16
-rw-r--r--src/core/lib/iomgr/wakeup_fd_posix.c39
-rw-r--r--src/core/lib/iomgr/wakeup_fd_posix.h5
-rw-r--r--src/core/lib/iomgr/workqueue.h17
-rw-r--r--src/core/lib/iomgr/workqueue_posix.c196
-rw-r--r--src/core/lib/iomgr/workqueue_uv.c66
-rw-r--r--src/core/lib/iomgr/workqueue_uv.h37
-rw-r--r--src/core/lib/iomgr/workqueue_windows.c10
-rw-r--r--src/core/lib/profiling/basic_timers.c11
-rw-r--r--src/core/lib/profiling/timers.h2
-rw-r--r--src/core/lib/security/credentials/google_default/credentials_generic.c (renamed from src/core/lib/security/credentials/google_default/credentials_posix.c)21
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.c5
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.h14
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.c16
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.c20
-rw-r--r--src/core/lib/security/transport/secure_endpoint.c7
-rw-r--r--src/core/lib/support/log.c9
-rw-r--r--src/core/lib/support/string.c11
-rw-r--r--src/core/lib/support/string.h4
-rw-r--r--src/core/lib/support/thd.c2
-rw-r--r--src/core/lib/surface/call.c136
-rw-r--r--src/core/lib/surface/call.h32
-rw-r--r--src/core/lib/surface/channel.c18
-rw-r--r--src/core/lib/surface/completion_queue.c167
-rw-r--r--src/core/lib/surface/completion_queue.h3
-rw-r--r--src/core/lib/surface/init.c6
-rw-r--r--src/core/lib/surface/server.c35
-rw-r--r--src/core/lib/surface/server.h3
-rw-r--r--src/core/lib/transport/connectivity_state.c3
-rw-r--r--src/core/lib/transport/mdstr_hash_table.c157
-rw-r--r--src/core/lib/transport/mdstr_hash_table.h92
-rw-r--r--src/core/lib/transport/method_config.c340
-rw-r--r--src/core/lib/transport/method_config.h136
-rw-r--r--src/core/lib/transport/static_metadata.c4
-rw-r--r--src/core/lib/transport/static_metadata.h21
-rw-r--r--src/core/lib/transport/transport.c35
-rw-r--r--src/core/lib/transport/transport.h9
-rw-r--r--src/core/lib/transport/transport_op_string.c105
-rw-r--r--src/core/lib/tsi/ssl_transport_security.c14
-rw-r--r--src/core/plugin_registry/grpc_cronet_plugin_registry.c8
-rw-r--r--src/core/plugin_registry/grpc_plugin_registry.c8
-rw-r--r--src/core/plugin_registry/grpc_unsecure_plugin_registry.c8
-rw-r--r--src/cpp/client/cronet_credentials.cc69
-rw-r--r--src/cpp/common/channel_arguments.cc16
-rw-r--r--src/cpp/common/resource_quota_cc.cc51
-rw-r--r--src/cpp/server/server_builder.cc159
-rw-r--r--src/cpp/server/server_cc.cc310
-rw-r--r--src/cpp/test/server_context_test_spouse.cc52
-rw-r--r--src/cpp/thread_manager/thread_manager.cc181
-rw-r--r--src/cpp/thread_manager/thread_manager.h159
-rw-r--r--src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs4
-rw-r--r--src/csharp/Grpc.Core/DefaultCallInvoker.cs1
-rw-r--r--src/csharp/Grpc.Core/Grpc.Core.csproj2
-rw-r--r--src/csharp/Grpc.Core/GrpcEnvironment.cs1
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCall.cs19
-rw-r--r--src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs15
-rw-r--r--src/csharp/Grpc.Core/Internal/ClientResponseStream.cs2
-rw-r--r--src/csharp/Grpc.Core/Internal/CompletionRegistry.cs29
-rw-r--r--src/csharp/Grpc.Core/Internal/InterceptingCallInvoker.cs2
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs7
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeMethods.cs41
-rw-r--r--src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs85
-rw-r--r--src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs10
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerCallHandler.cs45
-rw-r--r--src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs6
-rw-r--r--src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs6
-rw-r--r--src/csharp/Grpc.Core/Metadata.cs70
-rw-r--r--src/csharp/Grpc.Core/Server.cs58
-rw-r--r--src/csharp/Grpc.Core/ServerCallContext.cs9
-rw-r--r--src/csharp/Grpc.Core/Utils/TaskUtils.cs59
-rw-r--r--src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json5
-rw-r--r--src/csharp/Grpc.IntegrationTesting/InterarrivalTimers.cs2
-rw-r--r--src/csharp/Grpc.IntegrationTesting/InteropClient.cs19
-rw-r--r--src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs8
-rw-r--r--src/csharp/Grpc.IntegrationTesting/QpsWorker.cs5
-rw-r--r--src/csharp/Grpc.IntegrationTesting/Test.cs37
-rw-r--r--src/csharp/Grpc.IntegrationTesting/TestGrpc.cs105
-rw-r--r--src/csharp/ext/grpc_csharp_ext.c64
-rw-r--r--src/node/ext/byte_buffer.cc14
-rw-r--r--src/node/ext/call.cc81
-rw-r--r--src/node/ext/call.h68
-rw-r--r--src/node/ext/channel.cc12
-rw-r--r--src/node/ext/completion_queue.cc114
-rw-r--r--src/node/ext/completion_queue.h46
-rw-r--r--src/node/ext/completion_queue_async_worker.cc2
-rw-r--r--src/node/ext/node_grpc.cc23
-rw-r--r--src/node/ext/server.cc93
-rw-r--r--src/node/ext/server.h1
-rw-r--r--src/node/index.js4
-rw-r--r--src/node/interop/interop_client.js15
-rw-r--r--src/node/performance/benchmark_client_express.js291
-rw-r--r--src/node/performance/benchmark_server.js5
-rw-r--r--src/node/performance/benchmark_server_express.js109
-rw-r--r--src/node/performance/worker.js10
-rw-r--r--src/node/performance/worker_service_impl.js228
-rw-r--r--src/node/src/client.js1
-rw-r--r--src/node/src/common.js2
-rw-r--r--src/node/src/grpc_extension.js2
-rw-r--r--src/node/test/async_test.js1
-rw-r--r--src/node/test/interop_sanity_test.js4
-rw-r--r--src/objective-c/!ProtoCompiler-gRPCPlugin.podspec4
-rw-r--r--src/objective-c/!ProtoCompiler.podspec2
-rw-r--r--src/objective-c/BoringSSL.podspec532
-rw-r--r--src/objective-c/CronetFramework.podspec39
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+ChannelArg.h17
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h16
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+OAuth2.h14
-rw-r--r--src/objective-c/GRPCClient/GRPCCall+Tests.h25
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.h189
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.m210
-rw-r--r--src/objective-c/GRPCClient/private/GRPCCompletionQueue.h17
-rw-r--r--src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h19
-rw-r--r--src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m98
-rw-r--r--src/objective-c/GRPCClient/private/GRPCHost.m127
-rw-r--r--src/objective-c/GRPCClient/private/GRPCReachabilityFlagNames.xmacro.h2
-rw-r--r--src/objective-c/GRPCClient/private/GRPCRequestHeaders.m41
-rw-r--r--src/objective-c/GRPCClient/private/GRPCWrappedCall.m86
-rw-r--r--src/objective-c/GRPCClient/private/NSError+GRPC.h7
-rw-r--r--src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj2
-rw-r--r--src/objective-c/tests/Connectivity/Podfile22
-rw-r--r--src/objective-c/tests/InteropTests.m17
-rw-r--r--src/php/lib/Grpc/BaseStub.php16
-rwxr-xr-xsrc/php/tests/interop/interop_client.php28
-rw-r--r--src/proto/grpc/testing/control.proto11
-rw-r--r--src/proto/grpc/testing/duplicate/echo_duplicate.proto1
-rw-r--r--src/proto/grpc/testing/proto2/empty2.proto37
-rw-r--r--src/proto/grpc/testing/proto2/empty2_extensions.proto43
-rw-r--r--src/proto/grpc/testing/stats.proto8
-rw-r--r--src/proto/grpc/testing/test.proto4
-rw-r--r--src/python/.gitignore2
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi8
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi1
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi34
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py59
-rw-r--r--src/python/grpcio/support.py5
-rw-r--r--src/python/grpcio_reflection/.gitignore5
-rw-r--r--src/python/grpcio_reflection/grpc/__init__.py30
-rw-r--r--src/python/grpcio_reflection/grpc/reflection/__init__.py29
-rw-r--r--src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py29
-rw-r--r--src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py143
-rw-r--r--src/python/grpcio_reflection/grpc_version.py32
-rw-r--r--src/python/grpcio_reflection/reflection_commands.py78
-rw-r--r--src/python/grpcio_reflection/setup.py73
-rw-r--r--src/python/grpcio_tests/tests/interop/client.py5
-rw-r--r--src/python/grpcio_tests/tests/interop/methods.py188
-rw-r--r--src/python/grpcio_tests/tests/reflection/__init__.py28
-rw-r--r--src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py185
-rw-r--r--src/python/grpcio_tests/tests/tests.json37
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_args_test.py7
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py12
-rw-r--r--src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py4
-rw-r--r--src/ruby/README.md2
-rw-r--r--src/ruby/ext/grpc/rb_call_credentials.c13
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.c10
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.h15
-rw-r--r--src/ruby/lib/grpc/generic/active_call.rb146
-rw-r--r--src/ruby/lib/grpc/generic/client_stub.rb12
-rw-r--r--src/ruby/lib/grpc/generic/rpc_desc.rb43
-rw-r--r--src/ruby/lib/grpc/generic/rpc_server.rb3
-rw-r--r--src/ruby/qps/server.rb4
-rw-r--r--src/ruby/spec/generic/active_call_spec.rb24
-rw-r--r--src/ruby/spec/generic/client_stub_spec.rb176
-rw-r--r--src/ruby/spec/generic/rpc_desc_spec.rb24
-rw-r--r--src/ruby/spec/generic/rpc_server_spec.rb1
-rw-r--r--src/ruby/spec/pb/health/checker_spec.rb38
335 files changed, 14434 insertions, 6622 deletions
diff --git a/src/boringssl/gen_build_yaml.py b/src/boringssl/gen_build_yaml.py
index 20f6413adf..c53beb0da5 100755
--- a/src/boringssl/gen_build_yaml.py
+++ b/src/boringssl/gen_build_yaml.py
@@ -36,7 +36,7 @@ import yaml
sys.dont_write_bytecode = True
boring_ssl_root = os.path.abspath(os.path.join(
- os.path.dirname(sys.argv[0]),
+ os.path.dirname(sys.argv[0]),
'../../third_party/boringssl'))
sys.path.append(os.path.join(boring_ssl_root, 'util'))
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index d0c35ea1ab..fa72f9b0d9 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -624,7 +624,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Indent();
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
- " ::grpc::Service::MarkMethodStreamedUnary($Idx$,\n"
+ " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
" new ::grpc::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
@@ -656,6 +656,58 @@ void PrintHeaderServerMethodStreamedUnary(
}
}
+void PrintHeaderServerMethodSplitStreaming(
+ Printer *printer, const Method *method,
+ std::map<grpc::string, grpc::string> *vars) {
+ (*vars)["Method"] = method->name();
+ (*vars)["Request"] = method->input_type_name();
+ (*vars)["Response"] = method->output_type_name();
+ if (method->ServerOnlyStreaming()) {
+ printer->Print(*vars, "template <class BaseClass>\n");
+ printer->Print(*vars,
+ "class WithSplitStreamingMethod_$Method$ : "
+ "public BaseClass {\n");
+ printer->Print(
+ " private:\n"
+ " void BaseClassMustBeDerivedFromService(const Service *service) "
+ "{}\n");
+ printer->Print(" public:\n");
+ printer->Indent();
+ printer->Print(*vars,
+ "WithSplitStreamingMethod_$Method$() {\n"
+ " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
+ " new ::grpc::SplitServerStreamingHandler< $Request$, "
+ "$Response$>(std::bind"
+ "(&WithSplitStreamingMethod_$Method$<BaseClass>::"
+ "Streamed$Method$, this, std::placeholders::_1, "
+ "std::placeholders::_2)));\n"
+ "}\n");
+ printer->Print(*vars,
+ "~WithSplitStreamingMethod_$Method$() GRPC_OVERRIDE {\n"
+ " BaseClassMustBeDerivedFromService(this);\n"
+ "}\n");
+ printer->Print(
+ *vars,
+ "// disable regular version of this method\n"
+ "::grpc::Status $Method$("
+ "::grpc::ServerContext* context, const $Request$* request, "
+ "::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
+ "{\n"
+ " abort();\n"
+ " return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
+ "}\n");
+ printer->Print(*vars,
+ "// replace default version of method with split streamed\n"
+ "virtual ::grpc::Status Streamed$Method$("
+ "::grpc::ServerContext* context, "
+ "::grpc::ServerSplitStreamer< "
+ "$Request$,$Response$>* server_split_streamer)"
+ " = 0;\n");
+ printer->Outdent();
+ printer->Print(*vars, "};\n");
+ }
+}
+
void PrintHeaderServerMethodGeneric(
Printer *printer, const Method *method,
std::map<grpc::string, grpc::string> *vars) {
@@ -844,6 +896,48 @@ void PrintHeaderService(Printer *printer, const Service *service,
}
printer->Print(" StreamedUnaryService;\n");
+ // Server side - controlled server-side streaming
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["Idx"] = as_string(i);
+ PrintHeaderServerMethodSplitStreaming(printer, service->method(i).get(),
+ vars);
+ }
+
+ printer->Print("typedef ");
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["method_name"] = service->method(i).get()->name();
+ if (service->method(i)->ServerOnlyStreaming()) {
+ printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
+ }
+ }
+ printer->Print("Service");
+ for (int i = 0; i < service->method_count(); ++i) {
+ if (service->method(i)->ServerOnlyStreaming()) {
+ printer->Print(" >");
+ }
+ }
+ printer->Print(" SplitStreamedService;\n");
+
+ // Server side - typedef for controlled both unary and server-side streaming
+ printer->Print("typedef ");
+ for (int i = 0; i < service->method_count(); ++i) {
+ (*vars)["method_name"] = service->method(i).get()->name();
+ if (service->method(i)->ServerOnlyStreaming()) {
+ printer->Print(*vars, "WithSplitStreamingMethod_$method_name$<");
+ }
+ if (service->method(i)->NoStreaming()) {
+ printer->Print(*vars, "WithStreamedUnaryMethod_$method_name$<");
+ }
+ }
+ printer->Print("Service");
+ for (int i = 0; i < service->method_count(); ++i) {
+ if (service->method(i)->NoStreaming() ||
+ service->method(i)->ServerOnlyStreaming()) {
+ printer->Print(" >");
+ }
+ }
+ printer->Print(" StreamedService;\n");
+
printer->Outdent();
printer->Print("};\n");
printer->Print(service->GetTrailingComments().c_str());
diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc
index 02202568cb..c85babf1b8 100644
--- a/src/compiler/ruby_generator.cc
+++ b/src/compiler/ruby_generator.cc
@@ -119,6 +119,43 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
} // namespace
+// The following functions are copied directly from the source for the protoc
+// ruby generator
+// to ensure compatibility (with the exception of int and string type changes).
+// See
+// https://github.com/google/protobuf/blob/master/src/google/protobuf/compiler/ruby/ruby_generator.cc#L250
+// TODO: keep up to date with protoc code generation, though this behavior isn't
+// expected to change
+bool IsLower(char ch) { return ch >= 'a' && ch <= 'z'; }
+
+char ToUpper(char ch) { return IsLower(ch) ? (ch - 'a' + 'A') : ch; }
+
+// Package names in protobuf are snake_case by convention, but Ruby module
+// names must be PascalCased.
+//
+// foo_bar_baz -> FooBarBaz
+grpc::string PackageToModule(const grpc::string &name) {
+ bool next_upper = true;
+ grpc::string result;
+ result.reserve(name.size());
+
+ for (grpc::string::size_type i = 0; i < name.size(); i++) {
+ if (name[i] == '_') {
+ next_upper = true;
+ } else {
+ if (next_upper) {
+ result.push_back(ToUpper(name[i]));
+ } else {
+ result.push_back(name[i]);
+ }
+ next_upper = false;
+ }
+ }
+
+ return result;
+}
+// end copying of protoc generator for ruby code
+
grpc::string GetServices(const FileDescriptor *file) {
grpc::string output;
{
@@ -162,7 +199,7 @@ grpc::string GetServices(const FileDescriptor *file) {
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars = ListToDict({
- "module.name", CapitalizeFirst(modules[i]),
+ "module.name", PackageToModule(modules[i]),
});
out.Print(module_vars, "module $module.name$\n");
out.Indent();
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 9dacc17eb4..a4cf6f37bd 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -133,7 +133,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
- d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+ d->start_ts = args->start_time;
return GRPC_ERROR_NONE;
}
@@ -152,7 +152,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
memset(d, 0, sizeof(*d));
- d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
+ d->start_ts = args->start_time;
/* TODO(hongyu): call census_tracing_start_op here. */
grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/client_config/README.md b/src/core/ext/client_channel/README.md
index eda01e3e71..7c209db12e 100644
--- a/src/core/ext/client_config/README.md
+++ b/src/core/ext/client_channel/README.md
@@ -5,28 +5,27 @@ This library provides high level configuration machinery to construct client
channels and load balance between them.
Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
-to resolve a name into configuration data for the channel. Such configuration
-data might include:
+to resolve a name into a set of arguments for the channel. Such arguments
+might include:
- a list of (ip, port) addresses to connect to
- a load balancing policy to decide which server to send a request to
- a set of filters to mutate outgoing requests (say, by adding metadata)
-The resolver provides this data as a stream of grpc_resolver_result objects to
-the channel. We represent configuration as a stream so that it can be changed
-by the resolver during execution, by reacting to external events (such as a
-new configuration file being pushed to some store).
+The resolver provides this data as a stream of grpc_channel_args objects to
+the channel. We represent arguments as a stream so that they can be changed
+by the resolver during execution, by reacting to external events (such as
+new service configuration data being pushed to some store).
Load Balancing
--------------
-Load balancing configuration is provided by a grpc_lb_policy object, stored as
-part of grpc_resolver_result.
+Load balancing configuration is provided by a grpc_lb_policy object.
-The primary job of the load balancing policies is to pick a target server given only the
-initial metadata for a request. It does this by providing a grpc_subchannel
-object to the owning channel.
+The primary job of the load balancing policies is to pick a target server
+given only the initial metadata for a request. It does this by providing
+a grpc_subchannel object to the owning channel.
Sub-Channels
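
As the README above describes, the resolver now hands the channel a stream of grpc_channel_args rather than a dedicated resolver-result object, and the channel pulls what it needs (LB policy name, addresses, service config) out of those args. A minimal sketch of that lookup pattern, using only the argument key and accessors that appear in the client_channel.c hunks later in this diff (the helper name is illustrative, not part of the tree):

#include <string.h>
#include "src/core/lib/channel/channel_args.h"

/* Sketch: read the LB policy name from resolver-provided channel args.
   Falls back to "pick_first", the default the channel uses when the
   resolver did not specify a policy. */
static const char *sketch_get_lb_policy_name(const grpc_channel_args *args) {
  const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
  if (arg != NULL && arg->type == GRPC_ARG_STRING) {
    return arg->value.string;
  }
  return "pick_first";
}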
diff --git a/src/core/ext/client_config/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index ce3c13a4ee..9797e66564 100644
--- a/src/core/ext/client_config/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -36,7 +36,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/ext/client_config/client_channel.h"
+#include "src/core/ext/client_channel/client_channel.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/completion_queue.h"
diff --git a/src/core/ext/client_config/client_channel.c b/src/core/ext/client_channel/client_channel.c
index a6056c3e8d..ff773ac334 100644
--- a/src/core/ext/client_config/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/client_channel.h"
+#include "src/core/ext/client_channel/client_channel.h"
#include <stdbool.h>
#include <stdio.h>
@@ -42,8 +42,8 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/deadline_filter.h"
@@ -53,10 +53,62 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/metadata.h"
+#include "src/core/lib/transport/metadata_batch.h"
+#include "src/core/lib/transport/method_config.h"
+#include "src/core/lib/transport/static_metadata.h"
/* Client channel implementation */
/*************************************************************************
+ * METHOD-CONFIG TABLE
+ */
+
+typedef enum {
+ WAIT_FOR_READY_UNSET,
+ WAIT_FOR_READY_FALSE,
+ WAIT_FOR_READY_TRUE
+} wait_for_ready_value;
+
+typedef struct method_parameters {
+ gpr_timespec timeout;
+ wait_for_ready_value wait_for_ready;
+} method_parameters;
+
+static void *method_parameters_copy(void *value) {
+ void *new_value = gpr_malloc(sizeof(method_parameters));
+ memcpy(new_value, value, sizeof(method_parameters));
+ return new_value;
+}
+
+static int method_parameters_cmp(void *value1, void *value2) {
+ const method_parameters *v1 = value1;
+ const method_parameters *v2 = value2;
+ const int retval = gpr_time_cmp(v1->timeout, v2->timeout);
+ if (retval != 0) return retval;
+ if (v1->wait_for_ready > v2->wait_for_ready) return 1;
+ if (v1->wait_for_ready < v2->wait_for_ready) return -1;
+ return 0;
+}
+
+static const grpc_mdstr_hash_table_vtable method_parameters_vtable = {
+ gpr_free, method_parameters_copy, method_parameters_cmp};
+
+static void *method_config_convert_value(
+ const grpc_method_config *method_config) {
+ method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ const gpr_timespec *timeout = grpc_method_config_get_timeout(method_config);
+ value->timeout = timeout != NULL ? *timeout : gpr_time_0(GPR_TIMESPAN);
+ const bool *wait_for_ready =
+ grpc_method_config_get_wait_for_ready(method_config);
+ value->wait_for_ready =
+ wait_for_ready == NULL
+ ? WAIT_FOR_READY_UNSET
+ : (wait_for_ready ? WAIT_FOR_READY_TRUE : WAIT_FOR_READY_FALSE);
+ return value;
+}
+
+/*************************************************************************
* CHANNEL-WIDE FUNCTIONS
*/
@@ -68,13 +120,14 @@ typedef struct client_channel_channel_data {
/** client channel factory */
grpc_client_channel_factory *client_channel_factory;
- /** mutex protecting client configuration, including all
- variables below in this data structure */
+ /** mutex protecting all variables below in this data structure */
gpr_mu mu;
- /** currently active load balancer - guarded by mu */
+ /** currently active load balancer */
grpc_lb_policy *lb_policy;
- /** incoming resolver result - set by resolver.next(), guarded by mu */
- grpc_resolver_result *resolver_result;
+ /** maps method names to method_parameters structs */
+ grpc_mdstr_hash_table *method_params_table;
+ /** incoming resolver result - set by resolver.next() */
+ grpc_channel_args *resolver_result;
/** a list of closures that are all waiting for config to come in */
grpc_closure_list waiting_for_config_closures;
/** resolver callback */
@@ -172,41 +225,49 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
channel_data *chand = arg;
grpc_lb_policy *lb_policy = NULL;
grpc_lb_policy *old_lb_policy;
+ grpc_mdstr_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
if (chand->resolver_result != NULL) {
grpc_lb_policy_args lb_policy_args;
- lb_policy_args.server_name =
- grpc_resolver_result_get_server_name(chand->resolver_result);
- lb_policy_args.addresses =
- grpc_resolver_result_get_addresses(chand->resolver_result);
- lb_policy_args.additional_args =
- grpc_resolver_result_get_lb_policy_args(chand->resolver_result);
+ lb_policy_args.args = chand->resolver_result;
lb_policy_args.client_channel_factory = chand->client_channel_factory;
+ // Find LB policy name.
+ const char *lb_policy_name = NULL;
+ const grpc_arg *channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_POLICY_NAME);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ lb_policy_name = channel_arg->value.string;
+ }
// Special case: If all of the addresses are balancer addresses,
// assume that we should use the grpclb policy, regardless of what the
// resolver actually specified.
- const char *lb_policy_name =
- grpc_resolver_result_get_lb_policy_name(chand->resolver_result);
- bool found_backend_address = false;
- for (size_t i = 0; i < lb_policy_args.addresses->num_addresses; ++i) {
- if (!lb_policy_args.addresses->addresses[i].is_balancer) {
- found_backend_address = true;
- break;
+ channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_ADDRESSES);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+ bool found_backend_address = false;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ found_backend_address = true;
+ break;
+ }
}
- }
- if (!found_backend_address) {
- if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
- gpr_log(GPR_INFO,
- "resolver requested LB policy %s but provided only balancer "
- "addresses, no backend addresses -- forcing use of grpclb LB "
- "policy",
- (lb_policy_name == NULL ? "(none)" : lb_policy_name));
+ if (!found_backend_address) {
+ if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
+ gpr_log(GPR_INFO,
+ "resolver requested LB policy %s but provided only balancer "
+ "addresses, no backend addresses -- forcing use of grpclb LB "
+ "policy",
+ lb_policy_name);
+ }
+ lb_policy_name = "grpclb";
}
- lb_policy_name = "grpclb";
}
// Use pick_first if nothing was specified and we didn't select grpclb
// above.
@@ -220,7 +281,15 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
state =
grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
}
- grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
+ channel_arg =
+ grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_SERVICE_CONFIG);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ method_params_table = grpc_method_config_table_convert(
+ (grpc_method_config_table *)channel_arg->value.pointer.p,
+ method_config_convert_value, &method_parameters_vtable);
+ }
+ grpc_channel_args_destroy(chand->resolver_result);
chand->resolver_result = NULL;
}
@@ -232,6 +301,10 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&chand->mu);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table_unref(chand->method_params_table);
+ }
+ chand->method_params_table = method_params_table;
if (lb_policy != NULL) {
grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
NULL);
@@ -392,6 +465,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table_unref(chand->method_params_table);
+ }
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
gpr_mu_destroy(&chand->mu);
@@ -424,7 +500,12 @@ typedef struct client_channel_call_data {
// stack and each has its own mutex. If/when we have time, find a way
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
+
+ grpc_mdstr *path; // Request path.
+ gpr_timespec call_start_time;
gpr_timespec deadline;
+ wait_for_ready_value wait_for_ready_from_service_config;
+ grpc_closure read_service_config;
grpc_error *cancel_error;
@@ -513,10 +594,14 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ grpc_call_element *elem = arg;
+ call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
@@ -528,10 +613,11 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_REFERENCING(
"Cancelled before creating subchannel", &error, 1));
} else {
+ /* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
- &subchannel_call);
+ exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
+ calld->deadline, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@@ -564,6 +650,9 @@ typedef struct {
grpc_closure closure;
} continue_picking_args;
+/** Return true if subchannel is available immediately (in which case on_ready
+ should not be called), or false otherwise (in which case on_ready should be
+ called when the subchannel is available). */
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata,
uint32_t initial_metadata_flags,
@@ -577,11 +666,15 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
/* cancelled, do nothing */
} else if (error != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
- } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
- cpa->initial_metadata_flags,
- cpa->connected_subchannel, cpa->on_ready,
- GRPC_ERROR_NONE)) {
- grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ } else {
+ call_data *calld = cpa->elem->call_data;
+ gpr_mu_lock(&calld->mu);
+ if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
+ cpa->initial_metadata_flags, cpa->connected_subchannel,
+ cpa->on_ready, GRPC_ERROR_NONE)) {
+ grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+ }
+ gpr_mu_unlock(&calld->mu);
}
gpr_free(cpa);
}
@@ -624,18 +717,33 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
GPR_ASSERT(error == GRPC_ERROR_NONE);
if (chand->lb_policy != NULL) {
grpc_lb_policy *lb_policy = chand->lb_policy;
- int r;
GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
gpr_mu_unlock(&chand->mu);
+ // If the application explicitly set wait_for_ready, use that.
+ // Otherwise, if the service config specified a value for this
+ // method, use that.
+ const bool wait_for_ready_set_from_api =
+ initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
+ const bool wait_for_ready_set_from_service_config =
+ calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
+ if (!wait_for_ready_set_from_api &&
+ wait_for_ready_set_from_service_config) {
+ if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
+ initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ } else {
+ initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ }
+ }
// TODO(dgq): make this deadline configurable somehow.
const grpc_lb_policy_pick_args inputs = {
- calld->pollent, initial_metadata, initial_metadata_flags,
- &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)};
- r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
- NULL, on_ready);
+ initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
+ gpr_inf_future(GPR_CLOCK_MONOTONIC)};
+ const bool result = grpc_lb_policy_pick(
+ exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
GPR_TIMER_END("pick_subchannel", 0);
- return r;
+ return result;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = true;
@@ -672,6 +780,7 @@ static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
/* try to (atomically) get the call */
@@ -739,14 +848,20 @@ retry:
calld->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
- grpc_closure_init(&calld->next_step, subchannel_ready, calld);
+ grpc_closure_init(&calld->next_step, subchannel_ready, elem);
GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+ /* If a subchannel is not available immediately, the polling entity from
+ call_data should be provided to channel_data's interested_parties, so
+ that IO of the lb_policy and resolver could be done under it. */
if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
op->send_initial_metadata_flags,
&calld->connected_subchannel, &calld->next_step,
GRPC_ERROR_NONE)) {
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+ } else {
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
}
/* if we've got a subchannel, then let's ask it to create a call */
@@ -754,8 +869,8 @@ retry:
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->deadline,
- &subchannel_call);
+ exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
+ calld->deadline, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@@ -772,13 +887,67 @@ retry:
GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
+// Gets data from the service config. Invoked when the resolver returns
+// its initial result.
+static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = arg;
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ // If this is an error, there's no point in looking at the service config.
+ if (error == GRPC_ERROR_NONE) {
+ // Get the method config table from channel data.
+ gpr_mu_lock(&chand->mu);
+ grpc_mdstr_hash_table *method_params_table = NULL;
+ if (chand->method_params_table != NULL) {
+ method_params_table =
+ grpc_mdstr_hash_table_ref(chand->method_params_table);
+ }
+ gpr_mu_unlock(&chand->mu);
+ // If the method config table was present, use it.
+ if (method_params_table != NULL) {
+ const method_parameters *method_params =
+ grpc_method_config_table_get(method_params_table, calld->path);
+ if (method_params != NULL) {
+ const bool have_method_timeout =
+ gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
+ if (have_method_timeout ||
+ method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ gpr_mu_lock(&calld->mu);
+ if (have_method_timeout) {
+ const gpr_timespec per_method_deadline =
+ gpr_time_add(calld->call_start_time, method_params->timeout);
+ if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->deadline = per_method_deadline;
+ // Reset deadline timer.
+ grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+ }
+ }
+ if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ calld->wait_for_ready_from_service_config =
+ method_params->wait_for_ready;
+ }
+ gpr_mu_unlock(&calld->mu);
+ }
+ }
+ grpc_mdstr_hash_table_unref(method_params_table);
+ }
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
+}
+
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_call_element_args *args) {
+ channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
- grpc_deadline_state_init(exec_ctx, elem, args);
- calld->deadline = args->deadline;
+ // Initialize data members.
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+ calld->path = GRPC_MDSTR_REF(args->path);
+ calld->call_start_time = args->start_time;
+ calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
calld->cancel_error = GRPC_ERROR_NONE;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
@@ -789,6 +958,51 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
calld->owning_call = args->call_stack;
calld->pollent = NULL;
+ // If the resolver has already returned results, then we can access
+ // the service config parameters immediately. Otherwise, we need to
+ // defer that work until the resolver returns an initial result.
+ // TODO(roth): This code is almost but not quite identical to the code
+ // in read_service_config() above. It would be nice to find a way to
+ // combine them, to avoid having to maintain it twice.
+ gpr_mu_lock(&chand->mu);
+ if (chand->lb_policy != NULL) {
+ // We already have a resolver result, so check for service config.
+ if (chand->method_params_table != NULL) {
+ grpc_mdstr_hash_table *method_params_table =
+ grpc_mdstr_hash_table_ref(chand->method_params_table);
+ gpr_mu_unlock(&chand->mu);
+ method_parameters *method_params =
+ grpc_method_config_table_get(method_params_table, args->path);
+ if (method_params != NULL) {
+ if (gpr_time_cmp(method_params->timeout,
+ gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
+ gpr_timespec per_method_deadline =
+ gpr_time_add(calld->call_start_time, method_params->timeout);
+ calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
+ }
+ if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
+ calld->wait_for_ready_from_service_config =
+ method_params->wait_for_ready;
+ }
+ }
+ grpc_mdstr_hash_table_unref(method_params_table);
+ } else {
+ gpr_mu_unlock(&chand->mu);
+ }
+ } else {
+ // We don't yet have a resolver result, so register a callback to
+ // get the service config data once the resolver returns.
+ // Take a reference to the call stack to be owned by the callback.
+ GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
+ grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+ grpc_closure_list_append(&chand->waiting_for_config_closures,
+ &calld->read_service_config, GRPC_ERROR_NONE);
+ gpr_mu_unlock(&chand->mu);
+ }
+ // Start the deadline timer with the current deadline value. If we
+ // do not yet have service config data, then the timer may be reset
+ // later.
+ grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@@ -799,6 +1013,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
void *and_free_memory) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
+ GRPC_MDSTR_UNREF(calld->path);
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
@@ -807,6 +1022,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
gpr_mu_destroy(&calld->mu);
GPR_ASSERT(calld->waiting_ops_count == 0);
+ if (calld->connected_subchannel != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
+ "picked");
+ }
gpr_free(calld->waiting_ops);
gpr_free(and_free_memory);
}
diff --git a/src/core/ext/client_config/client_channel.h b/src/core/ext/client_channel/client_channel.h
index abb5ac4d87..ab5a84fdfb 100644
--- a/src/core/ext/client_config/client_channel.h
+++ b/src/core/ext/client_channel/client_channel.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/resolver.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/resolver.h"
#include "src/core/lib/channel/channel_stack.h"
/* A client channel is a channel that begins disconnected, and can connect
@@ -61,4 +61,4 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
diff --git a/src/core/ext/client_config/client_channel_factory.c b/src/core/ext/client_channel/client_channel_factory.c
index 71c64c0da1..4900832d57 100644
--- a/src/core/ext/client_config/client_channel_factory.c
+++ b/src/core/ext/client_channel/client_channel_factory.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/client_channel_factory.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) {
factory->vtable->ref(factory);
@@ -44,14 +44,14 @@ void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
grpc_subchannel* grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
- grpc_subchannel_args* args) {
+ const grpc_subchannel_args* args) {
return factory->vtable->create_subchannel(exec_ctx, factory, args);
}
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
const char* target, grpc_client_channel_type type,
- grpc_channel_args* args) {
+ const grpc_channel_args* args) {
return factory->vtable->create_client_channel(exec_ctx, factory, target, type,
args);
}
diff --git a/src/core/ext/client_config/client_channel_factory.h b/src/core/ext/client_channel/client_channel_factory.h
index 1241b9b781..2b8fc577b3 100644
--- a/src/core/ext/client_config/client_channel_factory.h
+++ b/src/core/ext/client_channel/client_channel_factory.h
@@ -31,12 +31,12 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H
#include <grpc/impl/codegen/grpc_types.h>
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_stack.h"
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
@@ -60,12 +60,12 @@ struct grpc_client_channel_factory_vtable {
void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory);
grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx,
grpc_client_channel_factory *factory,
const char *target,
grpc_client_channel_type type,
- grpc_channel_args *args);
+ const grpc_channel_args *args);
};
void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory);
@@ -75,11 +75,12 @@ void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx,
/** Create a new grpc_subchannel */
grpc_subchannel *grpc_client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
/** Create a new grpc_channel */
grpc_channel *grpc_client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const char *target, grpc_client_channel_type type, grpc_channel_args *args);
+ const char *target, grpc_client_channel_type type,
+ const grpc_channel_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CLIENT_CHANNEL_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
diff --git a/src/core/ext/client_config/client_config_plugin.c b/src/core/ext/client_channel/client_channel_plugin.c
index dc3629d383..a3e5079843 100644
--- a/src/core/ext/client_config/client_config_plugin.c
+++ b/src/core/ext/client_channel/client_channel_plugin.c
@@ -37,10 +37,10 @@
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/resolver_registry.h"
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
static bool append_filter(grpc_channel_stack_builder *builder, void *arg) {
@@ -73,7 +73,7 @@ static bool set_default_host_if_unset(grpc_channel_stack_builder *builder,
return true;
}
-void grpc_client_config_init(void) {
+void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
grpc_subchannel_index_init();
@@ -83,7 +83,7 @@ void grpc_client_config_init(void) {
(void *)&grpc_client_channel_filter);
}
-void grpc_client_config_shutdown(void) {
+void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_resolver_registry_shutdown();
diff --git a/src/core/ext/client_config/connector.c b/src/core/ext/client_channel/connector.c
index 5b629ed5fb..0582e5b372 100644
--- a/src/core/ext/client_config/connector.c
+++ b/src/core/ext/client_channel/connector.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/connector.h"
+#include "src/core/ext/client_channel/connector.h"
grpc_connector* grpc_connector_ref(grpc_connector* connector) {
connector->vtable->ref(connector);
diff --git a/src/core/ext/client_config/connector.h b/src/core/ext/client_channel/connector.h
index ea9d23706e..ed7d5450de 100644
--- a/src/core/ext/client_config/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H
#include "src/core/lib/channel/channel_stack.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/transport/transport.h"
typedef struct grpc_connector grpc_connector;
@@ -49,7 +49,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** address to connect to */
- const struct sockaddr *addr;
+ const grpc_resolved_address *addr;
size_t addr_len;
/** initial connect string to send */
gpr_slice initial_connect_string;
@@ -89,4 +89,4 @@ void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx,
grpc_connector *connector);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_CONNECTOR_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CONNECTOR_H */
diff --git a/src/core/ext/client_config/default_initial_connect_string.c b/src/core/ext/client_channel/default_initial_connect_string.c
index a70da4a84a..0b251372fd 100644
--- a/src/core/ext/client_config/default_initial_connect_string.c
+++ b/src/core/ext/client_channel/default_initial_connect_string.c
@@ -32,8 +32,7 @@
*/
#include <grpc/support/slice.h>
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
-void grpc_set_default_initial_connect_string(struct sockaddr **addr,
- size_t *addr_len,
+void grpc_set_default_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *initial_str) {}
diff --git a/src/core/ext/client_config/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index fda1df173e..ea2cbbdd97 100644
--- a/src/core/ext/client_config/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
#include <string.h>
@@ -40,7 +40,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/uri_parser.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/timer.h"
diff --git a/src/core/ext/client_config/http_connect_handshaker.h b/src/core/ext/client_channel/http_connect_handshaker.h
index 1fc3948267..c689df2b2b 100644
--- a/src/core/ext/client_config/http_connect_handshaker.h
+++ b/src/core/ext/client_channel/http_connect_handshaker.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H
#include "src/core/lib/channel/handshaker.h"
@@ -44,4 +44,4 @@ grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server,
/// Caller takes ownership of result.
char* grpc_get_http_proxy_server();
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_HTTP_CONNECT_HANDSHAKER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
diff --git a/src/core/ext/client_config/initial_connect_string.c b/src/core/ext/client_channel/initial_connect_string.c
index 41580d2106..fb1493d77d 100644
--- a/src/core/ext/client_config/initial_connect_string.c
+++ b/src/core/ext/client_channel/initial_connect_string.c
@@ -31,13 +31,12 @@
*
*/
-#include "src/core/ext/client_config/initial_connect_string.h"
+#include "src/core/ext/client_channel/initial_connect_string.h"
#include <stddef.h>
-extern void grpc_set_default_initial_connect_string(struct sockaddr **addr,
- size_t *addr_len,
- gpr_slice *initial_str);
+extern void grpc_set_default_initial_connect_string(
+ grpc_resolved_address **addr, gpr_slice *initial_str);
static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
grpc_set_default_initial_connect_string;
@@ -47,7 +46,7 @@ void grpc_test_set_initial_connect_string_function(
g_set_initial_connect_string_func = func;
}
-void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+void grpc_set_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *initial_str) {
- g_set_initial_connect_string_func(addr, addr_len, initial_str);
+ g_set_initial_connect_string_func(addr, initial_str);
}
diff --git a/src/core/ext/client_config/initial_connect_string.h b/src/core/ext/client_channel/initial_connect_string.h
index 06f0767832..68adb0373c 100644
--- a/src/core/ext/client_config/initial_connect_string.h
+++ b/src/core/ext/client_channel/initial_connect_string.h
@@ -31,20 +31,20 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
#include <grpc/support/slice.h>
-#include "src/core/lib/iomgr/sockaddr.h"
-typedef void (*grpc_set_initial_connect_string_func)(struct sockaddr **addr,
- size_t *addr_len,
- gpr_slice *initial_str);
+#include "src/core/lib/iomgr/resolve_address.h"
+
+typedef void (*grpc_set_initial_connect_string_func)(
+ grpc_resolved_address **addr, gpr_slice *initial_str);
void grpc_test_set_initial_connect_string_function(
grpc_set_initial_connect_string_func func);
/** Set a string to be sent once connected. Optionally reset addr. */
-void grpc_set_initial_connect_string(struct sockaddr **addr, size_t *addr_len,
+void grpc_set_initial_connect_string(grpc_resolved_address **addr,
gpr_slice *connect_string);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_INITIAL_CONNECT_STRING_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H */
diff --git a/src/core/ext/client_config/lb_policy.c b/src/core/ext/client_channel/lb_policy.c
index 46391272a6..45ee72e2f0 100644
--- a/src/core/ext/client_config/lb_policy.c
+++ b/src/core/ext/client_channel/lb_policy.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/lb_policy.h"
+#include "src/core/ext/client_channel/lb_policy.h"
#define WEAK_REF_BITS 16
diff --git a/src/core/ext/client_config/lb_policy.h b/src/core/ext/client_channel/lb_policy.h
index 110d08fcac..54ad779792 100644
--- a/src/core/ext/client_config/lb_policy.h
+++ b/src/core/ext/client_channel/lb_policy.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -55,8 +55,6 @@ struct grpc_lb_policy {
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
- /** Parties interested in the pick's progress */
- grpc_polling_entity *pollent;
/** Initial metadata associated with the picking call. */
grpc_metadata_batch *initial_metadata;
/** Bitmask used for selective cancelling. See \a
@@ -153,7 +151,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
once the pick is complete with its error argument set to indicate
success or failure.
- Any I/O should be done under \a pick_args->pollent. */
+ Any IO should be done under the \a interested_parties \a grpc_pollset_set
+ in the \a grpc_lb_policy struct. */
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_pick_args *pick_args,
grpc_connected_subchannel **target, void **user_data,
@@ -194,4 +193,4 @@ grpc_connectivity_state grpc_lb_policy_check_connectivity(
grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_error **connectivity_error);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_H */
diff --git a/src/core/ext/client_config/lb_policy_factory.c b/src/core/ext/client_channel/lb_policy_factory.c
index c17af91a09..8a474c8818 100644
--- a/src/core/ext/client_config/lb_policy_factory.c
+++ b/src/core/ext/client_channel/lb_policy_factory.c
@@ -36,21 +36,22 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
-grpc_lb_addresses* grpc_lb_addresses_create(size_t num_addresses) {
+grpc_lb_addresses* grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
grpc_lb_addresses* addresses = gpr_malloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
+ addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
addresses->addresses = gpr_malloc(addresses_size);
memset(addresses->addresses, 0, addresses_size);
return addresses;
}
-grpc_lb_addresses* grpc_lb_addresses_copy(grpc_lb_addresses* addresses,
- void* (*user_data_copy)(void*)) {
- grpc_lb_addresses* new_addresses =
- grpc_lb_addresses_create(addresses->num_addresses);
+grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
+ grpc_lb_addresses* new_addresses = grpc_lb_addresses_create(
+ addresses->num_addresses, addresses->user_data_vtable);
memcpy(new_addresses->addresses, addresses->addresses,
sizeof(grpc_lb_address) * addresses->num_addresses);
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -58,9 +59,9 @@ grpc_lb_addresses* grpc_lb_addresses_copy(grpc_lb_addresses* addresses,
new_addresses->addresses[i].balancer_name =
gpr_strdup(new_addresses->addresses[i].balancer_name);
}
- if (user_data_copy != NULL) {
- new_addresses->addresses[i].user_data =
- user_data_copy(new_addresses->addresses[i].user_data);
+ if (new_addresses->addresses[i].user_data != NULL) {
+ new_addresses->addresses[i].user_data = addresses->user_data_vtable->copy(
+ new_addresses->addresses[i].user_data);
}
}
return new_addresses;
@@ -71,6 +72,7 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
bool is_balancer, char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
+ if (user_data != NULL) GPR_ASSERT(addresses->user_data_vtable != NULL);
grpc_lb_address* target = &addresses->addresses[index];
memcpy(target->address.addr, address, address_len);
target->address.len = address_len;
@@ -79,18 +81,70 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
target->user_data = user_data;
}
-void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses,
- void (*user_data_destroy)(void*)) {
+int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
+ const grpc_lb_addresses* addresses2) {
+ if (addresses1->num_addresses > addresses2->num_addresses) return 1;
+ if (addresses1->num_addresses < addresses2->num_addresses) return -1;
+ if (addresses1->user_data_vtable > addresses2->user_data_vtable) return 1;
+ if (addresses1->user_data_vtable < addresses2->user_data_vtable) return -1;
+ for (size_t i = 0; i < addresses1->num_addresses; ++i) {
+ const grpc_lb_address* target1 = &addresses1->addresses[i];
+ const grpc_lb_address* target2 = &addresses2->addresses[i];
+ if (target1->address.len > target2->address.len) return 1;
+ if (target1->address.len < target2->address.len) return -1;
+ int retval = memcmp(target1->address.addr, target2->address.addr,
+ target1->address.len);
+ if (retval != 0) return retval;
+ if (target1->is_balancer > target2->is_balancer) return 1;
+ if (target1->is_balancer < target2->is_balancer) return -1;
+ const char* balancer_name1 =
+ target1->balancer_name != NULL ? target1->balancer_name : "";
+ const char* balancer_name2 =
+ target2->balancer_name != NULL ? target2->balancer_name : "";
+ retval = strcmp(balancer_name1, balancer_name2);
+ if (retval != 0) return retval;
+ if (addresses1->user_data_vtable != NULL) {
+ retval = addresses1->user_data_vtable->cmp(target1->user_data,
+ target2->user_data);
+ if (retval != 0) return retval;
+ }
+ }
+ return 0;
+}
+
+void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) {
for (size_t i = 0; i < addresses->num_addresses; ++i) {
gpr_free(addresses->addresses[i].balancer_name);
- if (user_data_destroy != NULL) {
- user_data_destroy(addresses->addresses[i].user_data);
+ if (addresses->addresses[i].user_data != NULL) {
+ addresses->user_data_vtable->destroy(addresses->addresses[i].user_data);
}
}
gpr_free(addresses->addresses);
gpr_free(addresses);
}
+static void* lb_addresses_copy(void* addresses) {
+ return grpc_lb_addresses_copy(addresses);
+}
+static void lb_addresses_destroy(void* addresses) {
+ grpc_lb_addresses_destroy(addresses);
+}
+static int lb_addresses_cmp(void* addresses1, void* addresses2) {
+ return grpc_lb_addresses_cmp(addresses1, addresses2);
+}
+static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
+ lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
+
+grpc_arg grpc_lb_addresses_create_channel_arg(
+ const grpc_lb_addresses* addresses) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_LB_ADDRESSES;
+ arg.value.pointer.p = (void*)addresses;
+ arg.value.pointer.vtable = &lb_addresses_arg_vtable;
+ return arg;
+}
+
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
factory->vtable->ref(factory);
}
diff --git a/src/core/ext/client_config/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h
index ade55704f2..e2b8080a32 100644
--- a/src/core/ext/client_config/lb_policy_factory.h
+++ b/src/core/ext/client_channel/lb_policy_factory.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/lb_policy.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/lb_policy.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/resolve_address.h"
@@ -59,19 +59,26 @@ typedef struct grpc_lb_address {
void *user_data;
} grpc_lb_address;
+typedef struct grpc_lb_user_data_vtable {
+ void *(*copy)(void *);
+ void (*destroy)(void *);
+ int (*cmp)(void *, void *);
+} grpc_lb_user_data_vtable;
+
typedef struct grpc_lb_addresses {
size_t num_addresses;
grpc_lb_address *addresses;
+ const grpc_lb_user_data_vtable *user_data_vtable;
} grpc_lb_addresses;
/** Returns a grpc_addresses struct with enough space for
- * \a num_addresses addresses. */
-grpc_lb_addresses *grpc_lb_addresses_create(size_t num_addresses);
+ \a num_addresses addresses. The \a user_data_vtable argument may be
+ NULL if no user data will be added. */
+grpc_lb_addresses *grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable);
-/** Creates a copy of \a addresses. If \a user_data_copy is not NULL,
- * it will be invoked to copy the \a user_data field of each address. */
-grpc_lb_addresses *grpc_lb_addresses_copy(grpc_lb_addresses *addresses,
- void *(*user_data_copy)(void *));
+/** Creates a copy of \a addresses. */
+grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
/** Sets the value of the address at index \a index of \a addresses.
* \a address is a socket address of length \a address_len.
@@ -81,20 +88,21 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
bool is_balancer, char *balancer_name,
void *user_data);
-/** Destroys \a addresses. If \a user_data_destroy is not NULL, it will
- * be invoked to destroy the \a user_data field of each address. */
-void grpc_lb_addresses_destroy(grpc_lb_addresses *addresses,
- void (*user_data_destroy)(void *));
+/** Compares \a addresses1 and \a addresses2. */
+int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1,
+ const grpc_lb_addresses *addresses2);
+
+/** Destroys \a addresses. */
+void grpc_lb_addresses_destroy(grpc_lb_addresses *addresses);
+
+/** Returns a channel arg containing \a addresses. */
+grpc_arg grpc_lb_addresses_create_channel_arg(
+ const grpc_lb_addresses *addresses);
/** Arguments passed to LB policies. */
-/* TODO(roth, ctiller): Consider replacing this struct with
- grpc_channel_args. See comment in resolver_result.h for details. */
typedef struct grpc_lb_policy_args {
- const char *server_name;
- grpc_lb_addresses *addresses;
grpc_client_channel_factory *client_channel_factory;
- /* Can be used to pass implementation-specific parameters to the LB policy. */
- grpc_channel_args *additional_args;
+ grpc_channel_args *args;
} grpc_lb_policy_args;
struct grpc_lb_policy_factory_vtable {
@@ -118,4 +126,4 @@ grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
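Taken together, the lb_policy_factory changes move user_data handling into a vtable stored on the address list itself and add a helper that wraps the list in a channel arg. A minimal usage sketch, not part of this diff; the single IPv4 backend, the POSIX socket headers, and the NULL user-data vtable are illustrative assumptions:

#include <stdbool.h>
#include <string.h>

#include <arpa/inet.h>
#include <netinet/in.h>

#include "src/core/ext/client_channel/lb_policy_factory.h"

/* Build a one-entry address list and expose it as a channel arg. */
static grpc_arg make_backend_arg(grpc_lb_addresses **out_addresses) {
  struct sockaddr_in backend;
  memset(&backend, 0, sizeof(backend));
  backend.sin_family = AF_INET;
  backend.sin_port = htons(50051);
  inet_pton(AF_INET, "10.0.0.1", &backend.sin_addr);

  /* A NULL vtable is allowed because no user_data is attached. */
  grpc_lb_addresses *addresses =
      grpc_lb_addresses_create(1, NULL /* user_data_vtable */);
  grpc_lb_addresses_set_address(addresses, 0, &backend, sizeof(backend),
                                false /* is_balancer */,
                                NULL /* balancer_name */,
                                NULL /* user_data */);
  *out_addresses = addresses; /* free later with grpc_lb_addresses_destroy */

  /* The arg borrows the pointer; copies of the channel args go through the
     lb_addresses_arg_vtable defined in lb_policy_factory.c above. */
  return grpc_lb_addresses_create_channel_arg(addresses);
}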
diff --git a/src/core/ext/client_config/lb_policy_registry.c b/src/core/ext/client_channel/lb_policy_registry.c
index a23643ecc6..f46a721f9d 100644
--- a/src/core/ext/client_config/lb_policy_registry.c
+++ b/src/core/ext/client_channel/lb_policy_registry.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/lb_policy_registry.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
#include <string.h>
diff --git a/src/core/ext/client_config/lb_policy_registry.h b/src/core/ext/client_channel/lb_policy_registry.h
index 92f38d6de6..21c468e021 100644
--- a/src/core/ext/client_config/lb_policy_registry.h
+++ b/src/core/ext/client_channel/lb_policy_registry.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
/** Initialize the registry and set \a default_factory as the factory to be
@@ -52,4 +52,4 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_LB_POLICY_REGISTRY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
diff --git a/src/core/ext/client_config/parse_address.c b/src/core/ext/client_channel/parse_address.c
index 8b4abe24a6..b1d55ad0f5 100644
--- a/src/core/ext/client_config/parse_address.c
+++ b/src/core/ext/client_channel/parse_address.c
@@ -31,11 +31,12 @@
*
*/
-#include "src/core/ext/client_config/parse_address.h"
+#include "src/core/ext/client_channel/parse_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include <stdio.h>
#include <string.h>
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
#include <sys/un.h>
#endif
@@ -44,33 +45,39 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#ifdef GPR_HAVE_UNIX_SOCKET
-int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
- struct sockaddr_un *un = (struct sockaddr_un *)addr;
+#ifdef GRPC_HAVE_UNIX_SOCKET
+
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
+ struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, uri->path);
- *len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
+ resolved_addr->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
return 1;
}
-#endif
-int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
+#else /* GRPC_HAVE_UNIX_SOCKET */
+
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr) { abort(); }
+
+#endif /* GRPC_HAVE_UNIX_SOCKET */
+
+int parse_ipv4(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
- struct sockaddr_in *in = (struct sockaddr_in *)addr;
+ struct sockaddr_in *in = (struct sockaddr_in *)resolved_addr->addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
return 0;
}
- memset(in, 0, sizeof(*in));
- *len = sizeof(*in);
+ memset(resolved_addr, 0, sizeof(grpc_resolved_address));
+ resolved_addr->len = sizeof(struct sockaddr_in);
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -96,13 +103,13 @@ done:
return result;
}
-int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
+int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)resolved_addr->addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
@@ -110,7 +117,7 @@ int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len) {
}
memset(in6, 0, sizeof(*in6));
- *len = sizeof(*in6);
+ resolved_addr->len = sizeof(*in6);
in6->sin6_family = AF_INET6;
if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
diff --git a/src/core/ext/client_config/parse_address.h b/src/core/ext/client_channel/parse_address.h
index 74c86f4d93..bf99c5298a 100644
--- a/src/core/ext/client_config/parse_address.h
+++ b/src/core/ext/client_channel/parse_address.h
@@ -31,26 +31,24 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H
#include <stddef.h>
-#include "src/core/ext/client_config/uri_parser.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/ext/client_channel/uri_parser.h"
+#include "src/core/lib/iomgr/resolve_address.h"
-#ifdef GPR_HAVE_UNIX_SOCKET
/** Populate \a addr and \a len from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
-int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
-#endif
+int parse_unix(grpc_uri *uri, grpc_resolved_address *resolved_addr);
/** Populate /a addr and \a len from \a uri, whose path is expected to contain a
* host:port pair. Returns true upon success. */
-int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
+int parse_ipv4(grpc_uri *uri, grpc_resolved_address *resolved_addr);
/** Populate /a addr and \a len from \a uri, whose path is expected to contain a
* host:port pair. Returns true upon success. */
-int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, size_t *len);
+int parse_ipv6(grpc_uri *uri, grpc_resolved_address *resolved_addr);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_PARSE_ADDRESS_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_PARSE_ADDRESS_H */
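Since the parsers now fill a grpc_resolved_address directly, the separate sockaddr_storage and length out-parameters disappear, and parse_unix is always declared (aborting where unix sockets are unavailable). A short sketch of the new call shape, not part of this diff; grpc_uri_parse is assumed to be the parsing entry point declared in uri_parser.h:

#include <grpc/support/log.h>

#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/uri_parser.h"

static void parse_ipv4_example(void) {
  /* Assumed signature: grpc_uri_parse(uri_text, suppress_errors). */
  grpc_uri *uri = grpc_uri_parse("ipv4:127.0.0.1:50051", 0);
  if (uri == NULL) return;
  grpc_resolved_address addr;
  if (parse_ipv4(uri, &addr)) {
    /* addr.addr holds the sockaddr bytes and addr.len their length,
       replacing the old struct sockaddr_storage + size_t pair. */
    gpr_log(GPR_INFO, "parsed %d address bytes", (int)addr.len);
  }
  grpc_uri_destroy(uri);
}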
diff --git a/src/core/ext/client_config/resolver.c b/src/core/ext/client_channel/resolver.c
index 7534ea62af..2ae4fe862e 100644
--- a/src/core/ext/client_config/resolver.c
+++ b/src/core/ext/client_channel/resolver.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver.h"
+#include "src/core/ext/client_channel/resolver.h"
void grpc_resolver_init(grpc_resolver *resolver,
const grpc_resolver_vtable *vtable) {
@@ -76,7 +76,6 @@ void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result,
- grpc_closure *on_complete) {
+ grpc_channel_args **result, grpc_closure *on_complete) {
resolver->vtable->next(exec_ctx, resolver, result, on_complete);
}
diff --git a/src/core/ext/client_config/resolver.h b/src/core/ext/client_channel/resolver.h
index 88ac262d51..96ece92b9d 100644
--- a/src/core/ext/client_config/resolver.h
+++ b/src/core/ext/client_channel/resolver.h
@@ -31,18 +31,16 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H
-#include "src/core/ext/client_config/resolver_result.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/iomgr/iomgr.h"
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
-/** grpc_resolver provides grpc_resolver_result objects to grpc_channel
- objects */
+/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
const grpc_resolver_vtable *vtable;
gpr_refcount refs;
@@ -53,7 +51,7 @@ struct grpc_resolver_vtable {
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result, grpc_closure *on_complete);
+ grpc_channel_args **result, grpc_closure *on_complete);
};
#ifdef GRPC_RESOLVER_REFCOUNT_DEBUG
@@ -81,14 +79,12 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver);
-/** Get the next client config. Called by the channel to fetch a new
- configuration. Expected to set *result with a new configuration,
- and then schedule on_complete for execution.
+/** Get the next result from the resolver. Expected to set \a *result with
+ new channel args and then schedule \a on_complete for execution.
- If resolution is fatally broken, set *result to NULL and
- schedule on_complete. */
+ If resolution is fatally broken, set \a *result to NULL and
+ schedule \a on_complete. */
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **result,
- grpc_closure *on_complete);
+ grpc_channel_args **result, grpc_closure *on_complete);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_H */
diff --git a/src/core/ext/client_config/resolver_factory.c b/src/core/ext/client_channel/resolver_factory.c
index 67832dcf59..7c3d644257 100644
--- a/src/core/ext/client_config/resolver_factory.c
+++ b/src/core/ext/client_channel/resolver_factory.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver_factory.h"
+#include "src/core/ext/client_channel/resolver_factory.h"
void grpc_resolver_factory_ref(grpc_resolver_factory* factory) {
factory->vtable->ref(factory);
diff --git a/src/core/ext/client_config/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h
index 9ec5b9a70e..4da42e84d2 100644
--- a/src/core/ext/client_config/resolver_factory.h
+++ b/src/core/ext/client_channel/resolver_factory.h
@@ -31,23 +31,24 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/resolver.h"
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/resolver.h"
+#include "src/core/ext/client_channel/uri_parser.h"
typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
-/** grpc_resolver provides grpc_resolver_result objects to grpc_channel
- objects */
struct grpc_resolver_factory {
const grpc_resolver_factory_vtable *vtable;
};
-typedef struct grpc_resolver_args { grpc_uri *uri; } grpc_resolver_args;
+typedef struct grpc_resolver_args {
+ grpc_uri *uri;
+ const grpc_channel_args *args;
+} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
void (*ref)(grpc_resolver_factory *factory);
@@ -76,4 +77,4 @@ grpc_resolver *grpc_resolver_factory_create_resolver(
char *grpc_resolver_factory_get_default_authority(
grpc_resolver_factory *factory, grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_FACTORY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
diff --git a/src/core/ext/client_config/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index bd5c683878..d0f0fc3f33 100644
--- a/src/core/ext/client_config/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include <string.h>
@@ -55,7 +55,7 @@ void grpc_resolver_registry_shutdown(void) {
grpc_resolver_factory_unref(g_all_of_the_resolvers[i]);
}
// FIXME(ctiller): this should live in grpc_resolver_registry_init,
- // however that would have the client_config plugin call this AFTER we start
+ // however that would have the client_channel plugin call this AFTER we start
// registering resolvers from third party plugins, and so they'd never show
// up.
// We likely need some kind of dependency system for plugins.... what form
@@ -131,14 +131,16 @@ static grpc_resolver_factory *resolve_factory(const char *target,
return factory;
}
-grpc_resolver *grpc_resolver_create(const char *target) {
+grpc_resolver *grpc_resolver_create(const char *target,
+ const grpc_channel_args *args) {
grpc_uri *uri = NULL;
grpc_resolver_factory *factory = resolve_factory(target, &uri);
grpc_resolver *resolver;
- grpc_resolver_args args;
- memset(&args, 0, sizeof(args));
- args.uri = uri;
- resolver = grpc_resolver_factory_create_resolver(factory, &args);
+ grpc_resolver_args resolver_args;
+ memset(&resolver_args, 0, sizeof(resolver_args));
+ resolver_args.uri = uri;
+ resolver_args.args = args;
+ resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args);
grpc_uri_destroy(uri);
return resolver;
}
diff --git a/src/core/ext/client_config/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h
index 4c6279b978..2a95a669f0 100644
--- a/src/core/ext/client_config/resolver_registry.h
+++ b/src/core/ext/client_channel/resolver_registry.h
@@ -31,10 +31,10 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H
-#include "src/core/ext/client_config/resolver_factory.h"
+#include "src/core/ext/client_channel/resolver_factory.h"
void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
@@ -57,8 +57,11 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
was not NULL).
If a resolver factory was found, use it to instantiate a resolver and
return it.
- If a resolver factory was not found, return NULL. */
-grpc_resolver *grpc_resolver_create(const char *target);
+ If a resolver factory was not found, return NULL.
+ \a args is a set of channel arguments to be included in the result
+ (typically the set of arguments passed in from the client API). */
+grpc_resolver *grpc_resolver_create(const char *target,
+ const grpc_channel_args *args);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
@@ -68,4 +71,4 @@ grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name);
representing the default authority to pass from a client. */
char *grpc_get_default_authority(const char *target);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_REGISTRY_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
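With these two headers changed, a resolver is constructed from the target plus the caller's channel args and hands back plain grpc_channel_args, which is what makes resolver_result.{c,h} removable below. A consumer sketch wired up against the signatures shown above (not part of this change):

#include "src/core/ext/client_channel/resolver.h"
#include "src/core/ext/client_channel/resolver_registry.h"

typedef struct {
  grpc_resolver *resolver;
  grpc_channel_args *result; /* set by the resolver before on_result runs */
  grpc_closure on_result;
} resolver_user;

static void on_result(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  resolver_user *ru = arg;
  if (ru->result == NULL) {
    return; /* NULL result: resolution is fatally broken */
  }
  /* e.g. read GRPC_ARG_LB_ADDRESSES out of ru->result, then call
     grpc_resolver_next() again to wait for the next update. */
}

static void start_resolving(grpc_exec_ctx *exec_ctx, resolver_user *ru,
                            const char *target,
                            const grpc_channel_args *channel_args) {
  ru->resolver = grpc_resolver_create(target, channel_args);
  grpc_closure_init(&ru->on_result, on_result, ru);
  grpc_resolver_next(exec_ctx, ru->resolver, &ru->result, &ru->on_result);
}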
diff --git a/src/core/ext/client_config/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 0bbaa3e382..789966cb69 100644
--- a/src/core/ext/client_config/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/subchannel.h"
#include <limits.h>
#include <string.h>
@@ -39,9 +39,9 @@
#include <grpc/support/alloc.h>
#include <grpc/support/avl.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/initial_connect_string.h"
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/initial_connect_string.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/iomgr/timer.h"
@@ -95,8 +95,7 @@ struct grpc_subchannel {
/** channel arguments */
grpc_channel_args *args;
/** address to connect to */
- struct sockaddr *addr;
- size_t addr_len;
+ grpc_resolved_address *addr;
grpc_subchannel_key *key;
@@ -184,9 +183,10 @@ static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
+ return c;
}
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
@@ -220,8 +220,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "SUBCHANNEL: %p %12s 0x%08d -> 0x%08d [%s]", c, purpose, (int)old_val,
- (int)(old_val + delta), reason);
+ "SUBCHANNEL: %p %s 0x%08" PRIxPTR " -> 0x%08" PRIxPTR " [%s]", c,
+ purpose, old_val, old_val + delta, reason);
#endif
return old_val;
}
@@ -298,7 +298,7 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
- grpc_subchannel_args *args) {
+ const grpc_subchannel_args *args) {
grpc_subchannel_key *key = grpc_subchannel_key_create(connector, args);
grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
if (c) {
@@ -320,12 +320,11 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
} else {
c->filters = NULL;
}
- c->addr = gpr_malloc(args->addr_len);
- if (args->addr_len) memcpy(c->addr, args->addr, args->addr_len);
+ c->addr = gpr_malloc(sizeof(grpc_resolved_address));
+ if (args->addr->len)
+ memcpy(c->addr, args->addr, sizeof(grpc_resolved_address));
c->pollset_set = grpc_pollset_set_create();
- c->addr_len = args->addr_len;
- grpc_set_initial_connect_string(&c->addr, &c->addr_len,
- &c->initial_connect_string);
+ grpc_set_initial_connect_string(&c->addr, &c->initial_connect_string);
c->args = grpc_channel_args_copy(args->args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
@@ -376,7 +375,6 @@ static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
args.interested_parties = c->pollset_set;
args.addr = c->addr;
- args.addr_len = c->addr_len;
args.deadline = c->next_attempt;
args.channel_args = c->args;
args.initial_connect_string = c->initial_connect_string;
@@ -704,7 +702,7 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, gpr_timespec deadline,
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_malloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
@@ -712,7 +710,7 @@ grpc_error *grpc_connected_subchannel_create_call(
(*call)->connection = con; // Ref is added below.
grpc_error *error =
grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
- NULL, NULL, deadline, callstk);
+ NULL, NULL, path, deadline, callstk);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
diff --git a/src/core/ext/client_config/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 3330621071..93bd72d20d 100644
--- a/src/core/ext/client_config/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -31,13 +31,14 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H
-#include "src/core/ext/client_config/connector.h"
+#include "src/core/ext/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/metadata.h"
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
@@ -96,7 +97,7 @@ grpc_subchannel *grpc_subchannel_weak_ref(
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_ref(
+grpc_connected_subchannel *grpc_connected_subchannel_ref(
grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel
@@ -110,7 +111,7 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
/** construct a subchannel call */
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, gpr_timespec deadline,
+ grpc_polling_entity *pollent, grpc_mdstr *path, gpr_timespec deadline,
grpc_subchannel_call **subchannel_call);
/** process a transport level op */
@@ -166,13 +167,12 @@ struct grpc_subchannel_args {
/** Server name */
const char *server_name;
/** Address to connect to */
- struct sockaddr *addr;
- size_t addr_len;
+ grpc_resolved_address *addr;
};
/** create a subchannel given a connector */
grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
grpc_connector *connector,
- grpc_subchannel_args *args);
+ const grpc_subchannel_args *args);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_H */
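Since grpc_connected_subchannel_ref now returns its argument, a caller can take the ref at the point of assignment. A tiny sketch, assuming a non-debug build in which GRPC_SUBCHANNEL_REF_EXTRA_ARGS expands to nothing:

#include "src/core/ext/client_channel/subchannel.h"

/* Take a ref and store the pointer in one step. */
static grpc_connected_subchannel *store_ref(grpc_connected_subchannel *con,
                                            grpc_connected_subchannel **slot) {
  *slot = grpc_connected_subchannel_ref(con);
  return *slot;
}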
diff --git a/src/core/ext/client_config/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c
index 673f85b8cb..227013a7d7 100644
--- a/src/core/ext/client_config/subchannel_index.c
+++ b/src/core/ext/client_channel/subchannel_index.c
@@ -31,7 +31,7 @@
//
//
-#include "src/core/ext/client_config/subchannel_index.h"
+#include "src/core/ext/client_channel/subchannel_index.h"
#include <stdbool.h>
#include <string.h>
@@ -73,7 +73,7 @@ static grpc_exec_ctx *current_ctx() {
}
static grpc_subchannel_key *create_key(
- grpc_connector *connector, grpc_subchannel_args *args,
+ grpc_connector *connector, const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
k->connector = grpc_connector_ref(connector);
@@ -87,17 +87,17 @@ static grpc_subchannel_key *create_key(
k->args.filters = NULL;
}
k->args.server_name = gpr_strdup(args->server_name);
- k->args.addr_len = args->addr_len;
- k->args.addr = gpr_malloc(args->addr_len);
- if (k->args.addr_len > 0) {
- memcpy(k->args.addr, args->addr, k->args.addr_len);
+ k->args.addr = gpr_malloc(sizeof(grpc_resolved_address));
+ k->args.addr->len = args->addr->len;
+ if (k->args.addr->len > 0) {
+ memcpy(k->args.addr, args->addr, sizeof(grpc_resolved_address));
}
k->args.args = copy_channel_args(args->args);
return k;
}
-grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *connector,
- grpc_subchannel_args *args) {
+grpc_subchannel_key *grpc_subchannel_key_create(
+ grpc_connector *connector, const grpc_subchannel_args *args) {
return create_key(connector, args, grpc_channel_args_normalize);
}
@@ -109,14 +109,14 @@ static int subchannel_key_compare(grpc_subchannel_key *a,
grpc_subchannel_key *b) {
int c = GPR_ICMP(a->connector, b->connector);
if (c != 0) return c;
- c = GPR_ICMP(a->args.addr_len, b->args.addr_len);
+ c = GPR_ICMP(a->args.addr->len, b->args.addr->len);
if (c != 0) return c;
c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
c = strcmp(a->args.server_name, b->args.server_name);
if (c != 0) return c;
- if (a->args.addr_len) {
- c = memcmp(a->args.addr, b->args.addr, a->args.addr_len);
+ if (a->args.addr->len) {
+ c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len);
if (c != 0) return c;
}
if (a->args.filter_count > 0) {
diff --git a/src/core/ext/client_config/subchannel_index.h b/src/core/ext/client_channel/subchannel_index.h
index 6b8d063855..a67bd5e219 100644
--- a/src/core/ext/client_config/subchannel_index.h
+++ b/src/core/ext/client_channel/subchannel_index.h
@@ -31,11 +31,11 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H
-#include "src/core/ext/client_config/connector.h"
-#include "src/core/ext/client_config/subchannel.h"
+#include "src/core/ext/client_channel/connector.h"
+#include "src/core/ext/client_channel/subchannel.h"
/** \file Provides an index of active subchannels so that they can be
shared amongst channels */
@@ -43,8 +43,8 @@
typedef struct grpc_subchannel_key grpc_subchannel_key;
/** Create a key that can be used to uniquely identify a subchannel */
-grpc_subchannel_key *grpc_subchannel_key_create(grpc_connector *con,
- grpc_subchannel_args *args);
+grpc_subchannel_key *grpc_subchannel_key_create(
+ grpc_connector *con, const grpc_subchannel_args *args);
/** Destroy a subchannel key */
void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
@@ -74,4 +74,4 @@ void grpc_subchannel_index_init(void);
/** Shutdown the subchannel index (global) */
void grpc_subchannel_index_shutdown(void);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_SUBCHANNEL_INDEX_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
diff --git a/src/core/ext/client_config/uri_parser.c b/src/core/ext/client_channel/uri_parser.c
index 3ca1a58e69..bcb6a1dee4 100644
--- a/src/core/ext/client_config/uri_parser.c
+++ b/src/core/ext/client_channel/uri_parser.c
@@ -31,7 +31,7 @@
*
*/
-#include "src/core/ext/client_config/uri_parser.h"
+#include "src/core/ext/client_channel/uri_parser.h"
#include <string.h>
diff --git a/src/core/ext/client_config/uri_parser.h b/src/core/ext/client_channel/uri_parser.h
index 875a7cb07c..5fe0e8f35e 100644
--- a/src/core/ext/client_config/uri_parser.h
+++ b/src/core/ext/client_channel/uri_parser.h
@@ -31,8 +31,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H
#include <stddef.h>
@@ -60,4 +60,4 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
/** destroy a uri */
void grpc_uri_destroy(grpc_uri *uri);
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_URI_PARSER_H */
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_URI_PARSER_H */
diff --git a/src/core/ext/client_config/resolver_result.c b/src/core/ext/client_config/resolver_result.c
deleted file mode 100644
index 63480d152b..0000000000
--- a/src/core/ext/client_config/resolver_result.c
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#include "src/core/ext/client_config/resolver_result.h"
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/channel/channel_args.h"
-
-struct grpc_resolver_result {
- gpr_refcount refs;
- char* server_name;
- grpc_lb_addresses* addresses;
- char* lb_policy_name;
- grpc_channel_args* lb_policy_args;
-};
-
-grpc_resolver_result* grpc_resolver_result_create(
- const char* server_name, grpc_lb_addresses* addresses,
- const char* lb_policy_name, grpc_channel_args* lb_policy_args) {
- grpc_resolver_result* result = gpr_malloc(sizeof(*result));
- memset(result, 0, sizeof(*result));
- gpr_ref_init(&result->refs, 1);
- result->server_name = gpr_strdup(server_name);
- result->addresses = addresses;
- result->lb_policy_name = gpr_strdup(lb_policy_name);
- result->lb_policy_args = lb_policy_args;
- return result;
-}
-
-void grpc_resolver_result_ref(grpc_resolver_result* result) {
- gpr_ref(&result->refs);
-}
-
-void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
- grpc_resolver_result* result) {
- if (gpr_unref(&result->refs)) {
- gpr_free(result->server_name);
- grpc_lb_addresses_destroy(result->addresses, NULL /* user_data_destroy */);
- gpr_free(result->lb_policy_name);
- grpc_channel_args_destroy(result->lb_policy_args);
- gpr_free(result);
- }
-}
-
-const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result) {
- return result->server_name;
-}
-
-grpc_lb_addresses* grpc_resolver_result_get_addresses(
- grpc_resolver_result* result) {
- return result->addresses;
-}
-
-const char* grpc_resolver_result_get_lb_policy_name(
- grpc_resolver_result* result) {
- return result->lb_policy_name;
-}
-
-grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
- grpc_resolver_result* result) {
- return result->lb_policy_args;
-}
diff --git a/src/core/ext/client_config/resolver_result.h b/src/core/ext/client_config/resolver_result.h
deleted file mode 100644
index 414c2e2482..0000000000
--- a/src/core/ext/client_config/resolver_result.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//
-// Copyright 2015, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#ifndef GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H
-#define GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H
-
-#include "src/core/ext/client_config/lb_policy_factory.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-
-// TODO(roth, ctiller): In the long term, we are considering replacing
-// the resolver_result data structure with grpc_channel_args. The idea is
-// that the resolver will return a set of channel args that contains the
-// information that is currently in the resolver_result struct. For
-// example, there will be specific args indicating the set of addresses
-// and the name of the LB policy to instantiate. Note that if we did
-// this, we would probably want to change the data structure of
-// grpc_channel_args such to a hash table or AVL or some other data
-// structure that does not require linear search to find keys.
-
-/// Results reported from a grpc_resolver.
-typedef struct grpc_resolver_result grpc_resolver_result;
-
-/// Takes ownership of \a addresses and \a lb_policy_args.
-grpc_resolver_result* grpc_resolver_result_create(
- const char* server_name, grpc_lb_addresses* addresses,
- const char* lb_policy_name, grpc_channel_args* lb_policy_args);
-void grpc_resolver_result_ref(grpc_resolver_result* result);
-void grpc_resolver_result_unref(grpc_exec_ctx* exec_ctx,
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-const char* grpc_resolver_result_get_server_name(grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-grpc_lb_addresses* grpc_resolver_result_get_addresses(
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-const char* grpc_resolver_result_get_lb_policy_name(
- grpc_resolver_result* result);
-
-/// Caller does NOT take ownership of result.
-grpc_channel_args* grpc_resolver_result_get_lb_policy_args(
- grpc_resolver_result* result);
-
-#endif /* GRPC_CORE_EXT_CLIENT_CONFIG_RESOLVER_RESULT_H */
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index 626f285b90..6da4febf26 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -69,8 +69,8 @@
* possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
- * the Round Robin policy. \a rr_handover() will instantiate the RR policy
- * and perform all the pending operations over it.
+ * the Round Robin policy. \a rr_handover_locked() will instantiate the RR
+ * policy and perform all the pending operations over it.
* 2. There's already a RR policy instance active. We need to introduce the new
* one build from the new serverlist, but taking care not to disrupt the
* operations in progress over the old RR instance. This is done by
@@ -78,7 +78,7 @@
* references are held on the old RR policy, it'll be destroyed and \a
* glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
* state. At this point we can transition to a new RR instance safely, which
- * is done once again via \a rr_handover().
+ * is done once again via \a rr_handover_locked().
*
*
* Once a RR policy instance is in place (and getting updated as described),
@@ -86,8 +86,8 @@
* forwarding them to the RR instance. Any time there's no RR policy available
* (ie, right after the creation of the gRPCLB policy, if an empty serverlist
* is received, etc), pick/ping requests are added to a list of pending
- * picks/pings to be flushed and serviced as part of \a rr_handover() the moment
- * the RR policy instance becomes available.
+ * picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
+ * moment the RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
@@ -96,6 +96,12 @@
* - Implement LB service forwarding (point 2c. in the doc's diagram).
*/
+/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+ using that endpoint. Because of various transitive includes in uv.h,
+ including windows.h on Windows, uv.h must be included before other system
+ headers. Therefore, sockaddr.h must always be included first */
+#include "src/core/lib/iomgr/sockaddr.h"
+
#include <errno.h>
#include <string.h>
@@ -107,13 +113,13 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
-#include "src/core/ext/client_config/client_channel_factory.h"
-#include "src/core/ext/client_config/lb_policy_factory.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/parse_address.h"
+#include "src/core/ext/client_channel/client_channel_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
@@ -134,6 +140,9 @@ static void initial_metadata_add_lb_token(
}
typedef struct wrapped_rr_closure_arg {
+ /* the closure instance using this struct as argument */
+ grpc_closure wrapper_closure;
+
/* the original closure. Usually a on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
grpc_closure *wrapped_closure;
@@ -155,9 +164,8 @@ typedef struct wrapped_rr_closure_arg {
/* The RR instance related to the closure */
grpc_lb_policy *rr_policy;
- /* when not NULL, represents a pending_{pick,ping} node to be freed upon
- * closure execution */
- void *owning_pending_node; /* to be freed if not NULL */
+ /* heap memory to be freed upon closure execution. */
+ void *free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
@@ -183,10 +191,10 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-
grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
NULL);
- gpr_free(wc_arg->owning_pending_node);
+ GPR_ASSERT(wc_arg->free_when_done != NULL);
+ gpr_free(wc_arg->free_when_done);
}
/* Linked list of pending pick requests. It stores all information needed to
@@ -207,10 +215,6 @@ typedef struct pending_pick {
* upon error. */
grpc_connected_subchannel **target;
- /* a closure wrapping the original on_complete one to be invoked once the
- * pick() has completed (regardless of success) */
- grpc_closure wrapped_on_complete;
-
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
@@ -230,8 +234,9 @@ static void add_pending_pick(pending_pick **root,
pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
pick_args->lb_token_mdelem_storage;
- grpc_closure_init(&pp->wrapped_on_complete, wrapped_rr_closure,
- &pp->wrapped_on_complete_arg);
+ pp->wrapped_on_complete_arg.free_when_done = pp;
+ grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
+ wrapped_rr_closure, &pp->wrapped_on_complete_arg);
*root = pp;
}
@@ -239,10 +244,6 @@ static void add_pending_pick(pending_pick **root,
typedef struct pending_ping {
struct pending_ping *next;
- /* a closure wrapping the original on_complete one to be invoked once the
- * ping() has completed (regardless of success) */
- grpc_closure wrapped_notify;
-
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
@@ -251,10 +252,11 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pending_ping *pping = gpr_malloc(sizeof(*pping));
memset(pping, 0, sizeof(pending_ping));
memset(&pping->wrapped_notify_arg, 0, sizeof(wrapped_rr_closure_arg));
- pping->next = *root;
- grpc_closure_init(&pping->wrapped_notify, wrapped_rr_closure,
- &pping->wrapped_notify_arg);
pping->wrapped_notify_arg.wrapped_closure = notify;
+ pping->wrapped_notify_arg.free_when_done = pping;
+ pping->next = *root;
+ grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
+ wrapped_rr_closure, &pping->wrapped_notify_arg);
*root = pping;
}
@@ -274,6 +276,7 @@ typedef struct glb_lb_policy {
/** who the client is trying to communicate with */
const char *server_name;
grpc_client_channel_factory *cc_factory;
+ grpc_channel_args *args;
/** deadline for the LB's call */
gpr_timespec deadline;
@@ -307,13 +310,6 @@ typedef struct glb_lb_policy {
/** for tracking of the RR connectivity */
rr_connectivity_data *rr_connectivity;
-
- /* a wrapped (see \a wrapped_rr_closure) on-complete closure for readily
- * available RR picks */
- grpc_closure wrapped_on_complete;
-
- /* arguments for the wrapped_on_complete closure */
- wrapped_rr_closure_arg wc_arg;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -347,6 +343,21 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
return true;
}
+/* vtable for LB tokens in grpc_lb_addresses. */
+static void *lb_token_copy(void *token) {
+ return token == NULL ? NULL : GRPC_MDELEM_REF(token);
+}
+static void lb_token_destroy(void *token) {
+ if (token != NULL) GRPC_MDELEM_UNREF(token);
+}
+static int lb_token_cmp(void *token1, void *token2) {
+ if (token1 > token2) return 1;
+ if (token1 < token2) return -1;
+ return 0;
+}
+static const grpc_lb_user_data_vtable lb_token_vtable = {
+ lb_token_copy, lb_token_destroy, lb_token_cmp};
+
/* Returns addresses extracted from \a serverlist. */
static grpc_lb_addresses *process_serverlist(
const grpc_grpclb_serverlist *serverlist) {
@@ -358,7 +369,8 @@ static grpc_lb_addresses *process_serverlist(
}
if (num_valid == 0) return NULL;
- grpc_lb_addresses *lb_addresses = grpc_lb_addresses_create(num_valid);
+ grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
* to the outside world) to be read by the RR policy during its creation.
@@ -399,14 +411,14 @@ static grpc_lb_addresses *process_serverlist(
GPR_ARRAY_SIZE(server->load_balance_token) - 1;
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
(uint8_t *)server->load_balance_token, lb_token_size);
- user_data = grpc_mdelem_from_metadata_strings(
- GRPC_MDSTR_LOAD_REPORTING_INITIAL, lb_token_mdstr);
+ user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
+ lb_token_mdstr);
} else {
gpr_log(GPR_ERROR,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
- grpc_sockaddr_to_uri((struct sockaddr *)&addr.addr));
- user_data = GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY;
+ grpc_sockaddr_to_uri(&addr));
+ user_data = GRPC_MDELEM_LB_TOKEN_EMPTY;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -419,45 +431,85 @@ static grpc_lb_addresses *process_serverlist(
return lb_addresses;
}
-/* A plugin for grpc_lb_addresses_destroy that unrefs the LB token metadata. */
-static void lb_token_destroy(void *token) {
- if (token != NULL) GRPC_MDELEM_UNREF(token);
+/* perform a pick over \a rr_policy. Given that a pick can return immediately
+ * (ignoring its completion callback) we need to perform the cleanups this
+ * callback would otherwise be responsible for */
+static bool pick_from_internal_rr_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *rr_policy,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+ GPR_ASSERT(rr_policy != NULL);
+ const bool pick_done =
+ grpc_lb_policy_pick(exec_ctx, rr_policy, pick_args, target,
+ (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ if (pick_done) {
+ /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
+ if (grpc_lb_glb_trace) {
+ gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
+
+ /* add the load reporting initial metadata */
+ initial_metadata_add_lb_token(pick_args->initial_metadata,
+ pick_args->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(wc_arg->lb_token));
+
+ gpr_free(wc_arg);
+ }
+ /* else, the pending pick will be registered and taken care of by the
+ * pending pick list inside the RR policy (glb_policy->rr_policy).
+ * Eventually, wrapped_on_complete will be called, which will -among other
+ * things- add the LB token to the call's initial metadata */
+
+ return pick_done;
}
-static grpc_lb_policy *create_rr(grpc_exec_ctx *exec_ctx,
- const grpc_grpclb_serverlist *serverlist,
- glb_lb_policy *glb_policy) {
+static grpc_lb_policy *create_rr_locked(
+ grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist,
+ glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
+ if (glb_policy->addresses != NULL) {
+ /* dispose of the previous version */
+ grpc_lb_addresses_destroy(glb_policy->addresses);
+ }
+ glb_policy->addresses = process_serverlist(serverlist);
+
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
- args.server_name = glb_policy->server_name;
args.client_channel_factory = glb_policy->cc_factory;
- args.addresses = process_serverlist(serverlist);
- grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ // Replace the LB addresses in the channel args that we pass down to
+ // the subchannel.
+ static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+ const grpc_arg arg =
+ grpc_lb_addresses_create_channel_arg(glb_policy->addresses);
+ args.args = grpc_channel_args_copy_and_add_and_remove(
+ glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
+ 1);
- if (glb_policy->addresses != NULL) {
- /* dispose of the previous version */
- grpc_lb_addresses_destroy(glb_policy->addresses, lb_token_destroy);
- }
- glb_policy->addresses = args.addresses;
+ grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
+ grpc_channel_args_destroy(args.args);
return rr;
}
-static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_error *error) {
+static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy, grpc_error *error) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
glb_policy->rr_policy =
- create_rr(exec_ctx, glb_policy->serverlist, glb_policy);
+ create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
(intptr_t)glb_policy->rr_policy);
}
GPR_ASSERT(glb_policy->rr_policy != NULL);
+ grpc_pollset_set_add_pollset_set(exec_ctx,
+ glb_policy->rr_policy->interested_parties,
+ glb_policy->base.interested_parties);
glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
exec_ctx, glb_policy->rr_policy, &error);
grpc_lb_policy_notify_on_state_change(
@@ -478,11 +530,9 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
}
- grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, &pp->pick_args,
- pp->target,
- (void **)&pp->wrapped_on_complete_arg.lb_token,
- &pp->wrapped_on_complete);
- pp->wrapped_on_complete_arg.owning_pending_node = pp;
+ pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+ &pp->pick_args, pp->target,
+ &pp->wrapped_on_complete_arg);
}
pending_ping *pping;
@@ -495,8 +545,7 @@ static void rr_handover(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
(intptr_t)glb_policy->rr_policy);
}
grpc_lb_policy_ping_one(exec_ctx, glb_policy->rr_policy,
- &pping->wrapped_notify);
- pping->wrapped_notify_arg.owning_pending_node = pping;
+ &pping->wrapped_notify_arg.wrapper_closure);
}
}
@@ -509,13 +558,16 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* a RR policy is shutting down but there's a serverlist available ->
* perform a handover */
- rr_handover(exec_ctx, glb_policy, error);
+ gpr_mu_lock(&glb_policy->mu);
+ rr_handover_locked(exec_ctx, glb_policy, error);
+ gpr_mu_unlock(&glb_policy->mu);
} else {
/* shutting down and no new serverlist available. Bail out. */
gpr_free(rr_conn_data);
}
} else {
if (error == GRPC_ERROR_NONE) {
+ gpr_mu_lock(&glb_policy->mu);
/* RR not shutting down. Mimic the RR's policy state */
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
rr_conn_data->state, GRPC_ERROR_REF(error),
@@ -524,6 +576,7 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_conn_data->state,
&rr_conn_data->on_change);
+ gpr_mu_unlock(&glb_policy->mu);
} else { /* error */
gpr_free(rr_conn_data);
}
@@ -533,6 +586,12 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Count the number of gRPC-LB addresses. There must be at least one.
* TODO(roth): For now, we ignore non-balancer addresses, but in the
* future, we may change the behavior such that we fall back to using
@@ -540,23 +599,27 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* time, this should be changed to allow a list with no balancer addresses,
* since the resolver might fail to return a balancer address even when
* this is the right LB policy to use. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; ++i) {
- if (args->addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy));
memset(glb_policy, 0, sizeof(*glb_policy));
- /* All input addresses in args->addresses come from a resolver that claims
+ /* All input addresses in addresses come from a resolver that claims
* they are LB services. It's the resolver's responsibility to make sure
* this
* policy is only instantiated and used in that case.
*
* Create a client channel over them to communicate with a LB service */
- glb_policy->server_name = gpr_strdup(args->server_name);
+ glb_policy->server_name = gpr_strdup(server_name);
glb_policy->cc_factory = args->client_channel_factory;
+ glb_policy->args = grpc_channel_args_copy(args->args);
GPR_ASSERT(glb_policy->cc_factory != NULL);
/* construct a target from the addresses in args, given in the form
@@ -564,22 +627,19 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
* TODO(dgq): support mixed ip version */
char **addr_strs = gpr_malloc(sizeof(char *) * num_grpclb_addrs);
size_t addr_index = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (args->addresses->addresses[i].user_data != NULL) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
- if (args->addresses->addresses[i].is_balancer) {
+ if (addresses->addresses[i].is_balancer) {
if (addr_index == 0) {
- addr_strs[addr_index++] = grpc_sockaddr_to_uri(
- (const struct sockaddr *)&args->addresses->addresses[i]
- .address.addr);
+ addr_strs[addr_index++] =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
} else {
- GPR_ASSERT(grpc_sockaddr_to_string(
- &addr_strs[addr_index++],
- (const struct sockaddr *)&args->addresses->addresses[i]
- .address.addr,
- true) > 0);
+ GPR_ASSERT(grpc_sockaddr_to_string(&addr_strs[addr_index++],
+ &addresses->addresses[i].address,
+ true) > 0);
}
}
}
@@ -587,10 +647,29 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
char *target_uri_str = gpr_strjoin_sep((const char **)addr_strs,
num_grpclb_addrs, ",", &uri_path_len);
- /* will pick using pick_first */
+ /* Create a channel to talk to the LBs.
+ *
+ * We strip out the channel arg for the LB policy name, since we want
+ * to use the default (pick_first) in this case.
+ *
+ * We also strip out the channel arg for the resolved addresses, since
+ * that will be generated by the name resolver used in the LB channel.
+ * Note that the LB channel will use the sockaddr resolver, so this
+ * won't actually generate a query to DNS (or some other name service).
+ * However, the addresses returned by the sockaddr resolver will have
+ * is_balancer=false, whereas our own addresses have is_balancer=true.
+ * We need the LB channel to return addresses with is_balancer=false
+ * so that it does not wind up recursively using the grpclb LB policy,
+ * as per the special case logic in client_channel.c.
+ */
+ static const char *keys_to_remove[] = {GRPC_ARG_LB_POLICY_NAME,
+ GRPC_ARG_LB_ADDRESSES};
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove));
glb_policy->lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, glb_policy->cc_factory, target_uri_str,
- GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, NULL);
+ GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
+ grpc_channel_args_destroy(new_args);
gpr_free(target_uri_str);
for (size_t i = 0; i < num_grpclb_addrs; i++) {
@@ -623,6 +702,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
gpr_free((void *)glb_policy->server_name);
+ grpc_channel_args_destroy(glb_policy->args);
grpc_channel_destroy(glb_policy->lb_channel);
glb_policy->lb_channel = NULL;
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
@@ -630,7 +710,7 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
- grpc_lb_addresses_destroy(glb_policy->addresses, lb_token_destroy);
+ grpc_lb_addresses_destroy(glb_policy->addresses);
gpr_free(glb_policy);
}
@@ -648,15 +728,15 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_NONE, NULL);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify, GRPC_ERROR_NONE,
- NULL);
+ grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_NONE, NULL);
pping = next;
}
@@ -686,11 +766,9 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(
- exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
- exec_ctx, &pp->wrapped_on_complete,
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@@ -719,10 +797,8 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(
- exec_ctx, pp->pick_args.pollent, glb_policy->base.interested_parties);
grpc_exec_ctx_sched(
- exec_ctx, &pp->wrapped_on_complete,
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
} else {
pp->next = glb_policy->pending_picks;
@@ -775,39 +851,20 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
(intptr_t)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- memset(&glb_policy->wc_arg, 0, sizeof(wrapped_rr_closure_arg));
- glb_policy->wc_arg.rr_policy = glb_policy->rr_policy;
- glb_policy->wc_arg.target = target;
- glb_policy->wc_arg.wrapped_closure = on_complete;
- glb_policy->wc_arg.lb_token_mdelem_storage =
- pick_args->lb_token_mdelem_storage;
- glb_policy->wc_arg.initial_metadata = pick_args->initial_metadata;
- glb_policy->wc_arg.owning_pending_node = NULL;
- grpc_closure_init(&glb_policy->wrapped_on_complete, wrapped_rr_closure,
- &glb_policy->wc_arg);
-
- pick_done =
- grpc_lb_policy_pick(exec_ctx, glb_policy->rr_policy, pick_args, target,
- (void **)&glb_policy->wc_arg.lb_token,
- &glb_policy->wrapped_on_complete);
- if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace) {
- gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
- (intptr_t)glb_policy->wc_arg.rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->wc_arg.rr_policy, "glb_pick");
- /* add the load reporting initial metadata */
- initial_metadata_add_lb_token(
- pick_args->initial_metadata, pick_args->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(glb_policy->wc_arg.lb_token));
- }
+ wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
+ memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
+
+ grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+ wc_arg->rr_policy = glb_policy->rr_policy;
+ wc_arg->target = target;
+ wc_arg->wrapped_closure = on_complete;
+ wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
+ wc_arg->initial_metadata = pick_args->initial_metadata;
+ wc_arg->free_when_done = wc_arg;
+ pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
+ pick_args, target, wc_arg);
} else {
- /* else, the pending pick will be registered and taken care of by the
- * pending pick list inside the RR policy (glb_policy->rr_policy) */
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- glb_policy->base.interested_parties);
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
@@ -931,7 +988,7 @@ static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
- * entities passed to glb_pick(). */
+ * entities from \a client_channel. */
lb_client->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1076,6 +1133,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* update serverlist */
if (serverlist->num_servers > 0) {
+ gpr_mu_lock(&lb_client->glb_policy->mu);
if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
serverlist)) {
if (grpc_lb_glb_trace) {
@@ -1093,7 +1151,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
if (lb_client->glb_policy->rr_policy == NULL) {
/* initial "handover", in this case from a null RR policy, meaning
* it'll just create the first RR policy instance */
- rr_handover(exec_ctx, lb_client->glb_policy, error);
+ rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
} else {
/* unref the RR policy, eventually leading to its substitution with a
* new one constructed from the received serverlist (see
@@ -1101,6 +1159,7 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
"serverlist_received");
}
+ gpr_mu_unlock(&lb_client->glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
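
The grpclb.c hunks above move closure ownership into the closure's own argument: each pending pick/ping, and each immediate pick made from glb_pick(), now embeds its wrapper closure inside wrapped_rr_closure_arg and records which heap block to release in free_when_done, instead of sharing a single wc_arg on the policy. A minimal, self-contained sketch of that ownership pattern follows; the sketch_* types are hypothetical stand-ins, not the gRPC API.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct sketch_closure {
      void (*cb)(void *arg);
      void *arg;
    } sketch_closure;

    typedef struct sketch_arg {
      sketch_closure wrapper_closure; /* the closure using this struct as its argument */
      const char *label;
      void *free_when_done;           /* heap block released after the callback runs */
    } sketch_arg;

    static void wrapped_cb(void *argp) {
      sketch_arg *a = argp;
      printf("wrapped callback for %s\n", a->label);
      free(a->free_when_done); /* same code path whether the block is the arg itself or a list node */
    }

    int main(void) {
      sketch_arg *a = malloc(sizeof(*a));
      a->wrapper_closure.cb = wrapped_cb;
      a->wrapper_closure.arg = a;
      a->label = "immediate pick";
      a->free_when_done = a; /* the arg owns itself, as in glb_pick() */
      a->wrapper_closure.cb(a->wrapper_closure.arg);
      return 0;
    }

Because the callback frees whatever free_when_done points at, one wrapped_rr_closure body can serve both the per-pick heap allocation and the pending_pick/pending_ping list nodes.
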
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.h b/src/core/ext/lb_policy/grpclb/grpclb.h
index 83552b4fa0..ff23f3a545 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/lb_policy/grpclb/grpclb.h
@@ -34,7 +34,7 @@
#ifndef GRPC_CORE_EXT_LB_POLICY_GRPCLB_GRPCLB_H
#define GRPC_CORE_EXT_LB_POLICY_GRPCLB_GRPCLB_H
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
diff --git a/src/core/ext/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
index c1e73d08ef..079a64a3f3 100644
--- a/src/core/ext/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/lb_policy/grpclb/load_balancer_api.h
@@ -36,7 +36,7 @@
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/lb_policy_factory.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
#include "src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
#ifdef __cplusplus
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index 961a0c9b19..ac3c6a305a 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -34,12 +34,13 @@
#include <string.h>
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
+
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/transport/connectivity_state.h"
typedef struct pending_pick {
struct pending_pick *next;
- grpc_polling_entity *pollent;
uint32_t initial_metadata_flags;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
@@ -119,8 +120,6 @@ static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
pp = next;
@@ -138,8 +137,6 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -168,8 +165,6 @@ static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
@@ -214,7 +209,7 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
/* Check atomically for a selected channel */
grpc_connected_subchannel *selected = GET_SELECTED(p);
if (selected != NULL) {
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
}
@@ -223,17 +218,14 @@ static int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
selected = GET_SELECTED(p);
if (selected) {
gpr_mu_unlock(&p->mu);
- *target = selected;
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
return 1;
} else {
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollent = pick_args->pollent;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
pp->on_complete = on_complete;
@@ -318,9 +310,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target = selected;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@@ -444,14 +434,22 @@ static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- GPR_ASSERT(args->addresses != NULL);
GPR_ASSERT(args->client_channel_factory != NULL);
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (!args->addresses->addresses[i].is_balancer) ++num_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (!addresses->addresses[i].is_balancer) ++num_addrs;
}
if (num_addrs == 0) return NULL;
@@ -462,22 +460,21 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
memset(p->subchannels, 0, sizeof(*p->subchannels) * num_addrs);
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */
- if (args->addresses->addresses[i].is_balancer) continue;
+ if (addresses->addresses[i].is_balancer) continue;
- if (args->addresses->addresses[i].user_data != NULL) {
+ if (addresses->addresses[i].user_data != NULL) {
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
/* server_name will be copied as part of the subchannel creation. This makes
- * the copying of args->server_name (a borrowed pointer) OK. */
- sc_args.server_name = args->server_name;
- sc_args.addr =
- (struct sockaddr *)(&args->addresses->addresses[i].address.addr);
- sc_args.addr_len = args->addresses->addresses[i].address.len;
+ * the copying of server_name (a borrowed pointer) OK. */
+ sc_args.server_name = server_name;
+ sc_args.addr = &addresses->addresses[i].address;
+ sc_args.args = args->args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
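
With grpc_lb_policy_args reduced to a client_channel_factory plus a grpc_channel_args pointer, glb_create(), create_pick_first(), and round_robin_create() all read their inputs back out of the channel args, as the hunks above show. A small sketch of that extraction step, using only the calls visible in this patch; extract_lb_inputs is a made-up helper name, and the GRPC_ARG_SERVER_NAME / GRPC_ARG_LB_ADDRESSES macros are assumed to resolve through the headers these files already include.

    #include <stdbool.h>

    #include "src/core/ext/client_channel/lb_policy_factory.h"
    #include "src/core/lib/channel/channel_args.h"

    /* Sketch: pull the two inputs every LB policy in this patch now needs out of
     * the channel args. Returns false if the addresses arg is missing. */
    static bool extract_lb_inputs(const grpc_channel_args *args,
                                  const char **server_name,
                                  grpc_lb_addresses **addresses) {
      const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SERVER_NAME);
      *server_name =
          arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
      arg = grpc_channel_args_find(args, GRPC_ARG_LB_ADDRESSES);
      if (arg == NULL || arg->type != GRPC_ARG_POINTER) return false;
      *addresses = arg->value.pointer.p;
      return true;
    }
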
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 930fa86aca..37a9b18b97 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -63,7 +63,8 @@
#include <grpc/support/alloc.h>
-#include "src/core/ext/client_config/lb_policy_registry.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/static_metadata.h"
@@ -78,9 +79,6 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
- /* polling entity for the pick()'s async notification */
- grpc_polling_entity *pollent;
-
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
@@ -318,8 +316,6 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -348,8 +344,6 @@ static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
*pp->target = NULL;
grpc_exec_ctx_sched(
exec_ctx, pp->on_complete,
@@ -403,8 +397,9 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
- gpr_mu_unlock(&p->mu);
- *target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
if (user_data != NULL) {
*user_data = selected->user_data;
@@ -416,17 +411,15 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
/* only advance the last picked pointer if the selection was used */
advance_last_picked_locked(p);
+ gpr_mu_unlock(&p->mu);
return 1;
} else {
/* no pick currently available. Save for later in list of pending picks */
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
- grpc_polling_entity_add_to_pollset_set(exec_ctx, pick_args->pollent,
- p->base.interested_parties);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
- pp->pollent = pick_args->pollent;
pp->target = target;
pp->on_complete = on_complete;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -472,8 +465,9 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
- *pp->target =
- grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
@@ -482,8 +476,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
(void *)selected->subchannel, (void *)selected);
}
- grpc_polling_entity_del_from_pollset_set(exec_ctx, pp->pollent,
- p->base.interested_parties);
grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
gpr_free(pp);
}
@@ -589,7 +581,9 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
- target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
+ target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(selected->subchannel),
+ "picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
@@ -610,14 +604,22 @@ static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
- GPR_ASSERT(args->addresses != NULL);
GPR_ASSERT(args->client_channel_factory != NULL);
+ /* Get server name. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME);
+ const char *server_name =
+ arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL;
+
/* Find the number of backend addresses. We ignore balancer
* addresses, since we don't know how to handle them. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER);
+ grpc_lb_addresses *addresses = arg->value.pointer.p;
size_t num_addrs = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
- if (!args->addresses->addresses[i].is_balancer) ++num_addrs;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ if (!addresses->addresses[i].is_balancer) ++num_addrs;
}
if (num_addrs == 0) return NULL;
@@ -630,17 +632,16 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_subchannel_args sc_args;
size_t subchannel_idx = 0;
- for (size_t i = 0; i < args->addresses->num_addresses; i++) {
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
/* Skip balancer addresses, since we only know how to handle backends. */
- if (args->addresses->addresses[i].is_balancer) continue;
+ if (addresses->addresses[i].is_balancer) continue;
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
/* server_name will be copied as part of the subchannel creation. This makes
- * the copying of args->server_name (a borrowed pointer) OK. */
- sc_args.server_name = args->server_name;
- sc_args.addr =
- (struct sockaddr *)(&args->addresses->addresses[i].address.addr);
- sc_args.addr_len = args->addresses->addresses[i].address.len;
+ * the copying of server_name (a borrowed pointer) OK. */
+ sc_args.server_name = server_name;
+ sc_args.addr = &addresses->addresses[i].address;
+ sc_args.args = args->args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
@@ -652,7 +653,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sd->policy = p;
sd->index = subchannel_idx;
sd->subchannel = subchannel;
- sd->user_data = args->addresses->addresses[i].user_data;
+ sd->user_data = addresses->addresses[i].user_data;
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
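
Both pick paths above now return a connected subchannel that carries its own reference (GRPC_CONNECTED_SUBCHANNEL_REF(..., "picked")), so the caller's pointer stays valid even if the policy later drops or replaces its reference. A self-contained sketch of that hand-off, using a hypothetical fake_subchannel type rather than the gRPC refcounting API:

    #include <stdio.h>

    typedef struct { int refs; const char *name; } fake_subchannel;

    static fake_subchannel *fake_ref(fake_subchannel *s, const char *reason) {
      s->refs++; /* the caller now owns this reference, tagged for debugging */
      printf("ref(%s) on %s: now %d refs\n", reason, s->name, s->refs);
      return s;
    }

    /* Sketch of the new contract: the pick hands out a reference it took itself,
     * so the policy can release its own reference without invalidating *target. */
    static void pick(fake_subchannel *selected, fake_subchannel **target) {
      *target = fake_ref(selected, "picked");
    }

    int main(void) {
      fake_subchannel sc = {1, "backend-0"};
      fake_subchannel *target = NULL;
      pick(&sc, &target);
      return 0;
    }
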
diff --git a/src/core/ext/load_reporting/load_reporting.h b/src/core/ext/load_reporting/load_reporting.h
index e37817d8c2..a157844734 100644
--- a/src/core/ext/load_reporting/load_reporting.h
+++ b/src/core/ext/load_reporting/load_reporting.h
@@ -37,13 +37,21 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/channel/channel_stack.h"
-/** Metadata key for initial metadata coming from clients */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_INITIAL_MD_KEY "load-reporting-initial"
+/** Metadata key for the gRPC LB load balancer token.
+ *
+ * The value corresponding to this key is an opaque token that is given to the
+ * frontend as part of each pick; the frontend sends this token to the backend
+ * in each request it sends when using that pick. The token is used by the
+ * backend to verify the request and to allow the backend to report load to the
+ * gRPC LB system. */
+#define GRPC_LB_TOKEN_MD_KEY "lb-token"
-/** Metadata key for trailing metadata from servers */
-/* TODO(dgq): change to the final value TBD */
-#define GRPC_LOAD_REPORTING_TRAILING_MD_KEY "load-reporting-trailing"
+/** Metadata key for gRPC LB cost reporting.
+ *
+ * The value corresponding to this key is an opaque binary blob reported by the
+ * backend as part of its trailing metadata containing cost information for the
+ * call. */
+#define GRPC_LB_COST_MD_KEY "lb-cost-bin"
/** Identifiers for the invocation point of the users LR callback */
typedef enum grpc_load_reporting_source {
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 394f0cb832..eeae2400fb 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -75,7 +75,7 @@ static grpc_mdelem *recv_md_filter(void *user_data, grpc_mdelem *md) {
if (md->key == GRPC_MDSTR_PATH) {
calld->service_method = grpc_mdstr_as_c_string(md->value);
- } else if (md->key == GRPC_MDSTR_LOAD_REPORTING_INITIAL) {
+ } else if (md->key == GRPC_MDSTR_LB_TOKEN) {
calld->initial_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}
@@ -193,7 +193,7 @@ static grpc_mdelem *lr_trailing_md_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- if (md->key == GRPC_MDSTR_LOAD_REPORTING_TRAILING) {
+ if (md->key == GRPC_MDSTR_LB_COST_BIN) {
calld->trailing_md_string = gpr_strdup(grpc_mdstr_as_c_string(md->value));
return NULL;
}
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index fa33ffd7bd..958b8af8b2 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -37,9 +37,10 @@
#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/lb_policy_registry.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
@@ -53,12 +54,12 @@
typedef struct {
/** base class: must be first */
grpc_resolver base;
- /** target name */
- char *target_name;
- /** name to resolve (usually the same as target_name) */
+ /** name to resolve */
char *name_to_resolve;
/** default port to use */
char *default_port;
+ /** channel args. */
+ grpc_channel_args *channel_args;
/** mutex guarding the rest of the state */
gpr_mu mu;
@@ -71,9 +72,9 @@ typedef struct {
/** pending next completion, or NULL */
grpc_closure *next_completion;
/** target result address for next completion */
- grpc_resolver_result **target_result;
+ grpc_channel_args **target_result;
/** current (fully resolved) result */
- grpc_resolver_result *resolved_result;
+ grpc_channel_args *resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -94,7 +95,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
@@ -127,7 +128,7 @@ static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -162,22 +163,23 @@ static void dns_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
dns_resolver *r = arg;
- grpc_resolver_result *result = NULL;
+ grpc_channel_args *result = NULL;
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving);
r->resolving = false;
if (r->addresses != NULL) {
- grpc_lb_addresses *addresses =
- grpc_lb_addresses_create(r->addresses->naddrs);
+ grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ r->addresses->naddrs, NULL /* user_data_vtable */);
for (size_t i = 0; i < r->addresses->naddrs; ++i) {
grpc_lb_addresses_set_address(
addresses, i, &r->addresses->addrs[i].addr,
r->addresses->addrs[i].len, false /* is_balancer */,
NULL /* balancer_name */, NULL /* user_data */);
}
+ grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
+ result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
grpc_resolved_addresses_destroy(r->addresses);
- result = grpc_resolver_result_create(r->target_name, addresses,
- NULL /* lb_policy_name */, NULL);
+ grpc_lb_addresses_destroy(addresses);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
@@ -197,8 +199,8 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
grpc_timer_init(exec_ctx, &r->retry_timer, next_try, dns_on_retry_timer, r,
now);
}
- if (r->resolved_result) {
- grpc_resolver_result_unref(exec_ctx, r->resolved_result);
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(r->resolved_result);
}
r->resolved_result = result;
r->resolved_version++;
@@ -222,10 +224,9 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
- *r->target_result = r->resolved_result;
- if (r->resolved_result) {
- grpc_resolver_result_ref(r->resolved_result);
- }
+ *r->target_result = r->resolved_result == NULL
+ ? NULL
+ : grpc_channel_args_copy(r->resolved_result);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
r->published_version = r->resolved_version;
@@ -235,12 +236,12 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
dns_resolver *r = (dns_resolver *)gr;
gpr_mu_destroy(&r->mu);
- if (r->resolved_result) {
- grpc_resolver_result_unref(exec_ctx, r->resolved_result);
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(r->resolved_result);
}
- gpr_free(r->target_name);
gpr_free(r->name_to_resolve);
gpr_free(r->default_port);
+ grpc_channel_args_destroy(r->channel_args);
gpr_free(r);
}
@@ -260,9 +261,14 @@ static grpc_resolver *dns_create(grpc_resolver_args *args,
memset(r, 0, sizeof(*r));
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &dns_resolver_vtable);
- r->target_name = gpr_strdup(path);
r->name_to_resolve = proxy_name == NULL ? gpr_strdup(path) : proxy_name;
r->default_port = gpr_strdup(default_port);
+ grpc_arg server_name_arg;
+ server_name_arg.type = GRPC_ARG_STRING;
+ server_name_arg.key = GRPC_ARG_SERVER_NAME;
+ server_name_arg.value.string = (char *)path;
+ r->channel_args =
+ grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
gpr_backoff_init(&r->backoff_state, BACKOFF_MULTIPLIER, BACKOFF_JITTER,
BACKOFF_MIN_SECONDS * 1000, BACKOFF_MAX_SECONDS * 1000);
return &r->base;
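
The DNS resolver now publishes plain grpc_channel_args: the resolved sockaddrs are wrapped in a grpc_lb_addresses list, turned into a channel arg, and appended to the resolver's stored args. A sketch of that assembly step, using only the calls visible in this patch; make_result is a hypothetical helper, and the local list can be destroyed because grpc_channel_args_copy_and_add copies the pointer arg, as dns_on_resolved() above relies on.

    #include "src/core/ext/client_channel/lb_policy_factory.h"
    #include "src/core/lib/channel/channel_args.h"
    #include "src/core/lib/iomgr/resolve_address.h"

    static grpc_channel_args *make_result(grpc_resolved_addresses *resolved,
                                          const grpc_channel_args *base_args) {
      grpc_lb_addresses *addresses =
          grpc_lb_addresses_create(resolved->naddrs, NULL /* user_data_vtable */);
      for (size_t i = 0; i < resolved->naddrs; ++i) {
        grpc_lb_addresses_set_address(addresses, i, &resolved->addrs[i].addr,
                                      resolved->addrs[i].len,
                                      false /* is_balancer */,
                                      NULL /* balancer_name */,
                                      NULL /* user_data */);
      }
      grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
      grpc_channel_args *result =
          grpc_channel_args_copy_and_add(base_args, &arg, 1);
      grpc_lb_addresses_destroy(addresses); /* the channel args hold their own copy */
      return result;
    }
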
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 5a7a32d7cb..5fec03a8e4 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -33,6 +33,7 @@
#include <stdbool.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -40,8 +41,10 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
-#include "src/core/ext/client_config/parse_address.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/lb_policy_factory.h"
+#include "src/core/ext/client_channel/parse_address.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
@@ -49,10 +52,10 @@
typedef struct {
/** base class: must be first */
grpc_resolver base;
- /** the path component of the uri passed in */
- char *target_name;
/** the addresses that we've 'resolved' */
grpc_lb_addresses *addresses;
+ /** channel args */
+ grpc_channel_args *channel_args;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** have we published? */
@@ -60,7 +63,7 @@ typedef struct {
/** pending next completion, or NULL */
grpc_closure *next_completion;
/** target result address for next completion */
- grpc_resolver_result **target_result;
+ grpc_channel_args **target_result;
} sockaddr_resolver;
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
@@ -72,7 +75,7 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
@@ -101,7 +104,7 @@ static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_resolver_result **target_result,
+ grpc_channel_args **target_result,
grpc_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
@@ -116,10 +119,9 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
sockaddr_resolver *r) {
if (r->next_completion != NULL && !r->published) {
r->published = true;
- *r->target_result = grpc_resolver_result_create(
- r->target_name,
- grpc_lb_addresses_copy(r->addresses, NULL /* user_data_copy */),
- NULL /* lb_policy_name */, NULL);
+ grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
+ *r->target_result =
+ grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
@@ -128,8 +130,8 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
- grpc_lb_addresses_destroy(r->addresses, NULL /* user_data_destroy */);
- gpr_free(r->target_name);
+ grpc_lb_addresses_destroy(r->addresses);
+ grpc_channel_args_destroy(r->channel_args);
gpr_free(r);
}
@@ -149,7 +151,7 @@ static char *ipv6_get_default_authority(grpc_resolver_factory *factory,
return ip_get_default_authority(uri);
}
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
char *unix_get_default_authority(grpc_resolver_factory *factory,
grpc_uri *uri) {
return gpr_strdup("localhost");
@@ -160,8 +162,7 @@ static void do_nothing(void *ignored) {}
static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
int parse(grpc_uri *uri,
- struct sockaddr_storage *dst,
- size_t *len)) {
+ grpc_resolved_address *dst)) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
args->uri->scheme);
@@ -173,17 +174,15 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
gpr_slice_buffer path_parts;
gpr_slice_buffer_init(&path_parts);
gpr_slice_split(path_slice, ",", &path_parts);
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(path_parts.count);
+ grpc_lb_addresses *addresses =
+ grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
char *part_str = gpr_dump_slice(path_parts.slices[i], GPR_DUMP_ASCII);
ith_uri.path = part_str;
- if (!parse(
- &ith_uri,
- (struct sockaddr_storage *)(&addresses->addresses[i].address.addr),
- &addresses->addresses[i].address.len)) {
- errors_found = true;
+ if (!parse(&ith_uri, &addresses->addresses[i].address)) {
+ errors_found = true; /* GPR_TRUE */
}
gpr_free(part_str);
if (errors_found) break;
@@ -191,14 +190,19 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args,
gpr_slice_buffer_destroy(&path_parts);
gpr_slice_unref(path_slice);
if (errors_found) {
- grpc_lb_addresses_destroy(addresses, NULL /* user_data_destroy */);
+ grpc_lb_addresses_destroy(addresses);
return NULL;
}
/* Instantiate resolver. */
sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver));
memset(r, 0, sizeof(*r));
- r->target_name = gpr_strdup(args->uri->path);
r->addresses = addresses;
+ grpc_arg server_name_arg;
+ server_name_arg.type = GRPC_ARG_STRING;
+ server_name_arg.key = GRPC_ARG_SERVER_NAME;
+ server_name_arg.value.string = args->uri->path;
+ r->channel_args =
+ grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
return &r->base;
@@ -223,7 +227,7 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver_factory name##_resolver_factory = { \
&name##_factory_vtable}
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
DECL_FACTORY(unix);
#endif
DECL_FACTORY(ipv4);
@@ -232,7 +236,7 @@ DECL_FACTORY(ipv6);
void grpc_resolver_sockaddr_init(void) {
grpc_register_resolver_type(&ipv4_resolver_factory);
grpc_register_resolver_type(&ipv6_resolver_factory);
-#ifdef GPR_HAVE_UNIX_SOCKET
+#ifdef GRPC_HAVE_UNIX_SOCKET
grpc_register_resolver_type(&unix_resolver_factory);
#endif
}
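
Both the sockaddr and DNS resolvers now record the target's server name as an ordinary string channel arg rather than a dedicated struct field. A sketch mirroring that construction; with_server_name is a hypothetical helper and the GRPC_ARG_SERVER_NAME macro is assumed to come from the headers these files already include.

    #include "src/core/lib/channel/channel_args.h"

    static grpc_channel_args *with_server_name(const grpc_channel_args *base_args,
                                               const char *path) {
      grpc_arg server_name_arg;
      server_name_arg.type = GRPC_ARG_STRING;
      server_name_arg.key = GRPC_ARG_SERVER_NAME;
      server_name_arg.value.string = (char *)path;
      return grpc_channel_args_copy_and_add(base_args, &server_name_arg, 1);
    }
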
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.c b/src/core/ext/transport/chttp2/alpn/alpn.c
index 48b0217265..55710dc5ae 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.c
+++ b/src/core/ext/transport/chttp2/alpn/alpn.c
@@ -36,7 +36,7 @@
#include <grpc/support/useful.h>
/* in order of preference */
-static const char *const supported_versions[] = {"h2"};
+static const char *const supported_versions[] = {"grpc-exp", "h2"};
int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) {
size_t i;
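
With "grpc-exp" added ahead of "h2", grpc_chttp2_is_alpn_version_supported() accepts either protocol string during ALPN negotiation. A short usage sketch; the alpn.h include path is an assumption based on the .c file shown here.

    #include <string.h>

    #include "src/core/ext/transport/chttp2/alpn/alpn.h" /* assumed header for alpn.c */

    /* Sketch: returns non-zero for protocols the transport is willing to speak. */
    static int accepts(const char *proto) {
      return grpc_chttp2_is_alpn_version_supported(proto, strlen(proto));
    }

    /* After this change, accepts("grpc-exp") and accepts("h2") are non-zero,
     * while accepts("http/1.1") remains 0. */
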
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
index c2b59569fd..71a06e118b 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c
@@ -40,9 +40,9 @@
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/compress_filter.h"
@@ -52,6 +52,10 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
+//
+// connector
+//
+
typedef struct {
grpc_connector base;
gpr_refcount refs;
@@ -150,42 +154,27 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
c->tcp = NULL;
grpc_closure_init(&c->connected, connected, c);
grpc_tcp_client_connect(exec_ctx, &c->connected, &c->tcp,
- args->interested_parties, args->addr, args->addr_len,
- args->deadline);
+ args->interested_parties, args->channel_args,
+ args->addr, args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
connector_ref, connector_unref, connector_shutdown, connector_connect};
-typedef struct {
- grpc_client_channel_factory base;
- gpr_refcount refs;
- grpc_channel_args *merge_args;
-} client_channel_factory;
+//
+// client_channel_factory
+//
static void client_channel_factory_ref(
- grpc_client_channel_factory *cc_factory) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- gpr_ref(&f->refs);
-}
+ grpc_client_channel_factory *cc_factory) {}
static void client_channel_factory_unref(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- if (gpr_unref(&f->refs)) {
- grpc_channel_args_destroy(f->merge_args);
- gpr_free(f);
- }
-}
+ grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {}
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- grpc_subchannel_args *args) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
+ const grpc_subchannel_args *args) {
connector *c = gpr_malloc(sizeof(*c));
- grpc_channel_args *final_args =
- grpc_channel_args_merge(args->args, f->merge_args);
- grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
gpr_ref_init(&c->refs, 1);
@@ -197,23 +186,18 @@ static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_http_connect_handshaker_create(proxy_name, args->server_name));
gpr_free(proxy_name);
}
- args->args = final_args;
- s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
- grpc_channel_args_destroy(final_args);
return s;
}
static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
- grpc_channel_args *args) {
- client_channel_factory *f = (client_channel_factory *)cc_factory;
- grpc_channel_args *final_args = grpc_channel_args_merge(args, f->merge_args);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, final_args,
- GRPC_CLIENT_CHANNEL, NULL);
- grpc_channel_args_destroy(final_args);
- grpc_resolver *resolver = grpc_resolver_create(target);
+ const grpc_channel_args *args) {
+ grpc_channel *channel =
+ grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+ grpc_resolver *resolver = grpc_resolver_create(target, args);
if (!resolver) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel,
"client_channel_factory_create_channel");
@@ -221,7 +205,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
grpc_client_channel_finish_initialization(
- exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base);
+ exec_ctx, grpc_channel_get_channel_stack(channel), resolver, cc_factory);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel");
return channel;
@@ -232,6 +216,9 @@ static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
+static grpc_client_channel_factory client_channel_factory = {
+ &client_channel_factory_vtable};
+
/* Create a client channel:
Asynchronously: - resolve target
- connect to it (trying alternatives as presented)
@@ -245,16 +232,12 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
(target, args, reserved));
GPR_ASSERT(!reserved);
- client_channel_factory *f = gpr_malloc(sizeof(*f));
- memset(f, 0, sizeof(*f));
- f->base.vtable = &client_channel_factory_vtable;
- gpr_ref_init(&f->refs, 1);
- f->merge_args = grpc_channel_args_copy(args);
-
+ grpc_client_channel_factory *factory =
+ (grpc_client_channel_factory *)&client_channel_factory;
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, NULL);
+ &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args);
- grpc_client_channel_factory_unref(&exec_ctx, &f->base);
+ grpc_client_channel_factory_unref(&exec_ctx, factory);
grpc_exec_ctx_finish(&exec_ctx);
return channel != NULL ? channel : grpc_lame_client_channel_create(
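The hunks above drop the heap-allocated, arg-merging factory in favor of a single static instance whose behavior lives entirely in its vtable, with the caller's channel args passed straight through. A minimal sketch of that static-vtable-singleton pattern, using hypothetical names rather than gRPC's real types:

/* Illustrative sketch only (hypothetical types, not gRPC's API): a stateless
   factory exposed through a vtable and one shared static instance. */
#include <stdio.h>

typedef struct factory factory;

typedef struct {
  void (*create)(const factory *f, const char *target);
} factory_vtable;

struct factory {
  const factory_vtable *vtable;
};

static void create_impl(const factory *f, const char *target) {
  (void)f; /* no per-instance state: everything comes from the arguments */
  printf("creating channel for %s\n", target);
}

static const factory_vtable vtable = {create_impl};
static factory static_factory = {&vtable};

int main(void) {
  /* callers share the one static instance; no allocation or unref dance */
  static_factory.vtable->create(&static_factory, "localhost:50051");
  return 0;
}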
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
index b2c5e5b088..1e5b1c22e3 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
@@ -44,6 +44,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
@@ -65,9 +66,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
int flags = fcntl(fd, F_GETFL, 0);
GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
- grpc_endpoint *client =
- grpc_tcp_create(grpc_fd_create(fd, "client"),
- GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "fd-client");
+ grpc_endpoint *client = grpc_tcp_client_create_from_fd(
+ &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
grpc_transport *transport =
grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
index 31c54ff74c..57e1a8ec01 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
@@ -40,9 +40,9 @@
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
-#include "src/core/ext/client_config/client_channel.h"
-#include "src/core/ext/client_config/http_connect_handshaker.h"
-#include "src/core/ext/client_config/resolver_registry.h"
+#include "src/core/ext/client_channel/client_channel.h"
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@@ -54,6 +54,10 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/tsi/transport_security_interface.h"
+//
+// connector
+//
+
typedef struct {
grpc_connector base;
gpr_refcount refs;
@@ -210,16 +214,19 @@ static void connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *con,
grpc_closure_init(&c->connected_closure, connected, c);
grpc_tcp_client_connect(
exec_ctx, &c->connected_closure, &c->newly_connecting_endpoint,
- args->interested_parties, args->addr, args->addr_len, args->deadline);
+ args->interested_parties, args->channel_args, args->addr, args->deadline);
}
static const grpc_connector_vtable connector_vtable = {
connector_ref, connector_unref, connector_shutdown, connector_connect};
+//
+// client_channel_factory
+//
+
typedef struct {
grpc_client_channel_factory base;
gpr_refcount refs;
- grpc_channel_args *merge_args;
grpc_channel_security_connector *security_connector;
} client_channel_factory;
@@ -235,19 +242,15 @@ static void client_channel_factory_unref(
if (gpr_unref(&f->refs)) {
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
"client_channel_factory");
- grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
}
static grpc_subchannel *client_channel_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- grpc_subchannel_args *args) {
+ const grpc_subchannel_args *args) {
client_channel_factory *f = (client_channel_factory *)cc_factory;
connector *c = gpr_malloc(sizeof(*c));
- grpc_channel_args *final_args =
- grpc_channel_args_merge(args->args, f->merge_args);
- grpc_subchannel *s;
memset(c, 0, sizeof(*c));
c->base.vtable = &connector_vtable;
c->security_connector = f->security_connector;
@@ -261,25 +264,19 @@ static grpc_subchannel *client_channel_factory_create_subchannel(
}
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
- args->args = final_args;
- s = grpc_subchannel_create(exec_ctx, &c->base, args);
+ grpc_subchannel *s = grpc_subchannel_create(exec_ctx, &c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
- grpc_channel_args_destroy(final_args);
return s;
}
static grpc_channel *client_channel_factory_create_channel(
grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
const char *target, grpc_client_channel_type type,
- grpc_channel_args *args) {
+ const grpc_channel_args *args) {
client_channel_factory *f = (client_channel_factory *)cc_factory;
-
- grpc_channel_args *final_args = grpc_channel_args_merge(args, f->merge_args);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, final_args,
- GRPC_CLIENT_CHANNEL, NULL);
- grpc_channel_args_destroy(final_args);
-
- grpc_resolver *resolver = grpc_resolver_create(target);
+ grpc_channel *channel =
+ grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL);
+ grpc_resolver *resolver = grpc_resolver_create(target, args);
if (resolver != NULL) {
grpc_client_channel_finish_initialization(
exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base);
@@ -289,9 +286,6 @@ static grpc_channel *client_channel_factory_create_channel(
"client_channel_factory_create_channel");
channel = NULL;
}
-
- GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
- "client_channel_factory_create_channel");
return channel;
}
@@ -308,19 +302,13 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
const char *target,
const grpc_channel_args *args,
void *reserved) {
- grpc_arg connector_arg;
- grpc_channel_args *args_copy;
- grpc_channel_args *new_args_from_connector;
- grpc_channel_security_connector *security_connector;
- client_channel_factory *f;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
GRPC_API_TRACE(
"grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
"reserved=%p)",
4, (creds, target, args, reserved));
GPR_ASSERT(reserved == NULL);
-
+ // Make sure security connector does not already exist in args.
if (grpc_find_security_connector_in_args(args) != NULL) {
gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
grpc_exec_ctx_finish(&exec_ctx);
@@ -328,7 +316,9 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
target, GRPC_STATUS_INTERNAL,
"Security connector exists in channel args.");
}
-
+ // Create security connector and construct new channel args.
+ grpc_channel_security_connector *security_connector;
+ grpc_channel_args *new_args_from_connector;
if (grpc_channel_credentials_create_security_connector(
creds, target, args, &security_connector, &new_args_from_connector) !=
GRPC_SECURITY_OK) {
@@ -336,32 +326,30 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
return grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL, "Failed to create security connector.");
}
-
- connector_arg = grpc_security_connector_to_arg(&security_connector->base);
- args_copy = grpc_channel_args_copy_and_add(
+ grpc_arg connector_arg =
+ grpc_security_connector_to_arg(&security_connector->base);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != NULL ? new_args_from_connector : args,
&connector_arg, 1);
-
- f = gpr_malloc(sizeof(*f));
- memset(f, 0, sizeof(*f));
- f->base.vtable = &client_channel_factory_vtable;
- gpr_ref_init(&f->refs, 1);
-
- f->merge_args = grpc_channel_args_copy(args_copy);
- grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
-
+ // Create client channel factory.
+ client_channel_factory *f = gpr_malloc(sizeof(*f));
+ memset(f, 0, sizeof(*f));
+ f->base.vtable = &client_channel_factory_vtable;
+ gpr_ref_init(&f->refs, 1);
GRPC_SECURITY_CONNECTOR_REF(&security_connector->base,
"grpc_secure_channel_create");
f->security_connector = security_connector;
-
+ // Create channel.
grpc_channel *channel = client_channel_factory_create_channel(
- &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, NULL);
-
+ &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+ // Clean up.
+ GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
+ "client_channel_factory_create_channel");
+ grpc_channel_args_destroy(new_args);
grpc_client_channel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);
-
return channel; /* may be NULL */
}
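With this change grpc_secure_channel_create wraps the security connector into a channel arg, builds a merged copy of the caller's args with grpc_channel_args_copy_and_add, hands that copy down to channel creation, and destroys it once the channel is built. A minimal sketch of that copy-and-add-then-destroy ownership pattern, with hypothetical types standing in for gRPC's channel-args API:

/* Illustrative sketch only (hypothetical types, not gRPC's channel_args API):
   copy the caller's list plus one extra entry, use it, then free the copy. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  size_t num_args;
  const char **args;
} arg_list;

/* Returns a newly allocated list: a copy of base plus one extra entry.
   The caller owns the result and must release it with arg_list_destroy. */
static arg_list *arg_list_copy_and_add(const arg_list *base, const char *extra) {
  arg_list *out = malloc(sizeof(*out));
  out->num_args = base->num_args + 1;
  out->args = malloc(out->num_args * sizeof(*out->args));
  memcpy(out->args, base->args, base->num_args * sizeof(*base->args));
  out->args[base->num_args] = extra;
  return out;
}

static void arg_list_destroy(arg_list *l) {
  free(l->args);
  free(l);
}

static void use_args(const arg_list *l) {
  for (size_t i = 0; i < l->num_args; i++) printf("arg: %s\n", l->args[i]);
}

int main(void) {
  const char *base_args[] = {"target=localhost"};
  arg_list base = {1, base_args};
  arg_list *merged = arg_list_copy_and_add(&base, "security_connector=...");
  use_args(merged);         /* pass the merged list down */
  arg_list_destroy(merged); /* then release the temporary copy */
  return 0;
}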
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
index f0e07429fa..d42611b863 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
@@ -139,8 +139,8 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
goto error;
}
- err =
- grpc_tcp_server_create(NULL, grpc_server_get_channel_args(server), &tcp);
+ err = grpc_tcp_server_create(&exec_ctx, NULL,
+ grpc_server_get_channel_args(server), &tcp);
if (err != GRPC_ERROR_NONE) {
goto error;
}
@@ -148,9 +148,7 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
const size_t naddrs = resolved->naddrs;
errors = gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(
- tcp, (struct sockaddr *)&resolved->addrs[i].addr,
- resolved->addrs[i].len, &port_temp);
+ errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
if (errors[i] == GRPC_ERROR_NONE) {
if (port_num == -1) {
port_num = port_temp;
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
index 9af17fb5ae..aa2ecf5743 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
@@ -57,8 +57,12 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
char *name;
gpr_asprintf(&name, "fd:%d", fd);
- grpc_endpoint *server_endpoint = grpc_tcp_create(
- grpc_fd_create(fd, name), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota *resource_quota = grpc_resource_quota_from_channel_args(
+ grpc_server_get_channel_args(server));
+ grpc_endpoint *server_endpoint =
+ grpc_tcp_create(grpc_fd_create(fd, name), resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_free(name);
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index 563271f4f8..7ad687042d 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -271,7 +271,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
memset(server_state, 0, sizeof(*server_state));
grpc_closure_init(&server_state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, server_state);
- err = grpc_tcp_server_create(&server_state->tcp_server_shutdown_complete,
+ err = grpc_tcp_server_create(&exec_ctx,
+ &server_state->tcp_server_shutdown_complete,
grpc_server_get_channel_args(server), &tcp);
if (err != GRPC_ERROR_NONE) {
goto error;
@@ -286,9 +287,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
errors = gpr_malloc(sizeof(*errors) * resolved->naddrs);
for (i = 0; i < resolved->naddrs; i++) {
- errors[i] = grpc_tcp_server_add_port(
- tcp, (struct sockaddr *)&resolved->addrs[i].addr,
- resolved->addrs[i].len, &port_temp);
+ errors[i] = grpc_tcp_server_add_port(tcp, &resolved->addrs[i], &port_temp);
if (errors[i] == GRPC_ERROR_NONE) {
if (port_num == -1) {
port_num = port_temp;
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index 7d5279b9da..bd87253ed3 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -36,14 +36,11 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
-extern int grpc_http_write_state_trace;
-
void grpc_chttp2_plugin_init(void) {
grpc_chttp2_base64_encode_and_huffman_compress =
grpc_chttp2_base64_encode_and_huffman_compress_impl;
grpc_register_tracer("http", &grpc_http_trace);
grpc_register_tracer("flowctl", &grpc_flowctl_trace);
- grpc_register_tracer("http_write_state", &grpc_http_write_state_trace);
}
void grpc_chttp2_plugin_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 1dd7fef76f..4a9f806354 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -65,91 +65,73 @@
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
int grpc_http_trace = 0;
int grpc_flowctl_trace = 0;
-int grpc_http_write_state_trace = 0;
-
-#define TRANSPORT_FROM_WRITING(tw) \
- ((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
- writing)))
-
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-#define TRANSPORT_FROM_GLOBAL(tg) \
- ((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
- global)))
-
-#define STREAM_FROM_GLOBAL(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
-
-#define STREAM_FROM_PARSING(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, parsing)))
static const grpc_transport_vtable vtable;
/* forward declarations of various callbacks that we'll build closures around */
-static void writing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void reading_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void initiate_writing_locked(grpc_exec_ctx *exec_ctx, void *t,
+static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
+static void write_action_end(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
-static void initiate_read_flush_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
-static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_error *error);
+static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error);
+static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error);
/** Set a transport level setting, and push it to our peer */
static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_setting_id id, uint32_t value);
-/** Start disconnection chain */
-static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error);
-
-/** Cancel a stream: coming from the transport API */
-static void cancel_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error);
-
-static void close_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error);
+static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error);
/** Start new streams that have been created if we can */
-static void maybe_start_some_streams(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global);
-
-static void connectivity_state_set(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_connectivity_state state, grpc_error *error, const char *reason);
+static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
-static void check_read_ops(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global);
-
-static void incoming_byte_stream_update_flow_control(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
- size_t have_already);
+static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_connectivity_state state,
+ grpc_error *error, const char *reason);
+
+static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ size_t max_size_hint,
+ size_t have_already);
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored);
static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *error);
-static void set_write_state(grpc_chttp2_transport *t,
- grpc_chttp2_write_state state, const char *reason);
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
+ grpc_error *error);
+
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+
+static void close_transport_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_error *error);
+static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error);
/*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
@@ -161,34 +143,31 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
grpc_endpoint_destroy(exec_ctx, t->ep);
- gpr_slice_buffer_destroy(&t->global.qbuf);
+ gpr_slice_buffer_destroy(&t->qbuf);
- gpr_slice_buffer_destroy(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_destroy(&t->writing.hpack_compressor);
+ gpr_slice_buffer_destroy(&t->outbuf);
+ grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
- gpr_slice_buffer_destroy(&t->parsing.qbuf);
gpr_slice_buffer_destroy(&t->read_buffer);
- grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser);
- grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser);
+ grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
+ grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
for (i = 0; i < STREAM_LIST_COUNT; i++) {
GPR_ASSERT(t->lists[i].head == NULL);
GPR_ASSERT(t->lists[i].tail == NULL);
}
- GPR_ASSERT(grpc_chttp2_stream_map_size(&t->parsing_stream_map) == 0);
- GPR_ASSERT(grpc_chttp2_stream_map_size(&t->new_stream_map) == 0);
+ GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
- grpc_chttp2_stream_map_destroy(&t->parsing_stream_map);
- grpc_chttp2_stream_map_destroy(&t->new_stream_map);
+ grpc_chttp2_stream_map_destroy(&t->stream_map);
grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker);
- grpc_combiner_destroy(exec_ctx, t->executor.combiner);
+ grpc_combiner_destroy(exec_ctx, t->combiner);
/* callback remaining pings: they're not allowed to call into the transport,
and maybe they hold resources that need to be freed */
- while (t->global.pings.next != &t->global.pings) {
- grpc_chttp2_outstanding_ping *ping = t->global.pings.next;
+ while (t->pings.next != &t->pings) {
+ grpc_chttp2_outstanding_ping *ping = t->pings.next;
grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
GRPC_ERROR_CREATE("Transport closed"), NULL);
ping->next->prev = ping->prev;
@@ -196,37 +175,40 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
gpr_free(ping);
}
+ while (t->write_cb_pool) {
+ grpc_chttp2_write_cb *next = t->write_cb_pool->next;
+ gpr_free(t->write_cb_pool);
+ t->write_cb_pool = next;
+ }
+
gpr_free(t->peer_string);
gpr_free(t);
}
-/*#define REFCOUNTING_DEBUG 1*/
-#ifdef REFCOUNTING_DEBUG
-#define REF_TRANSPORT(t, r) ref_transport(t, r, __FILE__, __LINE__)
-#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t, r, __FILE__, __LINE__)
-static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- const char *reason, const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2:unref:%p %d->%d %s [%s:%d]", t, t->refs.count,
- t->refs.count - 1, reason, file, line);
+#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line) {
+ gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
+ t->refs.count, t->refs.count - 1, reason, file, line);
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
-static void ref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
- gpr_log(GPR_DEBUG, "chttp2: ref:%p %d->%d %s [%s:%d]", t, t->refs.count,
- t->refs.count + 1, reason, file, line);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line) {
+ gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t,
+ t->refs.count, t->refs.count + 1, reason, file, line);
gpr_ref(&t->refs);
}
#else
-#define REF_TRANSPORT(t, r) ref_transport(t)
-#define UNREF_TRANSPORT(cl, t, r) unref_transport(cl, t)
-static void unref_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
-static void ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -241,51 +223,46 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
memset(t, 0, sizeof(*t));
t->base.vtable = &vtable;
- t->executor.write_state = GRPC_CHTTP2_WRITES_CORKED;
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
- /* ref is dropped at transport close() */
- gpr_ref_init(&t->shutdown_ep_refs, 1);
- t->executor.combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
+ t->combiner = grpc_combiner_create(grpc_endpoint_get_workqueue(ep));
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
- t->global.next_stream_id = is_client ? 1 : 2;
- t->global.is_client = is_client;
- t->writing.outgoing_window = DEFAULT_WINDOW;
- t->parsing.incoming_window = DEFAULT_WINDOW;
- t->global.stream_lookahead = DEFAULT_WINDOW;
- t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
- t->global.ping_counter = 1;
- t->global.pings.next = t->global.pings.prev = &t->global.pings;
- t->parsing.is_client = is_client;
- t->parsing.deframe_state =
- is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
- t->parsing.is_first_frame = true;
- t->writing.is_client = is_client;
+ t->next_stream_id = is_client ? 1 : 2;
+ t->is_client = is_client;
+ t->outgoing_window = DEFAULT_WINDOW;
+ t->incoming_window = DEFAULT_WINDOW;
+ t->stream_lookahead = DEFAULT_WINDOW;
+ t->connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
+ t->ping_counter = 1;
+ t->pings.next = t->pings.prev = &t->pings;
+ t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
+ t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
- gpr_slice_buffer_init(&t->global.qbuf);
-
- gpr_slice_buffer_init(&t->writing.outbuf);
- grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor);
- grpc_closure_init(&t->writing_action, writing_action, t);
- grpc_closure_init(&t->reading_action, reading_action, t);
- grpc_closure_init(&t->reading_action_locked, reading_action_locked, t);
- grpc_closure_init(&t->parsing_action, parsing_action, t);
- grpc_closure_init(&t->post_parse_locked, post_parse_locked, t);
- grpc_closure_init(&t->initiate_writing, initiate_writing_locked, t);
- grpc_closure_init(&t->terminate_writing, terminate_writing_with_lock, t);
- grpc_closure_init(&t->initiate_read_flush_locked, initiate_read_flush_locked,
- t);
- grpc_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing,
- &t->writing);
+ gpr_slice_buffer_init(&t->qbuf);
- gpr_slice_buffer_init(&t->parsing.qbuf);
- grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
- grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser);
+ gpr_slice_buffer_init(&t->outbuf);
+ grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
+
+ grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
+ t);
+ grpc_closure_init(&t->write_action, write_action, t);
+ grpc_closure_init(&t->write_action_end, write_action_end, t);
+ grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
+ grpc_closure_init(&t->read_action_begin, read_action_begin, t);
+ grpc_closure_init(&t->read_action_locked, read_action_locked, t);
+ grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
+ grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
+ grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+ grpc_closure_init(&t->destructive_reclaimer_locked,
+ destructive_reclaimer_locked, t);
+
+ grpc_chttp2_goaway_parser_init(&t->goaway_parser);
+ grpc_chttp2_hpack_parser_init(&t->hpack_parser);
gpr_slice_buffer_init(&t->read_buffer);
@@ -294,28 +271,24 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
large enough that the exponential growth should happen nicely when it's
needed.
TODO(ctiller): tune this */
- grpc_chttp2_stream_map_init(&t->parsing_stream_map, 8);
- grpc_chttp2_stream_map_init(&t->new_stream_map, 8);
+ grpc_chttp2_stream_map_init(&t->stream_map, 8);
/* copy in initial settings to all setting sets */
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
- t->parsing.settings[i] = grpc_chttp2_settings_parameters[i].default_value;
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
- t->global.settings[j][i] =
- grpc_chttp2_settings_parameters[i].default_value;
+ t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
- t->global.dirtied_local_settings = 1;
+ t->dirtied_local_settings = 1;
/* Hack: it's common for implementations to assume 65536 bytes initial send
window -- this should by rights be 0 */
- t->global.force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- t->global.sent_local_settings = 0;
+ t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ t->sent_local_settings = 0;
if (is_client) {
- gpr_slice_buffer_add(
- &t->writing.outbuf,
- gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "initial_write");
+ gpr_slice_buffer_add(&t->outbuf, gpr_slice_from_copied_string(
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "initial_write");
}
/* configure http2 the way we like it */
@@ -330,34 +303,18 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
- if (0 ==
- strcmp(channel_args->args[i].key, GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
- if (is_client) {
- gpr_log(GPR_ERROR, "%s: is ignored on the client",
- GRPC_ARG_MAX_CONCURRENT_STREAMS);
- } else {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
- (uint32_t)value);
- }
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
+ if (0 == strcmp(channel_args->args[i].key,
+ GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- if ((t->global.next_stream_id & 1) != (value & 1)) {
+ if ((t->next_stream_id & 1) != (value & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
- t->global.next_stream_id & 1,
- is_client ? "client" : "server");
+ t->next_stream_id & 1, is_client ? "client" : "server");
} else {
- t->global.next_stream_id = (uint32_t)value;
+ t->next_stream_id = (uint32_t)value;
}
}
} else if (0 == strcmp(channel_args->args[i].key,
@@ -366,16 +323,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- t->global.stream_lookahead = (uint32_t)value;
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER)) {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
- (uint32_t)value);
+ t->stream_lookahead = (uint32_t)value;
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
@@ -383,118 +331,126 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
- grpc_chttp2_hpack_compressor_set_max_usable_size(
- &t->writing.hpack_compressor, (uint32_t)value);
+ grpc_chttp2_hpack_compressor_set_max_usable_size(&t->hpack_compressor,
+ (uint32_t)value);
}
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_MAX_METADATA_SIZE)) {
- const grpc_integer_options options = {-1, 0, INT_MAX};
- const int value =
- grpc_channel_arg_get_integer(&channel_args->args[i], options);
- if (value >= 0) {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
- (uint32_t)value);
- }
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE)) {
- if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
- gpr_log(GPR_ERROR, "%s: must be an integer",
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE);
- } else if (channel_args->args[i].value.integer < 16384 ||
- channel_args->args[i].value.integer > 16777215) {
- gpr_log(GPR_ERROR, "%s: must be between 16384 and 16777215",
- GRPC_ARG_HTTP2_MAX_FRAME_SIZE);
- } else {
- push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)channel_args->args[i].value.integer);
+ } else {
+ static const struct {
+ const char *channel_arg_name;
+ grpc_chttp2_setting_id setting_id;
+ grpc_integer_options integer_options;
+ bool availability[2] /* server, client */;
+ } settings_map[] = {
+ {GRPC_ARG_MAX_CONCURRENT_STREAMS,
+ GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
+ {-1, 0, INT_MAX},
+ {true, false}},
+ {GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
+ GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
+ {-1, 0, INT_MAX},
+ {true, true}},
+ {GRPC_ARG_MAX_METADATA_SIZE,
+ GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+ {-1, 0, INT_MAX},
+ {true, true}},
+ {GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
+ GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ {-1, 16384, 16777215},
+ {true, true}},
+ };
+ for (j = 0; j < (int)GPR_ARRAY_SIZE(settings_map); j++) {
+ if (0 == strcmp(channel_args->args[i].key,
+ settings_map[j].channel_arg_name)) {
+ if (!settings_map[j].availability[is_client]) {
+ gpr_log(GPR_DEBUG, "%s is not available on %s",
+ settings_map[j].channel_arg_name,
+ is_client ? "clients" : "servers");
+ } else {
+ int value = grpc_channel_arg_get_integer(
+ &channel_args->args[i], settings_map[j].integer_options);
+ if (value >= 0) {
+ push_setting(exec_ctx, t, settings_map[j].setting_id,
+ (uint32_t)value);
+ }
+ }
+ break;
+ }
}
}
}
}
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "uncork");
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "init");
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "init");
+ post_benign_reclaimer(exec_ctx, t);
}
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
t->destroying = 1;
- drop_connection(exec_ctx, t, GRPC_ERROR_CREATE("Transport destroyed"));
- UNREF_TRANSPORT(exec_ctx, t, "destroy");
+ close_transport_locked(
+ exec_ctx, t,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Transport destroyed"),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
+ grpc_combiner_execute(exec_ctx, t->combiner,
grpc_closure_create(destroy_transport_locked, t),
- GRPC_ERROR_NONE);
-}
-
-/** block grpc_endpoint_shutdown being called until a paired
- allow_endpoint_shutdown is made */
-static void prevent_endpoint_shutdown(grpc_chttp2_transport *t) {
- gpr_ref(&t->shutdown_ep_refs);
-}
-
-static void allow_endpoint_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- if (gpr_unref(&t->shutdown_ep_refs)) {
- grpc_endpoint_shutdown(exec_ctx, t->ep);
- }
+ GRPC_ERROR_NONE, false);
}
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_error *error) {
if (!t->closed) {
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "W:%p close transport", t);
+ if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
+ if (t->close_transport_on_writes_finished == NULL) {
+ t->close_transport_on_writes_finished =
+ GRPC_ERROR_CREATE("Delayed close due to in-progress write");
+ }
+ t->close_transport_on_writes_finished =
+ grpc_error_add_child(t->close_transport_on_writes_finished, error);
+ return;
+ }
+ if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE);
}
t->closed = 1;
- connectivity_state_set(exec_ctx, &t->global, GRPC_CHANNEL_SHUTDOWN,
+ connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
- allow_endpoint_shutdown_locked(exec_ctx, t);
+ grpc_endpoint_shutdown(exec_ctx, t->ep);
/* flush writable stream list to avoid dangling references */
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_writing *stream_writing;
- while (grpc_chttp2_list_pop_writable_stream(
- &t->global, &t->writing, &stream_global, &stream_writing)) {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ grpc_chttp2_stream *s;
+ while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close");
}
+ end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
- const char *reason) {
- grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount, reason);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) {
+ grpc_stream_ref(s->refcount, reason);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global,
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason) {
- grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount,
- reason);
+ grpc_stream_unref(exec_ctx, s->refcount, reason);
}
#else
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global) {
- grpc_stream_ref(STREAM_FROM_GLOBAL(stream_global)->refcount);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s) {
+ grpc_stream_ref(s->refcount);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global) {
- grpc_stream_unref(exec_ctx, STREAM_FROM_GLOBAL(stream_global)->refcount);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) {
+ grpc_stream_unref(exec_ctx, s->refcount);
}
#endif
-static void finish_init_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
- grpc_error *error) {
- grpc_chttp2_stream *s = sp;
- grpc_chttp2_register_stream(s->t, s);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "init");
-}
-
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
const void *server_data) {
@@ -509,41 +465,31 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
/* We reserve one 'active stream' that's dropped when the stream is
read-closed. The others are for incoming_byte_streams that are actively
reading */
- gpr_ref_init(&s->global.active_streams, 1);
- GRPC_CHTTP2_STREAM_REF(&s->global, "chttp2");
+ gpr_ref_init(&s->active_streams, 1);
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2");
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_init(&s->parsing.metadata_buffer[1]);
- grpc_chttp2_incoming_metadata_buffer_init(
- &s->global.received_initial_metadata);
- grpc_chttp2_incoming_metadata_buffer_init(
- &s->global.received_trailing_metadata);
- grpc_chttp2_data_parser_init(&s->parsing.data_parser);
- gpr_slice_buffer_init(&s->writing.flow_controlled_buffer);
- s->global.deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]);
+ grpc_chttp2_data_parser_init(&s->data_parser);
+ gpr_slice_buffer_init(&s->flow_controlled_buffer);
+ s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_closure_init(&s->complete_fetch, complete_fetch, s);
+ grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
- REF_TRANSPORT(t, "stream");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
if (server_data) {
- GPR_ASSERT(t->executor.parsing_active);
- s->global.id = (uint32_t)(uintptr_t)server_data;
- s->parsing.id = s->global.id;
- s->global.outgoing_window =
- t->global.settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- s->parsing.incoming_window = s->global.max_recv_bytes =
- t->global.settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->id = (uint32_t)(uintptr_t)server_data;
+ s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->incoming_window = s->max_recv_bytes =
+ t->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
- grpc_chttp2_stream_map_add(&t->parsing_stream_map, s->global.id, s);
- s->global.in_stream_map = true;
+ grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
}
- grpc_closure_init(&s->init_stream, finish_init_stream_locked, s);
- GRPC_CHTTP2_STREAM_REF(&s->global, "init");
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &s->init_stream,
- GRPC_ERROR_NONE);
-
GPR_TIMER_END("init_stream", 0);
return 0;
@@ -557,55 +503,39 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_BEGIN("destroy_stream", 0);
- GPR_ASSERT((s->global.write_closed && s->global.read_closed) ||
- s->global.id == 0);
- GPR_ASSERT(!s->global.in_stream_map);
- if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(
- exec_ctx, t,
- GRPC_ERROR_CREATE("Last stream closed after sending goaway"));
- }
- if (!t->executor.parsing_active && s->global.id) {
- GPR_ASSERT(grpc_chttp2_stream_map_find(&t->parsing_stream_map,
- s->global.id) == NULL);
+ GPR_ASSERT((s->write_closed && s->read_closed) || s->id == 0);
+ if (s->id != 0) {
+ GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == NULL);
}
- while (
- (bs = grpc_chttp2_incoming_frame_queue_pop(&s->global.incoming_frames))) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames))) {
incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- grpc_chttp2_list_remove_unannounced_incoming_window_available(&t->global,
- &s->global);
- grpc_chttp2_list_remove_stalled_by_transport(&t->global, &s->global);
- grpc_chttp2_list_remove_check_read_ops(&t->global, &s->global);
+ grpc_chttp2_list_remove_stalled_by_transport(t, s);
for (int i = 0; i < STREAM_LIST_COUNT; i++) {
if (s->included[i]) {
gpr_log(GPR_ERROR, "%s stream %d still included in list %d",
- t->global.is_client ? "client" : "server", s->global.id, i);
+ t->is_client ? "client" : "server", s->id, i);
abort();
}
}
- GPR_ASSERT(s->global.send_initial_metadata_finished == NULL);
- GPR_ASSERT(s->global.send_message_finished == NULL);
- GPR_ASSERT(s->global.send_trailing_metadata_finished == NULL);
- GPR_ASSERT(s->global.recv_initial_metadata_ready == NULL);
- GPR_ASSERT(s->global.recv_message_ready == NULL);
- GPR_ASSERT(s->global.recv_trailing_metadata_finished == NULL);
- grpc_chttp2_data_parser_destroy(exec_ctx, &s->parsing.data_parser);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.metadata_buffer[1]);
- grpc_chttp2_incoming_metadata_buffer_destroy(
- &s->global.received_initial_metadata);
- grpc_chttp2_incoming_metadata_buffer_destroy(
- &s->global.received_trailing_metadata);
- gpr_slice_buffer_destroy(&s->writing.flow_controlled_buffer);
- GRPC_ERROR_UNREF(s->global.read_closed_error);
- GRPC_ERROR_UNREF(s->global.write_closed_error);
-
- UNREF_TRANSPORT(exec_ctx, t, "stream");
+ GPR_ASSERT(s->send_initial_metadata_finished == NULL);
+ GPR_ASSERT(s->fetching_send_message == NULL);
+ GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
+ GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
+ GPR_ASSERT(s->recv_message_ready == NULL);
+ GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
+ grpc_chttp2_data_parser_destroy(exec_ctx, &s->data_parser);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
+ grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
+ gpr_slice_buffer_destroy(&s->flow_controlled_buffer);
+ GRPC_ERROR_UNREF(s->read_closed_error);
+ GRPC_ERROR_UNREF(s->write_closed_error);
+
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
GPR_TIMER_END("destroy_stream", 0);
@@ -620,280 +550,222 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = and_free_memory;
grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s);
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &s->destroy_stream,
- GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream,
+ GRPC_ERROR_NONE, false);
GPR_TIMER_END("destroy_stream", 0);
}
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, uint32_t id) {
- grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
- grpc_chttp2_stream *s =
- grpc_chttp2_stream_map_find(&t->parsing_stream_map, id);
- return s ? &s->parsing : NULL;
+grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+ uint32_t id) {
+ return grpc_chttp2_stream_map_find(&t->stream_map, id);
}
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- uint32_t id) {
+grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t id) {
+ if (t->channel_callback.accept_stream == NULL) {
+ return NULL;
+ }
grpc_chttp2_stream *accepting;
- grpc_chttp2_transport *t = TRANSPORT_FROM_PARSING(transport_parsing);
GPR_ASSERT(t->accepting_stream == NULL);
t->accepting_stream = &accepting;
t->channel_callback.accept_stream(exec_ctx,
t->channel_callback.accept_stream_user_data,
&t->base, (void *)(uintptr_t)id);
t->accepting_stream = NULL;
- return &accepting->parsing;
+ return accepting;
}
/*******************************************************************************
- * LOCK MANAGEMENT
+ * OUTPUT PROCESSING
*/
-static const char *write_state_name(grpc_chttp2_write_state state) {
- switch (state) {
- case GRPC_CHTTP2_WRITES_CORKED:
- return "CORKED";
- case GRPC_CHTTP2_WRITING_INACTIVE:
- return "INACTIVE";
- case GRPC_CHTTP2_WRITE_SCHEDULED:
- return "SCHEDULED";
- case GRPC_CHTTP2_WRITING:
+static const char *write_state_name(grpc_chttp2_write_state st) {
+ switch (st) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
+ return "IDLE";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
return "WRITING";
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- return "WRITING[p=1]";
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
- return "WRITING[p=0]";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
+ return "WRITING+MORE";
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
+ return "WRITING+MORE+COVERED";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-static void set_write_state(grpc_chttp2_transport *t,
- grpc_chttp2_write_state state, const char *reason) {
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "W:%p %s -> %s because %s", t,
- write_state_name(t->executor.write_state), write_state_name(state),
- reason);
+static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_write_state st, const char *reason) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
+ t->is_client ? "CLIENT" : "SERVER",
+ write_state_name(t->write_state),
+ write_state_name(st), reason));
+ t->write_state = st;
+ if (st == GRPC_CHTTP2_WRITE_STATE_IDLE &&
+ t->close_transport_on_writes_finished != NULL) {
+ grpc_error *err = t->close_transport_on_writes_finished;
+ t->close_transport_on_writes_finished = NULL;
+ close_transport_locked(exec_ctx, t, err);
}
- t->executor.write_state = state;
-}
-
-static void initiate_writing_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = tp;
- GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED);
- start_writing(exec_ctx, t);
}
-static void initiate_read_flush_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = tp;
- t->executor.check_read_ops_scheduled = false;
- check_read_ops(exec_ctx, &t->global);
-}
-
-/*******************************************************************************
- * OUTPUT PROCESSING
- */
-
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
- /* Perform state checks, and transition to a scheduled state if appropriate.
- If we are inactive, schedule a write chain to begin once the transport
- combiner finishes any executions in its current batch (which may be
- scheduled AFTER this code executes). The write chain will:
- - call start_writing, which verifies (under the global lock) that there
- are things that need to be written by calling
- grpc_chttp2_unlocking_check_writes, and if so schedules writing_action
- against the current exec_ctx, to be executed OUTSIDE of the global lock
- - eventually writing_action results in grpc_chttp2_terminate_writing being
- called, which re-takes the global lock, updates state, checks if we need
- to do *another* write immediately, and if so loops back to
- start_writing.
-
- Current problems:
- - too much lock entry/exiting
- - the writing thread can become stuck indefinitely (punt through the
- workqueue periodically to fix) */
-
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- switch (t->executor.write_state) {
- case GRPC_CHTTP2_WRITES_CORKED:
+ switch (t->write_state) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, covered_by_poller);
break;
- case GRPC_CHTTP2_WRITING_INACTIVE:
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, reason);
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- covered_by_poller);
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
+ set_write_state(
+ exec_ctx, t,
+ covered_by_poller
+ ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER
+ : GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+ reason);
break;
- case GRPC_CHTTP2_WRITE_SCHEDULED:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
if (covered_by_poller) {
- /* upgrade to note poller is available to cover the write */
- grpc_combiner_force_async_finally(t->executor.combiner);
+ set_write_state(
+ exec_ctx, t,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
+ reason);
}
break;
- case GRPC_CHTTP2_WRITING:
- set_write_state(t,
- covered_by_poller ? GRPC_CHTTP2_WRITING_STALE_WITH_POLLER
- : GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
- reason);
- break;
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- /* nothing to do: write already requested */
- break;
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
- if (covered_by_poller) {
- /* upgrade to note poller is available to cover the write */
- set_write_state(t, GRPC_CHTTP2_WRITING_STALE_WITH_POLLER, reason);
- }
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
break;
}
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
-static void start_writing(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
- GPR_TIMER_BEGIN("start_writing", 0);
- GPR_ASSERT(t->executor.write_state == GRPC_CHTTP2_WRITE_SCHEDULED);
- if (!t->closed &&
- grpc_chttp2_unlocking_check_writes(exec_ctx, &t->global, &t->writing)) {
- set_write_state(t, GRPC_CHTTP2_WRITING, "start_writing");
- prevent_endpoint_shutdown(t);
- grpc_exec_ctx_sched(exec_ctx, &t->writing_action, GRPC_ERROR_NONE, NULL);
- } else {
- if (t->closed) {
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
- "start_writing:transport_closed");
- } else {
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE,
- "start_writing:nothing_to_write");
- }
- end_waiting_for_write(exec_ctx, t, GRPC_ERROR_NONE);
- UNREF_TRANSPORT(exec_ctx, t, "writing");
+void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, bool covered_by_poller,
+ const char *reason) {
+ if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
+ grpc_chttp2_initiate_write(exec_ctx, t, covered_by_poller, reason);
}
- GPR_TIMER_END("start_writing", 0);
}
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- bool covered_by_poller, const char *reason) {
- if (!TRANSPORT_FROM_GLOBAL(transport_global)->closed &&
- grpc_chttp2_list_add_writable_stream(transport_global, stream_global)) {
- GRPC_CHTTP2_STREAM_REF(stream_global, "chttp2_writing");
- grpc_chttp2_initiate_write(exec_ctx, transport_global, covered_by_poller,
- reason);
+static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
+ grpc_error *error_ignored) {
+ GPR_TIMER_BEGIN("write_action_begin_locked", 0);
+ grpc_chttp2_transport *t = gt;
+ GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
+ if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "begin writing");
+ grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL);
+ } else {
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "begin writing nothing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
+ GPR_TIMER_END("write_action_begin_locked", 0);
}
-static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_setting_id id, uint32_t value) {
- const grpc_chttp2_setting_parameters *sp =
- &grpc_chttp2_settings_parameters[id];
- uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
- if (use_value != value) {
- gpr_log(GPR_INFO, "Requested parameter %s clamped from %d to %d", sp->name,
- value, use_value);
- }
- if (use_value != t->global.settings[GRPC_LOCAL_SETTINGS][id]) {
- t->global.settings[GRPC_LOCAL_SETTINGS][id] = use_value;
- t->global.dirtied_local_settings = 1;
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "push_setting");
- }
+static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
+ grpc_chttp2_transport *t = gt;
+ GPR_TIMER_BEGIN("write_action", 0);
+ grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end);
+ GPR_TIMER_END("write_action", 0);
}
-/* error may be GRPC_ERROR_NONE if there is no error allocated yet.
- In that case, use "reason" as the text for a new error. */
-static void end_waiting_for_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_error *error) {
- grpc_chttp2_stream_global *stream_global;
- while (grpc_chttp2_list_pop_closed_waiting_for_writing(&t->global,
- &stream_global)) {
- fail_pending_writes(exec_ctx, &t->global, stream_global,
- GRPC_ERROR_REF(error));
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "finish_writes");
- }
- GRPC_ERROR_UNREF(error);
+static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = gt;
+ GPR_TIMER_BEGIN("write_action_end", 0);
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked,
+ GRPC_ERROR_REF(error), false);
+ GPR_TIMER_END("write_action_end", 0);
}
-static void terminate_writing_with_lock(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
grpc_chttp2_transport *t = tp;
- allow_endpoint_shutdown_locked(exec_ctx, t);
if (error != GRPC_ERROR_NONE) {
- drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
}
- grpc_chttp2_cleanup_writing(exec_ctx, &t->global, &t->writing);
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+ }
+ }
- end_waiting_for_write(exec_ctx, t, GRPC_ERROR_REF(error));
+ grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));
- switch (t->executor.write_state) {
- case GRPC_CHTTP2_WRITES_CORKED:
- case GRPC_CHTTP2_WRITING_INACTIVE:
- case GRPC_CHTTP2_WRITE_SCHEDULED:
+ switch (t->write_state) {
+ case GRPC_CHTTP2_WRITE_STATE_IDLE:
GPR_UNREACHABLE_CODE(break);
- case GRPC_CHTTP2_WRITING:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING:
GPR_TIMER_MARK("state=writing", 0);
- set_write_state(t, GRPC_CHTTP2_WRITING_INACTIVE, "terminate_writing");
- break;
- case GRPC_CHTTP2_WRITING_STALE_WITH_POLLER:
- GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "terminate_writing");
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- true);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "finish writing");
break;
- case GRPC_CHTTP2_WRITING_STALE_NO_POLLER:
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
- set_write_state(t, GRPC_CHTTP2_WRITE_SCHEDULED, "terminate_writing");
- REF_TRANSPORT(t, "writing");
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_writing, GRPC_ERROR_NONE,
- false);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "continue writing [!covered]");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, false);
+ break;
+ case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
+ GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ "continue writing [covered]");
+ GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
+ grpc_combiner_execute_finally(exec_ctx, t->combiner,
+ &t->write_action_begin_locked,
+ GRPC_ERROR_NONE, true);
break;
}
- UNREF_TRANSPORT(exec_ctx, t, "writing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
GPR_TIMER_END("terminate_writing_with_lock", 0);
}
-void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
- void *transport_writing, grpc_error *error) {
- GPR_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
- grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &t->terminate_writing,
- GRPC_ERROR_REF(error));
- GPR_TIMER_END("grpc_chttp2_terminate_writing", 0);
-}
-
-static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
- grpc_error *error) {
- grpc_chttp2_transport *t = gt;
- GPR_TIMER_BEGIN("writing_action", 0);
- grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
- GPR_TIMER_END("writing_action", 0);
+static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_setting_id id, uint32_t value) {
+ const grpc_chttp2_setting_parameters *sp =
+ &grpc_chttp2_settings_parameters[id];
+ uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
+ if (use_value != value) {
+ gpr_log(GPR_INFO, "Requested parameter %s clamped from %d to %d", sp->name,
+ value, use_value);
+ }
+ if (use_value != t->settings[GRPC_LOCAL_SETTINGS][id]) {
+ t->settings[GRPC_LOCAL_SETTINGS][id] = use_value;
+ t->dirtied_local_settings = 1;
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "push_setting");
+ }
}
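
push_setting above clamps the requested value into the legal range for that setting, logs when it had to clamp, and only marks the local settings dirty (and so schedules a SETTINGS frame) when the stored value actually changes. A minimal sketch of that clamp-then-apply step, with the min/max passed in rather than looked up in grpc_chttp2_settings_parameters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when the stored value changed and the caller should
   initiate a write carrying the updated SETTINGS frame. */
static bool push_setting_sketch(uint32_t *stored, uint32_t value,
                                uint32_t min_value, uint32_t max_value) {
  uint32_t use_value = value;
  if (use_value < min_value) use_value = min_value;
  if (use_value > max_value) use_value = max_value;
  if (use_value != value) {
    printf("requested value %u clamped to %u\n", value, use_value);
  }
  if (use_value == *stored) return false; /* nothing to send */
  *stored = use_value;
  return true;
}
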
-void grpc_chttp2_add_incoming_goaway(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- uint32_t goaway_error, gpr_slice goaway_text) {
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t goaway_error,
+ gpr_slice goaway_text) {
char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
gpr_slice_unref(goaway_text);
- transport_global->seen_goaway = 1;
+ t->seen_goaway = 1;
/* lie: use transient failure from the transport to indicate goaway has been
* received */
connectivity_state_set(
- exec_ctx, transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_str(
grpc_error_set_int(GRPC_ERROR_CREATE("GOAWAY received"),
GRPC_ERROR_INT_HTTP2_ERROR,
@@ -903,61 +775,50 @@ void grpc_chttp2_add_incoming_goaway(
gpr_free(msg);
}
-static void maybe_start_some_streams(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global) {
- grpc_chttp2_stream_global *stream_global;
+static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s;
uint32_t stream_incoming_window;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
- while (transport_global->next_stream_id <= MAX_CLIENT_STREAM_ID &&
- transport_global->concurrent_stream_count <
- transport_global
- ->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
- grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
- &stream_global)) {
+ while (t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
+ grpc_chttp2_stream_map_size(&t->stream_map) <
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] &&
+ grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
/* safe since we can't (legally) be parsing this stream yet */
- grpc_chttp2_stream_parsing *stream_parsing =
- &STREAM_FROM_GLOBAL(stream_global)->parsing;
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
- transport_global->is_client ? "CLI" : "SVR", stream_global,
- transport_global->next_stream_id));
+ t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
- GPR_ASSERT(stream_global->id == 0);
- stream_global->id = stream_parsing->id = transport_global->next_stream_id;
- transport_global->next_stream_id += 2;
+ GPR_ASSERT(s->id == 0);
+ s->id = t->next_stream_id;
+ t->next_stream_id += 2;
- if (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- connectivity_state_set(
- exec_ctx, transport_global, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE("Stream IDs exhausted"), "no_more_stream_ids");
+ if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) {
+ connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE("Stream IDs exhausted"),
+ "no_more_stream_ids");
}
- stream_global->outgoing_window =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_parsing->incoming_window = stream_incoming_window =
- transport_global->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- stream_global->max_recv_bytes =
- GPR_MAX(stream_incoming_window, stream_global->max_recv_bytes);
- grpc_chttp2_stream_map_add(
- &TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
- stream_global->id, STREAM_FROM_GLOBAL(stream_global));
- stream_global->in_stream_map = true;
- transport_global->concurrent_stream_count++;
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global, true,
- "new_stream");
+ s->outgoing_window = t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->incoming_window = stream_incoming_window =
+ t->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ s->max_recv_bytes = GPR_MAX(stream_incoming_window, s->max_recv_bytes);
+ grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+ post_destructive_reclaimer(exec_ctx, t);
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
- while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
- grpc_chttp2_list_pop_waiting_for_concurrency(transport_global,
- &stream_global)) {
- cancel_from_api(exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE("Stream IDs exhausted"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+ while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
+ grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE("Stream IDs exhausted"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAVAILABLE));
}
}
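
maybe_start_some_streams above hands out client-initiated HTTP/2 stream ids: they are odd, increase by 2 per stream, and once the counter passes the largest legal id (2^31 - 1) no further streams can be started, which is why the second loop cancels anything still waiting for concurrency. A standalone sketch of that allocation rule (MAX_CLIENT_STREAM_ID_SKETCH is a placeholder for the transport's constant):

#include <stdint.h>

#define MAX_CLIENT_STREAM_ID_SKETCH 0x7fffffffu /* largest legal HTTP/2 id */

typedef struct {
  uint32_t next_stream_id; /* 1 for a client; client ids stay odd */
} id_allocator;

/* Returns the allocated id, or 0 once ids are exhausted (the caller must
   then fail any streams still waiting to start). */
static uint32_t allocate_client_stream_id(id_allocator *a) {
  if (a->next_stream_id > MAX_CLIENT_STREAM_ID_SKETCH) return 0;
  uint32_t id = a->next_stream_id;
  a->next_stream_id += 2;
  return id;
}
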
@@ -969,48 +830,125 @@ static grpc_closure *add_closure_barrier(grpc_closure *closure) {
return closure;
}
-void grpc_chttp2_complete_closure_step(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
- grpc_error *error) {
+static void null_then_run_closure(grpc_exec_ctx *exec_ctx,
+ grpc_closure **closure, grpc_error *error) {
+ grpc_closure *c = *closure;
+ *closure = NULL;
+ grpc_closure_run(exec_ctx, c, error);
+}
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_closure **pclosure,
+ grpc_error *error, const char *desc) {
grpc_closure *closure = *pclosure;
+ *pclosure = NULL;
if (closure == NULL) {
GRPC_ERROR_UNREF(error);
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (error != GRPC_ERROR_NONE) {
- if (closure->error == GRPC_ERROR_NONE) {
- closure->error =
+ if (closure->error_data.error == GRPC_ERROR_NONE) {
+ closure->error_data.error =
GRPC_ERROR_CREATE("Error in HTTP transport completing operation");
- closure->error = grpc_error_set_str(
- closure->error, GRPC_ERROR_STR_TARGET_ADDRESS,
- TRANSPORT_FROM_GLOBAL(transport_global)->peer_string);
+ closure->error_data.error =
+ grpc_error_set_str(closure->error_data.error,
+ GRPC_ERROR_STR_TARGET_ADDRESS, t->peer_string);
}
- closure->error = grpc_error_add_child(closure->error, error);
+ closure->error_data.error =
+ grpc_error_add_child(closure->error_data.error, error);
}
if (closure->next_data.scratch < CLOSURE_BARRIER_FIRST_REF_BIT) {
if (closure->next_data.scratch & CLOSURE_BARRIER_STATS_BIT) {
- grpc_transport_move_stats(&stream_global->stats,
- stream_global->collecting_stats);
- stream_global->collecting_stats = NULL;
+ grpc_transport_move_stats(&s->stats, s->collecting_stats);
+ s->collecting_stats = NULL;
}
- grpc_exec_ctx_sched(exec_ctx, closure, closure->error, NULL);
+ grpc_closure_run(exec_ctx, closure, closure->error_data.error);
}
- *pclosure = NULL;
}
-static int contains_non_ok_status(
- grpc_chttp2_transport_global *transport_global,
- grpc_metadata_batch *batch) {
+static bool contains_non_ok_status(grpc_metadata_batch *batch) {
grpc_linked_mdelem *l;
for (l = batch->list.head; l; l = l->next) {
if (l->md->key == GRPC_MDSTR_GRPC_STATUS &&
l->md != GRPC_MDELEM_GRPC_STATUS_0) {
- return 1;
+ return true;
}
}
- return 0;
+ return false;
+}
+
+static void add_fetched_slice_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ s->fetched_send_message_length +=
+ (uint32_t)GPR_SLICE_LENGTH(s->fetching_slice);
+ gpr_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
+ if (s->id != 0) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
+ }
+}
+
+static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ for (;;) {
+ if (s->fetching_send_message == NULL) {
+ /* Stream was cancelled before message fetch completed */
+ abort(); /* TODO(ctiller): what cleanup here? */
+ return; /* early out */
+ }
+ if (s->fetched_send_message_length == s->fetching_send_message->length) {
+ int64_t notify_offset = s->next_message_end_offset;
+ if (notify_offset <= s->flow_controlled_bytes_written) {
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE,
+ "fetching_send_message_finished");
+ } else {
+ grpc_chttp2_write_cb *cb = t->write_cb_pool;
+ if (cb == NULL) {
+ cb = gpr_malloc(sizeof(*cb));
+ } else {
+ t->write_cb_pool = cb->next;
+ }
+ cb->call_at_byte = notify_offset;
+ cb->closure = s->fetching_send_message_finished;
+ s->fetching_send_message_finished = NULL;
+ cb->next = s->on_write_finished_cbs;
+ s->on_write_finished_cbs = cb;
+ }
+ s->fetching_send_message = NULL;
+ return; /* early out */
+ } else if (grpc_byte_stream_next(exec_ctx, s->fetching_send_message,
+ &s->fetching_slice, UINT32_MAX,
+ &s->complete_fetch)) {
+ add_fetched_slice_locked(exec_ctx, t, s);
+ }
+ }
+}
+
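
continue_fetching_send_locked above reuses grpc_chttp2_write_cb records through t->write_cb_pool rather than allocating and freeing one per write notification. A minimal sketch of that free-list pattern with placeholder types:

#include <stdint.h>
#include <stdlib.h>

typedef struct write_cb_sketch {
  int64_t call_at_byte;       /* byte offset at which to fire */
  void (*closure)(void *arg); /* completion to run */
  void *arg;
  struct write_cb_sketch *next;
} write_cb_sketch;

/* The real pool hangs off the transport and is only touched under the
   transport combiner; this sketch is single-threaded. */
static write_cb_sketch *write_cb_pool = NULL;

static write_cb_sketch *write_cb_get(void) {
  write_cb_sketch *cb = write_cb_pool;
  if (cb == NULL) {
    cb = malloc(sizeof(*cb));
  } else {
    write_cb_pool = cb->next;
  }
  return cb;
}

static void write_cb_put(write_cb_sketch *cb) {
  cb->next = write_cb_pool;
  write_cb_pool = cb;
}
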
+static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = gs;
+ grpc_chttp2_transport *t = s->t;
+ if (error == GRPC_ERROR_NONE) {
+ add_fetched_slice_locked(exec_ctx, t, s);
+ continue_fetching_send_locked(exec_ctx, t, s);
+ } else {
+ /* TODO(ctiller): what to do here */
+ abort();
+ }
+}
+
+static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
+ grpc_error *error) {
+ grpc_chttp2_stream *s = gs;
+ grpc_chttp2_transport *t = s->t;
+ grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked,
+ GRPC_ERROR_REF(error),
+ s->complete_fetch_covered_by_poller);
}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
@@ -1022,12 +960,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_transport_stream_op *op = stream_op;
grpc_chttp2_transport *t = op->transport_private.args[0];
grpc_chttp2_stream *s = op->transport_private.args[1];
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global = &s->global;
if (grpc_http_trace) {
char *str = grpc_transport_stream_op_string(op);
- gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s", str);
+ gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
+ op->on_complete);
gpr_free(str);
}
@@ -1035,45 +972,42 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (on_complete == NULL) {
on_complete = grpc_closure_create(do_nothing, NULL);
}
+
  /* use final_data as a barrier until enqueue time; the initial counter is
dropped at the end of this function */
on_complete->next_data.scratch = CLOSURE_BARRIER_FIRST_REF_BIT;
- on_complete->error = GRPC_ERROR_NONE;
+ on_complete->error_data.error = GRPC_ERROR_NONE;
if (op->collect_stats != NULL) {
- GPR_ASSERT(stream_global->collecting_stats == NULL);
- stream_global->collecting_stats = op->collect_stats;
+ GPR_ASSERT(s->collecting_stats == NULL);
+ s->collecting_stats = op->collect_stats;
on_complete->next_data.scratch |= CLOSURE_BARRIER_STATS_BIT;
}
if (op->cancel_error != GRPC_ERROR_NONE) {
- cancel_from_api(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(op->cancel_error));
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, GRPC_ERROR_REF(op->cancel_error));
}
if (op->close_error != GRPC_ERROR_NONE) {
- close_from_api(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(op->close_error));
+ close_from_api(exec_ctx, t, s, GRPC_ERROR_REF(op->close_error));
}
if (op->send_initial_metadata != NULL) {
- GPR_ASSERT(stream_global->send_initial_metadata_finished == NULL);
- stream_global->send_initial_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->send_initial_metadata = op->send_initial_metadata;
+ GPR_ASSERT(s->send_initial_metadata_finished == NULL);
+ s->send_initial_metadata_finished = add_closure_barrier(on_complete);
+ s->send_initial_metadata = op->send_initial_metadata;
const size_t metadata_size =
grpc_metadata_batch_size(op->send_initial_metadata);
const size_t metadata_peer_limit =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
- if (transport_global->is_client) {
- stream_global->deadline =
- gpr_time_min(stream_global->deadline,
- stream_global->send_initial_metadata->deadline);
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ if (t->is_client) {
+ s->deadline =
+ gpr_time_min(s->deadline, s->send_initial_metadata->deadline);
}
if (metadata_size > metadata_peer_limit) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
grpc_error_set_int(
@@ -1083,64 +1017,83 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
- if (contains_non_ok_status(transport_global, op->send_initial_metadata)) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (contains_non_ok_status(op->send_initial_metadata)) {
+ s->seen_error = true;
}
- if (!stream_global->write_closed) {
- if (transport_global->is_client) {
- GPR_ASSERT(stream_global->id == 0);
- grpc_chttp2_list_add_waiting_for_concurrency(transport_global,
- stream_global);
- maybe_start_some_streams(exec_ctx, transport_global);
+ if (!s->write_closed) {
+ if (t->is_client) {
+ if (!t->closed) {
+ GPR_ASSERT(s->id == 0);
+ grpc_chttp2_list_add_waiting_for_concurrency(t, s);
+ maybe_start_some_streams(exec_ctx, t);
+ } else {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s,
+ GRPC_ERROR_CREATE("Transport closed"));
+ }
} else {
- GPR_ASSERT(stream_global->id != 0);
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_initial_metadata");
+ GPR_ASSERT(s->id != 0);
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
+ "op.send_initial_metadata");
}
} else {
- stream_global->send_trailing_metadata = NULL;
+ s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished,
+ exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_CREATE(
- "Attempt to send initial metadata after stream was closed"));
+ "Attempt to send initial metadata after stream was closed"),
+ "send_initial_metadata_finished");
}
}
}
if (op->send_message != NULL) {
- GPR_ASSERT(stream_global->send_message_finished == NULL);
- GPR_ASSERT(stream_global->send_message == NULL);
- stream_global->send_message_finished = add_closure_barrier(on_complete);
- if (stream_global->write_closed) {
+ s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
+ if (s->write_closed) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished,
- GRPC_ERROR_CREATE("Attempt to send message after stream was closed"));
+ exec_ctx, t, s, &s->fetching_send_message_finished,
+ GRPC_ERROR_CREATE("Attempt to send message after stream was closed"),
+ "fetching_send_message_finished");
} else {
- stream_global->send_message = op->send_message;
- if (stream_global->id != 0) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_message");
+ GPR_ASSERT(s->fetching_send_message == NULL);
+ uint8_t *frame_hdr =
+ gpr_slice_buffer_tiny_add(&s->flow_controlled_buffer, 5);
+ uint32_t flags = op->send_message->flags;
+ frame_hdr[0] = (flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
+ size_t len = op->send_message->length;
+ frame_hdr[1] = (uint8_t)(len >> 24);
+ frame_hdr[2] = (uint8_t)(len >> 16);
+ frame_hdr[3] = (uint8_t)(len >> 8);
+ frame_hdr[4] = (uint8_t)(len);
+ s->fetching_send_message = op->send_message;
+ s->fetched_send_message_length = 0;
+ s->next_message_end_offset = s->flow_controlled_bytes_written +
+ (int64_t)s->flow_controlled_buffer.length +
+ (int64_t)len;
+ s->complete_fetch_covered_by_poller = op->covered_by_poller;
+ if (flags & GRPC_WRITE_BUFFER_HINT) {
+ /* allow up to 64kb to be buffered */
+ /* TODO(ctiller): make this configurable */
+ s->next_message_end_offset -= 65536;
+ }
+ continue_fetching_send_locked(exec_ctx, t, s);
+ if (s->id != 0) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
}
}
if (op->send_trailing_metadata != NULL) {
- GPR_ASSERT(stream_global->send_trailing_metadata_finished == NULL);
- stream_global->send_trailing_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->send_trailing_metadata = op->send_trailing_metadata;
+ GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
+ s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
+ s->send_trailing_metadata = op->send_trailing_metadata;
const size_t metadata_size =
grpc_metadata_batch_size(op->send_trailing_metadata);
const size_t metadata_peer_limit =
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (metadata_size > metadata_peer_limit) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
grpc_error_set_int(
@@ -1150,69 +1103,59 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
- if (contains_non_ok_status(transport_global,
- op->send_trailing_metadata)) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (contains_non_ok_status(op->send_trailing_metadata)) {
+ s->seen_error = true;
}
- if (stream_global->write_closed) {
- stream_global->send_trailing_metadata = NULL;
+ if (s->write_closed) {
+ s->send_trailing_metadata = NULL;
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished,
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
grpc_metadata_batch_is_empty(op->send_trailing_metadata)
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE("Attempt to send trailing metadata after "
- "stream was closed"));
- } else if (stream_global->id != 0) {
+ "stream was closed"),
+ "send_trailing_metadata_finished");
+ } else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- true, "op.send_trailing_metadata");
+ grpc_chttp2_become_writable(exec_ctx, t, s, true,
+ "op.send_trailing_metadata");
}
}
}
if (op->recv_initial_metadata != NULL) {
- GPR_ASSERT(stream_global->recv_initial_metadata_ready == NULL);
- stream_global->recv_initial_metadata_ready =
- op->recv_initial_metadata_ready;
- stream_global->recv_initial_metadata = op->recv_initial_metadata;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
+ s->recv_initial_metadata_ready = op->recv_initial_metadata_ready;
+ s->recv_initial_metadata = op->recv_initial_metadata;
+ grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
if (op->recv_message != NULL) {
- GPR_ASSERT(stream_global->recv_message_ready == NULL);
- stream_global->recv_message_ready = op->recv_message_ready;
- stream_global->recv_message = op->recv_message;
- if (stream_global->id != 0 &&
- (stream_global->incoming_frames.head == NULL ||
- stream_global->incoming_frames.head->is_tail)) {
- incoming_byte_stream_update_flow_control(
- exec_ctx, transport_global, stream_global,
- transport_global->stream_lookahead, 0);
+ GPR_ASSERT(s->recv_message_ready == NULL);
+ s->recv_message_ready = op->recv_message_ready;
+ s->recv_message = op->recv_message;
+ if (s->id != 0 &&
+ (s->incoming_frames.head == NULL || s->incoming_frames.head->is_tail)) {
+ incoming_byte_stream_update_flow_control(exec_ctx, t, s,
+ t->stream_lookahead, 0);
}
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
}
if (op->recv_trailing_metadata != NULL) {
- GPR_ASSERT(stream_global->recv_trailing_metadata_finished == NULL);
- stream_global->recv_trailing_metadata_finished =
- add_closure_barrier(on_complete);
- stream_global->recv_trailing_metadata = op->recv_trailing_metadata;
- stream_global->final_metadata_requested = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
+ s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
+ s->recv_trailing_metadata = op->recv_trailing_metadata;
+ s->final_metadata_requested = true;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
- grpc_chttp2_complete_closure_step(exec_ctx, transport_global, stream_global,
- &on_complete, GRPC_ERROR_NONE);
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &on_complete,
+ GRPC_ERROR_NONE, "op->on_complete");
GPR_TIMER_END("perform_stream_op_locked", 0);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "perform_stream_op");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "perform_stream_op");
}
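
The 5-byte header written in the send_message branch above is the gRPC length-prefixed message framing: one byte carrying the compression flag, then the payload length as a 32-bit big-endian integer, followed by the message bytes themselves. A standalone sketch of that prefix:

#include <stdint.h>

/* hdr must point at 5 writable bytes reserved ahead of the message body. */
static void write_grpc_message_prefix(uint8_t *hdr, int compressed,
                                      uint32_t payload_len) {
  hdr[0] = compressed ? 1 : 0;
  hdr[1] = (uint8_t)(payload_len >> 24);
  hdr[2] = (uint8_t)(payload_len >> 16);
  hdr[3] = (uint8_t)(payload_len >> 8);
  hdr[4] = (uint8_t)(payload_len);
}
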
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1220,70 +1163,68 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GPR_TIMER_BEGIN("perform_stream_op", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+
+ if (grpc_http_trace) {
+ char *str = grpc_transport_stream_op_string(op);
+ gpr_log(GPR_DEBUG, "perform_stream_op[s=%p/%d]: %s", s, s->id, str);
+ gpr_free(str);
+ }
+
grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked,
op);
op->transport_private.args[0] = gt;
op->transport_private.args[1] = gs;
- GRPC_CHTTP2_STREAM_REF(&s->global, "perform_stream_op");
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &op->transport_private.closure, GRPC_ERROR_NONE);
+ GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
+ grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
+ GRPC_ERROR_NONE, op->covered_by_poller);
GPR_TIMER_END("perform_stream_op", 0);
}
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_closure *on_recv) {
grpc_chttp2_outstanding_ping *p = gpr_malloc(sizeof(*p));
- p->next = &t->global.pings;
+ p->next = &t->pings;
p->prev = p->next->prev;
p->prev->next = p->next->prev = p;
- p->id[0] = (uint8_t)((t->global.ping_counter >> 56) & 0xff);
- p->id[1] = (uint8_t)((t->global.ping_counter >> 48) & 0xff);
- p->id[2] = (uint8_t)((t->global.ping_counter >> 40) & 0xff);
- p->id[3] = (uint8_t)((t->global.ping_counter >> 32) & 0xff);
- p->id[4] = (uint8_t)((t->global.ping_counter >> 24) & 0xff);
- p->id[5] = (uint8_t)((t->global.ping_counter >> 16) & 0xff);
- p->id[6] = (uint8_t)((t->global.ping_counter >> 8) & 0xff);
- p->id[7] = (uint8_t)(t->global.ping_counter & 0xff);
+ p->id[0] = (uint8_t)((t->ping_counter >> 56) & 0xff);
+ p->id[1] = (uint8_t)((t->ping_counter >> 48) & 0xff);
+ p->id[2] = (uint8_t)((t->ping_counter >> 40) & 0xff);
+ p->id[3] = (uint8_t)((t->ping_counter >> 32) & 0xff);
+ p->id[4] = (uint8_t)((t->ping_counter >> 24) & 0xff);
+ p->id[5] = (uint8_t)((t->ping_counter >> 16) & 0xff);
+ p->id[6] = (uint8_t)((t->ping_counter >> 8) & 0xff);
+ p->id[7] = (uint8_t)(t->ping_counter & 0xff);
+ t->ping_counter++;
p->on_recv = on_recv;
- gpr_slice_buffer_add(&t->global.qbuf, grpc_chttp2_ping_create(0, p->id));
- grpc_chttp2_initiate_write(exec_ctx, &t->global, true, "send_ping");
+ gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_ping_create(0, p->id));
+ grpc_chttp2_initiate_write(exec_ctx, t, true, "send_ping");
}
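
send_ping_locked above stores the transport's 64-bit ping counter into the 8 opaque bytes of an HTTP/2 PING frame, most significant byte first, and grpc_chttp2_ack_ping later matches the ack by comparing those bytes. A minimal sketch of the packing and the comparison:

#include <stdint.h>
#include <string.h>

static void pack_ping_id(uint8_t out[8], uint64_t counter) {
  for (int i = 0; i < 8; i++) {
    out[i] = (uint8_t)((counter >> (56 - 8 * i)) & 0xff);
  }
}

static int ping_id_matches(const uint8_t sent[8], const uint8_t acked[8]) {
  return memcmp(sent, acked, 8) == 0;
}
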
-typedef struct ack_ping_args {
- grpc_closure closure;
- grpc_chttp2_transport *t;
- uint8_t opaque_8bytes[8];
-} ack_ping_args;
-
-static void ack_ping_locked(grpc_exec_ctx *exec_ctx, void *a,
- grpc_error *error_ignored) {
- ack_ping_args *args = a;
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ const uint8_t *opaque_8bytes) {
grpc_chttp2_outstanding_ping *ping;
- grpc_chttp2_transport_global *transport_global = &args->t->global;
- for (ping = transport_global->pings.next; ping != &transport_global->pings;
- ping = ping->next) {
- if (0 == memcmp(args->opaque_8bytes, ping->id, 8)) {
+ for (ping = t->pings.next; ping != &t->pings; ping = ping->next) {
+ if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
- break;
+ return;
}
}
- UNREF_TRANSPORT(exec_ctx, args->t, "ack_ping");
- gpr_free(args);
+ char *msg = gpr_dump((const char *)opaque_8bytes, 8, GPR_DUMP_HEX);
+ char *from = grpc_endpoint_get_peer(t->ep);
+ gpr_log(GPR_DEBUG, "Unknown ping response from %s: %s", from, msg);
+ gpr_free(from);
+ gpr_free(msg);
}
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_parsing *transport_parsing,
- const uint8_t *opaque_8bytes) {
- ack_ping_args *args = gpr_malloc(sizeof(*args));
- args->t = TRANSPORT_FROM_PARSING(transport_parsing);
- memcpy(args->opaque_8bytes, opaque_8bytes, sizeof(args->opaque_8bytes));
- grpc_closure_init(&args->closure, ack_ping_locked, args);
- REF_TRANSPORT(args->t, "ack_ping");
- grpc_combiner_execute(exec_ctx, args->t->executor.combiner, &args->closure,
- GRPC_ERROR_NONE);
+static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_error_code error, gpr_slice data) {
+ t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
+ grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)error, data,
+ &t->qbuf);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
}
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
@@ -1293,15 +1234,6 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t = op->transport_private.args[0];
grpc_error *close_transport = op->disconnect_with_error;
- /* If there's a set_accept_stream ensure that we're not parsing
- to avoid changing things out from underneath */
- if (t->executor.parsing_active && op->set_accept_stream) {
- GPR_ASSERT(t->post_parsing_op == NULL);
- t->post_parsing_op = gpr_malloc(sizeof(*op));
- memcpy(t->post_parsing_op, op, sizeof(*op));
- return;
- }
-
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
@@ -1309,15 +1241,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_goaway) {
- t->global.sent_goaway = 1;
- grpc_chttp2_goaway_append(
- t->global.last_incoming_stream_id,
- (uint32_t)grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
- gpr_slice_ref(*op->goaway_message), &t->global.qbuf);
- close_transport = grpc_chttp2_has_streams(t)
- ? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("GOAWAY sent");
- grpc_chttp2_initiate_write(exec_ctx, &t->global, false, "goaway_sent");
+ send_goaway(exec_ctx, t,
+ grpc_chttp2_grpc_status_to_http2_error(op->goaway_status),
+ gpr_slice_ref(*op->goaway_message));
}
if (op->set_accept_stream) {
@@ -1342,154 +1268,131 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
close_transport_locked(exec_ctx, t, close_transport);
}
- grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+ grpc_closure_run(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
- UNREF_TRANSPORT(exec_ctx, t, "transport_op");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
}
static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_transport_op *op) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+ char *msg = grpc_transport_op_string(op);
+ gpr_free(msg);
op->transport_private.args[0] = gt;
grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked,
op);
- REF_TRANSPORT(t, "transport_op");
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &op->transport_private.closure, GRPC_ERROR_NONE);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
+ grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
+ GRPC_ERROR_NONE, false);
}
/*******************************************************************************
* INPUT PROCESSING - GENERAL
*/
-static void check_read_ops(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global) {
- GPR_TIMER_BEGIN("check_read_ops", 0);
- grpc_chttp2_stream_global *stream_global;
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
grpc_byte_stream *bs;
- while (
- grpc_chttp2_list_pop_check_read_ops(transport_global, &stream_global)) {
- if (stream_global->recv_initial_metadata_ready != NULL &&
- stream_global->published_initial_metadata) {
- if (stream_global->seen_error) {
- while ((bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
- incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
- }
- if (stream_global->exceeded_metadata_size) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE(
- "received initial metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
- }
+ if (s->recv_initial_metadata_ready != NULL &&
+ s->published_metadata[0] != GRPC_METADATA_NOT_PUBLISHED) {
+ if (s->seen_error) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
+ incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- grpc_chttp2_incoming_metadata_buffer_publish(
- &stream_global->received_initial_metadata,
- stream_global->recv_initial_metadata);
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_initial_metadata_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_initial_metadata_ready = NULL;
}
- if (stream_global->recv_message_ready != NULL) {
- while (stream_global->final_metadata_requested &&
- stream_global->seen_error &&
- (bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[0],
+ s->recv_initial_metadata);
+ null_then_run_closure(exec_ctx, &s->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
+ }
+}
+
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ grpc_byte_stream *bs;
+ if (s->recv_message_ready != NULL) {
+ while (s->final_metadata_requested && s->seen_error &&
+ (bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
+ incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
+ }
+ if (s->incoming_frames.head != NULL) {
+ *s->recv_message =
+ grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames);
+ GPR_ASSERT(*s->recv_message != NULL);
+ null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+ } else if (s->published_metadata[1] != GRPC_METADATA_NOT_PUBLISHED) {
+ *s->recv_message = NULL;
+ null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+ }
+ }
+}
+
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ grpc_byte_stream *bs;
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+ if (s->recv_trailing_metadata_finished != NULL && s->read_closed &&
+ s->write_closed) {
+ if (s->seen_error) {
+ while ((bs = grpc_chttp2_incoming_frame_queue_pop(&s->incoming_frames)) !=
+ NULL) {
incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
}
- if (stream_global->incoming_frames.head != NULL) {
- *stream_global->recv_message = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames);
- GPR_ASSERT(*stream_global->recv_message != NULL);
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_message_ready = NULL;
- } else if (stream_global->published_trailing_metadata) {
- *stream_global->recv_message = NULL;
- grpc_exec_ctx_sched(exec_ctx, stream_global->recv_message_ready,
- GRPC_ERROR_NONE, NULL);
- stream_global->recv_message_ready = NULL;
- }
}
- if (stream_global->recv_trailing_metadata_finished != NULL &&
- stream_global->read_closed && stream_global->write_closed) {
- if (stream_global->seen_error) {
- while ((bs = grpc_chttp2_incoming_frame_queue_pop(
- &stream_global->incoming_frames)) != NULL) {
- incoming_byte_stream_destroy_locked(exec_ctx, bs, GRPC_ERROR_NONE);
- }
- if (stream_global->exceeded_metadata_size) {
- cancel_from_api(
- exec_ctx, transport_global, stream_global,
- grpc_error_set_int(
- GRPC_ERROR_CREATE(
- "received trailing metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
- }
- }
- if (stream_global->all_incoming_byte_streams_finished) {
- grpc_chttp2_incoming_metadata_buffer_publish(
- &stream_global->received_trailing_metadata,
- stream_global->recv_trailing_metadata);
- grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->recv_trailing_metadata_finished, GRPC_ERROR_NONE);
- }
+ if (s->all_incoming_byte_streams_finished &&
+ s->recv_trailing_metadata_finished != NULL) {
+ grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1],
+ s->recv_trailing_metadata);
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
+ "recv_trailing_metadata_finished");
}
}
- GPR_TIMER_END("check_read_ops", 0);
}
-static void decrement_active_streams_locked(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- if ((stream_global->all_incoming_byte_streams_finished =
- gpr_unref(&stream_global->active_streams))) {
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+static void decrement_active_streams_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ if ((s->all_incoming_byte_streams_finished = gpr_unref(&s->active_streams))) {
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
}
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
- size_t new_stream_count;
- grpc_chttp2_stream *s =
- grpc_chttp2_stream_map_delete(&t->parsing_stream_map, id);
- if (!s) {
- s = grpc_chttp2_stream_map_delete(&t->new_stream_map, id);
- }
+ grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
- s->global.in_stream_map = false;
- if (t->parsing.incoming_stream == &s->parsing) {
- t->parsing.incoming_stream = NULL;
- grpc_chttp2_parsing_become_skip_parser(exec_ctx, &t->parsing);
+ if (t->incoming_stream == s) {
+ t->incoming_stream = NULL;
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
}
- if (s->parsing.data_parser.parsing_frame != NULL) {
+ if (s->data_parser.parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, s->parsing.data_parser.parsing_frame, GRPC_ERROR_REF(error),
- 0);
- s->parsing.data_parser.parsing_frame = NULL;
+ exec_ctx, s->data_parser.parsing_frame, GRPC_ERROR_REF(error));
+ s->data_parser.parsing_frame = NULL;
}
- if (grpc_chttp2_unregister_stream(t, s) && t->global.sent_goaway) {
- close_transport_locked(
- exec_ctx, t, GRPC_ERROR_CREATE_REFERENCING(
- "Last stream closed after sending GOAWAY", &error, 1));
+ if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ post_benign_reclaimer(exec_ctx, t);
+ if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
+ close_transport_locked(
+ exec_ctx, t,
+ GRPC_ERROR_CREATE_REFERENCING(
+ "Last stream closed after sending GOAWAY", &error, 1));
+ }
}
- if (grpc_chttp2_list_remove_writable_stream(&t->global, &s->global)) {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &s->global, "chttp2_writing");
+ if (grpc_chttp2_list_remove_writable_stream(t, s)) {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream");
}
- new_stream_count = grpc_chttp2_stream_map_size(&t->parsing_stream_map) +
- grpc_chttp2_stream_map_size(&t->new_stream_map);
- GPR_ASSERT(new_stream_count <= UINT32_MAX);
- if (new_stream_count != t->global.concurrent_stream_count) {
- t->global.concurrent_stream_count = (uint32_t)new_stream_count;
- maybe_start_some_streams(exec_ctx, &t->global);
- }
GRPC_ERROR_UNREF(error);
+
+ maybe_start_some_streams(exec_ctx, t);
}
static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
@@ -1519,23 +1422,20 @@ static void status_codes_from_error(grpc_error *error, gpr_timespec deadline,
}
}
-static void cancel_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *due_to_error) {
- if (!stream_global->read_closed || !stream_global->write_closed) {
+void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ grpc_error *due_to_error) {
+ if (!s->read_closed || !s->write_closed) {
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(due_to_error, stream_global->deadline, &http_error,
+ status_codes_from_error(due_to_error, s->deadline, &http_error,
&grpc_status);
- if (stream_global->id != 0) {
+ if (s->id != 0) {
gpr_slice_buffer_add(
- &transport_global->qbuf,
- grpc_chttp2_rst_stream_create(stream_global->id, (uint32_t)http_error,
- &stream_global->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "rst_stream");
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
+ &s->stats.outgoing));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "rst_stream");
}
const char *msg =
@@ -1546,27 +1446,21 @@ static void cancel_from_api(grpc_exec_ctx *exec_ctx,
msg = grpc_error_string(due_to_error);
}
gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- grpc_status, &msg_slice);
+ grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
}
- if (due_to_error != GRPC_ERROR_NONE && !stream_global->seen_error) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
+ s->seen_error = true;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
- 1, due_to_error);
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error);
}
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_status_code status, gpr_slice *slice) {
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_status_code status,
+ gpr_slice *slice) {
if (status != GRPC_STATUS_OK) {
- stream_global->seen_error = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ s->seen_error = true;
}
/* stream_global->recv_trailing_metadata_finished gives us a
last chance replacement: we've received trailing metadata,
@@ -1574,24 +1468,23 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
to the upper layers - drop what we've got, and then publish
what we want - which is safe because we haven't told anyone
about the metadata yet */
- if (!stream_global->published_trailing_metadata ||
- stream_global->recv_trailing_metadata_finished != NULL) {
+ if (s->published_metadata[1] == GRPC_METADATA_NOT_PUBLISHED ||
+ s->recv_trailing_metadata_finished != NULL) {
char status_string[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status, status_string);
grpc_chttp2_incoming_metadata_buffer_add(
- &stream_global->received_trailing_metadata,
+ &s->metadata_buffer[1],
grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_STATUS, grpc_mdstr_from_string(status_string)));
if (slice) {
grpc_chttp2_incoming_metadata_buffer_add(
- &stream_global->received_trailing_metadata,
+ &s->metadata_buffer[1],
grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_GRPC_MESSAGE,
grpc_mdstr_from_slice(gpr_slice_ref(*slice))));
}
- stream_global->published_trailing_metadata = true;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
+ s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
if (slice) {
gpr_slice_unref(*slice);
@@ -1609,91 +1502,88 @@ static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
++*nrefs;
}
-static grpc_error *removal_error(grpc_error *extra_error,
- grpc_chttp2_stream_global *stream_global) {
+static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
+ const char *master_error_msg) {
grpc_error *refs[3];
size_t nrefs = 0;
- add_error(stream_global->read_closed_error, refs, &nrefs);
- add_error(stream_global->write_closed_error, refs, &nrefs);
+ add_error(s->read_closed_error, refs, &nrefs);
+ add_error(s->write_closed_error, refs, &nrefs);
add_error(extra_error, refs, &nrefs);
grpc_error *error = GRPC_ERROR_NONE;
if (nrefs > 0) {
- error = GRPC_ERROR_CREATE_REFERENCING("Failed due to stream removal", refs,
- nrefs);
+ error = GRPC_ERROR_CREATE_REFERENCING(master_error_msg, refs, nrefs);
}
GRPC_ERROR_UNREF(extra_error);
return error;
}
static void fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_error *error) {
- error = removal_error(error, stream_global);
- stream_global->send_message = NULL;
+ error =
+ removal_error(error, s, "Pending writes failed due to stream closure");
+ s->fetching_send_message = NULL;
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error),
+ "send_initial_metadata_finished");
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished, GRPC_ERROR_REF(error));
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished, GRPC_ERROR_REF(error));
- grpc_chttp2_complete_closure_step(exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished,
- error);
+ exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
+ "fetching_send_message_finished");
+ while (s->on_write_finished_cbs) {
+ grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
+ s->on_write_finished_cbs = cb->next;
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
+ GRPC_ERROR_REF(error),
+ "on_write_finished_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+ }
+ GRPC_ERROR_UNREF(error);
}
-void grpc_chttp2_mark_stream_closed(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
- grpc_error *error) {
- if (stream_global->read_closed && stream_global->write_closed) {
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int close_reads,
+ int close_writes, grpc_error *error) {
+ if (s->read_closed && s->write_closed) {
/* already closed */
GRPC_ERROR_UNREF(error);
return;
}
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- if (close_reads && !stream_global->read_closed) {
- stream_global->read_closed_error = GRPC_ERROR_REF(error);
- stream_global->read_closed = true;
- stream_global->published_initial_metadata = true;
- stream_global->published_trailing_metadata = true;
- decrement_active_streams_locked(exec_ctx, transport_global, stream_global);
- }
- if (close_writes && !stream_global->write_closed) {
- stream_global->write_closed_error = GRPC_ERROR_REF(error);
- stream_global->write_closed = true;
- if (TRANSPORT_FROM_GLOBAL(transport_global)->executor.write_state !=
- GRPC_CHTTP2_WRITING_INACTIVE) {
- GRPC_CHTTP2_STREAM_REF(stream_global, "finish_writes");
- grpc_chttp2_list_add_closed_waiting_for_writing(transport_global,
- stream_global);
- } else {
- fail_pending_writes(exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(error));
- }
- }
- if (stream_global->read_closed && stream_global->write_closed) {
- if (stream_global->id != 0 &&
- TRANSPORT_FROM_GLOBAL(transport_global)->executor.parsing_active) {
- grpc_chttp2_list_add_closed_waiting_for_parsing(transport_global,
- stream_global);
- } else {
- if (stream_global->id != 0) {
- remove_stream(exec_ctx, TRANSPORT_FROM_GLOBAL(transport_global),
- stream_global->id,
- removal_error(GRPC_ERROR_REF(error), stream_global));
+ if (close_reads && !s->read_closed) {
+ s->read_closed_error = GRPC_ERROR_REF(error);
+ s->read_closed = true;
+ for (int i = 0; i < 2; i++) {
+ if (s->published_metadata[i] == GRPC_METADATA_NOT_PUBLISHED) {
+ s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE;
}
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
}
+ decrement_active_streams_locked(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+ }
+ if (close_writes && !s->write_closed) {
+ s->write_closed_error = GRPC_ERROR_REF(error);
+ s->write_closed = true;
+ fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
+ grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+ }
+ if (s->read_closed && s->write_closed) {
+ if (s->id != 0) {
+ remove_stream(exec_ctx, t, s->id,
+ removal_error(GRPC_ERROR_REF(error), s, "Stream removed"));
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2");
}
GRPC_ERROR_UNREF(error);
}
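
grpc_chttp2_mark_stream_closed above tracks the read and write half-closes independently and only removes the stream from the map (and drops its ref) once both directions are closed. A minimal sketch of that bookkeeping with placeholder state:

#include <stdbool.h>

typedef struct {
  bool read_closed;
  bool write_closed;
} half_close_state;

/* Returns true exactly when this call makes the stream fully closed, i.e.
   the point at which the real code removes the stream and unrefs it. */
static bool mark_closed(half_close_state *s, bool close_reads,
                        bool close_writes) {
  bool was_fully_closed = s->read_closed && s->write_closed;
  if (close_reads) s->read_closed = true;
  if (close_writes) s->write_closed = true;
  return !was_fully_closed && s->read_closed && s->write_closed;
}
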
-static void close_from_api(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- grpc_error *error) {
+static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_error *error) {
gpr_slice hdr;
gpr_slice status_hdr;
gpr_slice message_pfx;
@@ -1701,16 +1591,17 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_chttp2_error_code http_error;
- status_codes_from_error(error, stream_global->deadline, &http_error,
- &grpc_status);
+ status_codes_from_error(error, s->deadline, &http_error, &grpc_status);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
- if (stream_global->id != 0 && !transport_global->is_client) {
+ if (s->id != 0 && !t->is_client) {
/* Hand roll a header block.
- This is unnecessarily ugly - at some point we should find a more elegant
+ This is unnecessarily ugly - at some point we should find a more
+ elegant
solution.
- It's complicated by the fact that our send machinery would be dead by the
+ It's complicated by the fact that our send machinery would be dead by
+ the
time we got around to sending this, so instead we ignore HPACK
compression
and just write the uncompressed bytes onto the wire. */
@@ -1775,23 +1666,22 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
*p++ = (uint8_t)(len);
*p++ = GRPC_CHTTP2_FRAME_HEADER;
*p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS;
- *p++ = (uint8_t)(stream_global->id >> 24);
- *p++ = (uint8_t)(stream_global->id >> 16);
- *p++ = (uint8_t)(stream_global->id >> 8);
- *p++ = (uint8_t)(stream_global->id);
+ *p++ = (uint8_t)(s->id >> 24);
+ *p++ = (uint8_t)(s->id >> 16);
+ *p++ = (uint8_t)(s->id >> 8);
+ *p++ = (uint8_t)(s->id);
GPR_ASSERT(p == GPR_SLICE_END_PTR(hdr));
- gpr_slice_buffer_add(&transport_global->qbuf, hdr);
- gpr_slice_buffer_add(&transport_global->qbuf, status_hdr);
+ gpr_slice_buffer_add(&t->qbuf, hdr);
+ gpr_slice_buffer_add(&t->qbuf, status_hdr);
if (optional_message) {
- gpr_slice_buffer_add(&transport_global->qbuf, message_pfx);
- gpr_slice_buffer_add(&transport_global->qbuf,
+ gpr_slice_buffer_add(&t->qbuf, message_pfx);
+ gpr_slice_buffer_add(&t->qbuf,
gpr_slice_from_copied_string(optional_message));
}
gpr_slice_buffer_add(
- &transport_global->qbuf,
- grpc_chttp2_rst_stream_create(stream_global->id, GRPC_CHTTP2_NO_ERROR,
- &stream_global->stats.outgoing));
+ &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
}
const char *msg = grpc_error_get_str(error, GRPC_ERROR_STR_GRPC_MESSAGE);
@@ -1801,46 +1691,33 @@ static void close_from_api(grpc_exec_ctx *exec_ctx,
msg = grpc_error_string(error);
}
gpr_slice msg_slice = gpr_slice_from_copied_string(msg);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- grpc_status, &msg_slice);
+ grpc_chttp2_fake_status(exec_ctx, t, s, grpc_status, &msg_slice);
if (free_msg) grpc_error_free_string(msg);
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global, 1,
- 1, error);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "close_from_api");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "close_from_api");
}
typedef struct {
grpc_exec_ctx *exec_ctx;
grpc_error *error;
+ grpc_chttp2_transport *t;
} cancel_stream_cb_args;
-static void cancel_stream_cb(grpc_chttp2_transport_global *transport_global,
- void *user_data,
- grpc_chttp2_stream_global *stream_global) {
+static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
cancel_stream_cb_args *args = user_data;
- cancel_from_api(args->exec_ctx, transport_global, stream_global,
- GRPC_ERROR_REF(args->error));
+ grpc_chttp2_stream *s = stream;
+ grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
+ GRPC_ERROR_REF(args->error));
}
static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
- cancel_stream_cb_args args = {exec_ctx, error};
- grpc_chttp2_for_all_streams(&t->global, &args, cancel_stream_cb);
+ cancel_stream_cb_args args = {exec_ctx, error, t};
+ grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args);
GRPC_ERROR_UNREF(error);
}
-static void drop_connection(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
- if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
- error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE);
- }
- close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
- end_all_the_calls(exec_ctx, t, error);
-}
-
/** update window from a settings change */
typedef struct {
grpc_chttp2_transport *t;
@@ -1851,20 +1728,23 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
update_global_window_args *a = args;
grpc_chttp2_transport *t = a->t;
grpc_chttp2_stream *s = stream;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_stream_global *stream_global = &s->global;
int was_zero;
int is_zero;
- int64_t initial_window_update = t->parsing.initial_window_update;
+ int64_t initial_window_update = t->initial_window_update;
- was_zero = stream_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", transport_global, stream_global,
- outgoing_window, initial_window_update);
- is_zero = stream_global->outgoing_window <= 0;
+ if (initial_window_update > 0) {
+ was_zero = s->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("settings", t, s, outgoing_window,
+ initial_window_update);
+ is_zero = s->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(a->exec_ctx, transport_global, stream_global,
- true, "update_global_window");
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(a->exec_ctx, t, s, true,
+ "update_global_window");
+ }
+ } else {
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("settings", t, s, outgoing_window,
+ -initial_window_update);
}
}
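
update_global_window above applies a change to SETTINGS_INITIAL_WINDOW_SIZE to every stream's outgoing window: per RFC 7540 section 6.9.2 the delta is added to each stream window (a negative delta can leave a window below zero until the peer grants more credit), and a stream that just went from stalled to positive becomes writable again. A minimal sketch of that adjustment:

#include <stdbool.h>
#include <stdint.h>

/* Returns true if the stream just moved from stalled (<= 0) to writable
   (> 0) and should be queued for writing again. */
static bool apply_initial_window_delta(int64_t *outgoing_window,
                                       int64_t delta) {
  bool was_stalled = *outgoing_window <= 0;
  *outgoing_window += delta;
  return was_stalled && *outgoing_window > 0;
}
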
@@ -1872,50 +1752,19 @@ static void update_global_window(void *args, uint32_t id, void *stream) {
* INPUT PROCESSING - PARSING
*/
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static void reading_action(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
/* Control flow:
reading_action_locked ->
(parse_unlocked -> post_parse_locked)? ->
post_reading_action_locked */
GPR_TIMER_BEGIN("reading_action", 0);
grpc_chttp2_transport *t = tp;
- grpc_combiner_execute(exec_ctx, t->executor.combiner,
- &t->reading_action_locked, GRPC_ERROR_REF(error));
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
+ GRPC_ERROR_REF(error), false);
GPR_TIMER_END("reading_action", 0);
}
-static void reading_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- GPR_TIMER_BEGIN("reading_action_locked", 0);
-
- grpc_chttp2_transport *t = tp;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
-
- GPR_ASSERT(!t->executor.parsing_active);
- if (!t->closed) {
- t->executor.parsing_active = 1;
- /* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map,
- &t->parsing_stream_map);
- grpc_chttp2_prepare_to_read(transport_global, transport_parsing);
- grpc_exec_ctx_sched(exec_ctx, &t->parsing_action, GRPC_ERROR_REF(error),
- NULL);
- } else {
- post_reading_action_locked(exec_ctx, t, error);
- }
-
- GPR_TIMER_END("reading_action_locked", 0);
-}
-
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
grpc_http_parser parser;
@@ -1944,131 +1793,102 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
return error;
}
-static void parsing_action(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = arg;
- grpc_error *err = GRPC_ERROR_NONE;
- GPR_TIMER_BEGIN("reading_action.parse", 0);
- size_t i = 0;
- grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
- GRPC_ERROR_NONE};
- for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
- errors[1] = grpc_chttp2_perform_read(exec_ctx, &t->parsing,
- t->read_buffer.slices[i]);
- };
- if (errors[1] == GRPC_ERROR_NONE) {
- err = GRPC_ERROR_REF(error);
- } else {
- errors[2] = try_http_parsing(exec_ctx, t);
- err = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
- GPR_ARRAY_SIZE(errors));
+static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("reading_action_locked", 0);
+
+ grpc_chttp2_transport *t = tp;
+
+ GRPC_ERROR_REF(error);
+
+ grpc_error *err = error;
+ if (err != GRPC_ERROR_NONE) {
+ err = grpc_error_set_int(
+ GRPC_ERROR_CREATE_REFERENCING("Endpoint read failed", &err, 1),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state);
}
- for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
- GRPC_ERROR_UNREF(errors[i]);
+ GPR_SWAP(grpc_error *, err, error);
+ GRPC_ERROR_UNREF(err);
+ if (!t->closed) {
+ GPR_TIMER_BEGIN("reading_action.parse", 0);
+ size_t i = 0;
+ grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
+ GRPC_ERROR_NONE};
+ for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
+ errors[1] =
+ grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
+ };
+ if (errors[1] != GRPC_ERROR_NONE) {
+ errors[2] = try_http_parsing(exec_ctx, t);
+ GRPC_ERROR_UNREF(error);
+ error = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
+ GPR_ARRAY_SIZE(errors));
+ }
+ for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
+ GRPC_ERROR_UNREF(errors[i]);
+ }
+ GPR_TIMER_END("reading_action.parse", 0);
+
+ GPR_TIMER_BEGIN("post_parse_locked", 0);
+ if (t->initial_window_update != 0) {
+ update_global_window_args args = {t, exec_ctx};
+ grpc_chttp2_stream_map_for_each(&t->stream_map, update_global_window,
+ &args);
+ t->initial_window_update = 0;
+ }
+ /* handle higher level things */
+ if (t->incoming_window < t->connection_window_target * 3 / 4) {
+ int64_t announce_bytes = t->connection_window_target - t->incoming_window;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, announce_incoming_window,
+ announce_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", t, incoming_window,
+ announce_bytes);
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "global incoming window");
+ }
+
+ GPR_TIMER_END("post_parse_locked", 0);
}
- grpc_combiner_execute(exec_ctx, t->executor.combiner, &t->post_parse_locked,
- err);
- GPR_TIMER_END("reading_action.parse", 0);
-}
-static void post_parse_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("post_parse_locked", 0);
- grpc_chttp2_transport *t = arg;
- grpc_chttp2_transport_global *transport_global = &t->global;
- grpc_chttp2_transport_parsing *transport_parsing = &t->parsing;
- /* copy parsing qbuf to global qbuf */
- if (t->parsing.qbuf.count > 0) {
- gpr_slice_buffer_move_into(&t->parsing.qbuf, &t->global.qbuf);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "parsing_qbuf");
- }
- /* merge stream lists */
- grpc_chttp2_stream_map_move_into(&t->new_stream_map, &t->parsing_stream_map);
- transport_global->concurrent_stream_count =
- (uint32_t)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
- if (transport_parsing->initial_window_update != 0) {
- update_global_window_args args = {t, exec_ctx};
- grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
- update_global_window, &args);
- transport_parsing->initial_window_update = 0;
- }
- /* handle higher level things */
- grpc_chttp2_publish_reads(exec_ctx, transport_global, transport_parsing);
- t->executor.parsing_active = 0;
- /* handle delayed transport ops (if there is one) */
- if (t->post_parsing_op) {
- grpc_transport_op *op = t->post_parsing_op;
- t->post_parsing_op = NULL;
- perform_transport_op_locked(exec_ctx, op, GRPC_ERROR_NONE);
- gpr_free(op);
- }
- /* if a stream is in the stream map, and gets cancelled, we need to
- * ensure we are not parsing before continuing the cancellation to keep
- * things in a sane state */
- grpc_chttp2_stream_global *stream_global;
- while (grpc_chttp2_list_pop_closed_waiting_for_parsing(transport_global,
- &stream_global)) {
- GPR_ASSERT(stream_global->in_stream_map);
- GPR_ASSERT(stream_global->write_closed);
- GPR_ASSERT(stream_global->read_closed);
- remove_stream(exec_ctx, t, stream_global->id,
- removal_error(GRPC_ERROR_NONE, stream_global));
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2");
- }
-
- post_reading_action_locked(exec_ctx, t, error);
- GPR_TIMER_END("post_parse_locked", 0);
-}
-
-static void post_reading_action_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
GPR_TIMER_BEGIN("post_reading_action_locked", 0);
- grpc_chttp2_transport *t = arg;
bool keep_reading = false;
- GRPC_ERROR_REF(error);
if (error == GRPC_ERROR_NONE && t->closed) {
error = GRPC_ERROR_CREATE("Transport closed");
}
if (error != GRPC_ERROR_NONE) {
- drop_connection(exec_ctx, t, GRPC_ERROR_REF(error));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
t->endpoint_reading = 0;
- if (grpc_http_write_state_trace) {
- gpr_log(GPR_DEBUG, "R:%p -> 0 ws=%s", t,
- write_state_name(t->executor.write_state));
- }
} else if (!t->closed) {
keep_reading = true;
- REF_TRANSPORT(t, "keep_reading");
- prevent_endpoint_shutdown(t);
+ GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading");
}
gpr_slice_buffer_reset_and_unref(&t->read_buffer);
if (keep_reading) {
- grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->reading_action);
- allow_endpoint_shutdown_locked(exec_ctx, t);
- UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
+ grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
- UNREF_TRANSPORT(exec_ctx, t, "reading_action");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
}
- GRPC_ERROR_UNREF(error);
GPR_TIMER_END("post_reading_action_locked", 0);
+
+ GRPC_ERROR_UNREF(error);
+
+ GPR_TIMER_END("reading_action_locked", 0);
}
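
One step folded into read_action_locked above is connection-level flow
control: once the incoming window falls below three quarters of its target,
the parser announces enough credit to bring it back to the target and kicks
off a write. A standalone sketch of that threshold rule, with made-up names:

    #include <stdint.h>

    /* returns how many bytes to announce to the peer; 0 means no write needed */
    static int64_t connection_window_topup(int64_t incoming_window,
                                           int64_t target) {
      if (incoming_window >= target * 3 / 4) return 0;  /* still healthy */
      return target - incoming_window;                  /* refill to target */
    }
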
/*******************************************************************************
* CALLBACK LOOP
*/
-static void connectivity_state_set(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_connectivity_state state, grpc_error *error, const char *reason) {
+static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_connectivity_state state,
+ grpc_error *error, const char *reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
- grpc_connectivity_state_set(
- exec_ctx,
- &TRANSPORT_FROM_GLOBAL(transport_global)->channel_callback.state_tracker,
- state, error, reason);
+ grpc_connectivity_state_set(exec_ctx, &t->channel_callback.state_tracker,
+ state, error, reason);
}
/*******************************************************************************
@@ -2101,15 +1921,16 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
}
}
-static void incoming_byte_stream_update_flow_control(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, size_t max_size_hint,
- size_t have_already) {
+static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ size_t max_size_hint,
+ size_t have_already) {
uint32_t max_recv_bytes;
/* clamp max recv hint to an allowable size */
- if (max_size_hint >= UINT32_MAX - transport_global->stream_lookahead) {
- max_recv_bytes = UINT32_MAX - transport_global->stream_lookahead;
+ if (max_size_hint >= UINT32_MAX - t->stream_lookahead) {
+ max_recv_bytes = UINT32_MAX - t->stream_lookahead;
} else {
max_recv_bytes = (uint32_t)max_size_hint;
}
@@ -2122,23 +1943,21 @@ static void incoming_byte_stream_update_flow_control(
}
/* add some small lookahead to keep pipelines flowing */
- GPR_ASSERT(max_recv_bytes <= UINT32_MAX - transport_global->stream_lookahead);
- max_recv_bytes += transport_global->stream_lookahead;
- if (stream_global->max_recv_bytes < max_recv_bytes) {
- uint32_t add_max_recv_bytes =
- max_recv_bytes - stream_global->max_recv_bytes;
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- max_recv_bytes, add_max_recv_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- unannounced_incoming_window_for_parse,
+ GPR_ASSERT(max_recv_bytes <= UINT32_MAX - t->stream_lookahead);
+ max_recv_bytes += t->stream_lookahead;
+ if (s->max_recv_bytes < max_recv_bytes) {
+ uint32_t add_max_recv_bytes = max_recv_bytes - s->max_recv_bytes;
+ bool new_window_write_is_covered_by_poller =
+ s->max_recv_bytes < have_already;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, max_recv_bytes,
+ add_max_recv_bytes);
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, incoming_window,
add_max_recv_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", transport_global, stream_global,
- unannounced_incoming_window_for_writing,
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("op", t, s, announce_window,
add_max_recv_bytes);
- grpc_chttp2_list_add_unannounced_incoming_window_available(transport_global,
- stream_global);
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "read_incoming_stream");
+ grpc_chttp2_become_writable(exec_ctx, t, s,
+ new_window_write_is_covered_by_poller,
+ "read_incoming_stream");
}
}
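
The flow-control update above grows a stream's advertised receive window to
cover the application's read hint plus a fixed lookahead, and only ever
credits the difference so the advertised window never shrinks mid-message.
A sketch of that arithmetic in plain C, with invented names:

    #include <stdint.h>

    static uint32_t window_credit_for_read(uint32_t current_max_recv,
                                           uint32_t size_hint,
                                           uint32_t lookahead) {
      uint64_t target = (uint64_t)size_hint + lookahead;  /* hint + lookahead */
      if (target > UINT32_MAX) target = UINT32_MAX;       /* clamp */
      if (target <= current_max_recv) return 0;           /* already covered */
      return (uint32_t)(target - current_max_recv);       /* bytes to credit */
    }
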
@@ -2146,25 +1965,23 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = argp;
- grpc_chttp2_transport_global *transport_global = &bs->transport->global;
- grpc_chttp2_stream_global *stream_global = &bs->stream->global;
+ grpc_chttp2_transport *t = bs->transport;
+ grpc_chttp2_stream *s = bs->stream;
if (bs->is_tail) {
gpr_mu_lock(&bs->slice_mu);
size_t cur_length = bs->slices.length;
gpr_mu_unlock(&bs->slice_mu);
incoming_byte_stream_update_flow_control(
- exec_ctx, transport_global, stream_global,
- bs->next_action.max_size_hint, cur_length);
+ exec_ctx, t, s, bs->next_action.max_size_hint, cur_length);
}
gpr_mu_lock(&bs->slice_mu);
if (bs->slices.count > 0) {
*bs->next_action.slice = gpr_slice_buffer_take_first(&bs->slices);
- grpc_exec_ctx_sched(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE,
- NULL);
+ grpc_closure_run(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
} else if (bs->error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, bs->next_action.on_complete,
- GRPC_ERROR_REF(bs->error), NULL);
+ grpc_closure_run(exec_ctx, bs->next_action.on_complete,
+ GRPC_ERROR_REF(bs->error));
} else {
bs->on_next = bs->next_action.on_complete;
bs->next = bs->next_action.slice;
@@ -2186,8 +2003,8 @@ static int incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
bs->next_action.on_complete = on_complete;
grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked,
bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->next_action.closure, GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, bs->transport->combiner,
+ &bs->next_action.closure, GRPC_ERROR_NONE, false);
GPR_TIMER_END("incoming_byte_stream_next", 0);
return 0;
}
@@ -2200,8 +2017,7 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = byte_stream;
GPR_ASSERT(bs->base.destroy == incoming_byte_stream_destroy);
- decrement_active_streams_locked(exec_ctx, &bs->transport->global,
- &bs->stream->global);
+ decrement_active_streams_locked(exec_ctx, bs->transport, bs->stream);
incoming_byte_stream_unref(exec_ctx, bs);
}
@@ -2212,126 +2028,215 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked,
bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->destroy_action, GRPC_ERROR_NONE);
+ grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action,
+ GRPC_ERROR_NONE, false);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}
-typedef struct {
- grpc_chttp2_incoming_byte_stream *byte_stream;
- gpr_slice slice;
-} incoming_byte_stream_push_arg;
+static void incoming_byte_stream_publish_error(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
+ grpc_error *error) {
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
+ bs->on_next = NULL;
+ GRPC_ERROR_UNREF(bs->error);
+ bs->error = error;
+}
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice) {
gpr_mu_lock(&bs->slice_mu);
- if (bs->on_next != NULL) {
- *bs->next = slice;
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
- bs->on_next = NULL;
+ if (bs->remaining_bytes < GPR_SLICE_LENGTH(slice)) {
+ incoming_byte_stream_publish_error(
+ exec_ctx, bs, GRPC_ERROR_CREATE("Too many bytes in stream"));
} else {
- gpr_slice_buffer_add(&bs->slices, slice);
+ bs->remaining_bytes -= (uint32_t)GPR_SLICE_LENGTH(slice);
+ if (bs->on_next != NULL) {
+ *bs->next = slice;
+ grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
+ bs->on_next = NULL;
+ } else {
+ gpr_slice_buffer_add(&bs->slices, slice);
+ }
}
gpr_mu_unlock(&bs->slice_mu);
}
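
The new remaining_bytes check above bounds an incoming message to the length
declared in its frame header; anything beyond that is published as an error
rather than buffered. A minimal version of the same guard; bounded_msg and
push_slice are illustrative names, not gRPC APIs.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t remaining; } bounded_msg;

    static bool push_slice(bounded_msg *m, size_t slice_len) {
      if (slice_len > m->remaining) return false;  /* too many bytes in stream */
      m->remaining -= (uint32_t)slice_len;
      return true;  /* caller hands the slice to a waiter or buffers it */
    }
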
-static void incoming_byte_stream_finished_locked(grpc_exec_ctx *exec_ctx,
- void *bsp, grpc_error *error) {
- grpc_chttp2_incoming_byte_stream *bs = bsp;
- if (error != GRPC_ERROR_NONE) {
- grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
- bs->on_next = NULL;
- GRPC_ERROR_UNREF(bs->error);
- bs->error = error;
- }
- incoming_byte_stream_unref(exec_ctx, bs);
-}
-
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, int from_parsing_thread) {
- GPR_TIMER_BEGIN("grpc_chttp2_incoming_byte_stream_finished", 0);
- if (from_parsing_thread) {
- grpc_closure_init(&bs->finished_action,
- incoming_byte_stream_finished_locked, bs);
- grpc_combiner_execute(exec_ctx, bs->transport->executor.combiner,
- &bs->finished_action, GRPC_ERROR_REF(error));
- } else {
- incoming_byte_stream_finished_locked(exec_ctx, bs, error);
+ grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) {
+ gpr_mu_lock(&bs->slice_mu);
+ if (bs->remaining_bytes != 0) {
+ error = GRPC_ERROR_CREATE("Truncated message");
+ }
+ gpr_mu_unlock(&bs->slice_mu);
+ }
+ if (error != GRPC_ERROR_NONE) {
+ incoming_byte_stream_publish_error(exec_ctx, bs, error);
}
- GPR_TIMER_END("grpc_chttp2_incoming_byte_stream_finished", 0);
+ incoming_byte_stream_unref(exec_ctx, bs);
}
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
- uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ uint32_t frame_size, uint32_t flags) {
grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
gpr_malloc(sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
+ incoming_byte_stream->remaining_bytes = frame_size;
incoming_byte_stream->base.flags = flags;
incoming_byte_stream->base.next = incoming_byte_stream_next;
incoming_byte_stream->base.destroy = incoming_byte_stream_destroy;
gpr_mu_init(&incoming_byte_stream->slice_mu);
gpr_ref_init(&incoming_byte_stream->refs, 2);
incoming_byte_stream->next_message = NULL;
- incoming_byte_stream->transport = TRANSPORT_FROM_PARSING(transport_parsing);
- incoming_byte_stream->stream = STREAM_FROM_PARSING(stream_parsing);
- gpr_ref(&incoming_byte_stream->stream->global.active_streams);
+ incoming_byte_stream->transport = t;
+ incoming_byte_stream->stream = s;
+ gpr_ref(&incoming_byte_stream->stream->active_streams);
gpr_slice_buffer_init(&incoming_byte_stream->slices);
incoming_byte_stream->on_next = NULL;
incoming_byte_stream->is_tail = 1;
incoming_byte_stream->error = GRPC_ERROR_NONE;
- if (add_to_queue->head == NULL) {
- add_to_queue->head = incoming_byte_stream;
+ grpc_chttp2_incoming_frame_queue *q = &s->incoming_frames;
+ if (q->head == NULL) {
+ q->head = incoming_byte_stream;
} else {
- add_to_queue->tail->is_tail = 0;
- add_to_queue->tail->next_message = incoming_byte_stream;
+ q->tail->is_tail = 0;
+ q->tail->next_message = incoming_byte_stream;
}
- add_to_queue->tail = incoming_byte_stream;
+ q->tail = incoming_byte_stream;
+ grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
return incoming_byte_stream;
}
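
The queueing at the end of the create function is a plain tail append on the
stream's incoming_frames list, with an is_tail flag so that only the newest
byte stream drives flow-control lookahead on reads. The same shape with
throwaway types:

    #include <stddef.h>

    typedef struct node { struct node *next; int is_tail; } node;
    typedef struct { node *head, *tail; } frame_queue;

    static void frame_queue_append(frame_queue *q, node *n) {
      n->next = NULL;
      n->is_tail = 1;
      if (q->head == NULL) {
        q->head = n;
      } else {
        q->tail->is_tail = 0;   /* previous tail no longer drives lookahead */
        q->tail->next = n;
      }
      q->tail = n;
    }
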
/*******************************************************************************
- * TRACING
+ * RESOURCE QUOTAS
*/
-static char *format_flowctl_context_var(const char *context, const char *var,
- int64_t val, uint32_t id,
- char **scope) {
- char *underscore_pos;
- char *buf;
- char *result;
- if (context == NULL) {
- *scope = NULL;
- gpr_asprintf(&buf, "%s(%" PRId64 ")", var, val);
- result = gpr_leftpad(buf, ' ', 60);
- gpr_free(buf);
- return result;
- }
- underscore_pos = strchr(context, '_');
- *scope = gpr_strdup(context);
- (*scope)[underscore_pos - context] = 0;
- if (id != 0) {
- char *tmp = *scope;
- gpr_asprintf(scope, "%s[%d]", tmp, id);
- gpr_free(tmp);
- }
- gpr_asprintf(&buf, "%s.%s(%" PRId64 ")", underscore_pos + 1, var, val);
- result = gpr_leftpad(buf, ' ', 60);
- gpr_free(buf);
- return result;
+static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->benign_reclaimer_registered) {
+ t->benign_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ false, &t->benign_reclaimer);
+ }
+}
+
+static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (!t->destructive_reclaimer_registered) {
+ t->destructive_reclaimer_registered = true;
+ GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
+ grpc_resource_user_post_reclaimer(exec_ctx,
+ grpc_endpoint_get_resource_user(t->ep),
+ true, &t->destructive_reclaimer);
+ }
+}
+
+static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
+ GRPC_ERROR_REF(error), false);
+}
+
+static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ if (error == GRPC_ERROR_NONE &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ /* Channel with no active streams: send a goaway to try and make it
+ * disconnect cleanly */
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
+ t->peer_string);
+ }
+ send_goaway(exec_ctx, t, GRPC_CHTTP2_ENHANCE_YOUR_CALM,
+ gpr_slice_from_static_string("Buffers full"));
+ } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG,
+ "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
+ " streams",
+ t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
+ }
+ t->benign_reclaimer_registered = false;
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
}
-static int samestr(char *a, char *b) {
- if (a == NULL) {
- return b == NULL;
+static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_chttp2_transport *t = arg;
+ size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
+ t->destructive_reclaimer_registered = false;
+ if (error == GRPC_ERROR_NONE && n > 0) {
+ grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
+ s->id);
+ }
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_CHTTP2_ENHANCE_YOUR_CALM));
+ if (n > 1) {
+ /* Since we cancel one stream per destructive reclamation, if
+ there are more streams left, we can immediately post a new
+ reclaimer in case the resource quota needs to free more
+ memory */
+ post_destructive_reclaimer(exec_ctx, t);
+ }
}
- if (b == NULL) {
- return 0;
+ if (error != GRPC_ERROR_CANCELLED) {
+ grpc_resource_user_finish_reclamation(
+ exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+ }
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
+}
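
The two reclaimers above form a two-tier memory policy: a benign pass acts
only when the transport is idle (send a GOAWAY so the connection winds down),
while a destructive pass gives up one stream at a time and re-registers while
streams remain. A sketch of that decision, using invented types rather than
the resource-quota API:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { RECLAIM_NOTHING, RECLAIM_SEND_GOAWAY, RECLAIM_CANCEL_ONE } reclaim_action;

    static reclaim_action pick_reclaim_action(bool destructive,
                                              size_t active_streams,
                                              bool *repost_destructive) {
      *repost_destructive = false;
      if (!destructive) {
        return active_streams == 0 ? RECLAIM_SEND_GOAWAY : RECLAIM_NOTHING;
      }
      if (active_streams == 0) return RECLAIM_NOTHING;
      *repost_destructive = active_streams > 1;  /* more memory may be needed */
      return RECLAIM_CANCEL_ONE;
    }
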
+
+/*******************************************************************************
+ * TRACING
+ */
+
+static char *format_flowctl_context_var(const char *context, const char *var,
+ int64_t val, uint32_t id) {
+ char *name;
+ if (context == NULL) {
+ name = gpr_strdup(var);
+ } else if (0 == strcmp(context, "t")) {
+ GPR_ASSERT(id == 0);
+ gpr_asprintf(&name, "TRANSPORT:%s", var);
+ } else if (0 == strcmp(context, "s")) {
+ GPR_ASSERT(id != 0);
+ gpr_asprintf(&name, "STREAM[%d]:%s", id, var);
+ } else {
+ gpr_asprintf(&name, "BAD_CONTEXT[%s][%d]:%s", context, id, var);
}
- return 0 == strcmp(a, b);
+ char *name_fld = gpr_leftpad(name, ' ', 64);
+ char *value;
+ gpr_asprintf(&value, "%" PRId64, val);
+ char *value_fld = gpr_leftpad(value, ' ', 8);
+ char *result;
+ gpr_asprintf(&result, "%s %s", name_fld, value_fld);
+ gpr_free(name);
+ gpr_free(name_fld);
+ gpr_free(value);
+ gpr_free(value_fld);
+ return result;
}
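
The rewritten trace formatter above builds a scope-qualified name
(TRANSPORT:var or STREAM[id]:var) and pads name and value into fixed-width
columns so trace lines align. Roughly the same formatting with only standard
C; the gpr_leftpad/gpr_asprintf helpers are replaced by snprintf here:

    #include <stdint.h>
    #include <stdio.h>

    static void format_flowctl_var(char *out, size_t n, uint32_t stream_id,
                                   const char *var, int64_t val) {
      char name[64];
      if (stream_id == 0)
        snprintf(name, sizeof(name), "TRANSPORT:%s", var);
      else
        snprintf(name, sizeof(name), "STREAM[%u]:%s", stream_id, var);
      snprintf(out, n, "%64s %8lld", name, (long long)val);  /* padded columns */
    }
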
void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
@@ -2339,26 +2244,18 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var1, const char *context2,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2) {
- char *scope1;
- char *scope2;
char *tmp_phase;
- char *tmp_scope1;
- char *label1 =
- format_flowctl_context_var(context1, var1, val1, stream_id, &scope1);
- char *label2 =
- format_flowctl_context_var(context2, var2, val2, stream_id, &scope2);
+ char *label1 = format_flowctl_context_var(context1, var1, val1, stream_id);
+ char *label2 = format_flowctl_context_var(context2, var2, val2, stream_id);
char *clisvr = is_client ? "client" : "server";
char *prefix;
tmp_phase = gpr_leftpad(phase, ' ', 8);
- tmp_scope1 = gpr_leftpad(scope1, ' ', 11);
- gpr_asprintf(&prefix, "FLOW %s: %s %s ", tmp_phase, clisvr, scope1);
+ gpr_asprintf(&prefix, "FLOW %s: %s ", tmp_phase, clisvr);
gpr_free(tmp_phase);
- gpr_free(tmp_scope1);
switch (op) {
case GRPC_CHTTP2_FLOWCTL_MOVE:
- GPR_ASSERT(samestr(scope1, scope2));
if (val2 != 0) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"%sMOVE %s <- %s giving %" PRId64, prefix, label1, label2,
@@ -2383,8 +2280,6 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
break;
}
- gpr_free(scope1);
- gpr_free(scope2);
gpr_free(label1);
gpr_free(label2);
gpr_free(prefix);
@@ -2421,10 +2316,11 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
gpr_slice_buffer *read_buffer) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
- REF_TRANSPORT(t, "reading_action"); /* matches unref inside reading_action */
+ GRPC_CHTTP2_REF_TRANSPORT(
+ t, "reading_action"); /* matches unref inside reading_action */
if (read_buffer != NULL) {
gpr_slice_buffer_move_into(read_buffer, &t->read_buffer);
gpr_free(read_buffer);
}
- reading_action(exec_ctx, t, GRPC_ERROR_NONE);
+ read_action_begin(exec_ctx, t, GRPC_ERROR_NONE);
}
diff --git a/src/core/ext/transport/chttp2/transport/frame.h b/src/core/ext/transport/chttp2/transport/frame.h
index 507aae4100..1e444a91fd 100644
--- a/src/core/ext/transport/chttp2/transport/frame.h
+++ b/src/core/ext/transport/chttp2/transport/frame.h
@@ -40,8 +40,8 @@
#include "src/core/lib/iomgr/error.h"
/* defined in internal.h */
-typedef struct grpc_chttp2_stream_parsing grpc_chttp2_stream_parsing;
-typedef struct grpc_chttp2_transport_parsing grpc_chttp2_transport_parsing;
+typedef struct grpc_chttp2_stream grpc_chttp2_stream;
+typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_FRAME_DATA 0
#define GRPC_CHTTP2_FRAME_HEADER 1
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index 9046fbc453..8668816930 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -51,16 +51,11 @@ grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
- grpc_byte_stream *bs;
- if (parser->parsing_frame) {
+ if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"),
- 1);
- }
- while (
- (bs = grpc_chttp2_incoming_frame_queue_pop(&parser->incoming_frames))) {
- grpc_byte_stream_destroy(exec_ctx, bs);
+ exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
}
+ GRPC_ERROR_UNREF(parser->error);
}
grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
@@ -145,22 +140,17 @@ void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
-grpc_error *grpc_chttp2_data_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_data_parser *p,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ gpr_slice slice) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_data_parser *p = parser;
uint32_t message_flags;
grpc_chttp2_incoming_byte_stream *incoming_byte_stream;
char *msg;
- if (is_last && p->is_last_frame) {
- stream_parsing->received_close = 1;
- }
-
if (cur == end) {
return GRPC_ERROR_NONE;
}
@@ -171,7 +161,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
return GRPC_ERROR_REF(p->error);
fh_0:
case GRPC_CHTTP2_DATA_FH_0:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_type = *cur;
switch (p->frame_type) {
case 0:
@@ -184,7 +174,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
p->error = GRPC_ERROR_CREATE(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
- (intptr_t)stream_parsing->id);
+ (intptr_t)s->id);
gpr_free(msg);
msg = gpr_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
p->error =
@@ -201,7 +191,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_1:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size = ((uint32_t)*cur) << 24;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_2;
@@ -209,7 +199,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_2:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 16;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_3;
@@ -217,7 +207,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_3:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
p->state = GRPC_CHTTP2_DATA_FH_4;
@@ -225,7 +215,7 @@ grpc_error *grpc_chttp2_data_parser_parse(
}
/* fallthrough */
case GRPC_CHTTP2_DATA_FH_4:
- stream_parsing->stats.incoming.framing_bytes++;
+ s->stats.incoming.framing_bytes++;
p->frame_size |= ((uint32_t)*cur);
p->state = GRPC_CHTTP2_DATA_FRAME;
++cur;
@@ -234,35 +224,32 @@ grpc_error *grpc_chttp2_data_parser_parse(
message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
p->parsing_frame = incoming_byte_stream =
- grpc_chttp2_incoming_byte_stream_create(
- exec_ctx, transport_parsing, stream_parsing, p->frame_size,
- message_flags, &p->incoming_frames);
+ grpc_chttp2_incoming_byte_stream_create(exec_ctx, t, s, p->frame_size,
+ message_flags);
/* fallthrough */
case GRPC_CHTTP2_DATA_FRAME:
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
if (cur == end) {
return GRPC_ERROR_NONE;
}
uint32_t remaining = (uint32_t)(end - cur);
if (remaining == p->frame_size) {
- stream_parsing->stats.incoming.data_bytes += p->frame_size;
+ s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
- GRPC_ERROR_NONE, 1);
+ GRPC_ERROR_NONE);
p->parsing_frame = NULL;
p->state = GRPC_CHTTP2_DATA_FH_0;
return GRPC_ERROR_NONE;
} else if (remaining > p->frame_size) {
- stream_parsing->stats.incoming.data_bytes += p->frame_size;
+ s->stats.incoming.data_bytes += p->frame_size;
grpc_chttp2_incoming_byte_stream_push(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg),
(size_t)(cur + p->frame_size - beg)));
grpc_chttp2_incoming_byte_stream_finished(exec_ctx, p->parsing_frame,
- GRPC_ERROR_NONE, 1);
+ GRPC_ERROR_NONE);
p->parsing_frame = NULL;
cur += p->frame_size;
goto fh_0; /* loop */
@@ -272,10 +259,25 @@ grpc_error *grpc_chttp2_data_parser_parse(
exec_ctx, p->parsing_frame,
gpr_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
p->frame_size -= remaining;
- stream_parsing->stats.incoming.data_bytes += remaining;
+ s->stats.incoming.data_bytes += remaining;
return GRPC_ERROR_NONE;
}
}
GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
}
+
+grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
+ grpc_chttp2_data_parser *p = parser;
+ grpc_error *error = parse_inner(exec_ctx, p, t, s, slice);
+
+ if (is_last && p->is_last_frame) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
+ GRPC_ERROR_NONE);
+ }
+
+ return error;
+}
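
The FH_0 through FH_4 states above accumulate the standard 5-byte gRPC message
prefix: one compression-flag byte followed by a big-endian 32-bit length.
Decoding it from a contiguous buffer looks like this (plain C, no gRPC types):

    #include <stdbool.h>
    #include <stdint.h>

    static bool parse_grpc_message_prefix(const uint8_t hdr[5], bool *compressed,
                                          uint32_t *length) {
      if (hdr[0] != 0 && hdr[0] != 1) return false;  /* bad frame type byte */
      *compressed = hdr[0] == 1;
      *length = ((uint32_t)hdr[1] << 24) | ((uint32_t)hdr[2] << 16) |
                ((uint32_t)hdr[3] << 8) | (uint32_t)hdr[4];
      return true;
    }
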
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index a21a7942b9..eb2d97d898 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -69,7 +69,6 @@ typedef struct {
grpc_error *error;
int is_frame_compressed;
- grpc_chttp2_incoming_frame_queue incoming_frames;
grpc_chttp2_incoming_byte_stream *parsing_frame;
} grpc_chttp2_data_parser;
@@ -92,10 +91,10 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
-grpc_error *grpc_chttp2_data_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, gpr_slice_buffer *inbuf,
uint32_t write_bytes, int is_eof,
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index 299e27ad70..33d2269169 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -67,10 +67,11 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_goaway_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -148,12 +149,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(
p->debug_pos += (uint32_t)(end - cur);
p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
if (is_last) {
- transport_parsing->goaway_received = 1;
- transport_parsing->goaway_last_stream_index = p->last_stream_id;
- gpr_slice_unref(transport_parsing->goaway_text);
- transport_parsing->goaway_error = (grpc_status_code)p->error_code;
- transport_parsing->goaway_text =
- gpr_slice_new(p->debug_data, p->debug_length, gpr_free);
+ grpc_chttp2_add_incoming_goaway(
+ exec_ctx, t, (uint32_t)p->error_code,
+ gpr_slice_new(p->debug_data, p->debug_length, gpr_free));
p->debug_data = NULL;
}
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index eb4303405a..355104a5a7 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -65,10 +65,11 @@ void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
grpc_error *grpc_chttp2_goaway_parser_begin_frame(
grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_goaway_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
gpr_slice debug_data,
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 1f814ab1bd..624f42649d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -73,10 +73,10 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_ping_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -91,10 +91,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
- grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
+ grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
- gpr_slice_buffer_add(&transport_parsing->qbuf,
+ gpr_slice_buffer_add(&t->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));
+ grpc_chttp2_initiate_write(exec_ctx, t, false, "ping response");
}
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index 5a8723421c..2071f647fb 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -48,9 +48,9 @@ gpr_slice grpc_chttp2_ping_create(uint8_t ack, uint8_t *opaque_8bytes);
grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_ping_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index e3a3c9e4a7..9eac050797 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -39,6 +39,8 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/transport/chttp2/transport/frame.h"
+#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
+#include "src/core/ext/transport/chttp2/transport/status_conversion.h"
gpr_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
grpc_transport_one_way_stats *stats) {
@@ -83,10 +85,11 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_rst_stream_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -97,19 +100,28 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(
cur++;
p->byte++;
}
- stream_parsing->stats.incoming.framing_bytes += (uint64_t)(end - cur);
+ s->stats.incoming.framing_bytes += (uint64_t)(end - cur);
if (p->byte == 4) {
GPR_ASSERT(is_last);
- stream_parsing->received_close = 1;
- if (stream_parsing->forced_close_error == GRPC_ERROR_NONE) {
- stream_parsing->forced_close_error = grpc_error_set_int(
- GRPC_ERROR_CREATE("RST_STREAM"), GRPC_ERROR_INT_HTTP2_ERROR,
- (intptr_t)((((uint32_t)p->reason_bytes[0]) << 24) |
- (((uint32_t)p->reason_bytes[1]) << 16) |
- (((uint32_t)p->reason_bytes[2]) << 8) |
- (((uint32_t)p->reason_bytes[3]))));
+ uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) |
+ (((uint32_t)p->reason_bytes[1]) << 16) |
+ (((uint32_t)p->reason_bytes[2]) << 8) |
+ (((uint32_t)p->reason_bytes[3]));
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (reason != GRPC_CHTTP2_NO_ERROR) {
+ error = grpc_error_set_int(GRPC_ERROR_CREATE("RST_STREAM"),
+ GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
+ grpc_status_code status_code = grpc_chttp2_http2_error_to_grpc_status(
+ (grpc_chttp2_error_code)reason, s->deadline);
+ char *status_details;
+ gpr_asprintf(&status_details, "Received RST_STREAM with error code %d",
+ reason);
+ gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
+ gpr_free(status_details);
+ grpc_chttp2_fake_status(exec_ctx, t, s, status_code, &slice_details);
}
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
}
return GRPC_ERROR_NONE;
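
The reason handled above is the RST_STREAM payload: four network-order bytes
forming an HTTP/2 error code, where 0 (NO_ERROR) closes the stream without
synthesizing a failed status. Reassembling it in isolation:

    #include <stdint.h>

    static uint32_t rst_stream_reason(const uint8_t b[4]) {
      return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
             ((uint32_t)b[2] << 8) | (uint32_t)b[3];
    }
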
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index 11cf94f3ea..5a1f578a29 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -49,9 +49,10 @@ gpr_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_rst_stream_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 04b96c4cd9..92022f90c9 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -143,10 +143,10 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
-grpc_error *grpc_chttp2_settings_parser_parse(
- grpc_exec_ctx *exec_ctx, void *p,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
const uint8_t *cur = GPR_SLICE_START_PTR(slice);
const uint8_t *end = GPR_SLICE_END_PTR(slice);
@@ -162,11 +162,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
if (cur == end) {
parser->state = GRPC_CHTTP2_SPS_ID0;
if (is_last) {
- transport_parsing->settings_updated = 1;
memcpy(parser->target_settings, parser->incoming_settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
- gpr_slice_buffer_add(&transport_parsing->qbuf,
- grpc_chttp2_settings_ack_create());
+ gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
}
return GRPC_ERROR_NONE;
}
@@ -226,9 +224,9 @@ grpc_error *grpc_chttp2_settings_parser_parse(
break;
case GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE:
grpc_chttp2_goaway_append(
- transport_parsing->last_incoming_stream_id, sp->error_value,
+ t->last_new_stream_id, sp->error_value,
gpr_slice_from_static_string("HTTP2 settings error"),
- &transport_parsing->qbuf);
+ &t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
grpc_error *err = GRPC_ERROR_CREATE(msg);
@@ -238,18 +236,17 @@ grpc_error *grpc_chttp2_settings_parser_parse(
}
if (parser->id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[parser->id] != parser->value) {
- transport_parsing->initial_window_update =
+ t->initial_window_update =
(int64_t)parser->value - parser->incoming_settings[parser->id];
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "adding %d for initial_window change",
- (int)transport_parsing->initial_window_update);
+ (int)t->initial_window_update);
}
}
parser->incoming_settings[parser->id] = parser->value;
if (grpc_http_trace) {
gpr_log(GPR_DEBUG, "CHTTP2:%s: got setting %d = %d",
- transport_parsing->is_client ? "CLI" : "SVR", parser->id,
- parser->value);
+ t->is_client ? "CLI" : "SVR", parser->id, parser->value);
}
} else if (grpc_http_trace) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index f654c598c8..4bfa944cf1 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -95,9 +95,10 @@ gpr_slice grpc_chttp2_settings_ack_create(void);
grpc_error *grpc_chttp2_settings_parser_begin_frame(
grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
uint32_t *settings);
-grpc_error *grpc_chttp2_settings_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 3cf848fd5c..418166a6df 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -80,9 +80,8 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
}
grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+ grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, gpr_slice slice, int is_last) {
uint8_t *const beg = GPR_SLICE_START_PTR(slice);
uint8_t *const end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -94,8 +93,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
p->byte++;
}
- if (stream_parsing != NULL) {
- stream_parsing->stats.incoming.framing_bytes += (uint32_t)(end - cur);
+ if (s != NULL) {
+ s->stats.incoming.framing_bytes += (uint32_t)(end - cur);
}
if (p->byte == 4) {
@@ -109,17 +108,26 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
- if (transport_parsing->incoming_stream_id != 0) {
- if (stream_parsing != NULL) {
- GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", transport_parsing,
- stream_parsing, outgoing_window,
+ if (t->incoming_stream_id != 0) {
+ if (s != NULL) {
+ bool was_zero = s->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_STREAM("parse", t, s, outgoing_window,
received_update);
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
+ bool is_zero = s->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, false,
+ "stream.read_flow_control");
+ }
}
} else {
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", transport_parsing,
- outgoing_window, received_update);
+ bool was_zero = t->outgoing_window <= 0;
+ GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parse", t, outgoing_window,
+ received_update);
+ bool is_zero = t->outgoing_window <= 0;
+ if (was_zero && !is_zero) {
+ grpc_chttp2_initiate_write(exec_ctx, t, false,
+ "new_global_flow_control");
+ }
}
}
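
Both branches above apply the same rule: a WINDOW_UPDATE only needs to trigger
new writes when it moves a window from exhausted to positive; otherwise the
credit is recorded silently. The rule in isolation, with invented names:

    #include <stdbool.h>
    #include <stdint.h>

    /* returns true when the credited window just became usable again */
    static bool credit_makes_writable(int64_t *window, uint32_t update) {
      bool was_zero = *window <= 0;
      *window += update;
      return was_zero && *window > 0;
    }
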
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index 1bcbbf9247..6e62f31872 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -51,8 +51,7 @@ gpr_slice grpc_chttp2_window_update_create(uint32_t id, uint32_t window_delta,
grpc_error *grpc_chttp2_window_update_parser_begin_frame(
grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+ grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 522455f7dc..8180f78fc0 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -78,69 +78,96 @@ typedef enum {
a set of indirect jumps, and so not waste stack space. */
/* forward declarations for parsing states */
-static grpc_error *parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end, grpc_error *error);
-static grpc_error *still_parse_error(grpc_chttp2_hpack_parser *p,
+static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_illegal_op(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_key_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
static grpc_error *parse_value_string_with_indexed_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
static grpc_error *parse_value_string_with_literal_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end);
-static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end);
-static grpc_error *parse_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end);
/* we translate the first byte of a hpack field into one of these decoding
@@ -639,8 +666,8 @@ static const uint8_t inverse_base64[256] = {
};
/* emission helpers */
-static grpc_error *on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
- int add_to_table) {
+static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
+ grpc_mdelem *md, int add_to_table) {
if (add_to_table) {
grpc_error *err = grpc_chttp2_hptbl_add(&p->table, md);
if (err != GRPC_ERROR_NONE) return err;
@@ -649,7 +676,7 @@ static grpc_error *on_hdr(grpc_chttp2_hpack_parser *p, grpc_mdelem *md,
GRPC_MDELEM_UNREF(md);
return GRPC_ERROR_CREATE("on_header callback not set");
}
- p->on_header(p->on_header_user_data, md);
+ p->on_header(exec_ctx, p->on_header_user_data, md);
return GRPC_ERROR_NONE;
}
@@ -661,78 +688,86 @@ static grpc_mdstr *take_string(grpc_chttp2_hpack_parser *p,
}
/* jump to the next state */
-static grpc_error *parse_next(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_next(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
p->state = *p->next_state++;
- return p->state(p, cur, end);
+ return p->state(exec_ctx, p, cur, end);
}
/* begin parsing a header: all functionality is encoded into lookup tables
above */
-static grpc_error *parse_begin(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_begin;
return GRPC_ERROR_NONE;
}
- return first_byte_action[first_byte_lut[*cur]](p, cur, end);
+ return first_byte_action[first_byte_lut[*cur]](exec_ctx, p, cur, end);
}
/* stream dependency and prioritization data: we just skip it */
-static grpc_error *parse_stream_weight(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_weight(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_weight;
return GRPC_ERROR_NONE;
}
- return p->after_prioritization(p, cur + 1, end);
+ return p->after_prioritization(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep3(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep3;
return GRPC_ERROR_NONE;
}
- return parse_stream_weight(p, cur + 1, end);
+ return parse_stream_weight(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep2(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep2;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep3(p, cur + 1, end);
+ return parse_stream_dep3(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep1(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep1;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep2(p, cur + 1, end);
+ return parse_stream_dep2(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep0(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_stream_dep0;
return GRPC_ERROR_NONE;
}
- return parse_stream_dep1(p, cur + 1, end);
+ return parse_stream_dep1(exec_ctx, p, cur + 1, end);
}
/* emit an indexed field; for now just logs it to console; jumps to
begin the next field on completion */
-static grpc_error *finish_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
@@ -743,21 +778,23 @@ static grpc_error *finish_indexed_field(grpc_chttp2_hpack_parser *p,
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
GRPC_MDELEM_REF(md);
- grpc_error *err = on_hdr(p, md, 0);
+ grpc_error *err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
- return parse_begin(p, cur, end);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse an indexed field with index < 127 */
-static grpc_error *parse_indexed_field(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
- return finish_indexed_field(p, cur + 1, end);
+ return finish_indexed_field(exec_ctx, p, cur + 1, end);
}
/* parse an indexed field with index >= 127 */
-static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -766,49 +803,53 @@ static grpc_error *parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x7f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
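
A worked example of the split between these two entry points (drawn from RFC 7541 §6.1; illustrative only, not part of the patch):

/* 0x82 = 1000_0010 -> indexed header field, index = 0x82 & 0x7f = 2,
   the static-table entry ":method: GET"; handled entirely by
   parse_indexed_field + finish_indexed_field.
   0xff 0x1a        -> the 7-bit prefix is saturated, so parse_indexed_field_x
   seeds p->index = 0x7f and the varint chain adds the continuation byte:
   index = 0x7f + 0x1a = 153, after which finish_indexed_field runs. */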
/* finish a literal header with incremental indexing: emit the header, add it
   to the table, and jump back to parse_begin */
-static grpc_error *finish_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 1);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 1);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header with incremental indexing with no index */
-static grpc_error *finish_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 1);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 1);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header with incremental indexing; index < 63 */
-static grpc_error *parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0x3f;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header with incremental indexing; index >= 63 */
-static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -818,11 +859,12 @@ static grpc_error *parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x3f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header with incremental indexing; index = 0 */
-static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -830,48 +872,52 @@ static grpc_error *parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
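
A worked example for the index == 0 path (RFC 7541 §6.2.1, Appendix C.2.1; illustrative only, not part of the patch):

/* "custom-key: custom-header" with incremental indexing is encoded as
     40 0a 63 75 73 74 6f 6d 2d 6b 65 79  0d 63 75 73 74 6f 6d 2d 68 65 61 64 65 72
   0x40 has pattern 01xxxxxx with index 0, so parse_lithdr_incidx_v runs:
   the key (length 0x0a) and value (length 0x0d) are read as literal strings
   via parse_string_prefix/parse_string, and finish_lithdr_incidx_v finally
   calls on_hdr(..., add_to_table = 1) to insert the pair into the dynamic
   table. */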
/* finish a literal header without incremental indexing */
-static grpc_error *finish_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header without incremental indexing with index = 0 */
-static grpc_error *finish_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header without incremental indexing; index < 15 */
-static grpc_error *parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header without incremental indexing; index >= 15 */
-static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -881,11 +927,12 @@ static grpc_error *parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header without incremental indexing; index == 0 */
-static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -893,48 +940,52 @@ static grpc_error *parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* finish a literal header that is never indexed */
-static grpc_error *finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* finish a literal header that is never indexed with an extra value */
-static grpc_error *finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
- grpc_error *err =
- on_hdr(p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
- take_string(p, &p->value)),
- 0);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ grpc_error *err = on_hdr(
+ exec_ctx, p, grpc_mdelem_from_metadata_strings(take_string(p, &p->key),
+ take_string(p, &p->value)),
+ 0);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a literal header that is never indexed; index < 15 */
-static grpc_error *parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* parse a literal header that is never indexed; index >= 15 */
-static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -944,11 +995,12 @@ static grpc_error *parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* parse a literal header that is never indexed; index == 0 */
-static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -956,44 +1008,47 @@ static grpc_error *parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
- return parse_string_prefix(p, cur + 1, end);
+ return parse_string_prefix(exec_ctx, p, cur + 1, end);
}
/* finish parsing a max table size change */
-static grpc_error *finish_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *finish_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (grpc_http_trace) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
}
grpc_error *err =
grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_begin(p, cur, end);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_begin(exec_ctx, p, cur, end);
}
/* parse a max table size change, max size < 15 */
-static grpc_error *parse_max_tbl_size(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
p->index = (*cur) & 0x1f;
- return finish_max_tbl_size(p, cur + 1, end);
+ return finish_max_tbl_size(exec_ctx, p, cur + 1, end);
}
/* parse a max table size change, max size >= 15 */
-static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE(
"More than two max table size changes in a single frame"));
}
@@ -1001,11 +1056,12 @@ static grpc_error *parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
p->next_state = and_then;
p->index = 0x1f;
p->parsing.value = &p->index;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
}
/* a parse error: jam the parse state into parse_error, and return error */
-static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end, grpc_error *err) {
GPR_ASSERT(err != GRPC_ERROR_NONE);
if (p->last_error == GRPC_ERROR_NONE) {
@@ -1015,24 +1071,27 @@ static grpc_error *parse_error(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
return err;
}
-static grpc_error *still_parse_error(grpc_chttp2_hpack_parser *p,
+static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
return GRPC_ERROR_REF(p->last_error);
}
-static grpc_error *parse_illegal_op(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
GPR_ASSERT(cur != end);
char *msg;
gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
/* parse the 1st byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value0;
@@ -1042,15 +1101,16 @@ static grpc_error *parse_value0(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (*cur) & 0x7f;
if ((*cur) & 0x80) {
- return parse_value1(p, cur + 1, end);
+ return parse_value1(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
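
To make the hand-off between the prefix parsers above and this varint chain concrete, here is the integer example from RFC 7541 §C.1.2 (decoding 1337 with a 5-bit prefix, as a dynamic table size update would use); illustrative only, not part of the patch:

/* input bytes: 0x3f 0x9a 0x0a
   0x3f: prefix 001xxxxx with all prefix bits set -> parse_max_tbl_size_x
         seeds *p->parsing.value = 0x1f (31) and calls parse_value0
   0x9a: parse_value0 adds 0x9a & 0x7f = 26      -> value = 57; top bit set,
         so continue with parse_value1
   0x0a: parse_value1 adds (0x0a & 0x7f) << 7    -> value = 57 + 1280 = 1337;
         top bit clear, so parse_next jumps to finish_max_tbl_size */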
/* parse the 2nd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value1;
@@ -1060,15 +1120,16 @@ static grpc_error *parse_value1(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 7;
if ((*cur) & 0x80) {
- return parse_value2(p, cur + 1, end);
+ return parse_value2(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 3rd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value2;
@@ -1078,15 +1139,16 @@ static grpc_error *parse_value2(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 14;
if ((*cur) & 0x80) {
- return parse_value3(p, cur + 1, end);
+ return parse_value3(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 4th byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
if (cur == end) {
p->state = parse_value3;
@@ -1096,15 +1158,16 @@ static grpc_error *parse_value3(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value += (((uint32_t)*cur) & 0x7f) << 21;
if ((*cur) & 0x80) {
- return parse_value4(p, cur + 1, end);
+ return parse_value4(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
/* parse the 5th byte of a varint into p->parsing.value
depending on the byte, we may overflow, and care must be taken */
-static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
uint8_t c;
uint32_t cur_value;
@@ -1130,9 +1193,9 @@ static grpc_error *parse_value4(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
*p->parsing.value = cur_value + add_value;
if ((*cur) & 0x80) {
- return parse_value5up(p, cur + 1, end);
+ return parse_value5up(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
error:
@@ -1142,13 +1205,14 @@ error:
*p->parsing.value, *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
/* parse any trailing bytes in a varint: it's possible to append an arbitrary
number of 0x80's and not affect the value - a zero will terminate - and
anything else will overflow */
-static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
while (cur != end && *cur == 0x80) {
++cur;
@@ -1160,7 +1224,7 @@ static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
}
if (*cur == 0) {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
char *msg;
@@ -1170,11 +1234,12 @@ static grpc_error *parse_value5up(grpc_chttp2_hpack_parser *p,
*p->parsing.value, *cur);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
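
Concretely (illustrative only, not part of the patch):

/* After the fifth byte the 32-bit value is already full, so the only bytes
   this state accepts are 0x80 padding (each contributes zero) and a
   terminating 0x00: the sequence 0x80 0x80 0x00 leaves the value unchanged,
   while any other byte is rejected via parse_error as a varint overflow. */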
/* parse a string prefix */
-static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (cur == end) {
p->state = parse_string_prefix;
@@ -1185,9 +1250,9 @@ static grpc_error *parse_string_prefix(grpc_chttp2_hpack_parser *p,
p->huff = (*cur) >> 7;
if (p->strlen == 0x7f) {
p->parsing.value = &p->strlen;
- return parse_value0(p, cur + 1, end);
+ return parse_value0(exec_ctx, p, cur + 1, end);
} else {
- return parse_next(p, cur + 1, end);
+ return parse_next(exec_ctx, p, cur + 1, end);
}
}
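
A few illustrative prefix bytes for this state (RFC 7541 §5.2; not part of the patch):

/* 0x0a       -> huff = 0, strlen = 10: ten raw octets follow
   0x8a       -> huff = 1, strlen = 10: ten Huffman-coded octets follow
   0xff 0x2d  -> huff = 1, the 7-bit length field is saturated, so strlen
                 starts at 0x7f and parse_value0 adds 0x2d, giving a
                 172-octet Huffman-coded string. */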
@@ -1205,7 +1270,8 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
str->length += (uint32_t)length;
}
-static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
grpc_chttp2_hpack_parser_string *str = p->parsing.str;
uint32_t bits;
@@ -1223,7 +1289,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte0;
@@ -1238,7 +1304,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte1;
@@ -1253,7 +1319,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte2;
@@ -1268,7 +1334,7 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(p, cur, end,
+ return parse_error(exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("Illegal base64 character"));
else if (bits == 64)
goto b64_byte3;
@@ -1281,11 +1347,12 @@ static grpc_error *append_string(grpc_chttp2_hpack_parser *p,
goto b64_byte0;
}
GPR_UNREACHABLE_CODE(return parse_error(
- p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
+ exec_ctx, p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
}
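
A worked example for the base64 path used by binary metadata values (illustrative only, not part of the patch):

/* decoding the group "TWFu":
     b64_byte0..b64_byte3 accumulate 6 bits per character:
       'T' = 19, 'W' = 22, 'F' = 5, 'u' = 46
     bits = (19 << 18) | (22 << 12) | (5 << 6) | 46 = 0x4d616e
     append_bytes then emits the three octets 0x4d 0x61 0x6e ("Man"). */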
/* append a null terminator to a string */
-static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
uint8_t terminator = 0;
uint8_t decoded[2];
@@ -1298,7 +1365,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
break;
case B64_BYTE1:
return parse_error(
- p, cur, end,
+ exec_ctx, p, cur, end,
GRPC_ERROR_CREATE("illegal base64 encoding")); /* illegal encoding */
case B64_BYTE2:
bits = p->base64_buffer;
@@ -1308,7 +1375,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
bits & 0xffff);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
decoded[0] = (uint8_t)(bits >> 16);
append_bytes(str, decoded, 1);
@@ -1321,7 +1388,7 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
bits & 0xff);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
- return parse_error(p, cur, end, err);
+ return parse_error(exec_ctx, p, cur, end, err);
}
decoded[0] = (uint8_t)(bits >> 16);
decoded[1] = (uint8_t)(bits >> 8);
@@ -1334,13 +1401,14 @@ static grpc_error *finish_str(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
}
/* decode a nibble from a huffman encoded stream */
-static grpc_error *huff_nibble(grpc_chttp2_hpack_parser *p, uint8_t nibble) {
+static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, uint8_t nibble) {
int16_t emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
if (emit != -1) {
if (emit >= 0 && emit < 256) {
uint8_t c = (uint8_t)emit;
- grpc_error *err = append_string(p, &c, (&c) + 1);
+ grpc_error *err = append_string(exec_ctx, p, &c, (&c) + 1);
if (err != GRPC_ERROR_NONE) return err;
} else {
assert(emit == 256);
@@ -1351,42 +1419,45 @@ static grpc_error *huff_nibble(grpc_chttp2_hpack_parser *p, uint8_t nibble) {
}
/* decode full bytes from a huffman encoded stream */
-static grpc_error *add_huff_bytes(grpc_chttp2_hpack_parser *p,
+static grpc_error *add_huff_bytes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
for (; cur != end; ++cur) {
- grpc_error *err = huff_nibble(p, *cur >> 4);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- err = huff_nibble(p, *cur & 0xf);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+ grpc_error *err = huff_nibble(exec_ctx, p, *cur >> 4);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ err = huff_nibble(exec_ctx, p, *cur & 0xf);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
}
return GRPC_ERROR_NONE;
}
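
For orientation (descriptive of the code above, not new behaviour): the Huffman decoder is a nibble-at-a-time table-driven automaton rather than a bit-by-bit tree walk.

/* Each input byte is fed to huff_nibble twice (high nibble, then low nibble).
   For a given (huff_state, nibble) pair the emit_tbl/emit_sub_tbl lookup says
   whether a character is complete (0 <= emit < 256, appended via
   append_string) or end-of-string has been reached (emit == 256), and
   next_tbl/next_sub_tbl gives the successor state, so decoding costs two
   table lookups per nibble. */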
/* decode some string bytes based on the current decoding mode
(huffman or not) */
-static grpc_error *add_str_bytes(grpc_chttp2_hpack_parser *p,
+static grpc_error *add_str_bytes(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
if (p->huff) {
- return add_huff_bytes(p, cur, end);
+ return add_huff_bytes(exec_ctx, p, cur, end);
} else {
- return append_string(p, cur, end);
+ return append_string(exec_ctx, p, cur, end);
}
}
/* parse a string - tries to do large chunks at a time */
-static grpc_error *parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+static grpc_error *parse_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p, const uint8_t *cur,
const uint8_t *end) {
size_t remaining = p->strlen - p->strgot;
size_t given = (size_t)(end - cur);
if (remaining <= given) {
- grpc_error *err = add_str_bytes(p, cur, cur + remaining);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- err = finish_str(p, cur + remaining, end);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_next(p, cur + remaining, end);
+ grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + remaining);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ err = finish_str(exec_ctx, p, cur + remaining, end);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_next(exec_ctx, p, cur + remaining, end);
} else {
- grpc_error *err = add_str_bytes(p, cur, cur + given);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+ grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + given);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
GPR_ASSERT(given <= UINT32_MAX - p->strgot);
p->strgot += (uint32_t)given;
p->state = parse_string;
@@ -1395,7 +1466,8 @@ static grpc_error *parse_string(grpc_chttp2_hpack_parser *p, const uint8_t *cur,
}
/* begin parsing a string - performs setup, calls parse_string */
-static grpc_error *begin_parse_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end,
uint8_t binary,
grpc_chttp2_hpack_parser_string *str) {
@@ -1404,13 +1476,14 @@ static grpc_error *begin_parse_string(grpc_chttp2_hpack_parser *p,
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
- return parse_string(p, cur, end);
+ return parse_string(exec_ctx, p, cur, end);
}
/* parse the key string */
-static grpc_error *parse_key_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end) {
- return begin_parse_string(p, cur, end, NOT_BINARY, &p->key);
+ return begin_parse_string(exec_ctx, p, cur, end, NOT_BINARY, &p->key);
}
/* check if a key represents a binary header or not */
@@ -1435,24 +1508,27 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
}
/* parse the value string */
-static grpc_error *parse_value_string(grpc_chttp2_hpack_parser *p,
+static grpc_error *parse_value_string(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *cur, const uint8_t *end,
bool is_binary) {
- return begin_parse_string(p, cur, end, is_binary ? B64_BYTE0 : NOT_BINARY,
- &p->value);
+ return begin_parse_string(exec_ctx, p, cur, end,
+ is_binary ? B64_BYTE0 : NOT_BINARY, &p->value);
}
static grpc_error *parse_value_string_with_indexed_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end) {
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
bool is_binary = false;
grpc_error *err = is_binary_indexed_header(p, &is_binary);
- if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
- return parse_value_string(p, cur, end, is_binary);
+ if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+ return parse_value_string(exec_ctx, p, cur, end, is_binary);
}
static grpc_error *parse_value_string_with_literal_key(
- grpc_chttp2_hpack_parser *p, const uint8_t *cur, const uint8_t *end) {
- return parse_value_string(p, cur, end, is_binary_literal_header(p));
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
+ const uint8_t *end) {
+ return parse_value_string(exec_ctx, p, cur, end, is_binary_literal_header(p));
}
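
For background, a sketch under the assumption that is_binary_indexed_header/is_binary_literal_header follow gRPC's usual "-bin" convention (the helper name below is hypothetical): keys ending in "-bin" carry binary values that travel base64-coded, which is why parse_value_string starts the string parser in B64_BYTE0 mode for binary keys and NOT_BINARY otherwise.

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-alone version of the suffix test; illustrative only. */
static bool key_is_binary(const char *key, size_t key_length) {
  return key_length > 4 && memcmp(key + key_length - 4, "-bin", 4) == 0;
}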
/* PUBLIC INTERFACE */
@@ -1484,27 +1560,36 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p) {
gpr_free(p->value.str);
}
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
+grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end) {
/* TODO(ctiller): limit the distance of end from beg, and perform multiple
steps in the event of a large chunk of data to limit
stack space usage when no tail call optimization is
available */
- return p->state(p, beg, end);
+ return p->state(exec_ctx, p, beg, end);
}
-grpc_error *grpc_chttp2_header_parser_parse(
- grpc_exec_ctx *exec_ctx, void *hpack_parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last) {
+typedef void (*maybe_complete_func_type)(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+static const maybe_complete_func_type maybe_complete_funcs[] = {
+ grpc_chttp2_maybe_complete_recv_initial_metadata,
+ grpc_chttp2_maybe_complete_recv_trailing_metadata};
+
+grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *hpack_parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
- if (stream_parsing != NULL) {
- stream_parsing->stats.incoming.header_bytes += GPR_SLICE_LENGTH(slice);
+ if (s != NULL) {
+ s->stats.incoming.header_bytes += GPR_SLICE_LENGTH(slice);
}
grpc_error *error = grpc_chttp2_hpack_parser_parse(
- parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
+ exec_ctx, parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
if (error != GRPC_ERROR_NONE) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return error;
@@ -1517,20 +1602,19 @@ grpc_error *grpc_chttp2_header_parser_parse(
}
/* need to check for null stream: this can occur if we receive an invalid
stream id on a header */
- if (stream_parsing != NULL) {
+ if (s != NULL) {
if (parser->is_boundary) {
- if (stream_parsing->header_frames_received ==
- GPR_ARRAY_SIZE(stream_parsing->got_metadata_on_parse)) {
+ if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
return GRPC_ERROR_CREATE("Too many trailer frames");
}
- stream_parsing
- ->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
- stream_parsing->header_frames_received++;
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
+ s->published_metadata[s->header_frames_received] =
+ GRPC_METADATA_PUBLISHED_FROM_WIRE;
+ maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s);
+ s->header_frames_received++;
}
if (parser->is_eof) {
- stream_parsing->received_close = 1;
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
+ GRPC_ERROR_NONE);
}
}
parser->on_header = NULL;
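
To spell out the indexing used just above (descriptive of this hunk, not new behaviour): maybe_complete_funcs is indexed by how many HEADERS boundaries the stream has already seen.

/* s->header_frames_received == 0 -> the first boundary publishes initial
      metadata via grpc_chttp2_maybe_complete_recv_initial_metadata
   s->header_frames_received == 1 -> the second boundary publishes trailing
      metadata via grpc_chttp2_maybe_complete_recv_trailing_metadata
   a third HEADERS boundary is rejected earlier with
      GRPC_ERROR_CREATE("Too many trailer frames"). */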
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 78eb38db5e..0290c78d5a 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -45,7 +45,8 @@
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
- grpc_chttp2_hpack_parser *p, const uint8_t *beg, const uint8_t *end);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
+ const uint8_t *end);
typedef struct {
char *str;
@@ -55,7 +56,7 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
- void (*on_header)(void *user_data, grpc_mdelem *md);
+ void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem *md);
void *on_header_user_data;
grpc_error *last_error;
@@ -104,15 +105,17 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser *p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
/* returns GRPC_ERROR_NONE on success, or an error describing the parse
   failure */
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser *p,
+grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_parser *p,
const uint8_t *beg,
const uint8_t *end);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
-grpc_error *grpc_chttp2_header_parser_parse(
- grpc_exec_ctx *exec_ctx, void *hpack_parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
+grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
+ void *hpack_parser,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ gpr_slice slice, int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
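
An illustrative caller for the exec_ctx-threaded entry point. This is a sketch only: it assumes the pre-existing grpc_chttp2_hpack_parser_init signature, reuses the hypothetical my_on_header callback sketched earlier, and `slice` is assumed to be a gpr_slice provided by the caller.

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_chttp2_hpack_parser parser;
grpc_chttp2_hpack_parser_init(&parser);
parser.on_header = my_on_header;          /* hypothetical callback from above */
parser.on_header_user_data = NULL;
grpc_error *err = grpc_chttp2_hpack_parser_parse(
    &exec_ctx, &parser, GPR_SLICE_START_PTR(slice), GPR_SLICE_END_PTR(slice));
if (err != GRPC_ERROR_NONE) {
  /* surface or log the error, then release it */
  GRPC_ERROR_UNREF(err);
}
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_exec_ctx_finish(&exec_ctx);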
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 04b788b702..e0c4a1e925 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -53,31 +53,25 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/transport_impl.h"
-typedef struct grpc_chttp2_transport grpc_chttp2_transport;
-typedef struct grpc_chttp2_stream grpc_chttp2_stream;
-
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
- GRPC_CHTTP2_LIST_ALL_STREAMS,
- GRPC_CHTTP2_LIST_CHECK_READ_OPS,
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE,
GRPC_CHTTP2_LIST_WRITABLE,
GRPC_CHTTP2_LIST_WRITING,
- GRPC_CHTTP2_LIST_WRITTEN,
- GRPC_CHTTP2_LIST_PARSING_SEEN,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING,
GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT,
- /* streams waiting for the outgoing window in the writing path, they will be
- * merged to the stalled list or writable list under transport lock. */
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT,
/** streams that are waiting to start because there are too many concurrent
streams on the connection */
GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
STREAM_LIST_COUNT /* must be last */
} grpc_chttp2_stream_list_id;
+typedef enum {
+ GRPC_CHTTP2_WRITE_STATE_IDLE,
+ GRPC_CHTTP2_WRITE_STATE_WRITING,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+ GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER,
+} grpc_chttp2_write_state;
+
/* deframer state for the overall http2 stream of bytes */
typedef enum {
/* prefix: one entry per http2 connection prefix byte */
@@ -144,6 +138,12 @@ typedef enum {
GRPC_NUM_SETTING_SETS
} grpc_chttp2_setting_set;
+typedef enum {
+ GRPC_CHTTP2_NO_GOAWAY_SEND,
+ GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED,
+ GRPC_CHTTP2_GOAWAY_SENT,
+} grpc_chttp2_sent_goaway_state;
+
/* Outstanding ping request data */
typedef struct grpc_chttp2_outstanding_ping {
uint8_t id[8];
@@ -152,6 +152,12 @@ typedef struct grpc_chttp2_outstanding_ping {
struct grpc_chttp2_outstanding_ping *prev;
} grpc_chttp2_outstanding_ping;
+typedef struct grpc_chttp2_write_cb {
+ int64_t call_at_byte;
+ grpc_closure *closure;
+ struct grpc_chttp2_write_cb *next;
+} grpc_chttp2_write_cb;
+
/* forward declared in frame_data.h */
struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
@@ -161,12 +167,13 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_chttp2_transport *transport;
grpc_chttp2_stream *stream;
- int is_tail;
+ bool is_tail;
gpr_mu slice_mu; // protects slices, on_next
gpr_slice_buffer slices;
grpc_closure *on_next;
gpr_slice *next;
+ uint32_t remaining_bytes;
struct {
grpc_closure closure;
@@ -178,12 +185,68 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_closure finished_action;
};
-typedef struct {
+struct grpc_chttp2_transport {
+ grpc_transport base; /* must be first */
+ gpr_refcount refs;
+ grpc_endpoint *ep;
+ char *peer_string;
+
+ grpc_combiner *combiner;
+
+ /** write execution state of the transport */
+ grpc_chttp2_write_state write_state;
+
+ /** is the transport destroying itself? */
+ uint8_t destroying;
+ /** has the upper layer closed the transport? */
+ uint8_t closed;
+
+ /** is there a read request to the endpoint outstanding? */
+ uint8_t endpoint_reading;
+
+ /** various lists of streams */
+ grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
+
+ /** maps stream id to grpc_chttp2_stream objects */
+ grpc_chttp2_stream_map stream_map;
+
+ grpc_closure write_action_begin_locked;
+ grpc_closure write_action;
+ grpc_closure write_action_end;
+ grpc_closure write_action_end_locked;
+
+ grpc_closure read_action_begin;
+ grpc_closure read_action_locked;
+
+ /** incoming read bytes */
+ gpr_slice_buffer read_buffer;
+
+ /** address to place a newly accepted stream - set and unset by
+ grpc_chttp2_parsing_accept_stream; used by init_stream to
+ publish the accepted server stream */
+ grpc_chttp2_stream **accepting_stream;
+
+ struct {
+ /* accept stream callback */
+ void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
+ grpc_transport *transport, const void *server_data);
+ void *accept_stream_user_data;
+
+ /** connectivity tracking */
+ grpc_connectivity_state_tracker state_tracker;
+ } channel_callback;
+
+ /** data to write now */
+ gpr_slice_buffer outbuf;
+ /** hpack encoding */
+ grpc_chttp2_hpack_compressor hpack_compressor;
+ int64_t outgoing_window;
+ /** is this a client? */
+ uint8_t is_client;
+
/** data to write next write */
gpr_slice_buffer qbuf;
- /** window available for us to send to peer */
- int64_t outgoing_window;
/** window available to announce to peer */
int64_t announce_incoming_window;
/** how much window would we like to have for incoming_window */
@@ -192,10 +255,8 @@ typedef struct {
/** have we seen a goaway */
uint8_t seen_goaway;
/** have we sent a goaway */
- uint8_t sent_goaway;
+ grpc_chttp2_sent_goaway_state sent_goaway_state;
- /** is this transport a client? */
- uint8_t is_client;
/** are the local settings dirty and need to be sent? */
uint8_t dirtied_local_settings;
/** have local settings been sent? */
@@ -212,49 +273,14 @@ typedef struct {
/** how far to lookahead in a stream? */
uint32_t stream_lookahead;
- /** last received stream id */
- uint32_t last_incoming_stream_id;
+ /** last new stream id */
+ uint32_t last_new_stream_id;
/** pings awaiting responses */
grpc_chttp2_outstanding_ping pings;
/** next payload for an outgoing ping */
uint64_t ping_counter;
- /** concurrent stream count: updated when not parsing,
- so this is a strict over-estimation on the client */
- uint32_t concurrent_stream_count;
-} grpc_chttp2_transport_global;
-
-typedef struct {
- /** data to write now */
- gpr_slice_buffer outbuf;
- /** hpack encoding */
- grpc_chttp2_hpack_compressor hpack_compressor;
- int64_t outgoing_window;
- /** is this a client? */
- uint8_t is_client;
- /** callback for when writing is done */
- grpc_closure done_cb;
- /** maximum frame size */
- uint32_t max_frame_size;
-} grpc_chttp2_transport_writing;
-
-struct grpc_chttp2_transport_parsing {
- /** is this transport a client? (boolean) */
- uint8_t is_client;
-
- /** were settings updated? */
- uint8_t settings_updated;
- /** was a settings ack received? */
- uint8_t settings_ack_received;
- /** was a goaway frame received? */
- uint8_t goaway_received;
-
- /** initial window change */
- int64_t initial_window_update;
-
- /** data to write later - after parsing */
- gpr_slice_buffer qbuf;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
/** simple one shot parsers */
@@ -267,13 +293,12 @@ struct grpc_chttp2_transport_parsing {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
+ /** initial window change */
+ int64_t initial_window_update;
+
/** window available for peer to send to us */
int64_t incoming_window;
- /** next stream id available at the time of beginning parsing */
- uint32_t next_stream_id;
- uint32_t last_incoming_stream_id;
-
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
uint8_t incoming_frame_type;
@@ -284,135 +309,54 @@ struct grpc_chttp2_transport_parsing {
uint32_t incoming_frame_size;
uint32_t incoming_stream_id;
- /* current max frame size */
- uint32_t max_frame_size;
-
/* active parser */
void *parser_data;
- grpc_chttp2_stream_parsing *incoming_stream;
+ grpc_chttp2_stream *incoming_stream;
grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last);
- /* received settings */
- uint32_t settings[GRPC_CHTTP2_NUM_SETTINGS];
- /* last settings that were sent */
- uint32_t last_sent_settings[GRPC_CHTTP2_NUM_SETTINGS];
-
/* goaway data */
grpc_status_code goaway_error;
uint32_t goaway_last_stream_index;
gpr_slice goaway_text;
- int64_t outgoing_window;
+ grpc_chttp2_write_cb *write_cb_pool;
+
+ /* if non-NULL, close the transport with this error when writes are finished
+ */
+ grpc_error *close_transport_on_writes_finished;
+
+ /* buffer pool state */
+ /** have we scheduled a benign cleanup? */
+ bool benign_reclaimer_registered;
+ /** have we scheduled a destructive cleanup? */
+ bool destructive_reclaimer_registered;
+ /** benign cleanup closure */
+ grpc_closure benign_reclaimer;
+ grpc_closure benign_reclaimer_locked;
+ /** destructive cleanup closure */
+ grpc_closure destructive_reclaimer;
+ grpc_closure destructive_reclaimer_locked;
};
typedef enum {
- /** no writing activity allowed */
- GRPC_CHTTP2_WRITES_CORKED,
- /** no writing activity */
- GRPC_CHTTP2_WRITING_INACTIVE,
- /** write has been requested and scheduled against the workqueue */
- GRPC_CHTTP2_WRITE_SCHEDULED,
- /** write has been initiated after being reaped from the workqueue */
- GRPC_CHTTP2_WRITING,
- /** write has been initiated, AND another write needs to be started once it's
- done */
- GRPC_CHTTP2_WRITING_STALE_WITH_POLLER,
- GRPC_CHTTP2_WRITING_STALE_NO_POLLER,
-} grpc_chttp2_write_state;
+ GRPC_METADATA_NOT_PUBLISHED,
+ GRPC_METADATA_SYNTHESIZED_FROM_FAKE,
+ GRPC_METADATA_PUBLISHED_FROM_WIRE,
+ GPRC_METADATA_PUBLISHED_AT_CLOSE
+} grpc_published_metadata_method;
-struct grpc_chttp2_transport {
- grpc_transport base; /* must be first */
- gpr_refcount refs;
- grpc_endpoint *ep;
- char *peer_string;
-
- /** when this drops to zero it's safe to shutdown the endpoint */
- gpr_refcount shutdown_ep_refs;
-
- struct {
- grpc_combiner *combiner;
-
- /** is a thread currently in the global lock */
- bool global_active;
- /** is a thread currently parsing */
- bool parsing_active;
- /** write execution state of the transport */
- grpc_chttp2_write_state write_state;
- /** has a check_read_ops been scheduled */
- bool check_read_ops_scheduled;
- } executor;
-
- /** is the transport destroying itself? */
- uint8_t destroying;
- /** has the upper layer closed the transport? */
- uint8_t closed;
-
- /** is there a read request to the endpoint outstanding? */
- uint8_t endpoint_reading;
-
- /** various lists of streams */
- grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
-
- /** global state for reading/writing */
- grpc_chttp2_transport_global global;
- /** state only accessible by the chain of execution that
- set writing_state >= GRPC_WRITING, and only by the writing closure
- chain. */
- grpc_chttp2_transport_writing writing;
- /** state only accessible by the chain of execution that
- set parsing_active=1 */
- grpc_chttp2_transport_parsing parsing;
-
- /** maps stream id to grpc_chttp2_stream objects;
- owned by the parsing thread when parsing */
- grpc_chttp2_stream_map parsing_stream_map;
-
- /** streams created by the client (possibly during parsing);
- merged with parsing_stream_map during unlock when no
- parsing is occurring */
- grpc_chttp2_stream_map new_stream_map;
-
- /** closure to execute writing */
- grpc_closure writing_action;
- /** closure to start reading from the endpoint */
- grpc_closure reading_action;
- grpc_closure reading_action_locked;
- grpc_closure post_parse_locked;
- /** closure to actually do parsing */
- grpc_closure parsing_action;
- /** closure to initiate writing */
- grpc_closure initiate_writing;
- /** closure to finish writing */
- grpc_closure terminate_writing;
- /** closure to flush read state up the stack */
- grpc_closure initiate_read_flush_locked;
-
- /** incoming read bytes */
- gpr_slice_buffer read_buffer;
-
- /** address to place a newly accepted stream - set and unset by
- grpc_chttp2_parsing_accept_stream; used by init_stream to
- publish the accepted server stream */
- grpc_chttp2_stream **accepting_stream;
-
- struct {
- /* accept stream callback */
- void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_transport *transport, const void *server_data);
- void *accept_stream_user_data;
+struct grpc_chttp2_stream {
+ grpc_chttp2_transport *t;
+ grpc_stream_refcount *refcount;
- /** connectivity tracking */
- grpc_connectivity_state_tracker state_tracker;
- } channel_callback;
+ grpc_closure destroy_stream;
+ void *destroy_stream_arg;
- /** Transport op to be applied post-parsing */
- grpc_transport_op *post_parsing_op;
-};
+ grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
+ uint8_t included[STREAM_LIST_COUNT];
-typedef struct {
/** HTTP2 stream id for this stream, or zero if one has not been assigned */
uint32_t id;
@@ -422,20 +366,22 @@ typedef struct {
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
uint32_t max_recv_bytes;
- /** The number of bytes the upper layer has offered to read but we have
- not yet announced to HTTP2 flow control.
- As the upper layers offer to read more bytes, this value increases.
- As we advertise incoming flow control window, this value decreases. */
- uint32_t unannounced_incoming_window_for_parse;
- uint32_t unannounced_incoming_window_for_writing;
/** things the upper layers would like to send */
grpc_metadata_batch *send_initial_metadata;
grpc_closure *send_initial_metadata_finished;
- grpc_byte_stream *send_message;
- grpc_closure *send_message_finished;
grpc_metadata_batch *send_trailing_metadata;
grpc_closure *send_trailing_metadata_finished;
+ grpc_byte_stream *fetching_send_message;
+ uint32_t fetched_send_message_length;
+ gpr_slice fetching_slice;
+ int64_t next_message_end_offset;
+ int64_t flow_controlled_bytes_written;
+ bool complete_fetch_covered_by_poller;
+ grpc_closure complete_fetch;
+ grpc_closure complete_fetch_locked;
+ grpc_closure *fetching_send_message_finished;
+
grpc_metadata_batch *recv_initial_metadata;
grpc_closure *recv_initial_metadata_ready;
grpc_byte_stream **recv_message;
@@ -455,95 +401,44 @@ typedef struct {
bool read_closed;
/** Are all published incoming byte streams closed. */
bool all_incoming_byte_streams_finished;
- /** Is this stream in the stream map. */
- bool in_stream_map;
/** Has this stream seen an error.
If true, then pending incoming frames can be thrown away. */
bool seen_error;
- bool exceeded_metadata_size;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
/** the error that resulted in this stream being write-closed */
grpc_error *write_closed_error;
- bool published_initial_metadata;
- bool published_trailing_metadata;
+ grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
- grpc_chttp2_incoming_metadata_buffer received_initial_metadata;
- grpc_chttp2_incoming_metadata_buffer received_trailing_metadata;
+ grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
grpc_chttp2_incoming_frame_queue incoming_frames;
gpr_timespec deadline;
-} grpc_chttp2_stream_global;
-
-typedef struct {
- /** HTTP2 stream id for this stream, or zero if one has not been assigned */
- uint32_t id;
- uint8_t fetching;
- bool sent_initial_metadata;
- uint8_t sent_message;
- uint8_t sent_trailing_metadata;
- uint8_t read_closed;
- /** send this initial metadata */
- grpc_metadata_batch *send_initial_metadata;
- grpc_byte_stream *send_message;
- grpc_metadata_batch *send_trailing_metadata;
- int64_t outgoing_window;
- /** how much window should we announce? */
- uint32_t announce_window;
- gpr_slice_buffer flow_controlled_buffer;
- gpr_slice fetching_slice;
- size_t stream_fetched;
- grpc_closure finished_fetch;
- /** stats gathered during the write */
- grpc_transport_one_way_stats stats;
-} grpc_chttp2_stream_writing;
-struct grpc_chttp2_stream_parsing {
/** saw some stream level error */
grpc_error *forced_close_error;
- /** HTTP2 stream id for this stream, or zero if one has not been assigned */
- uint32_t id;
- /** has this stream received a close */
- uint8_t received_close;
/** how many header frames have we received? */
uint8_t header_frames_received;
- /** which metadata did we get (on this parse) */
- uint8_t got_metadata_on_parse[2];
- /** should we raise the seen_error flag in transport_global */
- bool seen_error;
- bool exceeded_metadata_size;
/** window available for peer to send to us */
int64_t incoming_window;
/** parsing state for data frames */
grpc_chttp2_data_parser data_parser;
- /** amount of window given */
- int64_t outgoing_window;
/** number of bytes received - reset at end of parse thread execution */
int64_t received_bytes;
- /** stats gathered during the parse */
- grpc_transport_stream_stats stats;
- /** incoming metadata */
- grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
-};
-
-struct grpc_chttp2_stream {
- grpc_chttp2_transport *t;
- grpc_stream_refcount *refcount;
- grpc_chttp2_stream_global global;
- grpc_chttp2_stream_writing writing;
- grpc_chttp2_stream_parsing parsing;
-
- grpc_closure init_stream;
- grpc_closure destroy_stream;
- void *destroy_stream_arg;
+ bool sent_initial_metadata;
+ bool sent_trailing_metadata;
+ /** how much window should we announce? */
+ uint32_t announce_window;
+ gpr_slice_buffer flow_controlled_buffer;
- grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
- uint8_t included[STREAM_LIST_COUNT];
+ grpc_chttp2_write_cb *on_write_finished_cbs;
+ grpc_chttp2_write_cb *finish_after_write;
+ size_t sending_bytes;
};
/** Transport writing call flow:
@@ -559,162 +454,71 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
+ grpc_chttp2_transport *t,
bool covered_by_poller, const char *reason);
/** Someone is unlocking the transport mutex: check to see if writes
- are required, and schedule them if so */
-int grpc_chttp2_unlocking_check_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
-void grpc_chttp2_perform_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- grpc_endpoint *endpoint);
-void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
- void *transport_writing, grpc_error *error);
-void grpc_chttp2_cleanup_writing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_writing *writing);
-
-void grpc_chttp2_prepare_to_read(grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_parsing *parsing);
+ are required, and frame them if so */
+bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
+void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error);
+
/** Process one slice of incoming data; returns GRPC_ERROR_NONE if the
    connection is still viable after reading, or an error describing why it
    should be torn down */
-grpc_error *grpc_chttp2_perform_read(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice);
-void grpc_chttp2_publish_reads(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *global,
- grpc_chttp2_transport_parsing *parsing);
-
-bool grpc_chttp2_list_add_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
+grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice);
+
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
/** Get a writable stream
returns non-zero if there was a stream available */
-int grpc_chttp2_list_pop_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing);
+int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
bool grpc_chttp2_list_remove_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) GRPC_MUST_USE_RESULT;
-
-void grpc_chttp2_list_add_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_have_writing_streams(
- grpc_chttp2_transport_writing *transport_writing);
-int grpc_chttp2_list_pop_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing **stream_writing);
-
-void grpc_chttp2_list_add_written_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_pop_written_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing);
-
-void grpc_chttp2_list_add_parsing_seen_stream(
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing);
-int grpc_chttp2_list_pop_parsing_seen_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-
-void grpc_chttp2_list_add_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_check_read_ops(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-bool grpc_chttp2_list_remove_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_writing_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-bool grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing);
-
-void grpc_chttp2_list_add_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing);
-int grpc_chttp2_list_pop_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-void grpc_chttp2_list_remove_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-
-void grpc_chttp2_list_add_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_list_remove_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing);
-
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-void grpc_chttp2_list_add_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global);
-int grpc_chttp2_list_pop_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global);
-
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_lookup_stream(
- grpc_chttp2_transport_parsing *transport_parsing, uint32_t id);
-grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- uint32_t id);
-
-void grpc_chttp2_add_incoming_goaway(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- uint32_t goaway_error, gpr_slice goaway_text);
-
-void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-/* returns 1 if this is the last stream, 0 otherwise */
-int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
-int grpc_chttp2_has_streams(grpc_chttp2_transport *t);
-void grpc_chttp2_for_all_streams(
- grpc_chttp2_transport_global *transport_global, void *user_data,
- void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
- grpc_chttp2_stream_global *stream_global));
-
-void grpc_chttp2_parsing_become_skip_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-
-void grpc_chttp2_complete_closure_step(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, grpc_closure **pclosure,
- grpc_error *error);
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
+
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
+int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s);
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+
+grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+ uint32_t id);
+grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t id);
+
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ uint32_t goaway_error,
+ gpr_slice goaway_text);
+
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s,
+ grpc_closure **pclosure,
+ grpc_error *error, const char *desc);
#define GRPC_CHTTP2_CLIENT_CONNECT_STRING "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
@@ -805,57 +609,83 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *phase,
const char *var2, int is_client,
uint32_t stream_id, int64_t val1, int64_t val2);
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream,
+void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *stream,
grpc_status_code status, gpr_slice *details);
-void grpc_chttp2_mark_stream_closed(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global, int close_reads, int close_writes,
- grpc_error *error);
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int close_reads,
+ int close_writes, grpc_error *error);
void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global);
+ grpc_chttp2_transport *t);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
-#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
- grpc_chttp2_stream_ref(stream_global, reason)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
- grpc_chttp2_stream_unref(exec_ctx, stream_global, reason)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global,
- const char *reason);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global,
+#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
+ grpc_chttp2_stream_ref(stream, reason)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream, reason)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
const char *reason);
#else
-#define GRPC_CHTTP2_STREAM_REF(stream_global, reason) \
- grpc_chttp2_stream_ref(stream_global)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, reason) \
- grpc_chttp2_stream_unref(exec_ctx, stream_global)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream_global *stream_global);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_stream_global *stream_global);
+#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
+#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
+ grpc_chttp2_stream_unref(exec_ctx, stream)
+void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
+void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
+#endif
+
+//#define GRPC_CHTTP2_REFCOUNTING_DEBUG 1
+#ifdef GRPC_CHTTP2_REFCOUNTING_DEBUG
+#define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
+ grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
+#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
+ grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
+ const char *file, int line);
+#else
+#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
+#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
+void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
#endif
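
The transport ref-count macros mirror the stream macros above: the GRPC_CHTTP2_REFCOUNTING_DEBUG build threads a reason string plus __FILE__/__LINE__ through to the implementation, while the release build drops the extra arguments at compile time. Illustrative usage only (the reason string here is arbitrary, not one mandated by the patch):

    GRPC_CHTTP2_REF_TRANSPORT(t, "pending_read");
    /* ... work that needs the transport to stay alive ... */
    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "pending_read");
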
grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing, uint32_t frame_size,
- uint32_t flags, grpc_chttp2_incoming_frame_queue *add_to_queue);
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ uint32_t frame_size, uint32_t flags);
void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream *bs,
gpr_slice slice);
void grpc_chttp2_incoming_byte_stream_finished(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, int from_parsing_thread);
+ grpc_error *error);
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_parsing *parsing,
+void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const uint8_t *opaque_8bytes);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global,
- bool covered_by_poller, const char *reason);
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, bool covered_by_poller,
+ const char *reason);
+
+void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ grpc_error *due_to_error);
+
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
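
Taken together, the header changes above remove the separate transport_global / transport_parsing / transport_writing views: every helper now takes the grpc_chttp2_transport (and, where relevant, the grpc_chttp2_stream) directly. Purely as an illustration of the new shape, and not code from this patch, the old three-phase read cycle

    /* old: coordinate two views of the same transport */
    grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
    grpc_error *err = grpc_chttp2_perform_read(exec_ctx, &t->parsing, slice);
    grpc_chttp2_publish_reads(exec_ctx, &t->global, &t->parsing);

collapses into a single call whose results are applied to t as it parses:

    grpc_error *err = grpc_chttp2_perform_read(exec_ctx, t, slice);
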
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 0e6d579ba9..8005350ae7 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -45,227 +45,34 @@
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-static grpc_error *init_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_header_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_continuation);
-static grpc_error *init_data_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_rst_stream_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_settings_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_window_update_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_ping_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_goaway_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing);
-static grpc_error *init_skip_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_header);
-
-static grpc_error *parse_frame_slice(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice, int is_last);
-
-void grpc_chttp2_prepare_to_read(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_parsing *stream_parsing;
-
- GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
-
- transport_parsing->next_stream_id = transport_global->next_stream_id;
- memcpy(transport_parsing->last_sent_settings,
- transport_global->settings[GRPC_SENT_SETTINGS],
- sizeof(transport_parsing->last_sent_settings));
- transport_parsing->max_frame_size =
- transport_global->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
-
- /* update the parsing view of incoming window */
- while (grpc_chttp2_list_pop_unannounced_incoming_window_available(
- transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- GRPC_CHTTP2_FLOW_MOVE_STREAM("parse", transport_parsing, stream_parsing,
- incoming_window, stream_global,
- unannounced_incoming_window_for_parse);
- }
-
- GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
-}
-
-void grpc_chttp2_publish_reads(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_parsing *stream_parsing;
- int was_zero;
- int is_zero;
-
- /* transport_parsing->last_incoming_stream_id is used as
- last-grpc_chttp2_stream-id when
- sending GOAWAY frame.
- https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-6.8
- says that last-grpc_chttp2_stream-id is peer-initiated grpc_chttp2_stream
- ID. So,
- since we don't have server pushed streams, client should send
- GOAWAY last-grpc_chttp2_stream-id=0 in this case. */
- if (!transport_parsing->is_client) {
- transport_global->last_incoming_stream_id =
- transport_parsing->last_incoming_stream_id;
- }
-
- /* update global settings */
- if (transport_parsing->settings_updated) {
- memcpy(transport_global->settings[GRPC_PEER_SETTINGS],
- transport_parsing->settings, sizeof(transport_parsing->settings));
- transport_parsing->settings_updated = 0;
- }
-
- /* update settings based on ack if received */
- if (transport_parsing->settings_ack_received) {
- memcpy(transport_global->settings[GRPC_ACKED_SETTINGS],
- transport_global->settings[GRPC_SENT_SETTINGS],
- GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
- transport_parsing->settings_ack_received = 0;
- transport_global->sent_local_settings = 0;
- }
-
- /* move goaway to the global state if we received one (it will be
- published later */
- if (transport_parsing->goaway_received) {
- grpc_chttp2_add_incoming_goaway(exec_ctx, transport_global,
- (uint32_t)transport_parsing->goaway_error,
- transport_parsing->goaway_text);
- transport_parsing->goaway_text = gpr_empty_slice();
- transport_parsing->goaway_received = 0;
- }
-
- /* propagate flow control tokens to global state */
- was_zero = transport_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("parsed", transport_global, outgoing_window,
- transport_parsing, outgoing_window);
- is_zero = transport_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "new_global_flow_control");
- }
-
- if (transport_parsing->incoming_window <
- transport_global->connection_window_target * 3 / 4) {
- int64_t announce_bytes = transport_global->connection_window_target -
- transport_parsing->incoming_window;
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_global,
- announce_incoming_window, announce_bytes);
- GRPC_CHTTP2_FLOW_CREDIT_TRANSPORT("parsed", transport_parsing,
- incoming_window, announce_bytes);
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "global incoming window");
- }
-
- /* for each stream that saw an update, fixup global state */
- while (grpc_chttp2_list_pop_parsing_seen_stream(
- transport_global, transport_parsing, &stream_global, &stream_parsing)) {
- if (stream_parsing->seen_error) {
- stream_global->seen_error = true;
- stream_global->exceeded_metadata_size =
- stream_parsing->exceeded_metadata_size;
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
-
- /* flush stats to global stream state */
- grpc_transport_move_stats(&stream_parsing->stats, &stream_global->stats);
-
- /* update outgoing flow control window */
- was_zero = stream_global->outgoing_window <= 0;
- GRPC_CHTTP2_FLOW_MOVE_STREAM("parsed", transport_global, stream_global,
- outgoing_window, stream_parsing,
- outgoing_window);
- is_zero = stream_global->outgoing_window <= 0;
- if (was_zero && !is_zero) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "stream.read_flow_control");
- }
-
- stream_global->max_recv_bytes -= (uint32_t)GPR_MIN(
- stream_global->max_recv_bytes, stream_parsing->received_bytes);
- stream_parsing->received_bytes = 0;
-
- /* publish incoming stream ops */
- if (stream_global->incoming_frames.tail != NULL) {
- stream_global->incoming_frames.tail->is_tail = 0;
- }
- if (stream_parsing->data_parser.incoming_frames.head != NULL) {
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
- grpc_chttp2_incoming_frame_queue_merge(
- &stream_global->incoming_frames,
- &stream_parsing->data_parser.incoming_frames);
- if (stream_global->incoming_frames.tail != NULL) {
- stream_global->incoming_frames.tail->is_tail = 1;
- }
-
- if (!stream_global->published_initial_metadata &&
- stream_parsing->got_metadata_on_parse[0]) {
- stream_parsing->got_metadata_on_parse[0] = 0;
- stream_global->published_initial_metadata = 1;
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
- stream_parsing->metadata_buffer[0],
- stream_global->received_initial_metadata);
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
- if (!stream_global->published_trailing_metadata &&
- stream_parsing->got_metadata_on_parse[1]) {
- stream_parsing->got_metadata_on_parse[1] = 0;
- stream_global->published_trailing_metadata = 1;
- GPR_SWAP(grpc_chttp2_incoming_metadata_buffer,
- stream_parsing->metadata_buffer[1],
- stream_global->received_trailing_metadata);
- grpc_chttp2_list_add_check_read_ops(exec_ctx, transport_global,
- stream_global);
- }
-
- if (stream_parsing->forced_close_error != GRPC_ERROR_NONE) {
- intptr_t reason;
- bool has_reason = grpc_error_get_int(stream_parsing->forced_close_error,
- GRPC_ERROR_INT_HTTP2_ERROR, &reason);
- if (has_reason && reason != GRPC_CHTTP2_NO_ERROR) {
- grpc_status_code status_code =
- has_reason
- ? grpc_chttp2_http2_error_to_grpc_status(
- (grpc_chttp2_error_code)reason, stream_global->deadline)
- : GRPC_STATUS_INTERNAL;
- const char *status_details =
- grpc_error_string(stream_parsing->forced_close_error);
- gpr_slice slice_details = gpr_slice_from_copied_string(status_details);
- grpc_error_free_string(status_details);
- grpc_chttp2_fake_status(exec_ctx, transport_global, stream_global,
- status_code, &slice_details);
- }
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- 1, 1, stream_parsing->forced_close_error);
- }
-
- if (stream_parsing->received_close) {
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- 1, 0, GRPC_ERROR_NONE);
- }
- }
-}
-
-grpc_error *grpc_chttp2_perform_read(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice) {
+static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_continuation);
+static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
+static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_header);
+
+static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice,
+ int is_last);
+
+grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ gpr_slice slice) {
uint8_t *beg = GPR_SLICE_START_PTR(slice);
uint8_t *end = GPR_SLICE_END_PTR(slice);
uint8_t *cur = beg;
@@ -273,7 +80,7 @@ grpc_error *grpc_chttp2_perform_read(
if (cur == end) return GRPC_ERROR_NONE;
- switch (transport_parsing->deframe_state) {
+ switch (t->deframe_state) {
case GRPC_DTS_CLIENT_PREFIX_0:
case GRPC_DTS_CLIENT_PREFIX_1:
case GRPC_DTS_CLIENT_PREFIX_2:
@@ -298,25 +105,22 @@ grpc_error *grpc_chttp2_perform_read(
case GRPC_DTS_CLIENT_PREFIX_21:
case GRPC_DTS_CLIENT_PREFIX_22:
case GRPC_DTS_CLIENT_PREFIX_23:
- while (cur != end && transport_parsing->deframe_state != GRPC_DTS_FH_0) {
- if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[transport_parsing
- ->deframe_state]) {
+ while (cur != end && t->deframe_state != GRPC_DTS_FH_0) {
+ if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state]) {
char *msg;
gpr_asprintf(
&msg,
"Connect string mismatch: expected '%c' (%d) got '%c' (%d) "
"at byte %d",
- GRPC_CHTTP2_CLIENT_CONNECT_STRING[transport_parsing
- ->deframe_state],
- (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING
- [transport_parsing->deframe_state],
- *cur, (int)*cur, transport_parsing->deframe_state);
+ GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
+ (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
+ *cur, (int)*cur, t->deframe_state);
err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
++cur;
- ++transport_parsing->deframe_state;
+ ++t->deframe_state;
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -325,100 +129,95 @@ grpc_error *grpc_chttp2_perform_read(
dts_fh_0:
case GRPC_DTS_FH_0:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size = ((uint32_t)*cur) << 16;
+ t->incoming_frame_size = ((uint32_t)*cur) << 16;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_1;
+ t->deframe_state = GRPC_DTS_FH_1;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_1:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size |= ((uint32_t)*cur) << 8;
+ t->incoming_frame_size |= ((uint32_t)*cur) << 8;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_2;
+ t->deframe_state = GRPC_DTS_FH_2;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_2:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_size |= *cur;
+ t->incoming_frame_size |= *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_3;
+ t->deframe_state = GRPC_DTS_FH_3;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_3:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_type = *cur;
+ t->incoming_frame_type = *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_4;
+ t->deframe_state = GRPC_DTS_FH_4;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_4:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_frame_flags = *cur;
+ t->incoming_frame_flags = *cur;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_5;
+ t->deframe_state = GRPC_DTS_FH_5;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_5:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24;
+ t->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_6;
+ t->deframe_state = GRPC_DTS_FH_6;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_6:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 16;
+ t->incoming_stream_id |= ((uint32_t)*cur) << 16;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_7;
+ t->deframe_state = GRPC_DTS_FH_7;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_7:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur) << 8;
+ t->incoming_stream_id |= ((uint32_t)*cur) << 8;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_8;
+ t->deframe_state = GRPC_DTS_FH_8;
return GRPC_ERROR_NONE;
}
/* fallthrough */
case GRPC_DTS_FH_8:
GPR_ASSERT(cur < end);
- transport_parsing->incoming_stream_id |= ((uint32_t)*cur);
- transport_parsing->deframe_state = GRPC_DTS_FRAME;
- err = init_frame_parser(exec_ctx, transport_parsing);
+ t->incoming_stream_id |= ((uint32_t)*cur);
+ t->deframe_state = GRPC_DTS_FRAME;
+ err = init_frame_parser(exec_ctx, t);
if (err != GRPC_ERROR_NONE) {
return err;
}
- if (transport_parsing->incoming_stream_id != 0 &&
- transport_parsing->incoming_stream_id >
- transport_parsing->last_incoming_stream_id) {
- transport_parsing->last_incoming_stream_id =
- transport_parsing->incoming_stream_id;
- }
- if (transport_parsing->incoming_frame_size == 0) {
- err = parse_frame_slice(exec_ctx, transport_parsing, gpr_empty_slice(),
- 1);
+ if (t->incoming_frame_size == 0) {
+ err = parse_frame_slice(exec_ctx, t, gpr_empty_slice(), 1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->incoming_stream = NULL;
+ t->incoming_stream = NULL;
if (++cur == end) {
- transport_parsing->deframe_state = GRPC_DTS_FH_0;
+ t->deframe_state = GRPC_DTS_FH_0;
return GRPC_ERROR_NONE;
}
goto dts_fh_0; /* loop */
- } else if (transport_parsing->incoming_frame_size >
- transport_parsing->max_frame_size) {
+ } else if (t->incoming_frame_size >
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) {
char *msg;
gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d",
- transport_parsing->incoming_frame_size,
- transport_parsing->max_frame_size);
+ t->incoming_frame_size,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]);
err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
@@ -429,41 +228,39 @@ grpc_error *grpc_chttp2_perform_read(
/* fallthrough */
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
- if ((uint32_t)(end - cur) == transport_parsing->incoming_frame_size) {
- err = parse_frame_slice(exec_ctx, transport_parsing,
+ if ((uint32_t)(end - cur) == t->incoming_frame_size) {
+ err = parse_frame_slice(exec_ctx, t,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->deframe_state = GRPC_DTS_FH_0;
- transport_parsing->incoming_stream = NULL;
+ t->deframe_state = GRPC_DTS_FH_0;
+ t->incoming_stream = NULL;
return GRPC_ERROR_NONE;
- } else if ((uint32_t)(end - cur) >
- transport_parsing->incoming_frame_size) {
+ } else if ((uint32_t)(end - cur) > t->incoming_frame_size) {
size_t cur_offset = (size_t)(cur - beg);
err = parse_frame_slice(
- exec_ctx, transport_parsing,
- gpr_slice_sub_no_ref(
- slice, cur_offset,
- cur_offset + transport_parsing->incoming_frame_size),
+ exec_ctx, t,
+ gpr_slice_sub_no_ref(slice, cur_offset,
+ cur_offset + t->incoming_frame_size),
1);
if (err != GRPC_ERROR_NONE) {
return err;
}
- cur += transport_parsing->incoming_frame_size;
- transport_parsing->incoming_stream = NULL;
+ cur += t->incoming_frame_size;
+ t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- err = parse_frame_slice(exec_ctx, transport_parsing,
+ err = parse_frame_slice(exec_ctx, t,
gpr_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
0);
if (err != GRPC_ERROR_NONE) {
return err;
}
- transport_parsing->incoming_frame_size -= (uint32_t)(end - cur);
+ t->incoming_frame_size -= (uint32_t)(end - cur);
return GRPC_ERROR_NONE;
}
GPR_UNREACHABLE_CODE(return 0);
@@ -472,175 +269,172 @@ grpc_error *grpc_chttp2_perform_read(
GPR_UNREACHABLE_CODE(return 0);
}
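
The GRPC_DTS_FH_0 through GRPC_DTS_FH_8 states above assemble the standard nine-octet HTTP/2 frame header one byte at a time, so parsing resumes correctly even when the header is split across slices. As a worked example (illustrative, not taken from the patch): for the header bytes 00 00 2a 00 01 00 00 00 05, t->incoming_frame_size becomes (0x00 << 16) | (0x00 << 8) | 0x2a = 42, t->incoming_frame_type is 0x00 (DATA), t->incoming_frame_flags is 0x01, and t->incoming_stream_id is ((0x00 & 0x7f) << 24) | (0x00 << 16) | (0x00 << 8) | 0x05 = 5, after which the state machine enters GRPC_DTS_FRAME and expects 42 payload bytes.
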
-static grpc_error *init_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- if (transport_parsing->is_first_frame &&
- transport_parsing->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
+static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (t->is_first_frame &&
+ t->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
char *msg;
gpr_asprintf(
&msg, "Expected SETTINGS frame as the first frame, got frame type %d",
- transport_parsing->incoming_frame_type);
+ t->incoming_frame_type);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- transport_parsing->is_first_frame = false;
- if (transport_parsing->expect_continuation_stream_id != 0) {
- if (transport_parsing->incoming_frame_type !=
- GRPC_CHTTP2_FRAME_CONTINUATION) {
+ t->is_first_frame = false;
+ if (t->expect_continuation_stream_id != 0) {
+ if (t->incoming_frame_type != GRPC_CHTTP2_FRAME_CONTINUATION) {
char *msg;
gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x",
- transport_parsing->incoming_frame_type);
+ t->incoming_frame_type);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- if (transport_parsing->expect_continuation_stream_id !=
- transport_parsing->incoming_stream_id) {
+ if (t->expect_continuation_stream_id != t->incoming_stream_id) {
char *msg;
gpr_asprintf(
&msg,
"Expected CONTINUATION frame for grpc_chttp2_stream %08x, got "
"grpc_chttp2_stream %08x",
- transport_parsing->expect_continuation_stream_id,
- transport_parsing->incoming_stream_id);
+ t->expect_continuation_stream_id, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- return init_header_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_header_frame_parser(exec_ctx, t, 1);
}
- switch (transport_parsing->incoming_frame_type) {
+ switch (t->incoming_frame_type) {
case GRPC_CHTTP2_FRAME_DATA:
- return init_data_frame_parser(exec_ctx, transport_parsing);
+ return init_data_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_HEADER:
- return init_header_frame_parser(exec_ctx, transport_parsing, 0);
+ return init_header_frame_parser(exec_ctx, t, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
return GRPC_ERROR_CREATE("Unexpected CONTINUATION frame");
case GRPC_CHTTP2_FRAME_RST_STREAM:
- return init_rst_stream_parser(exec_ctx, transport_parsing);
+ return init_rst_stream_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_SETTINGS:
- return init_settings_frame_parser(exec_ctx, transport_parsing);
+ return init_settings_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
- return init_window_update_frame_parser(exec_ctx, transport_parsing);
+ return init_window_update_frame_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_PING:
- return init_ping_parser(exec_ctx, transport_parsing);
+ return init_ping_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_GOAWAY:
- return init_goaway_parser(exec_ctx, transport_parsing);
+ return init_goaway_parser(exec_ctx, t);
default:
if (grpc_http_trace) {
- gpr_log(GPR_ERROR, "Unknown frame type %02x",
- transport_parsing->incoming_frame_type);
+ gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
}
static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s,
gpr_slice slice, int is_last) {
return GRPC_ERROR_NONE;
}
-static void skip_header(void *tp, grpc_mdelem *md) { GRPC_MDELEM_UNREF(md); }
+static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem *md) {
+ GRPC_MDELEM_UNREF(md);
+}
-static grpc_error *init_skip_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_header) {
+static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_header) {
if (is_header) {
- uint8_t is_eoh = transport_parsing->expect_continuation_stream_id != 0;
- transport_parsing->parser = grpc_chttp2_header_parser_parse;
- transport_parsing->parser_data = &transport_parsing->hpack_parser;
- transport_parsing->hpack_parser.on_header = skip_header;
- transport_parsing->hpack_parser.on_header_user_data = NULL;
- transport_parsing->hpack_parser.is_boundary = is_eoh;
- transport_parsing->hpack_parser.is_eof =
- (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
+ uint8_t is_eoh = t->expect_continuation_stream_id != 0;
+ t->parser = grpc_chttp2_header_parser_parse;
+ t->parser_data = &t->hpack_parser;
+ t->hpack_parser.on_header = skip_header;
+ t->hpack_parser.on_header_user_data = NULL;
+ t->hpack_parser.is_boundary = is_eoh;
+ t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0);
} else {
- transport_parsing->parser = skip_parser;
+ t->parser = skip_parser;
}
return GRPC_ERROR_NONE;
}
-void grpc_chttp2_parsing_become_skip_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- init_skip_frame_parser(
- exec_ctx, transport_parsing,
- transport_parsing->parser == grpc_chttp2_header_parser_parse);
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ init_skip_frame_parser(exec_ctx, t,
+ t->parser == grpc_chttp2_header_parser_parse);
}
-static grpc_error *update_incoming_window(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing) {
- uint32_t incoming_frame_size = transport_parsing->incoming_frame_size;
- if (incoming_frame_size > transport_parsing->incoming_window) {
- char *msg;
- gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_window);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
- gpr_free(msg);
- return err;
- }
-
- if (incoming_frame_size > stream_parsing->incoming_window) {
+static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ uint32_t incoming_frame_size = t->incoming_frame_size;
+ if (incoming_frame_size > t->incoming_window) {
char *msg;
gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
- transport_parsing->incoming_frame_size,
- stream_parsing->incoming_window);
+ t->incoming_frame_size, t->incoming_window);
grpc_error *err = GRPC_ERROR_CREATE(msg);
gpr_free(msg);
return err;
}
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", transport_parsing, incoming_window,
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("parse", t, incoming_window,
incoming_frame_size);
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", transport_parsing, stream_parsing,
- incoming_window, incoming_frame_size);
- stream_parsing->received_bytes += incoming_frame_size;
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
+ if (s != NULL) {
+ if (incoming_frame_size > s->incoming_window) {
+ char *msg;
+ gpr_asprintf(&msg,
+ "frame of size %d overflows incoming window of %" PRId64,
+ t->incoming_frame_size, s->incoming_window);
+ grpc_error *err = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return err;
+ }
+
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("parse", t, s, incoming_window,
+ incoming_frame_size);
+ s->received_bytes += incoming_frame_size;
+ s->max_recv_bytes -=
+ (uint32_t)GPR_MIN(s->max_recv_bytes, incoming_frame_size);
+ }
return GRPC_ERROR_NONE;
}
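
update_incoming_window enforces connection-level flow control first and stream-level flow control only when the frame maps to a known stream. For instance (numbers are illustrative): with t->incoming_window and s->incoming_window both at the HTTP/2 default of 65535, a 16384-byte DATA frame is debited from each, leaving 49151 in both windows, and s->received_bytes grows by 16384; a frame larger than either remaining window instead fails the check and the resulting error propagates out of grpc_chttp2_perform_read.
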
-static grpc_error *init_data_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- grpc_chttp2_stream_parsing *stream_parsing =
- grpc_chttp2_parsing_lookup_stream(transport_parsing,
- transport_parsing->incoming_stream_id);
+static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_NONE;
- if (stream_parsing == NULL) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ err = update_incoming_window(exec_ctx, t, s);
+ if (err != GRPC_ERROR_NONE) {
+ goto error_handler;
}
- stream_parsing->stats.incoming.framing_bytes += 9;
- if (stream_parsing->received_close) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ if (s == NULL) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
- if (err == GRPC_ERROR_NONE) {
- err = update_incoming_window(exec_ctx, transport_parsing, stream_parsing);
+ s->stats.incoming.framing_bytes += 9;
+ if (err == GRPC_ERROR_NONE && s->read_closed) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
if (err == GRPC_ERROR_NONE) {
- err = grpc_chttp2_data_parser_begin_frame(
- &stream_parsing->data_parser, transport_parsing->incoming_frame_flags,
- stream_parsing->id);
+ err = grpc_chttp2_data_parser_begin_frame(&s->data_parser,
+ t->incoming_frame_flags, s->id);
}
+error_handler:
if (err == GRPC_ERROR_NONE) {
- transport_parsing->incoming_stream = stream_parsing;
- transport_parsing->parser = grpc_chttp2_data_parser_parse;
- transport_parsing->parser_data = &stream_parsing->data_parser;
+ t->incoming_stream = s;
+ t->parser = grpc_chttp2_data_parser_parse;
+ t->parser_data = &s->data_parser;
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
- stream_parsing->received_close = 1;
- stream_parsing->forced_close_error = err;
+ if (s != NULL) {
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, err);
+ }
gpr_slice_buffer_add(
- &transport_parsing->qbuf,
- grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
- &stream_parsing->stats.outgoing));
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
+ GRPC_CHTTP2_PROTOCOL_ERROR,
+ &s->stats.outgoing));
+ return init_skip_frame_parser(exec_ctx, t, 0);
} else {
return err;
}
@@ -648,23 +442,22 @@ static grpc_error *init_data_frame_parser(
static void free_timeout(void *p) { gpr_free(p); }
-static void on_initial_header(void *tp, grpc_mdelem *md) {
- grpc_chttp2_transport_parsing *transport_parsing = tp;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
+static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_mdelem *md) {
+ grpc_chttp2_transport *t = tp;
+ grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
- GPR_ASSERT(stream_parsing);
+ GPR_ASSERT(s != NULL);
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", stream_parsing->id,
- transport_parsing->is_client ? "CLI" : "SVR",
+ GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
/* TODO(ctiller): check for a status like " 0" */
- stream_parsing->seen_error = true;
+ s->seen_error = true;
}
if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
@@ -681,306 +474,273 @@ static void on_initial_header(void *tp, grpc_mdelem *md) {
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &stream_parsing->metadata_buffer[0],
+ &s->metadata_buffer[0],
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
GRPC_MDELEM_UNREF(md);
} else {
- const size_t new_size =
- stream_parsing->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
- grpc_chttp2_transport_global *transport_global =
- &TRANSPORT_FROM_PARSING(transport_parsing)->global;
+ const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
const size_t metadata_size_limit =
- transport_global->settings[GRPC_LOCAL_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (new_size > metadata_size_limit) {
- if (!stream_parsing->exceeded_metadata_size) {
- gpr_log(GPR_DEBUG,
- "received initial metadata size exceeds limit (%" PRIuPTR
- " vs. %" PRIuPTR ")",
- new_size, metadata_size_limit);
- stream_parsing->seen_error = true;
- stream_parsing->exceeded_metadata_size = true;
- }
+ gpr_log(GPR_DEBUG,
+ "received initial metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
+ new_size, metadata_size_limit);
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE("received initial metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(
- &stream_parsing->metadata_buffer[0], md);
+ grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
}
}
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
-
GPR_TIMER_END("on_initial_header", 0);
}
-static void on_trailing_header(void *tp, grpc_mdelem *md) {
- grpc_chttp2_transport_parsing *transport_parsing = tp;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
+static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
+ grpc_mdelem *md) {
+ grpc_chttp2_transport *t = tp;
+ grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
- GPR_ASSERT(stream_parsing);
+ GPR_ASSERT(s != NULL);
GRPC_CHTTP2_IF_TRACING(gpr_log(
- GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", stream_parsing->id,
- transport_parsing->is_client ? "CLI" : "SVR",
+ GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
/* TODO(ctiller): check for a status like " 0" */
- stream_parsing->seen_error = true;
+ s->seen_error = true;
}
- const size_t new_size =
- stream_parsing->metadata_buffer[1].size + GRPC_MDELEM_LENGTH(md);
- grpc_chttp2_transport_global *transport_global =
- &TRANSPORT_FROM_PARSING(transport_parsing)->global;
+ const size_t new_size = s->metadata_buffer[1].size + GRPC_MDELEM_LENGTH(md);
const size_t metadata_size_limit =
- transport_global->settings[GRPC_LOCAL_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (new_size > metadata_size_limit) {
- if (!stream_parsing->exceeded_metadata_size) {
- gpr_log(GPR_DEBUG,
- "received trailing metadata size exceeds limit (%" PRIuPTR
- " vs. %" PRIuPTR ")",
- new_size, metadata_size_limit);
- stream_parsing->seen_error = true;
- stream_parsing->exceeded_metadata_size = true;
- }
+ gpr_log(GPR_DEBUG,
+ "received trailing metadata size exceeds limit (%" PRIuPTR
+ " vs. %" PRIuPTR ")",
+ new_size, metadata_size_limit);
+ grpc_chttp2_cancel_stream(
+ exec_ctx, t, s,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE("received trailing metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
GRPC_MDELEM_UNREF(md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(
- &stream_parsing->metadata_buffer[1], md);
+ grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md);
}
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);
-
GPR_TIMER_END("on_trailing_header", 0);
}
-static grpc_error *init_header_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- int is_continuation) {
- uint8_t is_eoh = (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
- int via_accept = 0;
- grpc_chttp2_stream_parsing *stream_parsing;
+static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ int is_continuation) {
+ uint8_t is_eoh =
+ (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
+ grpc_chttp2_stream *s;
/* TODO(ctiller): when to increment header_frames_received? */
if (is_eoh) {
- transport_parsing->expect_continuation_stream_id = 0;
+ t->expect_continuation_stream_id = 0;
} else {
- transport_parsing->expect_continuation_stream_id =
- transport_parsing->incoming_stream_id;
+ t->expect_continuation_stream_id = t->incoming_stream_id;
}
if (!is_continuation) {
- transport_parsing->header_eof = (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
+ t->header_eof =
+ (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
- stream_parsing = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
+ s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (s == NULL) {
if (is_continuation) {
- gpr_log(GPR_ERROR,
- "grpc_chttp2_stream disbanded before CONTINUATION received");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_ERROR,
+ "grpc_chttp2_stream disbanded before CONTINUATION received"));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- if (transport_parsing->is_client) {
- if ((transport_parsing->incoming_stream_id & 1) &&
- transport_parsing->incoming_stream_id <
- transport_parsing->next_stream_id) {
+ if (t->is_client) {
+ if ((t->incoming_stream_id & 1) &&
+ t->incoming_stream_id < t->next_stream_id) {
/* this is an old (probably cancelled) grpc_chttp2_stream */
} else {
- gpr_log(GPR_ERROR,
- "ignoring new grpc_chttp2_stream creation on client");
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
}
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- } else if (transport_parsing->last_incoming_stream_id >
- transport_parsing->incoming_stream_id) {
- gpr_log(GPR_ERROR,
- "ignoring out of order new grpc_chttp2_stream request on server; "
- "last grpc_chttp2_stream "
- "id=%d, new grpc_chttp2_stream id=%d",
- transport_parsing->last_incoming_stream_id,
- transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- } else if ((transport_parsing->incoming_stream_id & 1) == 0) {
- gpr_log(GPR_ERROR,
- "ignoring grpc_chttp2_stream with non-client generated index %d",
- transport_parsing->incoming_stream_id);
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ } else if (t->last_new_stream_id >= t->incoming_stream_id) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR,
+ "ignoring out of order new grpc_chttp2_stream request on server; "
+ "last grpc_chttp2_stream "
+ "id=%d, new grpc_chttp2_stream id=%d",
+ t->last_new_stream_id, t->incoming_stream_id));
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ } else if ((t->incoming_stream_id & 1) == 0) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR,
+ "ignoring grpc_chttp2_stream with non-client generated index %d",
+ t->incoming_stream_id));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- stream_parsing = transport_parsing->incoming_stream =
- grpc_chttp2_parsing_accept_stream(
- exec_ctx, transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
- gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ t->last_new_stream_id = t->incoming_stream_id;
+ s = t->incoming_stream =
+ grpc_chttp2_parsing_accept_stream(exec_ctx, t, t->incoming_stream_id);
+ if (s == NULL) {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- via_accept = 1;
} else {
- transport_parsing->incoming_stream = stream_parsing;
- }
- GPR_ASSERT(stream_parsing != NULL && (via_accept == 0 || via_accept == 1));
- stream_parsing->stats.incoming.framing_bytes += 9;
- if (stream_parsing->received_close) {
- gpr_log(GPR_ERROR, "skipping already closed grpc_chttp2_stream header");
- transport_parsing->incoming_stream = NULL;
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
- }
- transport_parsing->parser = grpc_chttp2_header_parser_parse;
- transport_parsing->parser_data = &transport_parsing->hpack_parser;
- switch (stream_parsing->header_frames_received) {
+ t->incoming_stream = s;
+ }
+ GPR_ASSERT(s != NULL);
+ s->stats.incoming.framing_bytes += 9;
+ if (s->read_closed) {
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
+ t->incoming_stream = NULL;
+ return init_skip_frame_parser(exec_ctx, t, 1);
+ }
+ t->parser = grpc_chttp2_header_parser_parse;
+ t->parser_data = &t->hpack_parser;
+ switch (s->header_frames_received) {
case 0:
- transport_parsing->hpack_parser.on_header = on_initial_header;
+ t->hpack_parser.on_header = on_initial_header;
break;
case 1:
- transport_parsing->hpack_parser.on_header = on_trailing_header;
+ t->hpack_parser.on_header = on_trailing_header;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
- return init_skip_frame_parser(exec_ctx, transport_parsing, 1);
+ return init_skip_frame_parser(exec_ctx, t, 1);
}
- transport_parsing->hpack_parser.on_header_user_data = transport_parsing;
- transport_parsing->hpack_parser.is_boundary = is_eoh;
- transport_parsing->hpack_parser.is_eof =
- (uint8_t)(is_eoh ? transport_parsing->header_eof : 0);
- if (!is_continuation && (transport_parsing->incoming_frame_flags &
- GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
- grpc_chttp2_hpack_parser_set_has_priority(&transport_parsing->hpack_parser);
+ t->hpack_parser.on_header_user_data = t;
+ t->hpack_parser.is_boundary = is_eoh;
+ t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0);
+ if (!is_continuation &&
+ (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
+ grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
}
return GRPC_ERROR_NONE;
}
-static grpc_error *init_window_update_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_window_update_parser_begin_frame(
- &transport_parsing->simple.window_update,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.window_update, t->incoming_frame_size,
+ t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- if (transport_parsing->incoming_stream_id != 0) {
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (stream_parsing == NULL) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
+ if (t->incoming_stream_id != 0) {
+ grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (s == NULL) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
}
- stream_parsing->stats.incoming.framing_bytes += 9;
+ s->stats.incoming.framing_bytes += 9;
}
- transport_parsing->parser = grpc_chttp2_window_update_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.window_update;
+ t->parser = grpc_chttp2_window_update_parser_parse;
+ t->parser_data = &t->simple.window_update;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_ping_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_ping_parser_begin_frame(
- &transport_parsing->simple.ping, transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- transport_parsing->parser = grpc_chttp2_ping_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.ping;
+ t->parser = grpc_chttp2_ping_parser_parse;
+ t->parser_data = &t->simple.ping;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_rst_stream_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_rst_stream_parser_begin_frame(
- &transport_parsing->simple.rst_stream,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream = grpc_chttp2_parsing_lookup_stream(
- transport_parsing, transport_parsing->incoming_stream_id);
- if (!transport_parsing->incoming_stream) {
- return init_skip_frame_parser(exec_ctx, transport_parsing, 0);
- }
- stream_parsing->stats.incoming.framing_bytes += 9;
- transport_parsing->parser = grpc_chttp2_rst_stream_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.rst_stream;
+ grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
+ if (!t->incoming_stream) {
+ return init_skip_frame_parser(exec_ctx, t, 0);
+ }
+ s->stats.incoming.framing_bytes += 9;
+ t->parser = grpc_chttp2_rst_stream_parser_parse;
+ t->parser_data = &t->simple.rst_stream;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_goaway_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
+static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
grpc_error *err = grpc_chttp2_goaway_parser_begin_frame(
- &transport_parsing->goaway_parser, transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags);
+ &t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- transport_parsing->parser = grpc_chttp2_goaway_parser_parse;
- transport_parsing->parser_data = &transport_parsing->goaway_parser;
+ t->parser = grpc_chttp2_goaway_parser_parse;
+ t->parser_data = &t->goaway_parser;
return GRPC_ERROR_NONE;
}
-static grpc_error *init_settings_frame_parser(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing) {
- if (transport_parsing->incoming_stream_id != 0) {
+static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ if (t->incoming_stream_id != 0) {
return GRPC_ERROR_CREATE("Settings frame received for grpc_chttp2_stream");
}
grpc_error *err = grpc_chttp2_settings_parser_begin_frame(
- &transport_parsing->simple.settings,
- transport_parsing->incoming_frame_size,
- transport_parsing->incoming_frame_flags, transport_parsing->settings);
+ &t->simple.settings, t->incoming_frame_size, t->incoming_frame_flags,
+ t->settings[GRPC_PEER_SETTINGS]);
if (err != GRPC_ERROR_NONE) {
return err;
}
- if (transport_parsing->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
- transport_parsing->settings_ack_received = 1;
+ if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
+ memcpy(t->settings[GRPC_ACKED_SETTINGS], t->settings[GRPC_SENT_SETTINGS],
+ GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
grpc_chttp2_hptbl_set_max_bytes(
- &transport_parsing->hpack_parser.table,
- transport_parsing
- ->last_sent_settings[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
- transport_parsing->max_frame_size =
- transport_parsing
- ->last_sent_settings[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
+ &t->hpack_parser.table,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+ t->sent_local_settings = 0;
}
- transport_parsing->parser = grpc_chttp2_settings_parser_parse;
- transport_parsing->parser_data = &transport_parsing->simple.settings;
+ t->parser = grpc_chttp2_settings_parser_parse;
+ t->parser_data = &t->simple.settings;
return GRPC_ERROR_NONE;
}
-/*
-static int is_window_update_legal(int64_t window_update, int64_t window) {
- return window + window_update < MAX_WINDOW;
-}
-*/
-
-static grpc_error *parse_frame_slice(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_parsing *transport_parsing,
- gpr_slice slice, int is_last) {
- grpc_chttp2_stream_parsing *stream_parsing =
- transport_parsing->incoming_stream;
- grpc_error *err = transport_parsing->parser(
- exec_ctx, transport_parsing->parser_data, transport_parsing,
- stream_parsing, slice, is_last);
+static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t, gpr_slice slice,
+ int is_last) {
+ grpc_chttp2_stream *s = t->incoming_stream;
+ grpc_error *err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
if (err == GRPC_ERROR_NONE) {
- if (stream_parsing) {
- grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
- stream_parsing);
- }
- return GRPC_ERROR_NONE;
+ return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
if (grpc_http_trace) {
const char *msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
grpc_error_free_string(msg);
}
- grpc_chttp2_parsing_become_skip_parser(exec_ctx, transport_parsing);
- if (stream_parsing) {
- stream_parsing->forced_close_error = err;
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ if (s) {
+ s->forced_close_error = err;
gpr_slice_buffer_add(
- &transport_parsing->qbuf,
- grpc_chttp2_rst_stream_create(transport_parsing->incoming_stream_id,
- GRPC_CHTTP2_PROTOCOL_ERROR,
- &stream_parsing->stats.outgoing));
+ &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
+ GRPC_CHTTP2_PROTOCOL_ERROR,
+ &s->stats.outgoing));
} else {
GRPC_ERROR_UNREF(err);
}
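
The init_settings_frame_parser hunk above now resolves a SETTINGS ACK by copying the previously sent settings into the acked slot and re-reading the HPACK table size from there, instead of tracking a separate last_sent_settings array. A minimal standalone sketch of that promotion, with toy types and an illustrative settings count rather than the real chttp2 constants:

#include <stdint.h>
#include <string.h>

#define TOY_NUM_SETTINGS 7 /* illustrative; not the real GRPC_CHTTP2_NUM_SETTINGS */

typedef enum {
  TOY_LOCAL_SETTINGS,
  TOY_SENT_SETTINGS,
  TOY_ACKED_SETTINGS,
  TOY_PEER_SETTINGS,
  TOY_NUM_SETTING_SETS
} toy_setting_set;

typedef struct {
  uint32_t settings[TOY_NUM_SETTING_SETS][TOY_NUM_SETTINGS];
  int sent_local_settings;
} toy_transport;

/* Called when a SETTINGS frame carrying the ACK flag arrives: whatever we
   last sent is now what the peer has agreed to operate under. */
static void toy_on_settings_ack(toy_transport *t) {
  memcpy(t->settings[TOY_ACKED_SETTINGS], t->settings[TOY_SENT_SETTINGS],
         TOY_NUM_SETTINGS * sizeof(uint32_t));
  /* From here on, limits such as the HPACK table size are read from the
     acked slot (grpc_chttp2_hptbl_set_max_bytes in the hunk above). */
  t->sent_local_settings = 0; /* a new local SETTINGS frame may be sent */
}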
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 4dc4968248..6d25b3ae57 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -35,27 +35,6 @@
#include <grpc/support/log.h>
-#define TRANSPORT_FROM_GLOBAL(tg) \
- ((grpc_chttp2_transport *)((char *)(tg)-offsetof(grpc_chttp2_transport, \
- global)))
-
-#define STREAM_FROM_GLOBAL(sg) \
- ((grpc_chttp2_stream *)((char *)(sg)-offsetof(grpc_chttp2_stream, global)))
-
-#define TRANSPORT_FROM_WRITING(tw) \
- ((grpc_chttp2_transport *)((char *)(tw)-offsetof(grpc_chttp2_transport, \
- writing)))
-
-#define STREAM_FROM_WRITING(sw) \
- ((grpc_chttp2_stream *)((char *)(sw)-offsetof(grpc_chttp2_stream, writing)))
-
-#define TRANSPORT_FROM_PARSING(tp) \
- ((grpc_chttp2_transport *)((char *)(tp)-offsetof(grpc_chttp2_transport, \
- parsing)))
-
-#define STREAM_FROM_PARSING(sp) \
- ((grpc_chttp2_stream *)((char *)(sp)-offsetof(grpc_chttp2_stream, parsing)))
-
/* core list management */
static int stream_list_empty(grpc_chttp2_transport *t,
@@ -139,321 +118,57 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
-bool grpc_chttp2_list_add_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- GPR_ASSERT(stream_global->id != 0);
- return stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE);
-}
-
-int grpc_chttp2_list_pop_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_WRITABLE);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-bool grpc_chttp2_list_remove_writable_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WRITABLE);
-}
-
-void grpc_chttp2_list_add_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- GPR_ASSERT(stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITING));
-}
-
-int grpc_chttp2_list_have_writing_streams(
- grpc_chttp2_transport_writing *transport_writing) {
- return !stream_list_empty(TRANSPORT_FROM_WRITING(transport_writing),
- GRPC_CHTTP2_LIST_WRITING);
-}
-
-int grpc_chttp2_list_pop_writing_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
- GRPC_CHTTP2_LIST_WRITING);
- if (r != 0) {
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_written_stream(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_WRITTEN);
-}
-
-int grpc_chttp2_list_pop_written_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_writing **stream_writing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_WRITING(transport_writing), &stream,
- GRPC_CHTTP2_LIST_WRITTEN);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_writing = &stream->writing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- GPR_ASSERT(stream_global->id != 0);
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
-}
-
-void grpc_chttp2_list_remove_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(
- TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
-}
-
-int grpc_chttp2_list_pop_unannounced_incoming_window_available(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
- grpc_chttp2_stream *stream;
- int r =
- stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_UNANNOUNCED_INCOMING_WINDOW_AVAILABLE);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_parsing_seen_stream(
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_parsing *stream_parsing) {
- stream_list_add(TRANSPORT_FROM_PARSING(transport_parsing),
- STREAM_FROM_PARSING(stream_parsing),
- GRPC_CHTTP2_LIST_PARSING_SEEN);
-}
-
-int grpc_chttp2_list_pop_parsing_seen_stream(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_parsing *transport_parsing,
- grpc_chttp2_stream_global **stream_global,
- grpc_chttp2_stream_parsing **stream_parsing) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_PARSING(transport_parsing), &stream,
- GRPC_CHTTP2_LIST_PARSING_SEEN);
- if (r != 0) {
- *stream_global = &stream->global;
- *stream_parsing = &stream->parsing;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
-}
-
-int grpc_chttp2_list_pop_waiting_for_concurrency(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_check_read_ops(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- if (!t->executor.check_read_ops_scheduled) {
- grpc_combiner_execute_finally(exec_ctx, t->executor.combiner,
- &t->initiate_read_flush_locked,
- GRPC_ERROR_NONE, false);
- t->executor.check_read_ops_scheduled = true;
- }
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
-}
-
-bool grpc_chttp2_list_remove_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- return stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ GPR_ASSERT(s->id != 0);
+ return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-int grpc_chttp2_list_pop_check_read_ops(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CHECK_READ_OPS);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
-}
-
-void grpc_chttp2_list_add_writing_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- grpc_chttp2_stream *stream = STREAM_FROM_WRITING(stream_writing);
- gpr_log(GPR_DEBUG, "writing stalled %d", stream->global.id);
- if (!stream->included[GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT]) {
- GRPC_CHTTP2_STREAM_REF(&stream->global, "chttp2_writing_stalled");
- }
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing), stream,
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT);
+int grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-bool grpc_chttp2_list_flush_writing_stalled_by_transport(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream *stream;
- bool out = false;
- grpc_chttp2_transport *transport = TRANSPORT_FROM_WRITING(transport_writing);
- while (stream_list_pop(transport, &stream,
- GRPC_CHTTP2_LIST_WRITING_STALLED_BY_TRANSPORT)) {
- gpr_log(GPR_DEBUG, "move %d from writing stalled to just stalled",
- stream->global.id);
- grpc_chttp2_list_add_stalled_by_transport(transport_writing,
- &stream->writing);
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, &stream->global,
- "chttp2_writing_stalled");
- out = true;
- }
- return out;
+bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-void grpc_chttp2_list_add_stalled_by_transport(
- grpc_chttp2_transport_writing *transport_writing,
- grpc_chttp2_stream_writing *stream_writing) {
- gpr_log(GPR_DEBUG, "stalled %d", stream_writing->id);
- stream_list_add(TRANSPORT_FROM_WRITING(transport_writing),
- STREAM_FROM_WRITING(stream_writing),
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-int grpc_chttp2_list_pop_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
+ return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
-void grpc_chttp2_list_remove_stalled_by_transport(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_maybe_remove(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
+int grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-void grpc_chttp2_list_add_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-int grpc_chttp2_list_pop_closed_waiting_for_parsing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_PARSING);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-void grpc_chttp2_list_add_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global *stream_global) {
- stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
- STREAM_FROM_GLOBAL(stream_global),
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-int grpc_chttp2_list_pop_closed_waiting_for_writing(
- grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_stream_global **stream_global) {
- grpc_chttp2_stream *stream;
- int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
- GRPC_CHTTP2_LIST_CLOSED_WAITING_FOR_WRITING);
- if (r != 0) {
- *stream_global = &stream->global;
- }
- return r;
+int grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream **s) {
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_register_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- stream_list_add_tail(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-int grpc_chttp2_unregister_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_ALL_STREAMS);
- return stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-int grpc_chttp2_has_streams(grpc_chttp2_transport *t) {
- return !stream_list_empty(t, GRPC_CHTTP2_LIST_ALL_STREAMS);
-}
-
-void grpc_chttp2_for_all_streams(
- grpc_chttp2_transport_global *transport_global, void *user_data,
- void (*cb)(grpc_chttp2_transport_global *transport_global, void *user_data,
- grpc_chttp2_stream_global *stream_global)) {
- grpc_chttp2_stream *s;
- grpc_chttp2_transport *t = TRANSPORT_FROM_GLOBAL(transport_global);
- for (s = t->lists[GRPC_CHTTP2_LIST_ALL_STREAMS].head; s != NULL;
- s = s->links[GRPC_CHTTP2_LIST_ALL_STREAMS].next) {
- cb(transport_global, user_data, &s->global);
- }
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
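
Once the global/writing/parsing facets are folded into grpc_chttp2_transport, the offsetof-based container-of macros deleted above have nothing left to recover, and every wrapper collapses to a single call into the shared list core keyed by a list id. A toy sketch of that shape, using stand-in types rather than the chttp2 structs:

#include <stdbool.h>

typedef enum {
  TOY_LIST_WRITABLE,
  TOY_LIST_WRITING,
  TOY_STREAM_LIST_COUNT
} toy_list_id;

typedef struct toy_stream {
  struct toy_stream *next[TOY_STREAM_LIST_COUNT];
  bool included[TOY_STREAM_LIST_COUNT];
} toy_stream;

typedef struct {
  toy_stream *head[TOY_STREAM_LIST_COUNT];
} toy_transport;

/* Shared core: one implementation for every list, selected by id. */
static bool toy_list_add(toy_transport *t, toy_stream *s, toy_list_id id) {
  if (s->included[id]) return false; /* already queued on this list */
  s->included[id] = true;
  s->next[id] = t->head[id];
  t->head[id] = s;
  return true;
}

/* Thin typed wrapper -- the shape grpc_chttp2_list_add_writable_stream
   now has, with no pointer arithmetic needed to find the owning structs. */
static bool toy_add_writable_stream(toy_transport *t, toy_stream *s) {
  return toy_list_add(t, s, TOY_LIST_WRITABLE);
}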
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.c
index f70791c422..5f5a28446d 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.c
@@ -77,6 +77,7 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
+ GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
if (count == capacity) {
if (map->free > capacity / 4) {
@@ -96,40 +97,6 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
-void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
- grpc_chttp2_stream_map *dst) {
- /* if src is empty we dont need to do anything */
- if (src->count == src->free) {
- return;
- }
- /* if dst is empty we simply need to swap */
- if (dst->count == dst->free) {
- GPR_SWAP(grpc_chttp2_stream_map, *src, *dst);
- return;
- }
- /* the first element of src must be greater than the last of dst...
- * however the maps may need compacting for this property to hold */
- if (src->keys[0] <= dst->keys[dst->count - 1]) {
- src->count = compact(src->keys, src->values, src->count);
- src->free = 0;
- dst->count = compact(dst->keys, dst->values, dst->count);
- dst->free = 0;
- }
- GPR_ASSERT(src->keys[0] > dst->keys[dst->count - 1]);
- /* if dst doesn't have capacity, resize */
- if (dst->count + src->count > dst->capacity) {
- dst->capacity = GPR_MAX(dst->capacity * 3 / 2, dst->count + src->count);
- dst->keys = gpr_realloc(dst->keys, dst->capacity * sizeof(uint32_t));
- dst->values = gpr_realloc(dst->values, dst->capacity * sizeof(void *));
- }
- memcpy(dst->keys + dst->count, src->keys, src->count * sizeof(uint32_t));
- memcpy(dst->values + dst->count, src->values, src->count * sizeof(void *));
- dst->count += src->count;
- dst->free += src->free;
- src->count = 0;
- src->free = 0;
-}
-
static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
@@ -170,6 +137,7 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
if (map->free == map->count) {
map->free = map->count = 0;
}
+ GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL);
}
return out;
}
@@ -183,6 +151,17 @@ size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
return map->count - map->free;
}
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
+ if (map->count == map->free) {
+ return NULL;
+ }
+ if (map->free != 0) {
+ map->count = compact(map->keys, map->values, map->count);
+ map->free = 0;
+ }
+ return map->values[((size_t)rand()) % map->count];
+}
+
void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
void (*f)(void *user_data, uint32_t key,
void *value),
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index b1d59ca6a3..203f640680 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -65,13 +65,12 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
or NULL otherwise */
void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
-/* Move all elements of src into dst */
-void grpc_chttp2_stream_map_move_into(grpc_chttp2_stream_map *src,
- grpc_chttp2_stream_map *dst);
-
/* Return an existing key, or NULL if it does not exist */
void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
+/* Return a random entry */
+void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map);
+
/* How many (populated) entries are in the stream map? */
size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 979515bd54..b39695a1a5 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -40,349 +40,221 @@
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
-static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_writing *transport_writing);
+static void add_to_write_list(grpc_chttp2_write_cb **list,
+ grpc_chttp2_write_cb *cb) {
+ cb->next = *list;
+ *list = cb;
+}
+
+static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
+ grpc_error *error) {
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
+ "finish_write_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+}
-int grpc_chttp2_unlocking_check_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream_global *stream_global;
- grpc_chttp2_stream_writing *stream_writing;
+static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int64_t send_bytes,
+ grpc_chttp2_write_cb **list, grpc_error *error) {
+ grpc_chttp2_write_cb *cb = *list;
+ *list = NULL;
+ s->flow_controlled_bytes_written += send_bytes;
+ while (cb) {
+ grpc_chttp2_write_cb *next = cb->next;
+ if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
+ finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
+ } else {
+ add_to_write_list(list, cb);
+ }
+ cb = next;
+ }
+ GRPC_ERROR_UNREF(error);
+}
- GPR_TIMER_BEGIN("grpc_chttp2_unlocking_check_writes", 0);
+bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_stream *s;
- transport_writing->max_frame_size =
- transport_global->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE];
+ GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
- if (transport_global->dirtied_local_settings &&
- !transport_global->sent_local_settings) {
+ if (t->dirtied_local_settings && !t->sent_local_settings) {
gpr_slice_buffer_add(
- &transport_writing->outbuf,
+ &t->outbuf,
grpc_chttp2_settings_create(
- transport_global->settings[GRPC_SENT_SETTINGS],
- transport_global->settings[GRPC_LOCAL_SETTINGS],
- transport_global->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
- transport_global->force_send_settings = 0;
- transport_global->dirtied_local_settings = 0;
- transport_global->sent_local_settings = 1;
+ t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
+ t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
+ t->force_send_settings = 0;
+ t->dirtied_local_settings = 0;
+ t->sent_local_settings = 1;
}
/* simple writes are queued to qbuf, and flushed here */
- gpr_slice_buffer_move_into(&transport_global->qbuf,
- &transport_writing->outbuf);
- GPR_ASSERT(transport_global->qbuf.count == 0);
+ gpr_slice_buffer_move_into(&t->qbuf, &t->outbuf);
+ GPR_ASSERT(t->qbuf.count == 0);
grpc_chttp2_hpack_compressor_set_max_table_size(
- &transport_writing->hpack_compressor,
- transport_global->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+ &t->hpack_compressor,
+ t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
- GRPC_CHTTP2_FLOW_MOVE_TRANSPORT("write", transport_writing, outgoing_window,
- transport_global, outgoing_window);
- if (transport_writing->outgoing_window > 0) {
- while (grpc_chttp2_list_pop_stalled_by_transport(transport_global,
- &stream_global)) {
- grpc_chttp2_become_writable(exec_ctx, transport_global, stream_global,
- false, "transport.read_flow_control");
+ if (t->outgoing_window > 0) {
+ while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
+ grpc_chttp2_become_writable(exec_ctx, t, s, false,
+ "transport.read_flow_control");
}
}
   /* for each grpc_chttp2_stream that's become writable, frame its data
      (according to available window sizes) and add to the output buffer */
- while (grpc_chttp2_list_pop_writable_stream(
- transport_global, transport_writing, &stream_global, &stream_writing)) {
- bool sent_initial_metadata = stream_writing->sent_initial_metadata;
- bool become_writable = false;
+ while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
+ bool sent_initial_metadata = s->sent_initial_metadata;
+ bool now_writing = false;
- stream_writing->id = stream_global->id;
- stream_writing->read_closed = stream_global->read_closed;
+ GRPC_CHTTP2_IF_TRACING(gpr_log(
+ GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
+ t->is_client ? "CLIENT" : "SERVER", s->id, sent_initial_metadata,
+ s->send_initial_metadata != NULL, s->announce_window));
- GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_writing, stream_writing,
- outgoing_window, stream_global,
- outgoing_window);
-
- if (!sent_initial_metadata && stream_global->send_initial_metadata) {
- stream_writing->send_initial_metadata =
- stream_global->send_initial_metadata;
- stream_global->send_initial_metadata = NULL;
- become_writable = true;
+ /* send initial metadata if it's available */
+ if (!sent_initial_metadata && s->send_initial_metadata) {
+ grpc_chttp2_encode_header(
+ &t->hpack_compressor, s->id, s->send_initial_metadata, 0,
+ t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ &s->stats.outgoing, &t->outbuf);
+ s->send_initial_metadata = NULL;
+ s->sent_initial_metadata = true;
sent_initial_metadata = true;
+ now_writing = true;
+ }
+ /* send any window updates */
+ if (s->announce_window > 0) {
+ uint32_t announce = s->announce_window;
+ gpr_slice_buffer_add(&t->outbuf,
+ grpc_chttp2_window_update_create(
+ s->id, s->announce_window, &s->stats.outgoing));
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, announce_window, announce);
}
if (sent_initial_metadata) {
- if (stream_global->send_message != NULL) {
- gpr_slice hdr = gpr_slice_malloc(5);
- uint8_t *p = GPR_SLICE_START_PTR(hdr);
- uint32_t len = stream_global->send_message->length;
- GPR_ASSERT(stream_writing->send_message == NULL);
- p[0] = (stream_global->send_message->flags &
- GRPC_WRITE_INTERNAL_COMPRESS) != 0;
- p[1] = (uint8_t)(len >> 24);
- p[2] = (uint8_t)(len >> 16);
- p[3] = (uint8_t)(len >> 8);
- p[4] = (uint8_t)(len);
- gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer, hdr);
- if (stream_global->send_message->length > 0) {
- stream_writing->send_message = stream_global->send_message;
- } else {
- stream_writing->send_message = NULL;
+ /* send any body bytes, if allowed by flow control */
+ if (s->flow_controlled_buffer.length > 0) {
+ uint32_t max_outgoing =
+ (uint32_t)GPR_MIN(t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ GPR_MIN(s->outgoing_window, t->outgoing_window));
+ if (max_outgoing > 0) {
+ uint32_t send_bytes =
+ (uint32_t)GPR_MIN(max_outgoing, s->flow_controlled_buffer.length);
+ bool is_last_data_frame =
+ s->fetching_send_message == NULL &&
+ send_bytes == s->flow_controlled_buffer.length;
+ bool is_last_frame =
+ is_last_data_frame && s->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(s->send_trailing_metadata);
+ grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, send_bytes,
+ is_last_frame, &s->stats.outgoing,
+ &t->outbuf);
+ GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", t, s, outgoing_window,
+ send_bytes);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, outgoing_window,
+ send_bytes);
+ if (is_last_frame) {
+ s->send_trailing_metadata = NULL;
+ s->sent_trailing_metadata = true;
+ if (!t->is_client && !s->read_closed) {
+ gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
+ s->id, GRPC_CHTTP2_NO_ERROR,
+ &s->stats.outgoing));
+ }
+ }
+ s->sending_bytes += send_bytes;
+ now_writing = true;
+ if (s->flow_controlled_buffer.length > 0) {
+ GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
+ grpc_chttp2_list_add_writable_stream(t, s);
+ }
+ } else if (t->outgoing_window == 0) {
+ grpc_chttp2_list_add_stalled_by_transport(t, s);
+ now_writing = true;
}
- stream_writing->stream_fetched = 0;
- stream_global->send_message = NULL;
}
- if ((stream_writing->send_message != NULL ||
- stream_writing->flow_controlled_buffer.length > 0) &&
- stream_writing->outgoing_window > 0) {
- if (transport_writing->outgoing_window > 0) {
- become_writable = true;
+ if (s->send_trailing_metadata != NULL &&
+ s->fetching_send_message == NULL &&
+ s->flow_controlled_buffer.length == 0) {
+ if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
+ grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
+ &s->stats.outgoing, &t->outbuf);
} else {
- grpc_chttp2_list_add_stalled_by_transport(transport_writing,
- stream_writing);
+ grpc_chttp2_encode_header(
+ &t->hpack_compressor, s->id, s->send_trailing_metadata, true,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ &s->stats.outgoing, &t->outbuf);
}
+ s->send_trailing_metadata = NULL;
+ s->sent_trailing_metadata = true;
+ if (!t->is_client && !s->read_closed) {
+ gpr_slice_buffer_add(
+ &t->outbuf, grpc_chttp2_rst_stream_create(
+ s->id, GRPC_CHTTP2_NO_ERROR, &s->stats.outgoing));
+ }
+ now_writing = true;
}
- if (stream_global->send_trailing_metadata) {
- stream_writing->send_trailing_metadata =
- stream_global->send_trailing_metadata;
- stream_global->send_trailing_metadata = NULL;
- become_writable = true;
- }
- }
-
- if (!stream_global->read_closed &&
- stream_global->unannounced_incoming_window_for_writing > 1024) {
- GRPC_CHTTP2_FLOW_MOVE_STREAM("write", transport_global, stream_writing,
- announce_window, stream_global,
- unannounced_incoming_window_for_writing);
- become_writable = true;
}
- if (become_writable) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
+ if (now_writing) {
+ if (!grpc_chttp2_list_add_writing_stream(t, s)) {
+ /* already in writing list: drop ref */
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
+ }
} else {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
}
}
/* if the grpc_chttp2_transport is ready to send a window update, do so here
also; 3/4 is a magic number that will likely get tuned soon */
- if (transport_global->announce_incoming_window > 0) {
- uint32_t announced = (uint32_t)GPR_MIN(
- transport_global->announce_incoming_window, UINT32_MAX);
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_global,
- announce_incoming_window, announced);
+ if (t->announce_incoming_window > 0) {
+ uint32_t announced =
+ (uint32_t)GPR_MIN(t->announce_incoming_window, UINT32_MAX);
+ GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", t, announce_incoming_window,
+ announced);
grpc_transport_one_way_stats throwaway_stats;
- gpr_slice_buffer_add(
- &transport_writing->outbuf,
- grpc_chttp2_window_update_create(0, announced, &throwaway_stats));
- }
-
- GPR_TIMER_END("grpc_chttp2_unlocking_check_writes", 0);
-
- return transport_writing->outbuf.count > 0 ||
- grpc_chttp2_list_have_writing_streams(transport_writing);
-}
-
-void grpc_chttp2_perform_writes(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_writing *transport_writing,
- grpc_endpoint *endpoint) {
- GPR_ASSERT(transport_writing->outbuf.count > 0 ||
- grpc_chttp2_list_have_writing_streams(transport_writing));
-
- finalize_outbuf(exec_ctx, transport_writing);
-
- GPR_ASSERT(endpoint);
-
- if (transport_writing->outbuf.count > 0) {
- grpc_endpoint_write(exec_ctx, endpoint, &transport_writing->outbuf,
- &transport_writing->done_cb);
- } else {
- grpc_exec_ctx_sched(exec_ctx, &transport_writing->done_cb, GRPC_ERROR_NONE,
- NULL);
+ gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_window_update_create(
+ 0, announced, &throwaway_stats));
}
-}
-
-static void finalize_outbuf(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport_writing *transport_writing) {
- grpc_chttp2_stream_writing *stream_writing;
- GPR_TIMER_BEGIN("finalize_outbuf", 0);
+ GPR_TIMER_END("grpc_chttp2_begin_write", 0);
- bool is_first_data_frame = true;
- while (
- grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
- uint32_t max_outgoing =
- (uint32_t)GPR_MIN(transport_writing->max_frame_size,
- GPR_MIN(stream_writing->outgoing_window,
- transport_writing->outgoing_window));
- /* send initial metadata if it's available */
- if (stream_writing->send_initial_metadata != NULL) {
- grpc_chttp2_encode_header(
- &transport_writing->hpack_compressor, stream_writing->id,
- stream_writing->send_initial_metadata, 0,
- transport_writing->max_frame_size, &stream_writing->stats,
- &transport_writing->outbuf);
- stream_writing->send_initial_metadata = NULL;
- stream_writing->sent_initial_metadata = 1;
- }
- /* send any window updates */
- if (stream_writing->announce_window > 0 &&
- stream_writing->send_initial_metadata == NULL) {
- uint32_t announce = stream_writing->announce_window;
- gpr_slice_buffer_add(
- &transport_writing->outbuf,
- grpc_chttp2_window_update_create(stream_writing->id,
- stream_writing->announce_window,
- &stream_writing->stats));
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing, stream_writing,
- announce_window, announce);
- stream_writing->announce_window = 0;
- }
- /* fetch any body bytes */
- while (!stream_writing->fetching && stream_writing->send_message &&
- stream_writing->flow_controlled_buffer.length < max_outgoing &&
- stream_writing->stream_fetched <
- stream_writing->send_message->length) {
- if (grpc_byte_stream_next(exec_ctx, stream_writing->send_message,
- &stream_writing->fetching_slice, max_outgoing,
- &stream_writing->finished_fetch)) {
- stream_writing->stream_fetched +=
- GPR_SLICE_LENGTH(stream_writing->fetching_slice);
- if (stream_writing->stream_fetched ==
- stream_writing->send_message->length) {
- stream_writing->send_message = NULL;
- }
- gpr_slice_buffer_add(&stream_writing->flow_controlled_buffer,
- stream_writing->fetching_slice);
- } else {
- stream_writing->fetching = 1;
- }
- }
- /* send any body bytes */
- if (stream_writing->flow_controlled_buffer.length > 0) {
- if (max_outgoing > 0) {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, stream_writing->flow_controlled_buffer.length);
- int is_last_data_frame =
- stream_writing->send_message == NULL &&
- send_bytes == stream_writing->flow_controlled_buffer.length;
- int is_last_frame = is_last_data_frame &&
- stream_writing->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(
- stream_writing->send_trailing_metadata);
- grpc_chttp2_encode_data(
- stream_writing->id, &stream_writing->flow_controlled_buffer,
- send_bytes, is_last_frame, &stream_writing->stats,
- &transport_writing->outbuf);
- if (is_first_data_frame) {
- /* TODO(dgq): this is a hack. It'll be fix in a future refactoring */
- stream_writing->stats.data_bytes -= 5; /* discount grpc framing */
- is_first_data_frame = false;
- }
- GRPC_CHTTP2_FLOW_DEBIT_STREAM("write", transport_writing,
- stream_writing, outgoing_window,
- send_bytes);
- GRPC_CHTTP2_FLOW_DEBIT_TRANSPORT("write", transport_writing,
- outgoing_window, send_bytes);
- if (is_last_frame) {
- stream_writing->send_trailing_metadata = NULL;
- stream_writing->sent_trailing_metadata = 1;
- }
- if (is_last_data_frame) {
- GPR_ASSERT(stream_writing->send_message == NULL);
- stream_writing->sent_message = 1;
- }
- } else if (transport_writing->outgoing_window == 0) {
- grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
- stream_writing);
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- }
- /* send trailing metadata if it's available and we're ready for it */
- if (stream_writing->send_message == NULL &&
- stream_writing->flow_controlled_buffer.length == 0 &&
- stream_writing->send_trailing_metadata != NULL) {
- if (grpc_metadata_batch_is_empty(
- stream_writing->send_trailing_metadata)) {
- grpc_chttp2_encode_data(
- stream_writing->id, &stream_writing->flow_controlled_buffer, 0, 1,
- &stream_writing->stats, &transport_writing->outbuf);
- } else {
- grpc_chttp2_encode_header(
- &transport_writing->hpack_compressor, stream_writing->id,
- stream_writing->send_trailing_metadata, 1,
- transport_writing->max_frame_size, &stream_writing->stats,
- &transport_writing->outbuf);
- }
- if (!transport_writing->is_client && !stream_writing->read_closed) {
- gpr_slice_buffer_add(&transport_writing->outbuf,
- grpc_chttp2_rst_stream_create(
- stream_writing->id, GRPC_CHTTP2_NO_ERROR,
- &stream_writing->stats));
- }
- stream_writing->send_trailing_metadata = NULL;
- stream_writing->sent_trailing_metadata = 1;
- }
- /* if there's more to write, then loop, otherwise prepare to finish the
- * write */
- if ((stream_writing->flow_controlled_buffer.length > 0 ||
- (stream_writing->send_message && !stream_writing->fetching)) &&
- stream_writing->outgoing_window > 0) {
- if (transport_writing->outgoing_window > 0) {
- grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
- } else {
- grpc_chttp2_list_add_writing_stalled_by_transport(transport_writing,
- stream_writing);
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- } else {
- grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
- }
- }
-
- GPR_TIMER_END("finalize_outbuf", 0);
+ return t->outbuf.count > 0;
}
-void grpc_chttp2_cleanup_writing(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
- grpc_chttp2_transport_writing *transport_writing) {
- GPR_TIMER_BEGIN("grpc_chttp2_cleanup_writing", 0);
- grpc_chttp2_stream_writing *stream_writing;
- grpc_chttp2_stream_global *stream_global;
-
- if (grpc_chttp2_list_flush_writing_stalled_by_transport(exec_ctx,
- transport_writing)) {
- grpc_chttp2_initiate_write(exec_ctx, transport_global, false,
- "resume_stalled_stream");
- }
+void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
+ grpc_chttp2_stream *s;
- while (grpc_chttp2_list_pop_written_stream(
- transport_global, transport_writing, &stream_global, &stream_writing)) {
- if (stream_writing->sent_initial_metadata) {
+ while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
+ if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_initial_metadata_finished, GRPC_ERROR_NONE);
+ exec_ctx, t, s, &s->send_initial_metadata_finished,
+ GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
- grpc_transport_move_one_way_stats(&stream_writing->stats,
- &stream_global->stats.outgoing);
- if (stream_writing->sent_message) {
- GPR_ASSERT(stream_writing->send_message == NULL);
- grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_message_finished, GRPC_ERROR_NONE);
- stream_writing->sent_message = 0;
+ if (s->sending_bytes != 0) {
+ update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
+ &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
+ s->sending_bytes = 0;
}
- if (stream_writing->sent_trailing_metadata) {
+ if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
- exec_ctx, transport_global, stream_global,
- &stream_global->send_trailing_metadata_finished, GRPC_ERROR_NONE);
- }
- if (stream_writing->sent_trailing_metadata) {
- grpc_chttp2_mark_stream_closed(exec_ctx, transport_global, stream_global,
- !transport_global->is_client, 1,
- GRPC_ERROR_NONE);
+ exec_ctx, t, s, &s->send_trailing_metadata_finished,
+ GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
+ grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
+ GRPC_ERROR_REF(error));
}
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream_global, "chttp2_writing");
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
}
- gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
- GPR_TIMER_END("grpc_chttp2_cleanup_writing", 0);
+ gpr_slice_buffer_reset_and_unref(&t->outbuf);
+ GRPC_ERROR_UNREF(error);
+ GPR_TIMER_END("grpc_chttp2_end_write", 0);
}
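
The new update_list()/finish_write_cb() pair keeps, per stream, a list of callbacks each tagged with the flow-controlled byte offset at which it may complete; after every write the stream's byte counter advances and any callback whose threshold has been passed fires, while the rest are requeued. A standalone sketch of that bookkeeping with toy types (the real code completes closures via grpc_chttp2_complete_closure_step and recycles nodes through t->write_cb_pool):

#include <stddef.h>
#include <stdint.h>

typedef struct toy_write_cb {
  int64_t call_at_byte; /* fire once this many bytes have been written */
  void (*fn)(void *arg);
  void *arg;
  struct toy_write_cb *next;
} toy_write_cb;

typedef struct {
  int64_t flow_controlled_bytes_written;
  toy_write_cb *on_write_finished_cbs;
} toy_stream;

static void toy_update_list(toy_stream *s, int64_t sent_bytes) {
  toy_write_cb *cb = s->on_write_finished_cbs;
  s->on_write_finished_cbs = NULL;
  s->flow_controlled_bytes_written += sent_bytes;
  while (cb != NULL) {
    toy_write_cb *next = cb->next;
    if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
      cb->fn(cb->arg); /* enough bytes have hit the wire: complete it */
    } else {
      cb->next = s->on_write_finished_cbs; /* requeue for a later write */
      s->on_write_finished_cbs = cb;
    }
    cb = next;
  }
}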
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.c
index 3a56b1ff20..cfc072c0b5 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.c
@@ -66,22 +66,59 @@ static grpc_arg copy_arg(const grpc_arg *src) {
grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
const grpc_arg *to_add,
size_t num_to_add) {
+ return grpc_channel_args_copy_and_add_and_remove(src, NULL, 0, to_add,
+ num_to_add);
+}
+
+grpc_channel_args *grpc_channel_args_copy_and_remove(
+ const grpc_channel_args *src, const char **to_remove,
+ size_t num_to_remove) {
+ return grpc_channel_args_copy_and_add_and_remove(src, to_remove,
+ num_to_remove, NULL, 0);
+}
+
+static bool should_remove_arg(const grpc_arg *arg, const char **to_remove,
+ size_t num_to_remove) {
+ for (size_t i = 0; i < num_to_remove; ++i) {
+ if (strcmp(arg->key, to_remove[i]) == 0) return true;
+ }
+ return false;
+}
+
+grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
+ const grpc_arg *to_add, size_t num_to_add) {
+ // Figure out how many args we'll be copying.
+ size_t num_args_to_copy = 0;
+ if (src != NULL) {
+ for (size_t i = 0; i < src->num_args; ++i) {
+ if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) {
+ ++num_args_to_copy;
+ }
+ }
+ }
+ // Create result.
grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
- size_t i;
- size_t src_num_args = (src == NULL) ? 0 : src->num_args;
- if (!src && !to_add) {
- dst->num_args = 0;
+ dst->num_args = num_args_to_copy + num_to_add;
+ if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->num_args = src_num_args + num_to_add;
dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
- for (i = 0; i < src_num_args; i++) {
- dst->args[i] = copy_arg(&src->args[i]);
+ // Copy args from src that are not being removed.
+ size_t dst_idx = 0;
+ if (src != NULL) {
+ for (size_t i = 0; i < src->num_args; ++i) {
+ if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) {
+ dst->args[dst_idx++] = copy_arg(&src->args[i]);
+ }
+ }
}
- for (i = 0; i < num_to_add; i++) {
- dst->args[i + src_num_args] = copy_arg(&to_add[i]);
+ // Add args from to_add.
+ for (size_t i = 0; i < num_to_add; ++i) {
+ dst->args[dst_idx++] = copy_arg(&to_add[i]);
}
+ GPR_ASSERT(dst_idx == dst->num_args);
return dst;
}
@@ -272,6 +309,18 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
return 0;
}
+const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
+ const char *name) {
+ if (args != NULL) {
+ for (size_t i = 0; i < args->num_args; ++i) {
+ if (strcmp(args->args[i].key, name) == 0) {
+ return &args->args[i];
+ }
+ }
+ }
+ return NULL;
+}
+
int grpc_channel_arg_get_integer(grpc_arg *arg, grpc_integer_options options) {
if (arg->type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key);
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 586a296d1f..1e05303471 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -51,6 +51,17 @@ grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
const grpc_arg *to_add,
size_t num_to_add);
+/** Copies the arguments in \a src except for those whose keys are in
+ \a to_remove. */
+grpc_channel_args *grpc_channel_args_copy_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove);
+
+/** Copies the arguments from \a src except for those whose keys are in
+ \a to_remove and appends the arguments in \a to_add. */
+grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
+ const grpc_arg *to_add, size_t num_to_add);
+
/** Concatenate args from \a a and \a b into a new instance */
grpc_channel_args *grpc_channel_args_merge(const grpc_channel_args *a,
const grpc_channel_args *b);
@@ -89,6 +100,10 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
int grpc_channel_args_compare(const grpc_channel_args *a,
const grpc_channel_args *b);
+/** Returns the value of argument \a name from \a args, or NULL if not found. */
+const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
+ const char *name);
+
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
int min_value;
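
The two channel_args helpers introduced above compose naturally: remove any stale entries for a key while appending the replacement, then look the value back up later with grpc_channel_args_find. A usage sketch, assuming the header path shown in the diff; the key string is illustrative and the returned args would still need grpc_channel_args_destroy when no longer needed:

#include "src/core/lib/channel/channel_args.h"

static grpc_channel_args *toy_override_max_frame_size(
    const grpc_channel_args *src) {
  static const char *to_remove[] = {"grpc.http2.max_frame_size"};
  grpc_arg new_arg;
  new_arg.type = GRPC_ARG_INTEGER;
  new_arg.key = (char *)"grpc.http2.max_frame_size";
  new_arg.value.integer = 16384;
  /* One pass: drop any existing entry for the key, append the new one. */
  return grpc_channel_args_copy_and_add_and_remove(src, to_remove, 1, &new_arg,
                                                   1);
}

static int toy_read_max_frame_size(const grpc_channel_args *args) {
  const grpc_arg *arg =
      grpc_channel_args_find(args, "grpc.http2.max_frame_size");
  return (arg != NULL && arg->type == GRPC_ARG_INTEGER) ? arg->value.integer
                                                        : 16384 /* default */;
}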
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 57d34d9e9a..2c5367901d 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -162,7 +162,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- gpr_timespec deadline, grpc_call_stack *call_stack) {
+ grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
grpc_call_element_args args;
size_t count = channel_stack->count;
@@ -179,10 +179,12 @@ grpc_error *grpc_call_stack_init(
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
+ args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
for (i = 0; i < count; i++) {
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
+ args.path = path;
args.deadline = deadline;
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 1cfe2885d8..27f3be7b29 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -74,6 +74,8 @@ typedef struct {
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
+ grpc_mdstr *path;
+ gpr_timespec start_time;
gpr_timespec deadline;
} grpc_call_element_args;
@@ -225,7 +227,7 @@ grpc_error *grpc_call_stack_init(
grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
grpc_call_context_element *context, const void *transport_server_data,
- gpr_timespec deadline, grpc_call_stack *call_stack);
+ grpc_mdstr *path, gpr_timespec deadline, grpc_call_stack *call_stack);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
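
grpc_call_element_args now carries the call's path and a start_time stamped in grpc_call_stack_init, so filters can record both without extra plumbing. A sketch of a filter's init_call_elem() picking them up, with a hypothetical my_call_data struct and assuming the channel_stack.h from this tree:

#include "src/core/lib/channel/channel_stack.h"

typedef struct {
  grpc_mdstr *path;        /* call's method path; not owned by this filter */
  gpr_timespec start_time; /* stamped once in grpc_call_stack_init */
} my_call_data;

static grpc_error *my_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  my_call_data *calld = elem->call_data;
  calld->path = args->path;
  calld->start_time = args->start_time;
  (void)exec_ctx;
  return GRPC_ERROR_NONE;
}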
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 079b98a2f8..d2ea5250f6 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -64,30 +64,49 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
}
// Starts the deadline timer.
-static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
- gpr_timespec deadline) {
+static void start_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
+ // Note: We do not start the timer if there is already a timer
+ // pending. This should be okay, because this is only called from two
+ // functions exported by this module: grpc_deadline_state_start(), which
+ // starts the initial timer, and grpc_deadline_state_reset(), which
+ // cancels any pre-existing timer before starting a new one. In
+ // particular, we want to ensure that if grpc_deadline_state_start()
+ // winds up trying to start the timer after grpc_deadline_state_reset()
+ // has already done so, we ignore the value from the former.
+ if (!deadline_state->timer_pending &&
+ gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = true;
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
elem, gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_mu_unlock(&deadline_state->timer_mu);
}
}
+static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ gpr_timespec deadline) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ gpr_mu_lock(&deadline_state->timer_mu);
+ start_timer_if_needed_locked(exec_ctx, elem, deadline);
+ gpr_mu_unlock(&deadline_state->timer_mu);
+}
// Cancels the deadline timer.
-static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
- grpc_deadline_state* deadline_state) {
- gpr_mu_lock(&deadline_state->timer_mu);
+static void cancel_timer_if_needed_locked(grpc_exec_ctx* exec_ctx,
+ grpc_deadline_state* deadline_state) {
if (deadline_state->timer_pending) {
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
+}
+static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
+ grpc_deadline_state* deadline_state) {
+ gpr_mu_lock(&deadline_state->timer_mu);
+ cancel_timer_if_needed_locked(exec_ctx, deadline_state);
gpr_mu_unlock(&deadline_state->timer_mu);
}
@@ -108,6 +127,21 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
op->on_complete = &deadline_state->on_complete;
}
+void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_call_stack* call_stack) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ memset(deadline_state, 0, sizeof(*deadline_state));
+ deadline_state->call_stack = call_stack;
+ gpr_mu_init(&deadline_state->timer_mu);
+}
+
+void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ grpc_deadline_state* deadline_state = elem->call_data;
+ cancel_timer_if_needed(exec_ctx, deadline_state);
+ gpr_mu_destroy(&deadline_state->timer_mu);
+}
+
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
@@ -122,16 +156,11 @@ static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_free(state);
}
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_call_element_args* args) {
- grpc_deadline_state* deadline_state = elem->call_data;
- memset(deadline_state, 0, sizeof(*deadline_state));
- deadline_state->call_stack = args->call_stack;
- gpr_mu_init(&deadline_state->timer_mu);
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec deadline) {
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
- const gpr_timespec deadline =
- gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+ deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
@@ -148,11 +177,13 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
}
}
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem) {
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec new_deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
- cancel_timer_if_needed(exec_ctx, deadline_state);
- gpr_mu_destroy(&deadline_state->timer_mu);
+ gpr_mu_lock(&deadline_state->timer_mu);
+ cancel_timer_if_needed_locked(exec_ctx, deadline_state);
+ start_timer_if_needed_locked(exec_ctx, elem, new_deadline);
+ gpr_mu_unlock(&deadline_state->timer_mu);
}
void grpc_deadline_state_client_start_transport_stream_op(
@@ -209,7 +240,8 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element_args* args) {
// Note: size of call data is different between client and server.
memset(elem->call_data, 0, elem->filter->sizeof_call_data);
- grpc_deadline_state_init(exec_ctx, elem, args);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+ grpc_deadline_state_start(exec_ctx, elem, args->deadline);
return GRPC_ERROR_NONE;
}
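
The deadline filter change above follows a common split: *_locked helpers assume timer_mu is held, thin public wrappers take the lock, and compound operations such as grpc_deadline_state_reset() hold it once across cancel-then-start so no caller can observe the intermediate state. A toy sketch of that pattern using the same gpr_mu primitives:

#include <stdbool.h>

#include <grpc/support/sync.h>

typedef struct {
  gpr_mu mu;
  bool timer_pending;
} toy_timer_state;

/* Callers must hold st->mu. */
static void toy_cancel_locked(toy_timer_state *st) {
  if (st->timer_pending) st->timer_pending = false; /* real code cancels the timer */
}

/* Callers must hold st->mu. */
static void toy_start_locked(toy_timer_state *st) {
  if (!st->timer_pending) st->timer_pending = true; /* real code arms the timer */
}

/* Reset: one lock acquisition covers both steps, so no other thread can
   slip in between the cancel and the restart. */
static void toy_reset(toy_timer_state *st) {
  gpr_mu_lock(&st->mu);
  toy_cancel_locked(st);
  toy_start_locked(st);
  gpr_mu_unlock(&st->mu);
}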
diff --git a/src/core/lib/channel/deadline_filter.h b/src/core/lib/channel/deadline_filter.h
index 685df87761..716a852565 100644
--- a/src/core/lib/channel/deadline_filter.h
+++ b/src/core/lib/channel/deadline_filter.h
@@ -54,18 +54,37 @@ typedef struct grpc_deadline_state {
grpc_closure* next_on_complete;
} grpc_deadline_state;
-// To be used in a filter's init_call_elem(), destroy_call_elem(), and
-// start_transport_stream_op() methods to enforce call deadlines.
//
-// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
+// NOTE: All of these functions require that the first field in
+// elem->call_data is a grpc_deadline_state.
//
-// For grpc_deadline_state_client_start_transport_stream_op(), it is the
-// caller's responsibility to chain to the next filter if necessary
-// after the function returns.
+
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_call_element_args* args);
+ grpc_call_stack* call_stack);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
+
+// Starts the timer with the specified deadline.
+// Should be called from the filter's init_call_elem() method.
+void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec deadline);
+
+// Cancels the existing timer and starts a new one with new_deadline.
+//
+// Note: It is generally safe to call this with an earlier deadline
+// value than the current one, but not the reverse. No checks are done
+// to ensure that the timer callback is not invoked while it is in the
+// process of being reset, which means that attempting to increase the
+// deadline may result in the timer being called twice.
+void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ gpr_timespec new_deadline);
+
+// To be called from the client-side filter's start_transport_stream_op()
+// method. Ensures that the deadline timer is cancelled when the call
+// is completed.
+//
+// Note: It is the caller's responsibility to chain to the next filter if
+// necessary after this function returns.
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op);
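
A filter adopting the API declared above embeds grpc_deadline_state as the first field of its call data, then initializes and starts it from init_call_elem(), mirroring the init_call_elem change in deadline_filter.c. A sketch with a hypothetical my_call_data:

#include <string.h>

#include "src/core/lib/channel/deadline_filter.h"

typedef struct {
  grpc_deadline_state deadline_state; /* must be the first field */
  /* filter-specific members would follow */
} my_call_data;

static grpc_error *my_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  my_call_data *calld = elem->call_data;
  memset(calld, 0, sizeof(*calld));
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  grpc_deadline_state_start(exec_ctx, elem, args->deadline);
  return GRPC_ERROR_NONE;
}

/* The matching destroy_call_elem would call
   grpc_deadline_state_destroy(exec_ctx, elem), and a client-side
   start_transport_stream_op would route ops through
   grpc_deadline_state_client_start_transport_stream_op before chaining to
   the next filter. */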
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 8f9fb17a31..0d759887bc 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -183,7 +183,7 @@ void grpc_handshake_manager_do_handshake(
gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
grpc_handshaker_done_cb cb, void* user_data) {
grpc_channel_args* args_copy = grpc_channel_args_copy(args);
- gpr_slice_buffer* read_buffer = malloc(sizeof(*read_buffer));
+ gpr_slice_buffer* read_buffer = gpr_malloc(sizeof(*read_buffer));
gpr_slice_buffer_init(read_buffer);
if (mgr->count == 0) {
// No handshakers registered, so we just immediately call the done
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index 0f2bf97824..f2221fb0fb 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -42,6 +42,8 @@
#define EXPECTED_CONTENT_TYPE "application/grpc"
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
+extern int grpc_http_trace;
+
typedef struct call_data {
uint8_t seen_path;
uint8_t seen_method;
@@ -209,6 +211,11 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
err, GRPC_ERROR_CREATE("Missing te: trailers header"));
}
/* Error this call out */
+ if (grpc_http_trace) {
+ const char *error_str = grpc_error_string(err);
+ gpr_log(GPR_ERROR, "Invalid http2 headers: %s", error_str);
+ grpc_error_free_string(error_str);
+ }
grpc_call_element_send_cancel(exec_ctx, elem);
}
} else {
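
The http_server_filter hunk above only renders the error when grpc_http_trace is set, and frees the rendered string afterwards, the same idiom parse_frame_slice uses earlier in this change. A standalone sketch of that trace-guarded logging, with a hypothetical my_trace flag and assuming the error.h/log.h headers from this tree:

#include <grpc/support/log.h>

#include "src/core/lib/iomgr/error.h"

static int my_trace = 0; /* toggled via a debug/tracing switch */

static void toy_log_if_tracing(grpc_error *err) {
  if (my_trace) {
    const char *msg = grpc_error_string(err);
    gpr_log(GPR_ERROR, "Invalid http2 headers: %s", msg);
    grpc_error_free_string(msg); /* rendered strings must be released */
  }
}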
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index f067a3a51c..7dc5ae0df1 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -39,12 +39,53 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/transport/method_config.h"
#define DEFAULT_MAX_SEND_MESSAGE_LENGTH -1 // Unlimited.
// The protobuf library will (by default) start warning at 100 megs.
#define DEFAULT_MAX_RECV_MESSAGE_LENGTH (4 * 1024 * 1024)
+typedef struct message_size_limits {
+ int max_send_size;
+ int max_recv_size;
+} message_size_limits;
+
+static void* message_size_limits_copy(void* value) {
+ void* new_value = gpr_malloc(sizeof(message_size_limits));
+ memcpy(new_value, value, sizeof(message_size_limits));
+ return new_value;
+}
+
+static int message_size_limits_cmp(void* value1, void* value2) {
+ const message_size_limits* v1 = value1;
+ const message_size_limits* v2 = value2;
+ if (v1->max_send_size > v2->max_send_size) return 1;
+ if (v1->max_send_size < v2->max_send_size) return -1;
+ if (v1->max_recv_size > v2->max_recv_size) return 1;
+ if (v1->max_recv_size < v2->max_recv_size) return -1;
+ return 0;
+}
+
+static const grpc_mdstr_hash_table_vtable message_size_limits_vtable = {
+ gpr_free, message_size_limits_copy, message_size_limits_cmp};
+
+static void* method_config_convert_value(
+ const grpc_method_config* method_config) {
+ message_size_limits* value = gpr_malloc(sizeof(message_size_limits));
+ const int32_t* max_request_message_bytes =
+ grpc_method_config_get_max_request_message_bytes(method_config);
+ value->max_send_size =
+ max_request_message_bytes != NULL ? *max_request_message_bytes : -1;
+ const int32_t* max_response_message_bytes =
+ grpc_method_config_get_max_response_message_bytes(method_config);
+ value->max_recv_size =
+ max_response_message_bytes != NULL ? *max_response_message_bytes : -1;
+ return value;
+}
+
typedef struct call_data {
+ int max_send_size;
+ int max_recv_size;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
@@ -58,6 +99,8 @@ typedef struct call_data {
typedef struct channel_data {
int max_send_size;
int max_recv_size;
+ // Maps path names to message_size_limits structs.
+ grpc_mdstr_hash_table* method_limit_table;
} channel_data;
// Callback invoked when we receive a message. Here we check the max
@@ -66,13 +109,12 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
grpc_error* error) {
grpc_call_element* elem = user_data;
call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
- if (*calld->recv_message != NULL && chand->max_recv_size >= 0 &&
- (*calld->recv_message)->length > (size_t)chand->max_recv_size) {
+ if (*calld->recv_message != NULL && calld->max_recv_size >= 0 &&
+ (*calld->recv_message)->length > (size_t)calld->max_recv_size) {
char* message_string;
gpr_asprintf(&message_string,
"Received message larger than max (%u vs. %d)",
- (*calld->recv_message)->length, chand->max_recv_size);
+ (*calld->recv_message)->length, calld->max_recv_size);
grpc_error* new_error = grpc_error_set_int(
GRPC_ERROR_CREATE(message_string), GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_INVALID_ARGUMENT);
@@ -93,13 +135,12 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
call_data* calld = elem->call_data;
- channel_data* chand = elem->channel_data;
// Check max send message size.
- if (op->send_message != NULL && chand->max_send_size >= 0 &&
- op->send_message->length > (size_t)chand->max_send_size) {
+ if (op->send_message != NULL && calld->max_send_size >= 0 &&
+ op->send_message->length > (size_t)calld->max_send_size) {
char* message_string;
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
- op->send_message->length, chand->max_send_size);
+ op->send_message->length, calld->max_send_size);
gpr_slice message = gpr_slice_from_copied_string(message_string);
gpr_free(message_string);
grpc_call_element_send_close_with_message(
@@ -119,9 +160,32 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
+ channel_data* chand = elem->channel_data;
call_data* calld = elem->call_data;
calld->next_recv_message_ready = NULL;
grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
+ // Get max sizes from channel data, then merge in per-method config values.
+ // Note: Per-method config is only available on the client, so we
+ // apply the max request size to the send limit and the max response
+ // size to the receive limit.
+ calld->max_send_size = chand->max_send_size;
+ calld->max_recv_size = chand->max_recv_size;
+ if (chand->method_limit_table != NULL) {
+ message_size_limits* limits =
+ grpc_method_config_table_get(chand->method_limit_table, args->path);
+ if (limits != NULL) {
+ if (limits->max_send_size >= 0 &&
+ (limits->max_send_size < calld->max_send_size ||
+ calld->max_send_size < 0)) {
+ calld->max_send_size = limits->max_send_size;
+ }
+ if (limits->max_recv_size >= 0 &&
+ (limits->max_recv_size < calld->max_recv_size ||
+ calld->max_recv_size < 0)) {
+ calld->max_recv_size = limits->max_recv_size;
+ }
+ }
+ }
return GRPC_ERROR_NONE;
}
@@ -155,11 +219,23 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx,
grpc_channel_arg_get_integer(&args->channel_args->args[i], options);
}
}
+ // Get method config table from channel args.
+ const grpc_arg* channel_arg =
+ grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
+ if (channel_arg != NULL) {
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
+ chand->method_limit_table = grpc_method_config_table_convert(
+ (grpc_method_config_table*)channel_arg->value.pointer.p,
+ method_config_convert_value, &message_size_limits_vtable);
+ }
}
// Destructor for channel_data.
static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem) {}
+ grpc_channel_element* elem) {
+ channel_data* chand = elem->channel_data;
+ grpc_mdstr_hash_table_unref(chand->method_limit_table);
+}
const grpc_channel_filter grpc_message_size_filter = {
start_transport_stream_op,
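
The per-call limits computed in init_call_elem() above reduce to a simple merge rule: a per-method limit only applies when it tightens the channel-level default, with -1 meaning "unlimited" on either side. A standalone sketch of that rule (names here are illustrative, not gRPC API):

#include <stdio.h>

static int merge_limit(int channel_limit, int method_limit) {
  /* Method-level limit wins only if set and stricter (or channel unlimited). */
  if (method_limit >= 0 &&
      (method_limit < channel_limit || channel_limit < 0)) {
    return method_limit;
  }
  return channel_limit;
}

int main(void) {
  printf("%d\n", merge_limit(-1, 1024));   /* 1024: method limit applies    */
  printf("%d\n", merge_limit(4096, -1));   /* 4096: channel default stands  */
  printf("%d\n", merge_limit(4096, 1024)); /* 1024: tighter limit wins      */
  return 0;
}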
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 7f3c2d120d..411e669b53 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -32,7 +32,6 @@
*/
#include "src/core/lib/http/httpcli.h"
-#include "src/core/lib/iomgr/sockaddr.h"
#include <string.h>
@@ -71,6 +70,7 @@ typedef struct {
grpc_closure done_write;
grpc_closure connected;
grpc_error *overall_error;
+ grpc_resource_quota *resource_quota;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
@@ -118,6 +118,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
gpr_slice_buffer_destroy(&req->incoming);
gpr_slice_buffer_destroy(&req->outgoing);
GRPC_ERROR_UNREF(req->overall_error);
+ grpc_resource_quota_internal_unref(exec_ctx, req->resource_quota);
gpr_free(req);
}
@@ -126,7 +127,7 @@ static void append_error(internal_request *req, grpc_error *error) {
req->overall_error = GRPC_ERROR_CREATE("Failed HTTP/1 client request");
}
grpc_resolved_address *addr = &req->addresses->addrs[req->next_address - 1];
- char *addr_text = grpc_sockaddr_to_uri((struct sockaddr *)addr->addr);
+ char *addr_text = grpc_sockaddr_to_uri(addr);
req->overall_error = grpc_error_add_child(
req->overall_error,
grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, addr_text));
@@ -224,9 +225,15 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
addr = &req->addresses->addrs[req->next_address++];
grpc_closure_init(&req->connected, on_connected, req);
- grpc_tcp_client_connect(
- exec_ctx, &req->connected, &req->ep, req->context->pollset_set,
- (struct sockaddr *)&addr->addr, addr->len, req->deadline);
+ grpc_arg arg;
+ arg.key = GRPC_ARG_RESOURCE_QUOTA;
+ arg.type = GRPC_ARG_POINTER;
+ arg.value.pointer.p = req->resource_quota;
+ arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
+ grpc_channel_args args = {1, &arg};
+ grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
+ req->context->pollset_set, &args, addr,
+ req->deadline);
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
@@ -242,6 +249,7 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
@@ -257,6 +265,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
req->context = context;
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
+ req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&req->on_read, on_read, req);
grpc_closure_init(&req->done_write, done_write, req);
gpr_slice_buffer_init(&req->incoming);
@@ -274,6 +283,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
@@ -283,14 +293,15 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
return;
}
gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
- internal_request_begin(exec_ctx, context, pollent, request, deadline, on_done,
- response, name,
+ internal_request_begin(exec_ctx, context, pollent, resource_quota, request,
+ deadline, on_done, response, name,
grpc_httpcli_format_get_request(request));
gpr_free(name);
}
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_done,
@@ -303,7 +314,8 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
}
gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
internal_request_begin(
- exec_ctx, context, pollent, request, deadline, on_done, response, name,
+ exec_ctx, context, pollent, resource_quota, request, deadline, on_done,
+ response, name,
grpc_httpcli_format_post_request(request, body_bytes, body_size));
gpr_free(name);
}
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 320c0f86c6..11e03b44df 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -96,6 +96,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context *context);
'on_response' is a callback to report results to */
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
gpr_timespec deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
@@ -116,6 +117,7 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
Does not support ?var1=val1&var2=val2 in the path. */
void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
+ grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
gpr_timespec deadline, grpc_closure *on_complete,
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 1ba0a5c141..c6ddc76732 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -35,6 +35,8 @@
#include <grpc/support/alloc.h>
+#include "src/core/lib/profiling/timers.h"
+
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@@ -51,7 +53,7 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
GRPC_ERROR_UNREF(error);
return;
}
- closure->error = error;
+ closure->error_data.error = error;
closure->next_data.next = NULL;
if (closure_list->head == NULL) {
closure_list->head = closure;
@@ -64,8 +66,8 @@ void grpc_closure_list_append(grpc_closure_list *closure_list,
void grpc_closure_list_fail_all(grpc_closure_list *list,
grpc_error *forced_failure) {
for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
- if (c->error == GRPC_ERROR_NONE) {
- c->error = GRPC_ERROR_REF(forced_failure);
+ if (c->error_data.error == GRPC_ERROR_NONE) {
+ c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
@@ -110,3 +112,13 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}
+
+void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_closure_run", 0);
+ if (c != NULL) {
+ c->cb(exec_ctx, c->cb_arg, error);
+ }
+ GRPC_ERROR_UNREF(error);
+ GPR_TIMER_END("grpc_closure_run", 0);
+}
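
For context, a minimal sketch of calling grpc_closure_run() as defined above: the callback executes inline on the calling thread (no queuing), and the error reference is released after the callback returns. The callback and wrapper names below are made up.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"

/* Illustrative only: not part of this patch. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* error is borrowed here; grpc_closure_run() unrefs it after we return */
  (void)exec_ctx; (void)arg; (void)error;
}

static void run_inline(grpc_exec_ctx *exec_ctx) {
  grpc_closure c;
  grpc_closure_init(&c, on_done, NULL);
  grpc_closure_run(exec_ctx, &c, GRPC_ERROR_NONE); /* executes on_done now */
}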
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index c1a22b6021..2b4b271eaa 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -76,7 +76,10 @@ struct grpc_closure {
void *cb_arg;
/** Once queued, the result of the closure. Before then: scratch space */
- grpc_error *error;
+ union {
+ grpc_error *error;
+ uintptr_t scratch;
+ } error_data;
};
/** Initializes \a closure with \a cb and \a cb_arg. */
@@ -106,4 +109,10 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
+/** Run a closure directly. Caller ensures that no locks are being held above.
+ * Note that calling this at the end of a closure callback function itself is
+ * by definition safe. */
+void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error);
+
#endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
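
The error/scratch union added above relies on a pointer fitting in a uintptr_t, so a producer can stash packed data in the slot before the closure is actually queued. A standalone illustration of the idea (the types and names here are not gRPC's):

#include <assert.h>
#include <stdint.h>

typedef struct error_slot {
  union {
    void *error;       /* meaningful once the closure is queued        */
    uintptr_t scratch; /* free for the producer's bookkeeping before then */
  } error_data;
} error_slot;

int main(void) {
  assert(sizeof(uintptr_t) >= sizeof(void *));
  error_slot s;
  s.error_data.scratch = 0x2a; /* producer-side scratch value */
  (void)s;
  return 0;
}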
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index 831bdb4aff..60ee14eb23 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -50,25 +50,57 @@ int grpc_combiner_trace = 0;
} \
} while (0)
+#define STATE_UNORPHANED 1
+#define STATE_ELEM_COUNT_LOW_BIT 2
+
struct grpc_combiner {
+ grpc_combiner *next_combiner_on_this_exec_ctx;
grpc_workqueue *optional_workqueue;
gpr_mpscq queue;
// state is:
- // lower bit - zero if orphaned
- // other bits - number of items queued on the lock
+ // lower bit - zero if orphaned (STATE_UNORPHANED)
+ // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
gpr_atm state;
- bool take_async_break_before_final_list;
+ // number of elements in the list that are covered by a poller: if >0, we can
+ // offload safely
+ gpr_atm elements_covered_by_poller;
+ bool time_to_execute_final_list;
+ bool final_list_covered_by_poller;
grpc_closure_list final_list;
- grpc_closure continue_finishing;
+ grpc_closure offload;
};
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
+
+typedef struct {
+ grpc_error *error;
+ bool covered_by_poller;
+} error_data;
+
+static uintptr_t pack_error_data(error_data d) {
+ return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
+}
+
+static error_data unpack_error_data(uintptr_t p) {
+ return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
+}
+
+static bool is_covered_by_poller(grpc_combiner *lock) {
+ return lock->final_list_covered_by_poller ||
+ gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
+}
+
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
+ lock->next_combiner_on_this_exec_ctx = NULL;
+ lock->time_to_execute_final_list = false;
lock->optional_workqueue = optional_workqueue;
- gpr_atm_no_barrier_store(&lock->state, 1);
+ lock->final_list_covered_by_poller = false;
+ gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
+ gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
gpr_mpscq_init(&lock->queue);
- lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
+ grpc_closure_init(&lock->offload, offload, lock);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -82,7 +114,7 @@ static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -1);
+ gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) {
@@ -90,170 +122,186 @@ void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
}
-static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
-static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
+static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
+ grpc_combiner *lock) {
+ lock->next_combiner_on_this_exec_ctx = NULL;
+ if (exec_ctx->active_combiner == NULL) {
+ exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
+ } else {
+ exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
+ exec_ctx->last_combiner = lock;
+ }
+}
-static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("combiner.continue_executing_mainline", 0);
- grpc_combiner *lock = arg;
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p continue_finishing_mainline", lock));
- GPR_ASSERT(exec_ctx->active_combiner == NULL);
+static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
+ grpc_combiner *lock) {
+ lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
- if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- GPR_TIMER_END("combiner.continue_executing_mainline", 0);
+ if (lock->next_combiner_on_this_exec_ctx == NULL) {
+ exec_ctx->last_combiner = lock;
+ }
}
-static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.execute_final", 0);
- grpc_closure *c = lock->final_list.head;
- GPR_ASSERT(c != NULL);
- grpc_closure_list_init(&lock->final_list);
- lock->take_async_break_before_final_list = false;
- int loops = 0;
- while (c != NULL) {
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error;
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- loops++;
+void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
+ grpc_closure *cl, grpc_error *error,
+ bool covered_by_poller) {
+ GPR_TIMER_BEGIN("combiner.execute", 0);
+ gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
+ GRPC_COMBINER_TRACE(gpr_log(
+ GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
+ cl, covered_by_poller, last));
+ GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
+ cl->error_data.scratch =
+ pack_error_data((error_data){error, covered_by_poller});
+ if (covered_by_poller) {
+ gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
+ }
+ gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+ if (last == 1) {
+ // first element on this list: add it to the list of combiner locks
+ // executing within this exec_ctx
+ push_last_on_exec_ctx(exec_ctx, lock);
}
- GPR_TIMER_END("combiner.execute_final", 0);
+ GPR_TIMER_END("combiner.execute", 0);
}
-static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- GPR_TIMER_BEGIN("combiner.continue_executing_final", 0);
- grpc_combiner *lock = arg;
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p continue_executing_final", lock));
- GPR_ASSERT(exec_ctx->active_combiner == NULL);
- exec_ctx->active_combiner = lock;
- // quick peek to see if new things have turned up on the queue: if so, go back
- // to executing them before the final list
- if ((gpr_atm_acq_load(&lock->state) >> 1) > 1) {
- if (maybe_finish_one(exec_ctx, lock)) finish(exec_ctx, lock);
- } else {
- execute_final(exec_ctx, lock);
- finish(exec_ctx, lock);
+static void move_next(grpc_exec_ctx *exec_ctx) {
+ exec_ctx->active_combiner =
+ exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
+ if (exec_ctx->active_combiner == NULL) {
+ exec_ctx->last_combiner = NULL;
}
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- GPR_TIMER_END("combiner.continue_executing_final", 0);
}
-static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.start_execute_final", 0);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG,
- "C:%p start_execute_final take_async_break_before_final_list=%d",
- lock, lock->take_async_break_before_final_list));
- if (lock->take_async_break_before_final_list) {
- grpc_closure_init(&lock->continue_finishing, continue_executing_final,
- lock);
- grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
- GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
- GPR_TIMER_END("combiner.start_execute_final", 0);
+static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_combiner *lock = arg;
+ push_last_on_exec_ctx(exec_ctx, lock);
+}
+
+static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+ move_next(exec_ctx);
+ GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
+ lock->optional_workqueue));
+ grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
+ GRPC_ERROR_NONE);
+}
+
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
+ GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
+ grpc_combiner *lock = exec_ctx->active_combiner;
+ if (lock == NULL) {
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
- } else {
- execute_final(exec_ctx, lock);
- GPR_TIMER_END("combiner.start_execute_final", 0);
- return true;
}
-}
-static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- GPR_TIMER_BEGIN("combiner.maybe_finish_one", 0);
- gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- if (n == NULL) {
- // Queue is in an transiently inconsistent state: a new item is being queued
- // but is not visible to this thread yet.
- // Use this as a cue that we should go off and do something else for a while
- // (and come back later)
- grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
- lock);
- grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
- GRPC_WORKQUEUE_REF(lock->optional_workqueue, "sched"));
- GPR_TIMER_END("combiner.maybe_finish_one", 0);
- return false;
+ gpr_log(GPR_DEBUG,
+ "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
+ "is_covered_by_poller=%d exec_ctx_ready_to_finish=%d "
+ "time_to_execute_final_list=%d",
+ lock, lock->optional_workqueue, is_covered_by_poller(lock),
+ grpc_exec_ctx_ready_to_finish(exec_ctx),
+ lock->time_to_execute_final_list));
+
+ if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
+ grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
+ // this execution context wants to move on, and we have a workqueue (and
+ // so can help the execution context out): schedule remaining work to be
+ // picked up on the workqueue
+ queue_offload(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
- grpc_closure *cl = (grpc_closure *)n;
- grpc_error *error = cl->error;
- cl->cb(exec_ctx, cl->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("combiner.maybe_finish_one", 0);
- return true;
-}
-static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
- bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock);
- GPR_TIMER_BEGIN("combiner.finish", 0);
- int loops = 0;
- do {
- executor = maybe_finish_one;
- gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -2);
- GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
- "C:%p finish[%d] old_state=%" PRIdPTR, lock,
- loops, old_state));
- switch (old_state) {
- default:
- // we have multiple queued work items: just continue executing them
- break;
- case 5: // we're down to one queued item: if it's the final list we
- case 4: // should do that
- if (!grpc_closure_list_empty(lock->final_list)) {
- executor = start_execute_final;
- }
- break;
- case 3: // had one count, one unorphaned --> unlocked unorphaned
- GPR_TIMER_END("combiner.finish", 0);
- return;
- case 2: // and one count, one orphaned --> unlocked and orphaned
- really_destroy(exec_ctx, lock);
- GPR_TIMER_END("combiner.finish", 0);
- return;
- case 1:
- case 0:
- // these values are illegal - representing an already unlocked or
- // deleted lock
- GPR_UNREACHABLE_CODE(return );
+ if (!lock->time_to_execute_final_list ||
+ // peek to see if something new has shown up, and execute that with
+ // priority
+ (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
+ GRPC_COMBINER_TRACE(
+ gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
+ if (n == NULL) {
+ // queue is in an inconsistent state: use this as a cue that we should
+ // go off and do something else for a while (and come back later)
+ GPR_TIMER_MARK("delay_busy", 0);
+ if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
+ queue_offload(exec_ctx, lock);
+ }
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
- loops++;
- } while (executor(exec_ctx, lock));
- GPR_TIMER_END("combiner.finish", 0);
-}
+ GPR_TIMER_BEGIN("combiner.exec1", 0);
+ grpc_closure *cl = (grpc_closure *)n;
+ error_data err = unpack_error_data(cl->error_data.scratch);
+ cl->cb(exec_ctx, cl->cb_arg, err.error);
+ if (err.covered_by_poller) {
+ gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
+ }
+ GRPC_ERROR_UNREF(err.error);
+ GPR_TIMER_END("combiner.exec1", 0);
+ } else {
+ grpc_closure *c = lock->final_list.head;
+ GPR_ASSERT(c != NULL);
+ grpc_closure_list_init(&lock->final_list);
+ lock->final_list_covered_by_poller = false;
+ int loops = 0;
+ while (c != NULL) {
+ GPR_TIMER_BEGIN("combiner.exec_1final", 0);
+ GRPC_COMBINER_TRACE(
+ gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ GPR_TIMER_END("combiner.exec_1final", 0);
+ }
+ }
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *cl, grpc_error *error) {
+ GPR_TIMER_MARK("unref", 0);
+ move_next(exec_ctx);
+ lock->time_to_execute_final_list = false;
+ gpr_atm old_state =
+ gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
- gpr_log(GPR_DEBUG, "C:%p grpc_combiner_execute c=%p", lock, cl));
- GPR_TIMER_BEGIN("combiner.execute", 0);
- gpr_atm last = gpr_atm_full_fetch_add(&lock->state, 2);
- GPR_ASSERT(last & 1); // ensure lock has not been destroyed
- if (last == 1) {
- exec_ctx->active_combiner = lock;
- GPR_TIMER_BEGIN("combiner.execute_first_cb", 0);
- cl->cb(exec_ctx, cl->cb_arg, error);
- GPR_TIMER_END("combiner.execute_first_cb", 0);
- GRPC_ERROR_UNREF(error);
- finish(exec_ctx, lock);
- GPR_ASSERT(exec_ctx->active_combiner == lock);
- exec_ctx->active_combiner = NULL;
- } else {
- cl->error = error;
- gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
+ gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
+// Define a macro to ease readability of the following switch statement.
+#define OLD_STATE_WAS(orphaned, elem_count) \
+ (((orphaned) ? 0 : STATE_UNORPHANED) | \
+ ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
+ // Depending on what the previous state was, we need to perform different
+ // actions.
+ switch (old_state) {
+ default:
+ // we have multiple queued work items: just continue executing them
+ break;
+ case OLD_STATE_WAS(false, 2):
+ case OLD_STATE_WAS(true, 2):
+ // we're down to one queued item: if it's the final list we should do that
+ if (!grpc_closure_list_empty(lock->final_list)) {
+ lock->time_to_execute_final_list = true;
+ }
+ break;
+ case OLD_STATE_WAS(false, 1):
+ // had one count, one unorphaned --> unlocked unorphaned
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
+ case OLD_STATE_WAS(true, 1):
+ // and one count, one orphaned --> unlocked and orphaned
+ really_destroy(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
+ case OLD_STATE_WAS(false, 0):
+ case OLD_STATE_WAS(true, 0):
+ // these values are illegal - representing an already unlocked or
+ // deleted lock
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ GPR_UNREACHABLE_CODE(return true);
}
- GPR_TIMER_END("combiner.execute", 0);
+ push_first_on_exec_ctx(exec_ctx, lock);
+ GPR_TIMER_END("combiner.continue_exec_ctx", 0);
+ return true;
}
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
@@ -264,30 +312,26 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
- bool force_async_break) {
+ bool covered_by_poller) {
GRPC_COMBINER_TRACE(gpr_log(
- GPR_DEBUG,
- "C:%p grpc_combiner_execute_finally c=%p force_async_break=%d; ac=%p",
- lock, closure, force_async_break, exec_ctx->active_combiner));
+ GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
+ closure, exec_ctx->active_combiner, covered_by_poller));
GPR_TIMER_BEGIN("combiner.execute_finally", 0);
if (exec_ctx->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
grpc_combiner_execute(exec_ctx, lock,
- grpc_closure_create(enqueue_finally, closure), error);
+ grpc_closure_create(enqueue_finally, closure), error,
+ false);
GPR_TIMER_END("combiner.execute_finally", 0);
return;
}
- if (force_async_break) {
- lock->take_async_break_before_final_list = true;
- }
if (grpc_closure_list_empty(lock->final_list)) {
- gpr_atm_full_fetch_add(&lock->state, 2);
+ gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
+ }
+ if (covered_by_poller) {
+ lock->final_list_covered_by_poller = true;
}
grpc_closure_list_append(&lock->final_list, closure, error);
GPR_TIMER_END("combiner.execute_finally", 0);
}
-
-void grpc_combiner_force_async_finally(grpc_combiner *lock) {
- lock->take_async_break_before_final_list = true;
-}
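
The combiner now folds the covered_by_poller flag into the closure's scratch word alongside the error pointer, as pack_error_data()/unpack_error_data() show above. A standalone illustration of that low-bit packing (names are illustrative, not the gRPC API); it depends on the pointers being at least 2-byte aligned, which the error.h change below guarantees for the special error values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  void *error;            /* stands in for grpc_error*                */
  bool covered_by_poller; /* flag folded into the pointer's low bit   */
} packed_error;

static uintptr_t pack(packed_error d) {
  return (uintptr_t)d.error | (d.covered_by_poller ? 1 : 0);
}

static packed_error unpack(uintptr_t p) {
  packed_error d;
  d.error = (void *)(p & ~(uintptr_t)1);
  d.covered_by_poller = (p & 1) != 0;
  return d;
}

int main(void) {
  static int some_error; /* any sufficiently aligned object */
  packed_error in = {&some_error, true};
  packed_error out = unpack(pack(in));
  assert(out.error == &some_error && out.covered_by_poller);
  return 0;
}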
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 1409db24b9..d04eeed83a 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -52,19 +52,14 @@ grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
- grpc_closure *closure, grpc_error *error);
+ grpc_closure *closure, grpc_error *error,
+ bool covered_by_poller);
// Execute \a action within the lock just prior to unlocking.
-// if \a hint_async_break is true, the combiner tries to hand execution to
-// another thread before finishing the primary queue of combined closures and
-// executing the finally list.
-// Deprecation warning: \a hint_async_break will be removed in a future version
-// Takes a very slow and round-about path if not called from a
-// grpc_combiner_execute closure.
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
- bool hint_async_break);
-// Deprecated: force the finally list execution onto another thread
-void grpc_combiner_force_async_finally(grpc_combiner *lock);
+ bool covered_by_poller);
+
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern int grpc_combiner_trace;
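
A hypothetical caller of the revised interface might look like the sketch below: closures are queued under the lock with an explicit covered_by_poller hint, and "finally" closures run just before the lock is released. It is assumed (not shown in this diff) that the exec_ctx flush loop drains pending combiners via grpc_combiner_continue_exec_ctx(); the helper names here are made up.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/error.h"

/* Illustrative only: not part of this patch. */
static void do_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* runs with the combiner lock held */
  (void)exec_ctx; (void)arg; (void)error;
}

static void example(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  grpc_combiner_execute(exec_ctx, lock, grpc_closure_create(do_locked, NULL),
                        GRPC_ERROR_NONE, false /* not covered by a poller */);
  /* Work that must run just before the lock is released goes on the
     "finally" list instead: */
  grpc_combiner_execute_finally(exec_ctx, lock,
                                grpc_closure_create(do_locked, NULL),
                                GRPC_ERROR_NONE, false);
}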
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.c
index f901fcf962..74fa9c45df 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.c
@@ -69,3 +69,7 @@ char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
grpc_workqueue* grpc_endpoint_get_workqueue(grpc_endpoint* ep) {
return ep->vtable->get_workqueue(ep);
}
+
+grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
+ return ep->vtable->get_resource_user(ep);
+}
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 910a6f6532..0ac5486ff5 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -39,6 +39,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/iomgr/resource_quota.h"
/* An endpoint caps a streaming channel between two communicating processes.
Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
@@ -58,6 +59,7 @@ struct grpc_endpoint_vtable {
grpc_pollset_set *pollset);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+ grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
char *(*get_peer)(grpc_endpoint *ep);
};
@@ -100,6 +102,8 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
+grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
+
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index 5cd78051bd..f9de0c715e 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -41,7 +41,8 @@ typedef struct {
grpc_endpoint *server;
} grpc_endpoint_pair;
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size);
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.c b/src/core/lib/iomgr/endpoint_pair_posix.c
index e295fb4867..b9ff969e81 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.c
+++ b/src/core/lib/iomgr/endpoint_pair_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/endpoint_pair.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
@@ -62,20 +62,21 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
int sv[2];
grpc_endpoint_pair p;
char *final_name;
create_sockets(sv);
gpr_asprintf(&final_name, "%s:client", name);
- p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
- "socketpair-server");
+ p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), resource_quota,
+ read_slice_size, "socketpair-server");
gpr_free(final_name);
gpr_asprintf(&final_name, "%s:server", name);
- p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
- "socketpair-client");
+ p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), resource_quota,
+ read_slice_size, "socketpair-client");
gpr_free(final_name);
return p;
}
diff --git a/src/core/lib/security/credentials/google_default/credentials_windows.c b/src/core/lib/iomgr/endpoint_pair_uv.c
index 208b8fd9ad..7941e20388 100644
--- a/src/core/lib/security/credentials/google_default/credentials_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_uv.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,31 +31,23 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINDOWS
+#ifdef GRPC_UV
-#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
+#include <stdlib.h>
-#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include "src/core/lib/support/env.h"
-#include "src/core/lib/support/string.h"
+#include "src/core/lib/iomgr/endpoint_pair.h"
-char *grpc_get_well_known_google_credentials_file_path_impl(void) {
- char *result = NULL;
- char *appdata_path = gpr_getenv("APPDATA");
- if (appdata_path == NULL) {
- gpr_log(GPR_ERROR, "Could not get APPDATA environment variable.");
- return NULL;
- }
- gpr_asprintf(&result, "%s/%s/%s", appdata_path,
- GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY,
- GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE);
- gpr_free(appdata_path);
- return result;
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
+ size_t read_slice_size) {
+ grpc_endpoint_pair endpoint_pair;
+ // TODO(mlumish): implement this properly under libuv
+ GPR_ASSERT(false &&
+ "grpc_iomgr_create_endpoint_pair is not suppoted with libuv");
+ return endpoint_pair;
}
-#endif /* GPR_WINDOWS */
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.c b/src/core/lib/iomgr/endpoint_pair_windows.c
index 582704e267..93f71b745c 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/endpoint_pair.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -82,15 +82,16 @@ static void create_sockets(SOCKET sv[2]) {
sv[0] = svr_sock;
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- size_t read_slice_size) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
+ const char *name, grpc_resource_quota *resource_quota,
+ size_t read_slice_size) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
- "endpoint:server");
+ resource_quota, "endpoint:server");
p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
- "endpoint:client");
+ resource_quota, "endpoint:client");
return p;
}
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 31c80260f8..f6bb3a0477 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -120,6 +120,8 @@ static const char *error_int_name(grpc_error_ints key) {
return "http_status";
case GRPC_ERROR_INT_LIMIT:
return "limit";
+ case GRPC_ERROR_INT_OCCURRED_DURING_WRITE:
+ return "occurred_during_write";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -144,6 +146,8 @@ static const char *error_str_name(grpc_error_strs key) {
return "tsi_error";
case GRPC_ERROR_STR_FILENAME:
return "filename";
+ case GRPC_ERROR_STR_QUEUED_BUFFERS:
+ return "queued_buffers";
}
GPR_UNREACHABLE_CODE(return "unknown");
}
@@ -265,7 +269,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
} else {
out = gpr_malloc(sizeof(*out));
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
- gpr_log(GPR_DEBUG, "%p create copying", out);
+ gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
#endif
out->ints = gpr_avl_ref(in->ints);
out->strs = gpr_avl_ref(in->strs);
@@ -523,21 +527,25 @@ static char *fmt_time(void *p) {
return out;
}
-static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap) {
+static void add_errs(gpr_avl_node *n, char **s, size_t *sz, size_t *cap,
+ bool *first) {
if (n == NULL) return;
- add_errs(n->left, s, sz, cap);
+ add_errs(n->left, s, sz, cap, first);
+ if (!*first) append_chr(',', s, sz, cap);
+ *first = false;
const char *e = grpc_error_string(n->value);
append_str(e, s, sz, cap);
grpc_error_free_string(e);
- add_errs(n->right, s, sz, cap);
+ add_errs(n->right, s, sz, cap, first);
}
static char *errs_string(grpc_error *err) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
+ bool first = true;
append_chr('[', &s, &sz, &cap);
- add_errs(err->errs.root, &s, &sz, &cap);
+ add_errs(err->errs.root, &s, &sz, &cap, &first);
append_chr(']', &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index 00ace8a7a9..f3f3b80a09 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -100,6 +100,8 @@ typedef enum {
GRPC_ERROR_INT_HTTP_STATUS,
/// context sensitive limit associated with the error
GRPC_ERROR_INT_LIMIT,
+ /// chttp2: did the error occur while a write was in progress
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
} grpc_error_ints;
typedef enum {
@@ -121,6 +123,8 @@ typedef enum {
GRPC_ERROR_STR_TSI_ERROR,
/// filename that we were trying to read/write when this error occurred
GRPC_ERROR_STR_FILENAME,
+ /// which data was queued for writing when the error occurred
+ GRPC_ERROR_STR_QUEUED_BUFFERS
} grpc_error_strs;
typedef enum {
@@ -128,9 +132,13 @@ typedef enum {
GRPC_ERROR_TIME_CREATED,
} grpc_error_times;
+/// The following "special" errors can be propagated without allocating memory.
+/// They are always even so that other code (particularly combiner locks) can
+/// safely use the lower bit for themselves.
+
#define GRPC_ERROR_NONE ((grpc_error *)NULL)
-#define GRPC_ERROR_OOM ((grpc_error *)1)
-#define GRPC_ERROR_CANCELLED ((grpc_error *)2)
+#define GRPC_ERROR_OOM ((grpc_error *)2)
+#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
const char *grpc_error_string(grpc_error *error);
void grpc_error_free_string(const char *str);
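
A standalone check of the evenness invariant stated above, and of how a borrowed low bit is recovered; the EX_* names are stand-ins for the real #defines, not gRPC identifiers:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EX_ERROR_NONE      ((void *)NULL)
#define EX_ERROR_OOM       ((void *)2)
#define EX_ERROR_CANCELLED ((void *)4)

int main(void) {
  /* All special values are even, so bit 0 is free for other code to use. */
  assert(((uintptr_t)EX_ERROR_NONE & 1) == 0);
  assert(((uintptr_t)EX_ERROR_OOM & 1) == 0);
  assert(((uintptr_t)EX_ERROR_CANCELLED & 1) == 0);
  /* Borrow the low bit and still recover the original value. */
  uintptr_t packed = (uintptr_t)EX_ERROR_CANCELLED | 1;
  assert((void *)(packed & ~(uintptr_t)1) == EX_ERROR_CANCELLED);
  return 0;
}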
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index ab77ebc78b..db51ec4939 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -32,10 +32,10 @@
*/
#include <grpc/grpc_posix.h>
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GPR_LINUX_EPOLL
+#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll_linux.h"
@@ -152,20 +152,20 @@ static void fd_global_shutdown(void);
* Polling island Declarations
*/
-//#define GRPC_PI_REF_COUNT_DEBUG
-#ifdef GRPC_PI_REF_COUNT_DEBUG
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */
+#else /* defined(GRPC_WORKQUEUE_REFCOUNT_DEBUG) */
#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
#endif /* !defined(GPRC_PI_REF_COUNT_DEBUG) */
+/* This is also used as grpc_workqueue (by directly casting it) */
typedef struct polling_island {
gpr_mu mu;
/* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
@@ -185,8 +185,17 @@ typedef struct polling_island {
* (except mu and ref_count) are invalid and must be ignored. */
gpr_atm merged_to;
- /* The workqueue associated with this polling island */
- grpc_workqueue *workqueue;
+ /* Number of threads currently polling on this island */
+ gpr_atm poller_count;
+ /* Mutex guarding the read end of the workqueue (must be held to pop from
+ * workqueue_items) */
+ gpr_mu workqueue_read_mu;
+ /* Queue of closures to be executed */
+ gpr_mpscq workqueue_items;
+ /* Count of items in workqueue_items */
+ gpr_atm workqueue_item_count;
+ /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
+ grpc_wakeup_fd workqueue_wakeup_fd;
/* The fd of the underlying epoll set */
int epoll_fd;
@@ -275,6 +284,10 @@ static bool append_error(grpc_error **composite, grpc_error *error,
threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;
+/* The polling island being polled right now.
+ See comments in workqueue_maybe_wakeup for why this is tracked. */
+static __thread polling_island *g_current_thread_polling_island;
+
/* Forward declaration */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
@@ -289,12 +302,12 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
- int line) {
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static void pi_add_ref_dbg(polling_island *pi, const char *reason,
+ const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_add_ref(pi);
gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
@@ -302,12 +315,42 @@ static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
}
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- char *reason, char *file, int line) {
+ const char *reason, const char *file, int line) {
long old_cnt = gpr_atm_acq_load(&pi->ref_count);
pi_unref(exec_ctx, pi);
gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
(void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
+
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ if (workqueue != NULL) {
+ pi_add_ref_dbg((polling_island *)workqueue, reason, file, line);
+ }
+ return workqueue;
+}
+
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {
+ if (workqueue != NULL) {
+ pi_unref_dbg(exec_ctx, (polling_island *)workqueue, reason, file, line);
+ }
+}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ if (workqueue != NULL) {
+ pi_add_ref((polling_island *)workqueue);
+ }
+ return workqueue;
+}
+
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {
+ if (workqueue != NULL) {
+ pi_unref(exec_ctx, (polling_island *)workqueue);
+ }
+}
#endif
static void pi_add_ref(polling_island *pi) {
@@ -315,10 +358,7 @@ static void pi_add_ref(polling_island *pi) {
}
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- /* If ref count went to one, we're back to just the workqueue owning a ref.
- Unref the workqueue to break the loop.
-
- If ref count went to zero, delete the polling island.
+ /* If ref count went to zero, delete the polling island.
     Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
polling island (and that there is no racing pi_add_ref() call either).
@@ -326,20 +366,12 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
Also, if we are deleting the polling island and the merged_to field is
non-empty, we should remove a ref to the merged_to polling island
*/
- switch (gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- case 2: /* last external ref: the only one now owned is by the workqueue */
- GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
- break;
- case 1: {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(exec_ctx, pi);
- if (next != NULL) {
- PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
- }
- break;
+ if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
+ polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ polling_island_delete(exec_ctx, pi);
+ if (next != NULL) {
+ PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
}
- case 0:
- GPR_UNREACHABLE_CODE(return );
}
}
@@ -488,11 +520,20 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
pi->fd_capacity = 0;
pi->fds = NULL;
pi->epoll_fd = -1;
- pi->workqueue = NULL;
+
+ gpr_mu_init(&pi->workqueue_read_mu);
+ gpr_mpscq_init(&pi->workqueue_items);
+ gpr_atm_rel_store(&pi->workqueue_item_count, 0);
gpr_atm_rel_store(&pi->ref_count, 0);
+ gpr_atm_rel_store(&pi->poller_count, 0);
gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
+ if (!append_error(error, grpc_wakeup_fd_init(&pi->workqueue_wakeup_fd),
+ err_desc)) {
+ goto done;
+ }
+
pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (pi->epoll_fd < 0) {
@@ -501,26 +542,14 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
}
polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
+ polling_island_add_wakeup_fd_locked(pi, &pi->workqueue_wakeup_fd, error);
if (initial_fd != NULL) {
polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
}
- if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
- err_desc) &&
- *error == GRPC_ERROR_NONE) {
- polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
- error);
- GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
- pi->workqueue->wakeup_read_fd->polling_island = pi;
- PI_ADD_REF(pi, "fd");
- }
-
done:
if (*error != GRPC_ERROR_NONE) {
- if (pi->workqueue != NULL) {
- GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
- }
polling_island_delete(exec_ctx, pi);
pi = NULL;
}
@@ -533,7 +562,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
if (pi->epoll_fd >= 0) {
close(pi->epoll_fd);
}
+ GPR_ASSERT(gpr_atm_no_barrier_load(&pi->workqueue_item_count) == 0);
+ gpr_mu_destroy(&pi->workqueue_read_mu);
+ gpr_mpscq_destroy(&pi->workqueue_items);
gpr_mu_destroy(&pi->mu);
+ grpc_wakeup_fd_destroy(&pi->workqueue_wakeup_fd);
gpr_free(pi->fds);
gpr_free(pi);
}
@@ -678,6 +711,45 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
+static void workqueue_maybe_wakeup(polling_island *pi) {
+ /* If this thread is the current poller, then it may be that it's about to
+ decrement the current poller count, so we need to look past this thread */
+ bool is_current_poller = (g_current_thread_polling_island == pi);
+ gpr_atm min_current_pollers_for_wakeup = is_current_poller ? 1 : 0;
+ gpr_atm current_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
+ /* Only issue a wakeup if it's likely that some poller could come in and take
+ it right now. Note that since we do an anticipatory mpscq_pop every poll
+ loop, it's ok if we miss the wakeup here, as we'll get the work item when
+ the next poller enters anyway. */
+ if (current_pollers > min_current_pollers_for_wakeup) {
+ GRPC_LOG_IF_ERROR("workqueue_wakeup_fd",
+ grpc_wakeup_fd_wakeup(&pi->workqueue_wakeup_fd));
+ }
+}
+
+static void workqueue_move_items_to_parent(polling_island *q) {
+ polling_island *p = (polling_island *)gpr_atm_no_barrier_load(&q->merged_to);
+ if (p == NULL) {
+ return;
+ }
+ gpr_mu_lock(&q->workqueue_read_mu);
+ int num_added = 0;
+ while (gpr_atm_no_barrier_load(&q->workqueue_item_count) > 0) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&q->workqueue_items);
+ if (n != NULL) {
+ gpr_atm_no_barrier_fetch_add(&q->workqueue_item_count, -1);
+ gpr_atm_no_barrier_fetch_add(&p->workqueue_item_count, 1);
+ gpr_mpscq_push(&p->workqueue_items, n);
+ num_added++;
+ }
+ }
+ gpr_mu_unlock(&q->workqueue_read_mu);
+ if (num_added > 0) {
+ workqueue_maybe_wakeup(p);
+ }
+ workqueue_move_items_to_parent(p);
+}
+
static polling_island *polling_island_merge(polling_island *p,
polling_island *q,
grpc_error **error) {
@@ -702,6 +774,8 @@ static polling_island *polling_island_merge(polling_island *p,
/* Add the 'merged_to' link from p --> q */
gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
+
+ workqueue_move_items_to_parent(q);
}
/* else if p == q, nothing needs to be done */
@@ -712,6 +786,26 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("workqueue.enqueue", 0);
+ /* take a ref to the workqueue: otherwise it can happen that whatever events
+ * this kicks off ends up destroying the workqueue before this function
+ * completes */
+ GRPC_WORKQUEUE_REF(workqueue, "enqueue");
+ polling_island *pi = (polling_island *)workqueue;
+ gpr_atm last = gpr_atm_no_barrier_fetch_add(&pi->workqueue_item_count, 1);
+ closure->error_data.error = error;
+ gpr_mpscq_push(&pi->workqueue_items, &closure->next_data.atm_next);
+ if (last == 0) {
+ workqueue_maybe_wakeup(pi);
+ }
+ workqueue_move_items_to_parent(pi);
+ GRPC_WORKQUEUE_UNREF(exec_ctx, workqueue, "enqueue");
+ GPR_TIMER_END("workqueue.enqueue", 0);
+}
+
static grpc_error *polling_island_global_init() {
grpc_error *error = GRPC_ERROR_NONE;
@@ -1042,11 +1136,8 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
gpr_mu_lock(&fd->mu);
- grpc_workqueue *workqueue = NULL;
- if (fd->polling_island != NULL) {
- workqueue =
- GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
- }
+ grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF(
+ (grpc_workqueue *)fd->polling_island, "fd_get_workqueue");
gpr_mu_unlock(&fd->mu);
return workqueue;
}
@@ -1299,7 +1390,29 @@ static void pollset_reset(grpc_pollset *pollset) {
GPR_ASSERT(pollset->polling_island == NULL);
}
-#define GRPC_EPOLL_MAX_EVENTS 1000
+static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx,
+ polling_island *pi) {
+ if (gpr_mu_trylock(&pi->workqueue_read_mu)) {
+ gpr_mpscq_node *n = gpr_mpscq_pop(&pi->workqueue_items);
+ gpr_mu_unlock(&pi->workqueue_read_mu);
+ if (n != NULL) {
+ if (gpr_atm_full_fetch_add(&pi->workqueue_item_count, -1) > 1) {
+ workqueue_maybe_wakeup(pi);
+ }
+ grpc_closure *c = (grpc_closure *)n;
+ grpc_closure_run(exec_ctx, c, c->error_data.error);
+ return true;
+ } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
+ /* n == NULL might mean there's work but it's not available to be popped
+       * yet - try to ensure another poller wakes up to check shortly if so
+ */
+ workqueue_maybe_wakeup(pi);
+ }
+ }
+ return false;
+}
+
+#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
@@ -1354,7 +1467,13 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pi, "ps_work");
gpr_mu_unlock(&pollset->mu);
- do {
+ /* If we get some workqueue work to do, it might end up completing an item on
+ the completion queue, so there's no need to poll... so we skip that and
+ redo the complete loop to verify */
+ if (!maybe_do_workqueue_work(exec_ctx, pi)) {
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
+ g_current_thread_polling_island = pi;
+
GRPC_SCHEDULING_START_BLOCKING_REGION;
ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
sig_mask);
@@ -1386,6 +1505,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
append_error(error,
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
err_desc);
+ } else if (data_ptr == &pi->workqueue_wakeup_fd) {
+ append_error(error,
+ grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
+ err_desc);
+ maybe_do_workqueue_work(exec_ctx, pi);
} else if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
@@ -1408,7 +1532,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
}
}
}
- } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
+
+ g_current_thread_polling_island = NULL;
+ gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
+ }
GPR_ASSERT(pi != NULL);
@@ -1584,6 +1711,12 @@ retry:
"pollset_add_fd: Raced creating new polling island. pi_new: %p "
"(fd: %d, pollset: %p)",
(void *)pi_new, fd->fd, (void *)pollset);
+
+ /* No need to lock 'pi_new' here since this is a new polling island and
+ * no one has a reference to it yet */
+ polling_island_remove_all_fds_locked(pi_new, true, &error);
+
+ /* Ref and unref so that the polling island gets deleted during unref */
PI_ADD_REF(pi_new, "dance_of_destruction");
PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
goto retry;
@@ -1868,6 +2001,10 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
@@ -1892,6 +2029,10 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
return NULL;
}
+ if (!grpc_has_wakeup_fd()) {
+ return NULL;
+ }
+
if (!is_epoll_available()) {
return NULL;
}
@@ -1914,13 +2055,13 @@ const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
return &vtable;
}
-#else /* defined(GPR_LINUX_EPOLL) */
-#if defined(GPR_POSIX_SOCKET)
+#else /* defined(GRPC_LINUX_EPOLL) */
+#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
-/* If GPR_LINUX_EPOLL is not defined, it means epoll is not available. Return
+/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
-#endif /* defined(GPR_POSIX_SOCKET) */
+#endif /* defined(GRPC_POSIX_SOCKET) */
void grpc_use_signal(int signum) {}
-#endif /* !defined(GPR_LINUX_EPOLL) */
+#endif /* !defined(GRPC_LINUX_EPOLL) */
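
The wakeup heuristic in workqueue_maybe_wakeup() above boils down to: issue a wakeup only if some other poller could pick the item up right now; otherwise the anticipatory pop in the poll loop will find it later. A standalone sketch of that decision (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

static bool should_wake(long current_pollers, bool caller_is_a_poller) {
  /* The caller may be about to stop polling, so don't count it. */
  long min_pollers_for_wakeup = caller_is_a_poller ? 1 : 0;
  return current_pollers > min_pollers_for_wakeup;
}

int main(void) {
  printf("%d\n", should_wake(0, false)); /* 0: nobody to wake             */
  printf("%d\n", should_wake(1, true));  /* 0: only the caller is polling */
  printf("%d\n", should_wake(2, true));  /* 1: another poller can take it */
  return 0;
}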
diff --git a/src/core/lib/iomgr/ev_epoll_linux.h b/src/core/lib/iomgr/ev_epoll_linux.h
index 7a494aba19..8fc3ff59a3 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.h
+++ b/src/core/lib/iomgr/ev_epoll_linux.h
@@ -35,13 +35,14 @@
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/port.h"
const grpc_event_engine_vtable *grpc_init_epoll_linux(void);
-#ifdef GPR_LINUX_EPOLL
+#ifdef GRPC_LINUX_EPOLL
void *grpc_fd_get_polling_island(grpc_fd *fd);
void *grpc_pollset_get_polling_island(grpc_pollset *ps);
bool grpc_are_polling_islands_equal(void *p, void *q);
-#endif /* defined(GPR_LINUX_EPOLL) */
+#endif /* defined(GRPC_LINUX_EPOLL) */
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
index c2107e5e39..bf51404203 100644
--- a/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_and_epoll_posix.c
@@ -42,9 +42,9 @@
* - ev_epoll_posix.{h,c}
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_poll_and_epoll_posix.h"
@@ -1338,7 +1338,7 @@ static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
* pollset_multipoller_with_poll_posix.c
*/
-#ifndef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifndef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
typedef struct {
/* all polled fds */
@@ -1520,13 +1520,13 @@ static void poll_become_multipoller(grpc_exec_ctx *exec_ctx,
}
}
-#endif /* !GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#endif /* !GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
/*******************************************************************************
* pollset_multipoller_with_epoll_posix.c
*/
-#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
#include <errno.h>
#include <poll.h>
@@ -1839,11 +1839,11 @@ static void epoll_become_multipoller(grpc_exec_ctx *exec_ctx,
}
}
-#else /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#else /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
static void remove_fd_from_all_epoll_sets(int fd) {}
-#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
+#endif /* GRPC_LINUX_MULTIPOLL_WITH_EPOLL */
/*******************************************************************************
* pollset_set_posix.c
@@ -1989,6 +1989,32 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
+ * workqueue stubs
+ */
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {}
+#endif
+
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+/*******************************************************************************
* event engine binding
*/
@@ -2029,11 +2055,15 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_and_epoll_posix(void) {
-#ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL
+#ifdef GRPC_LINUX_MULTIPOLL_WITH_EPOLL
platform_become_multipoller = epoll_become_multipoller;
#else
platform_become_multipoller = poll_become_multipoller;
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 16a5e3083e..e1d620cfff 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_poll_posix.h"
@@ -47,10 +47,12 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
+#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
@@ -246,6 +248,28 @@ struct grpc_pollset_set {
};
/*******************************************************************************
+ * condition variable polling definitions
+ */
+
+#define CV_POLL_PERIOD_MS 1000
+#define CV_DEFAULT_TABLE_SIZE 16
+
+typedef enum poll_status_t { INPROGRESS, COMPLETED, CANCELLED } poll_status_t;
+
+typedef struct poll_args {
+ gpr_refcount refcount;
+ gpr_cv *cv;
+ struct pollfd *fds;
+ nfds_t nfds;
+ int timeout;
+ int retval;
+ int err;
+ gpr_atm status;
+} poll_args;
+
+cv_fd_table g_cvfds;
+
+/*******************************************************************************
* fd_posix.c
*/
@@ -961,8 +985,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
}
+
for (i = 2; i < pfd_count; i++) {
- fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ if (watchers[i].fd == NULL) {
+ fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
+ } else {
+ // Wake up all the file descriptors; if one of them is invalid,
+ // we can identify it on the next pollset_work()
+ fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
+ }
}
} else if (r == 0) {
for (i = 2; i < pfd_count; i++) {
@@ -1236,10 +1267,237 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
}
/*******************************************************************************
+ * workqueue stubs
+ */
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
+ const char *file, int line,
+ const char *reason) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+static void workqueue_unref(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue) {}
+#endif
+
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
+ grpc_workqueue *workqueue, grpc_closure *closure,
+ grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+/*******************************************************************************
+ * Condition Variable polling extensions
+ */
+
+static void decref_poll_args(poll_args *args) {
+ if (gpr_unref(&args->refcount)) {
+ gpr_free(args->fds);
+ gpr_cv_destroy(args->cv);
+ gpr_free(args->cv);
+ gpr_free(args);
+ }
+}
+
+// Poll in a background thread
+static void run_poll(void *arg) {
+ int timeout, retval;
+ poll_args *pargs = (poll_args *)arg;
+ while (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
+ if (pargs->timeout < 0) {
+ timeout = CV_POLL_PERIOD_MS;
+ } else {
+ timeout = GPR_MIN(CV_POLL_PERIOD_MS, pargs->timeout);
+ pargs->timeout -= timeout;
+ }
+ retval = g_cvfds.poll(pargs->fds, pargs->nfds, timeout);
+ if (retval != 0 || pargs->timeout == 0) {
+ pargs->retval = retval;
+ pargs->err = errno;
+ break;
+ }
+ }
+ gpr_mu_lock(&g_cvfds.mu);
+ if (gpr_atm_no_barrier_load(&pargs->status) == INPROGRESS) {
+ // Signal main thread that the poll completed
+ gpr_atm_no_barrier_store(&pargs->status, COMPLETED);
+ gpr_cv_signal(pargs->cv);
+ }
+ decref_poll_args(pargs);
+ g_cvfds.pollcount--;
+ if (g_cvfds.shutdown && g_cvfds.pollcount == 0) {
+ gpr_cv_signal(&g_cvfds.shutdown_complete);
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+// This function overrides poll() to handle condition variable wakeup fds
+static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
+ unsigned int i;
+ int res, idx;
+ gpr_cv *pollcv;
+ cv_node *cvn, *prev;
+ nfds_t nsockfds = 0;
+ gpr_thd_id t_id;
+ gpr_thd_options opt;
+ poll_args *pargs = NULL;
+ gpr_mu_lock(&g_cvfds.mu);
+ pollcv = gpr_malloc(sizeof(gpr_cv));
+ gpr_cv_init(pollcv);
+ for (i = 0; i < nfds; i++) {
+ fds[i].revents = 0;
+ if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
+ idx = FD_TO_IDX(fds[i].fd);
+ cvn = gpr_malloc(sizeof(cv_node));
+ cvn->cv = pollcv;
+ cvn->next = g_cvfds.cvfds[idx].cvs;
+ g_cvfds.cvfds[idx].cvs = cvn;
+ // We should return immediately if there are pending events,
+ // but we still need to call poll() to check for socket events
+ if (g_cvfds.cvfds[idx].is_set) {
+ timeout = 0;
+ }
+ } else if (fds[i].fd >= 0) {
+ nsockfds++;
+ }
+ }
+
+ if (nsockfds > 0) {
+ pargs = gpr_malloc(sizeof(struct poll_args));
+ // Both the main thread and calling thread get a reference
+ gpr_ref_init(&pargs->refcount, 2);
+ pargs->cv = pollcv;
+ pargs->fds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ pargs->nfds = nsockfds;
+ pargs->timeout = timeout;
+ pargs->retval = 0;
+ pargs->err = 0;
+ gpr_atm_no_barrier_store(&pargs->status, INPROGRESS);
+ idx = 0;
+ for (i = 0; i < nfds; i++) {
+ if (fds[i].fd >= 0) {
+ pargs->fds[idx].fd = fds[i].fd;
+ pargs->fds[idx].events = fds[i].events;
+ pargs->fds[idx].revents = 0;
+ idx++;
+ }
+ }
+ g_cvfds.pollcount++;
+ opt = gpr_thd_options_default();
+ gpr_thd_options_set_detached(&opt);
+ gpr_thd_new(&t_id, &run_poll, pargs, &opt);
+ // We want the poll() thread to trigger the deadline, so wait forever here
+ gpr_cv_wait(pollcv, &g_cvfds.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ if (gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
+ res = pargs->retval;
+ errno = pargs->err;
+ } else {
+ res = 0;
+ errno = 0;
+ gpr_atm_no_barrier_store(&pargs->status, CANCELLED);
+ }
+ } else {
+ gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+ deadline =
+ gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
+ gpr_cv_wait(pollcv, &g_cvfds.mu, deadline);
+ res = 0;
+ }
+
+ idx = 0;
+ for (i = 0; i < nfds; i++) {
+ if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
+ cvn = g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs;
+ prev = NULL;
+ while (cvn->cv != pollcv) {
+ prev = cvn;
+ cvn = cvn->next;
+ GPR_ASSERT(cvn);
+ }
+ if (!prev) {
+ g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs = cvn->next;
+ } else {
+ prev->next = cvn->next;
+ }
+ gpr_free(cvn);
+
+ if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
+ fds[i].revents = POLLIN;
+ if (res >= 0) res++;
+ }
+ } else if (fds[i].fd >= 0 &&
+ gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
+ fds[i].revents = pargs->fds[idx].revents;
+ idx++;
+ }
+ }
+
+ if (pargs) {
+ decref_poll_args(pargs);
+ } else {
+ gpr_cv_destroy(pollcv);
+ gpr_free(pollcv);
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+
+ return res;
+}
+
+static void global_cv_fd_table_init() {
+ gpr_mu_init(&g_cvfds.mu);
+ gpr_mu_lock(&g_cvfds.mu);
+ gpr_cv_init(&g_cvfds.shutdown_complete);
+ g_cvfds.shutdown = 0;
+ g_cvfds.pollcount = 0;
+ g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
+ g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.free_fds = NULL;
+ for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
+ g_cvfds.cvfds[i].is_set = 0;
+ g_cvfds.cvfds[i].cvs = NULL;
+ g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[i];
+ }
+ // Override the poll function with one that supports cvfds
+ g_cvfds.poll = grpc_poll_function;
+ grpc_poll_function = &cvfd_poll;
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+static void global_cv_fd_table_shutdown() {
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.shutdown = 1;
+ // Attempt to wait for all abandoned poll() threads to terminate
+ // Not doing so will result in reported memory leaks
+ if (g_cvfds.pollcount > 0) {
+ int res = gpr_cv_wait(&g_cvfds.shutdown_complete, &g_cvfds.mu,
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(3, GPR_TIMESPAN)));
+ GPR_ASSERT(res == 0);
+ }
+ gpr_cv_destroy(&g_cvfds.shutdown_complete);
+ grpc_poll_function = g_cvfds.poll;
+ gpr_free(g_cvfds.cvfds);
+ gpr_mu_unlock(&g_cvfds.mu);
+ gpr_mu_destroy(&g_cvfds.mu);
+}
+
+/*******************************************************************************
* event engine binding
*/
-static void shutdown_engine(void) { pollset_global_shutdown(); }
+static void shutdown_engine(void) {
+ pollset_global_shutdown();
+ if (grpc_cv_wakeup_fds_enabled()) {
+ global_cv_fd_table_shutdown();
+ }
+}
static const grpc_event_engine_vtable vtable = {
.pollset_size = sizeof(grpc_pollset),
@@ -1273,11 +1531,29 @@ static const grpc_event_engine_vtable vtable = {
.kick_poller = kick_poller,
+ .workqueue_ref = workqueue_ref,
+ .workqueue_unref = workqueue_unref,
+ .workqueue_enqueue = workqueue_enqueue,
+
.shutdown_engine = shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(void) {
+ if (!grpc_has_wakeup_fd()) {
+ return NULL;
+ }
+ if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ return NULL;
+ }
+ return &vtable;
+}
+
+const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void) {
+ global_cv_fd_table_init();
+ grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ global_cv_fd_table_shutdown();
+ grpc_enable_cv_wakeup_fds(0);
return NULL;
}
return &vtable;
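
The poll-cv additions above work by saving the current grpc_poll_function into g_cvfds.poll and installing cvfd_poll in its place, so that negative file descriptors can be serviced by condition variables while real sockets are handed to a background poll() thread. A minimal sketch of the override pattern only (hypothetical names, not gRPC code):

#include <poll.h>

typedef int (*poll_fn)(struct pollfd *, nfds_t, int);

static poll_fn real_poll = poll; /* saved original, like g_cvfds.poll */

static int wrapping_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
  /* A real implementation would peel off fds < 0 and wait on condition
     variables for them; this sketch just forwards everything. */
  return real_poll(fds, nfds, timeout);
}

/* Installation mirrors global_cv_fd_table_init(): save the old pointer,
   point the global poll hook at wrapping_poll, and restore it on shutdown. */
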
diff --git a/src/core/lib/iomgr/ev_poll_posix.h b/src/core/lib/iomgr/ev_poll_posix.h
index 291736a2db..202ffca14c 100644
--- a/src/core/lib/iomgr/ev_poll_posix.h
+++ b/src/core/lib/iomgr/ev_poll_posix.h
@@ -37,5 +37,6 @@
#include "src/core/lib/iomgr/ev_posix.h"
const grpc_event_engine_vtable *grpc_init_poll_posix(void);
+const grpc_event_engine_vtable *grpc_init_poll_cv_posix(void);
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index 6536672685..ef36ba89b2 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_posix.h"
@@ -66,6 +66,7 @@ typedef struct {
static const event_engine_factory g_factories[] = {
{"epoll", grpc_init_epoll_linux},
{"poll", grpc_init_poll_posix},
+ {"poll-cv", grpc_init_poll_cv_posix},
{"legacy", grpc_init_poll_and_epoll_posix},
};
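
The new "poll-cv" entry extends the name-keyed factory table; engines are tried in order, and a factory may return NULL when its mechanism is unavailable (as grpc_init_poll_posix now does without a wakeup fd). A hedged sketch of how such a table can be scanned, with hypothetical types:

#include <string.h>

typedef struct engine engine;
typedef struct {
  const char *name;
  const engine *(*factory)(void);
} engine_factory;

static const engine *pick_engine(const engine_factory *factories, size_t n,
                                 const char *wanted) {
  for (size_t i = 0; i < n; i++) {
    if (strcmp(factories[i].name, wanted) == 0) {
      const engine *e = factories[i].factory();
      if (e != NULL) return e; /* NULL means "not supported here" */
    }
  }
  return NULL;
}
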
@@ -258,4 +259,27 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_kick_poller(void) { return g_event_engine->kick_poller(); }
-#endif // GPR_POSIX_SOCKET
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return g_event_engine->workqueue_ref(workqueue, file, line, reason);
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {
+ g_event_engine->workqueue_unref(exec_ctx, workqueue, file, line, reason);
+}
+#else
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return g_event_engine->workqueue_ref(workqueue);
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
+ g_event_engine->workqueue_unref(exec_ctx, workqueue);
+}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
+ g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
+}
+
+#endif // GRPC_POSIX_SOCKET
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index c2aa1756ea..2fdef06838 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -40,6 +40,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/iomgr/workqueue.h"
typedef struct grpc_fd grpc_fd;
@@ -95,6 +96,18 @@ typedef struct grpc_event_engine_vtable {
grpc_error *(*kick_poller)(void);
void (*shutdown_engine)(void);
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+ grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason);
+ void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason);
+#else
+ grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
+ void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
+#endif
+ void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error);
} grpc_event_engine_vtable;
void grpc_event_engine_init(void);
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index ac7785ec13..604713e578 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -37,6 +37,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
@@ -60,18 +61,43 @@ bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
- while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
- grpc_closure *c = exec_ctx->closure_list.head;
- exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error;
- did_something = true;
- GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
+ for (;;) {
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_closure *c = exec_ctx->closure_list.head;
+ exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ did_something = true;
+ grpc_closure_run(exec_ctx, c, c->error_data.error);
+ c = next;
+ }
+ } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
+ break;
+ }
+ }
+ GPR_ASSERT(exec_ctx->active_combiner == NULL);
+ if (exec_ctx->stealing_from_workqueue != NULL) {
+ if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
+ grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
+ exec_ctx->stolen_closure,
+ exec_ctx->stolen_closure->error_data.error);
+ GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
+ "exec_ctx_sched");
+ exec_ctx->stealing_from_workqueue = NULL;
+ exec_ctx->stolen_closure = NULL;
+ } else {
+ grpc_closure *c = exec_ctx->stolen_closure;
+ GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
+ "exec_ctx_sched");
+ exec_ctx->stealing_from_workqueue = NULL;
+ exec_ctx->stolen_closure = NULL;
+ grpc_error *error = c->error_data.error;
+ GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
- c = next;
+ GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
+ grpc_exec_ctx_flush(exec_ctx);
+ did_something = true;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
@@ -86,12 +112,25 @@ void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error,
grpc_workqueue *offload_target_or_null) {
+ GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
if (offload_target_or_null == NULL) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- } else {
+ } else if (exec_ctx->stealing_from_workqueue == NULL) {
+ exec_ctx->stealing_from_workqueue = offload_target_or_null;
+ closure->error_data.error = error;
+ exec_ctx->stolen_closure = closure;
+ } else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
+ } else { /* stealing_from_workqueue == offload_target_or_null */
+ grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
+ exec_ctx->stolen_closure,
+ exec_ctx->stolen_closure->error_data.error);
+ closure->error_data.error = error;
+ exec_ctx->stolen_closure = closure;
+ GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
}
+ GPR_TIMER_END("grpc_exec_ctx_sched", 0);
}
void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 4d20ecf922..7e50cb9825 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -66,15 +66,33 @@ typedef struct grpc_combiner grpc_combiner;
#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
struct grpc_exec_ctx {
grpc_closure_list closure_list;
+ /** The workqueue we're stealing work from.
+ As items are queued to the execution context, we try to steal one
+ workqueue item and execute it inline (assuming the exec_ctx is not
+ finished) - doing so does not invalidate the workqueue's contract, and
+ provides a small latency win in cases where we get a hit */
+ grpc_workqueue *stealing_from_workqueue;
+ /** The workqueue item that was stolen from the workqueue above. When new
+ items are scheduled to be offloaded to that workqueue, we need to update
+ this like a 1-deep fifo to maintain the invariant that workqueue items
+ queued by one thread are started in order */
+ grpc_closure *stolen_closure;
/** currently active combiner: updated only via combiner.c */
grpc_combiner *active_combiner;
+ /** last active combiner in the active combiner list */
+ grpc_combiner *last_combiner;
bool cached_ready_to_finish;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
};
+/* initializer for grpc_exec_ctx:
+ prefer to use GRPC_EXEC_CTX_INIT whenever possible */
#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
- { GRPC_CLOSURE_LIST_INIT, NULL, false, finish_check_arg, finish_check }
+ { \
+ GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
+ finish_check \
+ }
#else
struct grpc_exec_ctx {
bool cached_ready_to_finish;
@@ -85,8 +103,10 @@ struct grpc_exec_ctx {
{ false, finish_check_arg, finish_check }
#endif
+/* initialize an execution context at the top level of an API call into grpc
+ (this is safe to use elsewhere, though possibly not as efficient) */
#define GRPC_EXEC_CTX_INIT \
- GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
+ GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
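
The stealing_from_workqueue/stolen_closure pair added above behaves like a one-slot FIFO: the first closure bound for a workqueue is held back for inline execution, and a second closure bound for the same workqueue pushes the held one back so per-thread ordering is preserved. A small sketch of that rule with hypothetical names (wq_enqueue stands in for grpc_workqueue_enqueue):

typedef struct closure closure;
typedef struct workqueue workqueue;

/* Assumed helper, analogous to grpc_workqueue_enqueue(). */
extern void wq_enqueue(workqueue *wq, closure *c);

struct steal_slot {
  workqueue *wq;   /* stealing_from_workqueue */
  closure *stolen; /* stolen_closure */
};

static void schedule_on(struct steal_slot *s, workqueue *wq, closure *c) {
  if (s->wq == NULL) {
    s->wq = wq; /* first item: keep it for inline execution */
    s->stolen = c;
  } else if (s->wq == wq) {
    wq_enqueue(wq, s->stolen); /* push the older item back to keep FIFO order */
    s->stolen = c;
  } else {
    wq_enqueue(wq, c); /* different workqueue: enqueue directly */
  }
}
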
diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.c
index 2532e52e48..60ebe43676 100644
--- a/src/core/lib/iomgr/iocp_windows.c
+++ b/src/core/lib/iomgr/iocp_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <winsock2.h>
@@ -166,4 +166,4 @@ void grpc_iocp_add_socket(grpc_winsocket *socket) {
GPR_ASSERT(ret == g_iocp);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c
index d67d388b8c..4fd83e0b22 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.c
@@ -112,6 +112,14 @@ void grpc_iomgr_shutdown(void) {
continue;
}
if (g_root_object.next != &g_root_object) {
+ if (grpc_iomgr_abort_on_leaks()) {
+ gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
+ count_objects());
+ dump_objects("LEAKED");
+ abort();
+ }
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
@@ -122,9 +130,6 @@ void grpc_iomgr_shutdown(void) {
"memory leaks are likely",
count_objects());
dump_objects("LEAKED");
- if (grpc_iomgr_abort_on_leaks()) {
- abort();
- }
}
break;
}
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 6c82de78ac..c1cfaf302e 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -34,6 +34,8 @@
#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_H
#define GRPC_CORE_LIB_IOMGR_IOMGR_H
+#include "src/core/lib/iomgr/port.h"
+
/** Initializes the iomgr. */
void grpc_iomgr_init(void);
diff --git a/src/core/lib/iomgr/iomgr_posix.c b/src/core/lib/iomgr/iomgr_posix.c
index cede97f4c6..f5ee0c9ee4 100644
--- a/src/core/lib/iomgr/iomgr_posix.c
+++ b/src/core/lib/iomgr/iomgr_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.c
new file mode 100644
index 0000000000..96516ff167
--- /dev/null
+++ b/src/core/lib/iomgr/iomgr_uv.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/pollset_uv.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+
+void grpc_iomgr_platform_init(void) {
+ grpc_pollset_global_init();
+ grpc_register_tracer("tcp", &grpc_tcp_trace);
+}
+void grpc_iomgr_platform_flush(void) {}
+void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/iomgr_windows.c b/src/core/lib/iomgr/iomgr_windows.c
index 7653f6e635..b659264ede 100644
--- a/src/core/lib/iomgr/iomgr_windows.c
+++ b/src/core/lib/iomgr/iomgr_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/sockaddr_windows.h"
diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.c
new file mode 100644
index 0000000000..e5ef8b29e0
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_set_uv.c
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/iomgr/pollset_set.h"
+
+grpc_pollset_set* grpc_pollset_set_create(void) {
+ return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
+}
+
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
+
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {}
+
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.c
index a35a9766fc..645650db9b 100644
--- a/src/core/lib/iomgr/pollset_set_windows.c
+++ b/src/core/lib/iomgr/pollset_set_windows.c
@@ -31,10 +31,10 @@
*
*/
-#include <grpc/support/port_platform.h>
#include <stdint.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/pollset_set_windows.h"
@@ -60,4 +60,4 @@ void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
new file mode 100644
index 0000000000..3a74b842b6
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include <string.h>
+
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/pollset_uv.h"
+
+struct grpc_pollset {
+ uv_timer_t timer;
+ int shutting_down;
+};
+
+/* Indicates that grpc_pollset_work should run an iteration of the UV loop
+ before running callbacks. This defaults to 1, and should be disabled if
+ grpc_pollset_work will be called within the callstack of uv_run */
+int grpc_pollset_work_run_loop;
+
+gpr_mu grpc_polling_mu;
+
+size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
+
+void grpc_pollset_global_init(void) {
+ gpr_mu_init(&grpc_polling_mu);
+ grpc_pollset_work_run_loop = 1;
+}
+
+void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
+
+void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+ *mu = &grpc_polling_mu;
+ memset(pollset, 0, sizeof(grpc_pollset));
+ uv_timer_init(uv_default_loop(), &pollset->timer);
+ pollset->shutting_down = 0;
+}
+
+static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
+
+void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
+ GPR_ASSERT(!pollset->shutting_down);
+ pollset->shutting_down = 1;
+ if (grpc_pollset_work_run_loop) {
+ // Drain any pending UV callbacks without blocking
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+}
+
+void grpc_pollset_destroy(grpc_pollset *pollset) {
+ uv_close((uv_handle_t *)&pollset->timer, timer_close_cb);
+ // timer.data is a boolean indicating that the timer has finished closing
+ pollset->timer.data = (void *)0;
+ if (grpc_pollset_work_run_loop) {
+ while (!pollset->timer.data) {
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ }
+}
+
+void grpc_pollset_reset(grpc_pollset *pollset) {
+ GPR_ASSERT(pollset->shutting_down);
+ pollset->shutting_down = 0;
+}
+
+static void timer_run_cb(uv_timer_t *timer) {}
+
+grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl,
+ gpr_timespec now, gpr_timespec deadline) {
+ uint64_t timeout;
+ gpr_mu_unlock(&grpc_polling_mu);
+ if (grpc_pollset_work_run_loop) {
+ if (gpr_time_cmp(deadline, now) >= 0) {
+ timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ } else {
+ timeout = 0;
+ }
+ /* We special-case timeout=0 so that we don't bother with the timer when
+ the loop won't block anyway */
+ if (timeout > 0) {
+ uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0);
+ /* Run until there is some I/O activity or the timer triggers. It doesn't
+ matter which happens */
+ uv_run(uv_default_loop(), UV_RUN_ONCE);
+ uv_timer_stop(&pollset->timer);
+ } else {
+ uv_run(uv_default_loop(), UV_RUN_NOWAIT);
+ }
+ }
+ if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+ grpc_exec_ctx_flush(exec_ctx);
+ }
+ gpr_mu_lock(&grpc_polling_mu);
+ return GRPC_ERROR_NONE;
+}
+
+grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
+ return GRPC_ERROR_NONE;
+}
+
+#endif /* GRPC_UV */
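
grpc_pollset_work above bounds a uv_run(UV_RUN_ONCE) call with a one-shot timer so the loop returns on either I/O activity or the deadline. A standalone libuv sketch of that pattern (not gRPC code; error handling omitted):

#include <stdint.h>
#include <uv.h>

static void on_deadline(uv_timer_t *timer) { (void)timer; /* just wakes uv_run */ }

static void run_loop_until(uint64_t timeout_ms) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  if (timeout_ms > 0) {
    uv_timer_start(&timer, on_deadline, timeout_ms, 0);
    uv_run(uv_default_loop(), UV_RUN_ONCE); /* returns on I/O or the timer */
    uv_timer_stop(&timer);
  } else {
    uv_run(uv_default_loop(), UV_RUN_NOWAIT); /* poll without blocking */
  }
}
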
diff --git a/src/core/lib/iomgr/pollset_uv.h b/src/core/lib/iomgr/pollset_uv.h
new file mode 100644
index 0000000000..0715eb4295
--- /dev/null
+++ b/src/core/lib/iomgr/pollset_uv.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
+#define GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
+
+extern int grpc_pollset_work_run_loop;
+
+void grpc_pollset_global_init(void);
+void grpc_pollset_global_shutdown(void);
+
+#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 626dd784b3..5540303e49 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
@@ -241,4 +241,4 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
void grpc_kick_poller(void) { grpc_iocp_kick(); }
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
new file mode 100644
index 0000000000..f1897bb91f
--- /dev/null
+++ b/src/core/lib/iomgr/port.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifndef GRPC_CORE_LIB_IOMGR_PORT_H
+#define GRPC_CORE_LIB_IOMGR_PORT_H
+
+#if defined(GRPC_UV)
+// Do nothing
+#elif defined(GPR_MANYLINUX1)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_WINDOWS)
+#define GRPC_TIMER_USE_GENERIC 1
+#define GRPC_WINSOCK_SOCKET 1
+#define GRPC_WINDOWS_SOCKETUTILS 1
+#elif defined(GPR_ANDROID)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_LINUX_EVENTFD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_LINUX)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_IP_PKTINFO 1
+#define GRPC_HAVE_MSG_NOSIGNAL 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_LINUX_MULTIPOLL_WITH_EPOLL 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#ifdef __GLIBC_PREREQ
+#if __GLIBC_PREREQ(2, 9)
+#define GRPC_LINUX_EPOLL 1
+#define GRPC_LINUX_EVENTFD 1
+#endif
+#if __GLIBC_PREREQ(2, 10)
+#define GRPC_LINUX_SOCKETUTILS 1
+#endif
+#endif
+#ifndef GRPC_LINUX_EVENTFD
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#endif
+#ifndef GRPC_LINUX_SOCKETUTILS
+#define GRPC_POSIX_SOCKETUTILS
+#endif
+#elif defined(GPR_APPLE)
+#define GRPC_HAVE_SO_NOSIGPIPE 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_MSG_IOVLEN_TYPE int
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_FREEBSD)
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_SO_NOSIGPIPE 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_NACL)
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
+#elif !defined(GPR_NO_AUTODETECT_PLATFORM)
+#error "Platform not recognized"
+#endif
+
+#if defined(GRPC_POSIX_SOCKET) + defined(GRPC_WINSOCK_SOCKET) + \
+ defined(GRPC_CUSTOM_SOCKET) + defined(GRPC_UV) != \
+ 1
+#error Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GRPC_CUSTOM_SOCKET, GRPC_UV
+#endif
+
+#endif /* GRPC_CORE_LIB_IOMGR_PORT_H */
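
The final check in port.h relies on defined(X) evaluating to 1 or 0 inside an #if, so summing the macros asserts that exactly one socket backend was selected. A minimal illustration of the same trick with placeholder feature names:

#define FEATURE_A 1
/* FEATURE_B and FEATURE_C intentionally left undefined */

#if defined(FEATURE_A) + defined(FEATURE_B) + defined(FEATURE_C) != 1
#error exactly one of FEATURE_A, FEATURE_B, FEATURE_C must be defined
#endif
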
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index ddbe375755..275924448a 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -36,7 +36,6 @@
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/iomgr.h"
#define GRPC_MAX_SOCKADDR_SIZE 128
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 4e9f978584..de791b2b67 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -31,12 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
-#ifdef GPR_POSIX_SOCKET
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <string.h>
#include <sys/types.h>
@@ -49,7 +50,6 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
new file mode 100644
index 0000000000..b8295acfa1
--- /dev/null
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -0,0 +1,231 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+
+#include <string.h>
+
+typedef struct request {
+ grpc_closure *on_done;
+ grpc_resolved_addresses **addresses;
+ struct addrinfo *hints;
+} request;
+
+static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
+ grpc_resolved_addresses **addresses) {
+ struct addrinfo *resp;
+ size_t i;
+ if (status != 0) {
+ grpc_error *error;
+ *addresses = NULL;
+ error = GRPC_ERROR_CREATE("getaddrinfo failed");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+ (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses)->naddrs = 0;
+ for (resp = result; resp != NULL; resp = resp->ai_next) {
+ (*addresses)->naddrs++;
+ }
+ (*addresses)->addrs =
+ gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ i = 0;
+ for (resp = result; resp != NULL; resp = resp->ai_next) {
+ memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
+ (*addresses)->addrs[i].len = resp->ai_addrlen;
+ i++;
+ }
+
+ {
+ for (i = 0; i < (*addresses)->naddrs; i++) {
+ char *buf;
+ grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
+ gpr_free(buf);
+ }
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
+ struct addrinfo *res) {
+ request *r = (request *)req->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_error *error;
+ error = handle_addrinfo_result(status, res, r->addresses);
+ grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ gpr_free(r->hints);
+ gpr_free(r);
+ gpr_free(req);
+ uv_freeaddrinfo(res);
+}
+
+static grpc_error *try_split_host_port(const char *name,
+ const char *default_port, char **host,
+ char **port) {
+ /* parse name, splitting it into host and port parts */
+ grpc_error *error;
+ gpr_split_host_port(name, host, port);
+ if (*host == NULL) {
+ char *msg;
+ gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return error;
+ }
+ if (*port == NULL) {
+ if (default_port == NULL) {
+ char *msg;
+ gpr_asprintf(&msg, "no port in name '%s'", name);
+ error = GRPC_ERROR_CREATE(msg);
+ gpr_free(msg);
+ return error;
+ }
+ *port = gpr_strdup(default_port);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error *blocking_resolve_address_impl(
+ const char *name, const char *default_port,
+ grpc_resolved_addresses **addresses) {
+ char *host;
+ char *port;
+ struct addrinfo hints;
+ uv_getaddrinfo_t req;
+ int s;
+ grpc_error *err;
+
+ req.addrinfo = NULL;
+
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ goto done;
+ }
+
+ /* Call getaddrinfo */
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
+ hints.ai_socktype = SOCK_STREAM; /* stream socket */
+ hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
+
+ s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
+ err = handle_addrinfo_result(s, req.addrinfo, addresses);
+
+done:
+ gpr_free(host);
+ gpr_free(port);
+ if (req.addrinfo) {
+ uv_freeaddrinfo(req.addrinfo);
+ }
+ return err;
+}
+
+grpc_error *(*grpc_blocking_resolve_address)(
+ const char *name, const char *default_port,
+ grpc_resolved_addresses **addresses) = blocking_resolve_address_impl;
+
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
+ if (addrs != NULL) {
+ gpr_free(addrs->addrs);
+ }
+ gpr_free(addrs);
+}
+
+static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ uv_getaddrinfo_t *req;
+ request *r;
+ struct addrinfo *hints;
+ char *host;
+ char *port;
+ grpc_error *err;
+ int s;
+ err = try_split_host_port(name, default_port, &host, &port);
+ if (err != GRPC_ERROR_NONE) {
+ grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ return;
+ }
+ r = gpr_malloc(sizeof(request));
+ r->on_done = on_done;
+ r->addresses = addrs;
+ req = gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req->data = r;
+
+ /* Call getaddrinfo */
+ hints = gpr_malloc(sizeof(struct addrinfo));
+ memset(hints, 0, sizeof(struct addrinfo));
+ hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
+ hints->ai_socktype = SOCK_STREAM; /* stream socket */
+ hints->ai_flags = AI_PASSIVE; /* for wildcard IP address */
+ r->hints = hints;
+
+ s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port,
+ hints);
+
+ if (s != 0) {
+ *addrs = NULL;
+ err = GRPC_ERROR_CREATE("getaddrinfo failed");
+ err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
+ grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+ gpr_free(r);
+ gpr_free(req);
+ gpr_free(hints);
+ }
+}
+
+void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) =
+ resolve_address_impl;
+
+#endif /* GRPC_UV */
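
resolve_address_impl above drives the asynchronous uv_getaddrinfo() API: the request and hints are heap-allocated, and the callback converts the addrinfo chain before freeing everything. A self-contained libuv sketch of that flow (illustrative only; the hostname and port are placeholders):

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_resolved(uv_getaddrinfo_t *req, int status, struct addrinfo *res) {
  if (status != 0) {
    fprintf(stderr, "getaddrinfo failed: %s\n", uv_strerror(status));
  } else {
    /* walk res / res->ai_next here, then release the chain */
    uv_freeaddrinfo(res);
  }
  free(req);
}

int main(void) {
  uv_getaddrinfo_t *req = malloc(sizeof(*req));
  struct addrinfo hints = {0};
  hints.ai_family = AF_UNSPEC;     /* ipv4 or ipv6 */
  hints.ai_socktype = SOCK_STREAM; /* stream socket */
  uv_getaddrinfo(uv_default_loop(), req, on_resolved, "localhost", "443", &hints);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
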
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 2af8af82dc..e139293c03 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -31,12 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
-#ifdef GPR_WINSOCK_SOCKET
+#include "src/core/lib/iomgr/port.h"
+#ifdef GRPC_WINSOCK_SOCKET
-#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <string.h>
#include <sys/types.h>
@@ -124,8 +125,7 @@ static grpc_error *blocking_resolve_address_impl(
{
for (i = 0; i < (*addresses)->naddrs; i++) {
char *buf;
- grpc_sockaddr_to_string(
- &buf, (struct sockaddr *)&(*addresses)->addrs[i].addr, 0);
+ grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
gpr_free(buf);
}
}
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
new file mode 100644
index 0000000000..e39cf28e35
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -0,0 +1,717 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/resource_quota.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/iomgr/combiner.h"
+
+int grpc_resource_quota_trace = 0;
+
+struct grpc_resource_quota {
+ /* refcount */
+ gpr_refcount refs;
+
+ /* Master combiner lock: all activity on a quota executes under this combiner
+ * (so no mutex is needed for this data structure)
+ */
+ grpc_combiner *combiner;
+ /* Size of the resource quota */
+ int64_t size;
+ /* Amount of free memory in the resource quota */
+ int64_t free_pool;
+
+ /* Has rq_step been scheduled to occur? */
+ bool step_scheduled;
+ /* Are we currently reclaiming memory */
+ bool reclaiming;
+ /* Closure around rq_step */
+ grpc_closure rq_step_closure;
+ /* Closure around rq_reclamation_done */
+ grpc_closure rq_reclamation_done_closure;
+
+ /* Roots of all resource user lists */
+ grpc_resource_user *roots[GRPC_RULIST_COUNT];
+
+ char *name;
+};
+
+/*******************************************************************************
+ * list management
+ */
+
+static void rulist_add_head(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = *root;
+ resource_user->links[list].prev = (*root)->links[list].prev;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ *root = resource_user;
+ }
+}
+
+static void rulist_add_tail(grpc_resource_user *resource_user,
+ grpc_rulist list) {
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_user **root = &resource_quota->roots[list];
+ if (*root == NULL) {
+ *root = resource_user;
+ resource_user->links[list].next = resource_user->links[list].prev =
+ resource_user;
+ } else {
+ resource_user->links[list].next = (*root)->links[list].next;
+ resource_user->links[list].prev = *root;
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev->links[list].next = resource_user;
+ }
+}
+
+static bool rulist_empty(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ return resource_quota->roots[list] == NULL;
+}
+
+static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
+ grpc_rulist list) {
+ grpc_resource_user **root = &resource_quota->roots[list];
+ grpc_resource_user *resource_user = *root;
+ if (resource_user == NULL) {
+ return NULL;
+ }
+ if (resource_user->links[list].next == resource_user) {
+ *root = NULL;
+ } else {
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+ *root = resource_user->links[list].next;
+ }
+ resource_user->links[list].next = resource_user->links[list].prev = NULL;
+ return resource_user;
+}
+
+static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) {
+ if (resource_user->links[list].next == NULL) return;
+ grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = resource_user->links[list].next;
+ if (resource_quota->roots[list] == resource_user) {
+ resource_quota->roots[list] = NULL;
+ }
+ }
+ resource_user->links[list].next->links[list].prev =
+ resource_user->links[list].prev;
+ resource_user->links[list].prev->links[list].next =
+ resource_user->links[list].next;
+}
+
+/*******************************************************************************
+ * resource quota state machine
+ */
+
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive);
+
+static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->step_scheduled = false;
+ do {
+ if (rq_alloc(exec_ctx, resource_quota)) goto done;
+ } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota));
+
+ if (!rq_reclaim(exec_ctx, resource_quota, false)) {
+ rq_reclaim(exec_ctx, resource_quota, true);
+ }
+
+done:
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+static void rq_step_sched(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (resource_quota->step_scheduled) return;
+ resource_quota->step_scheduled = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
+ &resource_quota->rq_step_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+/* returns true if all allocations are completed */
+static bool rq_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool < 0 &&
+ -resource_user->free_pool <= resource_quota->free_pool) {
+ int64_t amt = -resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool -= amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ } else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
+ resource_quota->name, resource_user->name);
+ }
+ if (resource_user->free_pool >= 0) {
+ resource_user->allocating = false;
+ grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+ gpr_mu_unlock(&resource_user->mu);
+ } else {
+ rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+ gpr_mu_unlock(&resource_user->mu);
+ return false;
+ }
+ }
+ return true;
+}
+
+/* returns true if any memory could be reclaimed from a per-user free pool */
+static bool rq_reclaim_from_per_user_free_pool(
+ grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
+ grpc_resource_user *resource_user;
+ while ((resource_user = rulist_pop_head(resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
+ gpr_mu_lock(&resource_user->mu);
+ if (resource_user->free_pool > 0) {
+ int64_t amt = resource_user->free_pool;
+ resource_user->free_pool = 0;
+ resource_quota->free_pool += amt;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
+ resource_quota->name, resource_user->name, amt,
+ resource_quota->free_pool);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+ return true;
+ } else {
+ gpr_mu_unlock(&resource_user->mu);
+ }
+ }
+ return false;
+}
+
+/* returns true if reclamation is proceeding */
+static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota, bool destructive) {
+ if (resource_quota->reclaiming) return true;
+ grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
+ : GRPC_RULIST_RECLAIMER_BENIGN;
+ grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
+ if (resource_user == NULL) return false;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
+ resource_quota->name, resource_user->name,
+ destructive ? "destructive" : "benign");
+ }
+ resource_quota->reclaiming = true;
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure *c = resource_user->reclaimers[destructive];
+ resource_user->reclaimers[destructive] = NULL;
+ grpc_closure_run(exec_ctx, c, GRPC_ERROR_NONE);
+ return true;
+}
+
+/*******************************************************************************
+ * ru_slice: a slice implementation that is backed by a grpc_resource_user
+ */
+
+typedef struct {
+ gpr_slice_refcount base;
+ gpr_refcount refs;
+ grpc_resource_user *resource_user;
+ size_t size;
+} ru_slice_refcount;
+
+static void ru_slice_ref(void *p) {
+ ru_slice_refcount *rc = p;
+ gpr_ref(&rc->refs);
+}
+
+static void ru_slice_unref(void *p) {
+ ru_slice_refcount *rc = p;
+ if (gpr_unref(&rc->refs)) {
+ /* TODO(ctiller): this is dangerous, but I think safe for now:
+ we have no guarantee here that we're at a safe point for creating an
+ execution context, but we have no way of writing this code otherwise.
+ In the future: consider lifting gpr_slice to grpc, and offering an
+ internal_{ref,unref} pair that is execution context aware.
+ Alternatively, make exec_ctx thread local and 'do the right thing'
+ (whatever that is) if it is NULL. */
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_user_free(&exec_ctx, rc->resource_user, rc->size);
+ grpc_exec_ctx_finish(&exec_ctx);
+ gpr_free(rc);
+ }
+}
+
+static gpr_slice ru_slice_create(grpc_resource_user *resource_user,
+ size_t size) {
+ ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ rc->base.ref = ru_slice_ref;
+ rc->base.unref = ru_slice_unref;
+ gpr_ref_init(&rc->refs, 1);
+ rc->resource_user = resource_user;
+ rc->size = size;
+ gpr_slice slice;
+ slice.refcount = &rc->base;
+ slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
+ slice.data.refcounted.length = size;
+ return slice;
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: resource user manipulation under
+ * the combiner
+ */
+
+static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
+}
+
+static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
+}
+
+static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
+}
+
+static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
+ grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ if (!rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_AWAITING_ALLOCATION) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_BENIGN) &&
+ rulist_empty(resource_user->resource_quota,
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+ rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+}
+
+static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+ grpc_resource_user *resource_user = ru;
+ GPR_ASSERT(resource_user->allocated == 0);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ rulist_remove(resource_user, (grpc_rulist)i);
+ }
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
+ GRPC_ERROR_CANCELLED, NULL);
+ grpc_exec_ctx_sched(exec_ctx, (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure),
+ GRPC_ERROR_NONE, NULL);
+ if (resource_user->free_pool != 0) {
+ resource_user->resource_quota->free_pool += resource_user->free_pool;
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
+}
+
+static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_resource_user_slice_allocator *slice_allocator = arg;
+ if (error == GRPC_ERROR_NONE) {
+ for (size_t i = 0; i < slice_allocator->count; i++) {
+ gpr_slice_buffer_add_indexed(
+ slice_allocator->dest, ru_slice_create(slice_allocator->resource_user,
+ slice_allocator->length));
+ }
+ }
+ grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+}
+
+/*******************************************************************************
+ * grpc_resource_quota internal implementation: quota manipulation under the
+ * combiner
+ */
+
+typedef struct {
+ int64_t size;
+ grpc_resource_quota *resource_quota;
+ grpc_closure closure;
+} rq_resize_args;
+
+static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
+ rq_resize_args *a = args;
+ int64_t delta = a->size - a->resource_quota->size;
+ a->resource_quota->size += delta;
+ a->resource_quota->free_pool += delta;
+ rq_step_sched(exec_ctx, a->resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, a->resource_quota);
+ gpr_free(a);
+}
+
+static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
+ grpc_error *error) {
+ grpc_resource_quota *resource_quota = rq;
+ resource_quota->reclaiming = false;
+ rq_step_sched(exec_ctx, resource_quota);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+}
+
+/*******************************************************************************
+ * grpc_resource_quota api
+ */
+
+/* Public API */
+grpc_resource_quota *grpc_resource_quota_create(const char *name) {
+ grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ gpr_ref_init(&resource_quota->refs, 1);
+ resource_quota->combiner = grpc_combiner_create(NULL);
+ resource_quota->free_pool = INT64_MAX;
+ resource_quota->size = INT64_MAX;
+ resource_quota->step_scheduled = false;
+ resource_quota->reclaiming = false;
+ if (name != NULL) {
+ resource_quota->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
+ (intptr_t)resource_quota);
+ }
+ grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+ grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
+ rq_reclamation_done, resource_quota);
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_quota->roots[i] = NULL;
+ }
+ return resource_quota;
+}
+
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota) {
+ if (gpr_unref(&resource_quota->refs)) {
+ grpc_combiner_destroy(exec_ctx, resource_quota->combiner);
+ gpr_free(resource_quota->name);
+ gpr_free(resource_quota);
+ }
+}
+
+/* Public API */
+void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota) {
+ gpr_ref(&resource_quota->refs);
+ return resource_quota;
+}
+
+/* Public API */
+void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
+ grpc_resource_quota_internal_ref(resource_quota);
+}
+
+/* Public API */
+void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
+ size_t size) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ rq_resize_args *a = gpr_malloc(sizeof(*a));
+ a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
+ a->size = (int64_t)size;
+ grpc_closure_init(&a->closure, rq_resize, a);
+ grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
+ GRPC_ERROR_NONE, false);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+/*******************************************************************************
+ * grpc_resource_user channel args api
+ */
+
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ if (channel_args->args[i].type == GRPC_ARG_POINTER) {
+ return grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ } else {
+ gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
+ }
+ }
+ }
+ return grpc_resource_quota_create(NULL);
+}
+
+static void *rq_copy(void *rq) {
+ grpc_resource_quota_ref(rq);
+ return rq;
+}
+
+static void rq_destroy(void *rq) { grpc_resource_quota_unref(rq); }
+
+static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+
+const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
+ static const grpc_arg_pointer_vtable vtable = {rq_copy, rq_destroy, rq_cmp};
+ return &vtable;
+}
+
+/*******************************************************************************
+ * grpc_resource_user api
+ */
+
+void grpc_resource_user_init(grpc_resource_user *resource_user,
+ grpc_resource_quota *resource_quota,
+ const char *name) {
+ resource_user->resource_quota =
+ grpc_resource_quota_internal_ref(resource_quota);
+ grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
+ resource_user);
+ grpc_closure_init(&resource_user->add_to_free_pool_closure,
+ &ru_add_to_free_pool, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[0],
+ &ru_post_benign_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->post_reclaimer_closure[1],
+ &ru_post_destructive_reclaimer, resource_user);
+ grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
+ resource_user);
+ gpr_mu_init(&resource_user->mu);
+ resource_user->allocated = 0;
+ resource_user->free_pool = 0;
+ grpc_closure_list_init(&resource_user->on_allocated);
+ resource_user->allocating = false;
+ resource_user->added_to_free_pool = false;
+ gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure, 0);
+ resource_user->reclaimers[0] = NULL;
+ resource_user->reclaimers[1] = NULL;
+ for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
+ resource_user->links[i].next = resource_user->links[i].prev = NULL;
+ }
+ if (name != NULL) {
+ resource_user->name = gpr_strdup(name);
+ } else {
+ gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR,
+ (intptr_t)resource_user);
+ }
+}
+
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ grpc_closure *on_done) {
+ gpr_mu_lock(&resource_user->mu);
+ GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) ==
+ 0);
+ gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure,
+ (gpr_atm)on_done);
+ if (resource_user->allocated == 0) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->destroy_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
+ gpr_mu_destroy(&resource_user->mu);
+ gpr_free(resource_user->name);
+}
+
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done) {
+ gpr_mu_lock(&resource_user->mu);
+ grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure);
+ if (on_done_destroy != NULL) {
+ /* already shutdown */
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR " after shutdown",
+ resource_user->resource_quota->name, resource_user->name, size);
+ }
+ grpc_exec_ctx_sched(
+ exec_ctx, optional_on_done,
+ GRPC_ERROR_CREATE("Buffer pool user is already shutdown"), NULL);
+ gpr_mu_unlock(&resource_user->mu);
+ return;
+ }
+ resource_user->allocated += (int64_t)size;
+ resource_user->free_pool -= (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; allocated -> %" PRId64
+ ", free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->allocated, resource_user->free_pool);
+ }
+ if (resource_user->free_pool < 0) {
+ grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
+ GRPC_ERROR_NONE);
+ if (!resource_user->allocating) {
+ resource_user->allocating = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->allocate_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ } else {
+ grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size) {
+ gpr_mu_lock(&resource_user->mu);
+ GPR_ASSERT(resource_user->allocated >= (int64_t)size);
+ bool was_zero_or_negative = resource_user->free_pool <= 0;
+ resource_user->free_pool += (int64_t)size;
+ resource_user->allocated -= (int64_t)size;
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; allocated -> %" PRId64
+ ", free_pool -> %" PRId64,
+ resource_user->resource_quota->name, resource_user->name, size,
+ resource_user->allocated, resource_user->free_pool);
+ }
+ bool is_bigger_than_zero = resource_user->free_pool > 0;
+ if (is_bigger_than_zero && was_zero_or_negative &&
+ !resource_user->added_to_free_pool) {
+ resource_user->added_to_free_pool = true;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->add_to_free_pool_closure,
+ GRPC_ERROR_NONE, false);
+ }
+ grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
+ &resource_user->on_done_destroy_closure);
+ if (on_done_destroy != NULL && resource_user->allocated == 0) {
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->destroy_closure, GRPC_ERROR_NONE,
+ false);
+ }
+ gpr_mu_unlock(&resource_user->mu);
+}
+
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive,
+ grpc_closure *closure) {
+ if (gpr_atm_acq_load(&resource_user->on_done_destroy_closure) == 0) {
+ GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
+ resource_user->reclaimers[destructive] = closure;
+ grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->post_reclaimer_closure[destructive],
+ GRPC_ERROR_NONE, false);
+ } else {
+ grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+ }
+}
+
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user) {
+ if (grpc_resource_quota_trace) {
+ gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
+ resource_user->resource_quota->name, resource_user->name);
+ }
+ grpc_combiner_execute(
+ exec_ctx, resource_user->resource_quota->combiner,
+ &resource_user->resource_quota->rq_reclamation_done_closure,
+ GRPC_ERROR_NONE, false);
+}
+
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
+ grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
+ slice_allocator);
+ grpc_closure_init(&slice_allocator->on_done, cb, p);
+ slice_allocator->resource_user = resource_user;
+}
+
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, gpr_slice_buffer *dest) {
+ slice_allocator->length = length;
+ slice_allocator->count = count;
+ slice_allocator->dest = dest;
+ grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
+ count * length, &slice_allocator->on_allocated);
+}
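
[Editorial note] resource_quota.c above defines the whole lifecycle: create and size a quota, attach a resource user, allocate and free against it, then shut the user down asynchronously. The following is a minimal sketch of a caller, illustrative only and not part of this patch; quota_example, g_user and the sizes are hypothetical, and error handling is elided.

#include "src/core/lib/iomgr/resource_quota.h"

static grpc_resource_user g_user;     /* must outlive the async shutdown */
static grpc_closure g_shutdown_done;

static void on_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  /* All outstanding allocations are back in the quota: finish teardown. */
  grpc_resource_user_destroy(exec_ctx, arg);
}

static void quota_example(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resource_quota *rq = grpc_resource_quota_create("example_quota");
  grpc_resource_quota_resize(rq, 1024 * 1024); /* cap the pool at 1MB */

  grpc_resource_user_init(&g_user, rq, "example_user");

  /* NULL on_done: account immediately, possibly pushing the quota over
     its limit and triggering reclamation (see the header's contract). */
  grpc_resource_user_alloc(&exec_ctx, &g_user, 64 * 1024, NULL);
  grpc_resource_user_free(&exec_ctx, &g_user, 64 * 1024);

  grpc_closure_init(&g_shutdown_done, on_shutdown_done, &g_user);
  grpc_resource_user_shutdown(&exec_ctx, &g_user, &g_shutdown_done);

  /* The user holds its own internal ref on the quota, so dropping ours
     here is safe even while shutdown is still in flight. */
  grpc_resource_quota_unref(rq);
  grpc_exec_ctx_finish(&exec_ctx);
}
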
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
new file mode 100644
index 0000000000..6dfac55f88
--- /dev/null
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -0,0 +1,224 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+#define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
+
+#include <grpc/grpc.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+/** \file Tracks resource usage against a pool.
+
+ The current implementation tracks only memory usage, but in the future
+ this may be extended to (for example) threads and file descriptors.
+
+ A grpc_resource_quota represents the pooled resources, and
+ grpc_resource_user instances attach to the quota and consume those
+ resources. They also offer a vector for reclamation: if we become
+ resource constrained, grpc_resource_user instances are asked (in turn) to
+ free up whatever they can so that the system as a whole can make progress.
+
+ There are three kinds of reclamation that take place, in order of increasing
+ invasiveness:
+ - an internal reclamation, where cached resources held at the resource user
+ level are returned to the quota
+ - a benign reclamation phase, whereby resources that are in use but are not
+ helping anything make progress are reclaimed
+ - a destructive reclamation, whereby resources that are actively helping
+ something make progress may be reclaimed so that at least one part of the
+ system can complete.
+
+ Only one reclamation will be outstanding for a given quota at a given time.
+ On each reclamation attempt, the kinds of reclamation are tried in order of
+ increasing invasiveness, stopping at the first one that succeeds. Thus, on a
+ given reclamation attempt, if internal and benign reclamation both fail, it
+ will wind up doing a destructive reclamation. However, the next reclamation
+ attempt may then be able to get what it needs via internal or benign
+ reclamation, due to resources that may have been freed up by the destructive
+ reclamation in the previous attempt.
+
+ Future work will be to expose the current resource pressure so that back
+ pressure can be applied to avoid reclamation phases starting.
+
+ Resource users own references to resource quotas, and resource quotas
+ maintain lists of users (which users arrange to leave before they are
+ destroyed) */
+
+extern int grpc_resource_quota_trace;
+
+grpc_resource_quota *grpc_resource_quota_internal_ref(
+ grpc_resource_quota *resource_quota);
+void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
+ grpc_resource_quota *resource_quota);
+grpc_resource_quota *grpc_resource_quota_from_channel_args(
+ const grpc_channel_args *channel_args);
+
+/* Resource users are kept in (potentially) several intrusive linked lists
+ at once. These are the list names. */
+typedef enum {
+ /* Resource users that are waiting for an allocation */
+ GRPC_RULIST_AWAITING_ALLOCATION,
+ /* Resource users that have free memory available for internal reclamation */
+ GRPC_RULIST_NON_EMPTY_FREE_POOL,
+ /* Resource users that have published that a benign reclamation is
+ available */
+ GRPC_RULIST_RECLAIMER_BENIGN,
+ /* Resource users that have published that a destructive reclamation is
+ available */
+ GRPC_RULIST_RECLAIMER_DESTRUCTIVE,
+ /* Number of lists: must be last */
+ GRPC_RULIST_COUNT
+} grpc_rulist;
+
+typedef struct grpc_resource_user grpc_resource_user;
+
+/* Internal linked list pointers for a resource user */
+typedef struct {
+ grpc_resource_user *next;
+ grpc_resource_user *prev;
+} grpc_resource_user_link;
+
+struct grpc_resource_user {
+ /* The quota this resource user consumes from */
+ grpc_resource_quota *resource_quota;
+
+ /* Closure to schedule an allocation under the resource quota combiner lock */
+ grpc_closure allocate_closure;
+ /* Closure to publish a non-empty free pool under the resource quota combiner
+ lock */
+ grpc_closure add_to_free_pool_closure;
+
+ gpr_mu mu;
+ /* Total allocated memory outstanding by this resource user, in bytes;
+ never negative */
+ int64_t allocated;
+ /* The amount of memory (in bytes) this user has cached for its own use: to
+ avoid quota contention, each resource user can keep some memory in
+ addition to what it is immediately using (e.g., for caching), and the quota
+ can pull it back under memory pressure.
+ This value can become negative if more memory has been requested than
+ existed in the free pool, at which point the quota is consulted to bring
+ this value non-negative (asynchronously). */
+ int64_t free_pool;
+ /* A list of closures to call once free_pool becomes non-negative, i.e. when
+ all outstanding allocations have been granted. */
+ grpc_closure_list on_allocated;
+ /* True if we are currently trying to allocate from the quota, false if not */
+ bool allocating;
+ /* True if we are currently trying to add ourselves to the quota's non-empty
+ free pool list, false otherwise */
+ bool added_to_free_pool;
+
+ /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
+ */
+ grpc_closure *reclaimers[2];
+ /* Trampoline closures used to post the reclaimers above under the quota
+ combiner lock */
+ grpc_closure post_reclaimer_closure[2];
+
+ /* Closure to execute under the quota combiner to de-register and shutdown the
+ resource user */
+ grpc_closure destroy_closure;
+ /* User supplied closure to call once the user has finished shutting down AND
+ all outstanding allocations have been freed. Real type is grpc_closure*,
+ but it's stored as an atomic to avoid a mutex on some fast paths. */
+ gpr_atm on_done_destroy_closure;
+
+ /* Links in the various grpc_rulist lists */
+ grpc_resource_user_link links[GRPC_RULIST_COUNT];
+
+ /* The name of this resource user, for debugging/tracing */
+ char *name;
+};
+
+void grpc_resource_user_init(grpc_resource_user *resource_user,
+ grpc_resource_quota *resource_quota,
+ const char *name);
+void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ grpc_closure *on_done);
+void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+
+/* Allocate from the resource user (and its quota).
+ If optional_on_done is NULL, then allocate immediately. This may push the
+ quota over-limit, at which point reclamation will kick in.
+ If optional_on_done is non-NULL, it will be scheduled when the allocation has
+ been granted by the quota. */
+void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size,
+ grpc_closure *optional_on_done);
+/* Release memory back to the quota */
+void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, size_t size);
+/* Post a memory reclaimer to the resource user. Only one benign and one
+ destructive reclaimer can be posted at once. When executed, the reclaimer
+ MUST call grpc_resource_user_finish_reclamation before it completes, to
+ return control to the resource quota. */
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user,
+ bool destructive, grpc_closure *closure);
+/* Finish a reclamation step */
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user);
+
+/* Helper to allocate slices from a resource user */
+typedef struct grpc_resource_user_slice_allocator {
+ /* Closure for when a resource user allocation completes */
+ grpc_closure on_allocated;
+ /* Closure to call when slices have been allocated */
+ grpc_closure on_done;
+ /* Length of slices to allocate on the current request */
+ size_t length;
+ /* Number of slices to allocate on the current request */
+ size_t count;
+ /* Destination for slices to allocate on the current request */
+ gpr_slice_buffer *dest;
+ /* Parent resource user */
+ grpc_resource_user *resource_user;
+} grpc_resource_user_slice_allocator;
+
+/* Initialize a slice allocator.
+ When an allocation is completed, calls \a cb with argument \a p. */
+void grpc_resource_user_slice_allocator_init(
+ grpc_resource_user_slice_allocator *slice_allocator,
+ grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);
+
+/* Allocate \a count slices of length \a length into \a dest. Only one request
+ can be outstanding at a time. */
+void grpc_resource_user_alloc_slices(
+ grpc_exec_ctx *exec_ctx,
+ grpc_resource_user_slice_allocator *slice_allocator, size_t length,
+ size_t count, gpr_slice_buffer *dest);
+
+#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
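
[Editorial note] The header above spells out the reclaimer contract: only one benign and one destructive reclaimer may be posted at a time, a posted reclaimer is cancelled with GRPC_ERROR_CANCELLED when the user shuts down, and on a real run it must call grpc_resource_user_finish_reclamation. A hedged sketch of a benign reclaimer follows; it is editorial, not in the diff, and my_cache and its fields are hypothetical (its cached bytes are assumed to have been accounted earlier via grpc_resource_user_alloc).

#include "src/core/lib/iomgr/resource_quota.h"

typedef struct {
  grpc_resource_user resource_user;  /* initialized elsewhere */
  grpc_closure reclaimer_closure;
  size_t cached_bytes;               /* previously accounted via alloc */
} my_cache;

static void drop_cache_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  my_cache *cache = arg;
  if (error != GRPC_ERROR_NONE) return; /* cancelled: user shutting down */
  /* Return the cached memory to the quota... */
  grpc_resource_user_free(exec_ctx, &cache->resource_user,
                          cache->cached_bytes);
  cache->cached_bytes = 0;
  /* ...and, as required, hand control back so the quota's step loop can
     continue. */
  grpc_resource_user_finish_reclamation(exec_ctx, &cache->resource_user);
}

static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx, my_cache *cache) {
  grpc_closure_init(&cache->reclaimer_closure, drop_cache_reclaimer, cache);
  grpc_resource_user_post_reclaimer(exec_ctx, &cache->resource_user,
                                    false /* benign */,
                                    &cache->reclaimer_closure);
}
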
diff --git a/src/core/lib/iomgr/sockaddr.h b/src/core/lib/iomgr/sockaddr.h
index 5563d0b8a6..52b504390d 100644
--- a/src/core/lib/iomgr/sockaddr.h
+++ b/src/core/lib/iomgr/sockaddr.h
@@ -31,16 +31,24 @@
*
*/
+/* This header transitively includes other headers that care about include
+ * order, so it should be included first. As a consequence, it should not be
+ * included in any other header. */
+
#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_H
#define GRPC_CORE_LIB_IOMGR_SOCKADDR_H
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+#include <uv.h>
+#endif
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/sockaddr_windows.h"
#endif
-#ifdef GPR_POSIX_SOCKETADDR
+#ifdef GRPC_POSIX_SOCKETADDR
#include "src/core/lib/iomgr/sockaddr_posix.h"
#endif
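
[Editorial note] The include-order comment added at the top of this header is easiest to read as a pattern for .c files: sockaddr.h first, then everything else that touches socket types. A sketch (not part of the patch), mirroring what sockaddr_utils.c does below:

/* First: pulls in the platform socket headers in the required order. */
#include "src/core/lib/iomgr/sockaddr.h"

/* Then anything that uses those types. */
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils.h"
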
diff --git a/src/core/lib/iomgr/sockaddr_utils.c b/src/core/lib/iomgr/sockaddr_utils.c
index 127d95c618..44bc2f968b 100644
--- a/src/core/lib/iomgr/sockaddr_utils.c
+++ b/src/core/lib/iomgr/sockaddr_utils.c
@@ -42,26 +42,32 @@
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xff, 0xff};
-int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in *addr4_out) {
- GPR_ASSERT(addr != (struct sockaddr *)addr4_out);
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *resolved_addr,
+ grpc_resolved_address *resolved_addr4_out) {
+ GPR_ASSERT(resolved_addr != resolved_addr4_out);
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ struct sockaddr_in *addr4_out =
+ (struct sockaddr_in *)resolved_addr4_out->addr;
if (addr->sa_family == AF_INET6) {
const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr;
if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix,
sizeof(kV4MappedPrefix)) == 0) {
- if (addr4_out != NULL) {
+ if (resolved_addr4_out != NULL) {
/* Normalize ::ffff:0.0.0.0/96 to IPv4. */
- memset(addr4_out, 0, sizeof(*addr4_out));
+ memset(resolved_addr4_out, 0, sizeof(*resolved_addr4_out));
addr4_out->sin_family = AF_INET;
/* s6_addr32 would be nice, but it's non-standard. */
memcpy(&addr4_out->sin_addr, &addr6->sin6_addr.s6_addr[12], 4);
addr4_out->sin_port = addr6->sin6_port;
+ resolved_addr4_out->len = sizeof(struct sockaddr_in);
}
return 1;
}
@@ -69,26 +75,33 @@ int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
return 0;
}
-int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in6 *addr6_out) {
- GPR_ASSERT(addr != (struct sockaddr *)addr6_out);
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *resolved_addr,
+ grpc_resolved_address *resolved_addr6_out) {
+ GPR_ASSERT(resolved_addr != resolved_addr6_out);
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ struct sockaddr_in6 *addr6_out =
+ (struct sockaddr_in6 *)resolved_addr6_out->addr;
if (addr->sa_family == AF_INET) {
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
- memset(addr6_out, 0, sizeof(*addr6_out));
+ memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out));
addr6_out->sin6_family = AF_INET6;
memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12);
memcpy(&addr6_out->sin6_addr.s6_addr[12], &addr4->sin_addr, 4);
addr6_out->sin6_port = addr4->sin_port;
+ resolved_addr6_out->len = sizeof(struct sockaddr_in6);
return 1;
}
return 0;
}
-int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out) {
- struct sockaddr_in addr4_normalized;
- if (grpc_sockaddr_is_v4mapped(addr, &addr4_normalized)) {
- addr = (struct sockaddr *)&addr4_normalized;
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr,
+ int *port_out) {
+ const struct sockaddr *addr;
+ grpc_resolved_address addr4_normalized;
+ if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) {
+ resolved_addr = &addr4_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
/* Check for 0.0.0.0 */
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
@@ -113,39 +126,49 @@ int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out) {
}
}
-void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
- struct sockaddr_in6 *wild6_out) {
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
+ grpc_resolved_address *wild6_out) {
grpc_sockaddr_make_wildcard4(port, wild4_out);
grpc_sockaddr_make_wildcard6(port, wild6_out);
}
-void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out) {
+void grpc_sockaddr_make_wildcard4(int port,
+ grpc_resolved_address *resolved_wild_out) {
+ struct sockaddr_in *wild_out = (struct sockaddr_in *)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
- memset(wild_out, 0, sizeof(*wild_out));
+ memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin_family = AF_INET;
wild_out->sin_port = htons((uint16_t)port);
+ resolved_wild_out->len = sizeof(struct sockaddr_in);
}
-void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out) {
+void grpc_sockaddr_make_wildcard6(int port,
+ grpc_resolved_address *resolved_wild_out) {
+ struct sockaddr_in6 *wild_out =
+ (struct sockaddr_in6 *)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
- memset(wild_out, 0, sizeof(*wild_out));
+ memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;
wild_out->sin6_port = htons((uint16_t)port);
+ resolved_wild_out->len = sizeof(struct sockaddr_in6);
}
-int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
+int grpc_sockaddr_to_string(char **out,
+ const grpc_resolved_address *resolved_addr,
int normalize) {
+ const struct sockaddr *addr;
const int save_errno = errno;
- struct sockaddr_in addr_normalized;
+ grpc_resolved_address addr_normalized;
char ntop_buf[INET6_ADDRSTRLEN];
const void *ip = NULL;
int port;
int ret;
*out = NULL;
- if (normalize && grpc_sockaddr_is_v4mapped(addr, &addr_normalized)) {
- addr = (const struct sockaddr *)&addr_normalized;
+ if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
+ resolved_addr = &addr_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
ip = &addr4->sin_addr;
@@ -155,10 +178,8 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
}
- /* Windows inet_ntop wants a mutable ip pointer */
if (ip != NULL &&
- inet_ntop(addr->sa_family, (void *)ip, ntop_buf, sizeof(ntop_buf)) !=
- NULL) {
+ grpc_inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) {
ret = gpr_join_host_port(out, ntop_buf, port);
} else {
ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family);
@@ -168,39 +189,43 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
return ret;
}
-char *grpc_sockaddr_to_uri(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) {
char *temp;
char *result;
- struct sockaddr_in addr_normalized;
+ grpc_resolved_address addr_normalized;
+ const struct sockaddr *addr;
- if (grpc_sockaddr_is_v4mapped(addr, &addr_normalized)) {
- addr = (const struct sockaddr *)&addr_normalized;
+ if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
+ resolved_addr = &addr_normalized;
}
+ addr = (const struct sockaddr *)resolved_addr->addr;
+
switch (addr->sa_family) {
case AF_INET:
- grpc_sockaddr_to_string(&temp, addr, 0);
+ grpc_sockaddr_to_string(&temp, resolved_addr, 0);
gpr_asprintf(&result, "ipv4:%s", temp);
gpr_free(temp);
return result;
case AF_INET6:
- grpc_sockaddr_to_string(&temp, addr, 0);
+ grpc_sockaddr_to_string(&temp, resolved_addr, 0);
gpr_asprintf(&result, "ipv6:%s", temp);
gpr_free(temp);
return result;
default:
- return grpc_sockaddr_to_uri_unix_if_possible(addr);
+ return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr);
}
}
-int grpc_sockaddr_get_port(const struct sockaddr *addr) {
+int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
return ntohs(((struct sockaddr_in *)addr)->sin_port);
case AF_INET6:
return ntohs(((struct sockaddr_in6 *)addr)->sin6_port);
default:
- if (grpc_is_unix_socket(addr)) {
+ if (grpc_is_unix_socket(resolved_addr)) {
return 1;
}
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port",
@@ -209,7 +234,9 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
}
}
-int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
+int grpc_sockaddr_set_port(const grpc_resolved_address *resolved_addr,
+ int port) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index 9f81992e6b..5371e360c5 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -34,40 +34,40 @@
#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H
#define GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Returns true if addr is an IPv4-mapped IPv6 address within the
::ffff:0.0.0.0/96 range, or false otherwise.
If addr4_out is non-NULL, the inner IPv4 address will be copied here when
returning true. */
-int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in *addr4_out);
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *addr,
+ grpc_resolved_address *addr4_out);
/* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96
address to addr6_out and returns true. Otherwise returns false. */
-int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
- struct sockaddr_in6 *addr6_out);
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *addr,
+ grpc_resolved_address *addr6_out);
/* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to
*port_out (if not NULL) and returns true, otherwise returns false. */
-int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out);
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address *addr, int *port_out);
/* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */
-void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
- struct sockaddr_in6 *wild6_out);
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
+ grpc_resolved_address *wild6_out);
/* Writes 0.0.0.0:port. */
-void grpc_sockaddr_make_wildcard4(int port, struct sockaddr_in *wild_out);
+void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address *wild_out);
/* Writes [::]:port. */
-void grpc_sockaddr_make_wildcard6(int port, struct sockaddr_in6 *wild_out);
+void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address *wild_out);
/* Return the IP port number of a sockaddr */
-int grpc_sockaddr_get_port(const struct sockaddr *addr);
+int grpc_sockaddr_get_port(const grpc_resolved_address *addr);
/* Set IP port number of a sockaddr */
-int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
+int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port);
/* Converts a sockaddr into a newly-allocated human-readable string.
@@ -81,9 +81,9 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
In the unlikely event of an error, returns -1 and sets *out to NULL.
The existing value of errno is always preserved. */
-int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
+int grpc_sockaddr_to_string(char **out, const grpc_resolved_address *addr,
int normalize);
-char *grpc_sockaddr_to_uri(const struct sockaddr *addr);
+char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr);
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */
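
[Editorial note] With this change the sockaddr helpers operate on grpc_resolved_address (a fixed-size byte buffer plus a length, declared in resolve_address.h) instead of raw struct sockaddr pointers. A small usage sketch, illustrative only and not in the diff; resolved_address_example and the port are hypothetical.

#include "src/core/lib/iomgr/sockaddr_utils.h"

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

static void resolved_address_example(void) {
  grpc_resolved_address wild;
  int port;
  char *str;
  char *uri;

  grpc_sockaddr_make_wildcard6(8080, &wild); /* [::]:8080 */
  GPR_ASSERT(grpc_sockaddr_is_wildcard(&wild, &port) && port == 8080);

  grpc_sockaddr_to_string(&str, &wild, 1 /* normalize v4-mapped addrs */);
  uri = grpc_sockaddr_to_uri(&wild);
  gpr_log(GPR_DEBUG, "addr=%s uri=%s port=%d", str, uri,
          grpc_sockaddr_get_port(&wild));
  gpr_free(str);
  gpr_free(uri);
}
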
diff --git a/src/core/lib/iomgr/workqueue_posix.h b/src/core/lib/iomgr/socket_utils.h
index 03ee21cef7..cc3ee2e30c 100644
--- a/src/core/lib/iomgr/workqueue_posix.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -31,31 +31,12 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
-#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H
+#define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/support/mpscq.h"
+#include <stddef.h>
-struct grpc_fd;
+/* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size);
-struct grpc_workqueue {
- gpr_refcount refs;
- gpr_mpscq queue;
- // state is:
- // lower bit - zero if orphaned
- // other bits - number of items enqueued
- gpr_atm state;
-
- grpc_wakeup_fd wakeup_fd;
- struct grpc_fd *wakeup_read_fd;
-
- grpc_closure read_closure;
-};
-
-/** Create a work queue. Returns an error if creation fails. If creation
- succeeds, sets *workqueue to point to it. */
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
- grpc_workqueue **workqueue);
-
-#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_POSIX_H */
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H */
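
[Editorial note] grpc_inet_ntop gets per-platform definitions later in this patch (inet_ntop on POSIX, uv_inet_ntop under GRPC_UV, InetNtopA on Windows). A tiny POSIX-flavored sketch of a call site, not part of the patch; the constants come from the platform headers pulled in by sockaddr.h.

#include "src/core/lib/iomgr/sockaddr.h" /* platform socket headers, first */
#include "src/core/lib/iomgr/socket_utils.h"

static void ntop_example(void) {
  struct in_addr v4;
  char buf[INET6_ADDRSTRLEN];
  v4.s_addr = htonl(0x7f000001u); /* 127.0.0.1 */
  if (grpc_inet_ntop(AF_INET, &v4, buf, sizeof(buf)) != NULL) {
    /* buf now holds "127.0.0.1" */
  }
}
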
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index d2f6261e2a..bc28bbe316 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -31,10 +31,11 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
+#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include <arpa/inet.h>
@@ -78,7 +79,7 @@ grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) {
}
grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
-#ifdef GPR_HAVE_SO_NOSIGPIPE
+#ifdef GRPC_HAVE_SO_NOSIGPIPE
int val = 1;
int newval;
socklen_t intlen = sizeof(newval);
@@ -96,7 +97,7 @@ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
}
grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
-#ifdef GPR_HAVE_IP_PKTINFO
+#ifdef GRPC_HAVE_IP_PKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip,
sizeof(get_local_ip))) {
@@ -107,7 +108,7 @@ grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
}
grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
-#ifdef GPR_HAVE_IPV6_RECVPKTINFO
+#ifdef GRPC_HAVE_IPV6_RECVPKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip,
sizeof(get_local_ip))) {
@@ -253,7 +254,7 @@ static int set_socket_dualstack(int fd) {
}
}
-static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) {
+static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) {
if (fd >= 0) return GRPC_ERROR_NONE;
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
@@ -263,10 +264,10 @@ static grpc_error *error_for_fd(int fd, const struct sockaddr *addr) {
return err;
}
-grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
- int protocol,
- grpc_dualstack_mode *dsmode,
- int *newfd) {
+grpc_error *grpc_create_dualstack_socket(
+ const grpc_resolved_address *resolved_addr, int type, int protocol,
+ grpc_dualstack_mode *dsmode, int *newfd) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
@@ -281,9 +282,9 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
return GRPC_ERROR_NONE;
}
/* If this isn't an IPv4 address, then return whatever we've got. */
- if (!grpc_sockaddr_is_v4mapped(addr, NULL)) {
+ if (!grpc_sockaddr_is_v4mapped(resolved_addr, NULL)) {
*dsmode = GRPC_DSMODE_IPV6;
- return error_for_fd(*newfd, addr);
+ return error_for_fd(*newfd, resolved_addr);
}
/* Fall back to AF_INET. */
if (*newfd >= 0) {
@@ -293,7 +294,12 @@ grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
}
*dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
*newfd = socket(family, type, protocol);
- return error_for_fd(*newfd, addr);
+ return error_for_fd(*newfd, resolved_addr);
+}
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ GPR_ASSERT(size <= (socklen_t)-1);
+ return inet_ntop(af, src, dst, (socklen_t)size);
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_linux.c b/src/core/lib/iomgr/socket_utils_linux.c
index 144e3110c8..bf6e9e4f55 100644
--- a/src/core/lib/iomgr/socket_utils_linux.c
+++ b/src/core/lib/iomgr/socket_utils_linux.c
@@ -31,21 +31,27 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_LINUX_SOCKETUTILS
+#ifdef GRPC_LINUX_SOCKETUTILS
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
+#include <grpc/support/log.h>
+
#include <sys/socket.h>
#include <sys/types.h>
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec) {
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec) {
int flags = 0;
+ GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
+ GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
flags |= nonblock ? SOCK_NONBLOCK : 0;
flags |= cloexec ? SOCK_CLOEXEC : 0;
- return accept4(sockfd, addr, addrlen, flags);
+ return accept4(sockfd, (struct sockaddr *)resolved_addr->addr,
+ (socklen_t *)&resolved_addr->len, flags);
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_posix.c b/src/core/lib/iomgr/socket_utils_posix.c
index 57ae64c103..9dea0c0cd8 100644
--- a/src/core/lib/iomgr/socket_utils_posix.c
+++ b/src/core/lib/iomgr/socket_utils_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKETUTILS
+#ifdef GRPC_POSIX_SOCKETUTILS
#include "src/core/lib/iomgr/socket_utils_posix.h"
@@ -42,12 +42,15 @@
#include <unistd.h>
#include <grpc/support/log.h>
+#include "src/core/lib/iomgr/sockaddr.h"
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec) {
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec) {
int fd, flags;
-
- fd = accept(sockfd, addr, addrlen);
+ GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
+ GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
+ fd = accept(sockfd, (struct sockaddr *)resolved_addr->addr,
+ (socklen_t *)&resolved_addr->len);
if (fd >= 0) {
if (nonblock) {
flags = fcntl(fd, F_GETFL, 0);
@@ -67,4 +70,4 @@ close_and_error:
return -1;
}
-#endif /* GPR_POSIX_SOCKETUTILS */
+#endif /* GRPC_POSIX_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 7bcc2219ae..175fb2b717 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -34,14 +34,16 @@
#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H
#define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H
+#include "src/core/lib/iomgr/resolve_address.h"
+
#include <sys/socket.h>
#include <unistd.h>
#include "src/core/lib/iomgr/error.h"
/* a wrapper for accept or accept4 */
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
- int nonblock, int cloexec);
+int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+ int cloexec);
/* set a socket to non blocking mode */
grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking);
@@ -125,8 +127,8 @@ extern int grpc_forbid_dualstack_sockets_for_testing;
IPv4, so that bind() or connect() see the correct family.
Also, it's important to distinguish between DUALSTACK and IPV6 when
listening on the [::] wildcard address. */
-grpc_error *grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
- int protocol,
+grpc_error *grpc_create_dualstack_socket(const grpc_resolved_address *addr,
+ int type, int protocol,
grpc_dualstack_mode *dsmode,
int *newfd);
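
[Editorial note] Both grpc_accept4 and grpc_create_dualstack_socket now take grpc_resolved_address. Note that grpc_accept4 treats resolved_addr->len as an in/out parameter, so the caller sets it to the capacity of the addr buffer first. The sketch below is editorial, POSIX-only, and assumes the grpc_resolved_address layout from resolve_address.h (a fixed-size addr array plus len); bind/listen and error handling are elided.

#include <string.h>

#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"

static grpc_error *listen_and_accept_once(int port, int *out_fd) {
  grpc_resolved_address wild;
  grpc_resolved_address peer;
  grpc_dualstack_mode dsmode;
  grpc_error *err;
  int lfd;

  grpc_sockaddr_make_wildcard6(port, &wild);
  err = grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &lfd);
  if (err != GRPC_ERROR_NONE) return err;
  /* bind()/listen() on lfd elided. */

  memset(&peer, 0, sizeof(peer));
  peer.len = sizeof(peer.addr); /* in: buffer capacity; out: actual length */
  *out_fd = grpc_accept4(lfd, &peer, 1 /* nonblock */, 1 /* cloexec */);
  return GRPC_ERROR_NONE;
}
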
diff --git a/src/core/lib/iomgr/socket_utils_uv.c b/src/core/lib/iomgr/socket_utils_uv.c
new file mode 100644
index 0000000000..741bf28969
--- /dev/null
+++ b/src/core/lib/iomgr/socket_utils_uv.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <uv.h>
+
+#include "src/core/lib/iomgr/socket_utils.h"
+
+#include <grpc/support/log.h>
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ uv_inet_ntop(af, src, dst, size);
+ return dst;
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c
new file mode 100644
index 0000000000..628ad4a45b
--- /dev/null
+++ b/src/core/lib/iomgr/socket_utils_windows.c
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_WINDOWS_SOCKETUTILS
+
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
+
+#include <grpc/support/log.h>
+
+const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+ /* Windows InetNtopA wants a mutable ip pointer */
+ return InetNtopA(af, (void *)src, dst, size);
+}
+
+#endif /* GRPC_WINDOWS_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 78ef46d042..35f23300dc 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <winsock2.h>
@@ -156,4 +156,4 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
if (should_destroy) destroy(socket);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index a07e0b9f0c..18e6e60ebc 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -37,7 +37,11 @@
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/pollset_set.h"
-#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
+/* Channel arg (integer) setting how large a slice to try to read from the wire
+ each time recvmsg (or equivalent) is called */
+#define GRPC_ARG_TCP_READ_CHUNK_SIZE "grpc.experimental.tcp_read_chunk_size"
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
@@ -47,7 +51,8 @@
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, size_t addr_len,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */
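
A minimal sketch of how an application might set the new read-chunk-size channel
arg when creating a channel. The target address and the 64 KiB value are
illustrative assumptions, not part of this change; the key string matches
GRPC_ARG_TCP_READ_CHUNK_SIZE above, and tcp_client_posix.c clamps the value to
the range [1, 8 MiB].

    #include <grpc/grpc.h>

    grpc_channel *create_channel_with_small_reads(void) {
      grpc_arg chunk_arg;
      chunk_arg.type = GRPC_ARG_INTEGER;
      /* same string as GRPC_ARG_TCP_READ_CHUNK_SIZE */
      chunk_arg.key = (char *)"grpc.experimental.tcp_read_chunk_size";
      chunk_arg.value.integer = 64 * 1024; /* ask for 64 KiB read slices */
      grpc_channel_args args = {1, &chunk_arg};
      /* target is an assumed example address */
      return grpc_insecure_channel_create("localhost:50051", &args, NULL);
    }
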
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index 3496b6094f..bc08c94ee0 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -31,11 +31,11 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_client_posix.h"
#include <errno.h>
#include <netinet/in.h>
@@ -47,6 +47,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -69,9 +70,10 @@ typedef struct {
char *addr_str;
grpc_endpoint **ep;
grpc_closure *closure;
+ grpc_channel_args *channel_args;
} async_connect;
-static grpc_error *prepare_socket(const struct sockaddr *addr, int fd) {
+static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd) {
grpc_error *err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -114,10 +116,39 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
}
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str) {
+ size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
+ grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
+ 8 * 1024 * 1024};
+ tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
+ &channel_args->args[i], options);
+ } else if (0 ==
+ strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
+ grpc_endpoint *ep =
+ grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ return ep;
+}
+
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect *ac = acp;
int so_error = 0;
@@ -165,7 +196,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
switch (so_error) {
case 0:
grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
- *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
+ *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
+ ac->addr_str);
fd = NULL;
break;
case ENOBUFS:
@@ -215,6 +247,7 @@ finish:
if (done) {
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
+ grpc_channel_args_destroy(ac->channel_args);
gpr_free(ac);
}
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
@@ -223,14 +256,15 @@ finish:
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
- const struct sockaddr *addr,
- size_t addr_len, gpr_timespec deadline) {
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
async_connect *ac;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in addr4_copy;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address addr4_copy;
grpc_fd *fdobj;
char *name;
char *addr_str;
@@ -240,8 +274,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
@@ -252,8 +285,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (dsmode == GRPC_DSMODE_IPV4) {
/* If we got an AF_INET socket, map the address back to IPv4. */
GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
if ((error = prepare_socket(addr, fd)) != GRPC_ERROR_NONE) {
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
@@ -261,8 +293,9 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
}
do {
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- err = connect(fd, addr, (socklen_t)addr_len);
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ err =
+ connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
} while (err < 0 && errno == EINTR);
addr_str = grpc_sockaddr_to_uri(addr);
@@ -271,7 +304,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
fdobj = grpc_fd_create(fd, name);
if (err >= 0) {
- *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
+ *ep =
+ grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
goto done;
}
@@ -296,6 +330,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
ac->refs = 2;
ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac;
+ ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
@@ -317,16 +352,18 @@ done:
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const struct sockaddr *addr,
- size_t addr_len, gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, size_t addr_len,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline) {
- grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
- addr_len, deadline);
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
+ channel_args, addr, deadline);
}
#endif
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
new file mode 100644
index 0000000000..efc5fcd5bb
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H
+
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+
+grpc_endpoint *grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
+ const char *addr_str);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
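
A minimal sketch of using the new grpc_tcp_client_create_from_fd() helper to
wrap an already-connected POSIX socket in an endpoint. The fd, endpoint name,
and peer string are illustrative assumptions; passing NULL channel args falls
back to the defaults chosen in tcp_client_posix.c, while a caller could instead
supply GRPC_ARG_TCP_READ_CHUNK_SIZE and/or GRPC_ARG_RESOURCE_QUOTA.

    #include "src/core/lib/iomgr/tcp_client_posix.h"

    static grpc_endpoint *wrap_connected_fd(int connected_fd) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_fd *fdobj = grpc_fd_create(connected_fd, "example-client-endpoint");
      grpc_endpoint *ep = grpc_tcp_client_create_from_fd(
          &exec_ctx, fdobj, NULL /* channel_args */, "ipv4:127.0.0.1:443");
      grpc_exec_ctx_finish(&exec_ctx);
      return ep;
    }
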
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
new file mode 100644
index 0000000000..6274667042
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/iomgr/timer.h"
+
+typedef struct grpc_uv_tcp_connect {
+ uv_connect_t connect_req;
+ grpc_timer alarm;
+ uv_tcp_t *tcp_handle;
+ grpc_closure *closure;
+ grpc_endpoint **endpoint;
+ int refs;
+ char *addr_name;
+} grpc_uv_tcp_connect;
+
+static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) {
+ gpr_free(connect);
+}
+
+static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
+ grpc_error *error) {
+ int done;
+ grpc_uv_tcp_connect *connect = acp;
+ if (error == GRPC_ERROR_NONE) {
+ /* error == NONE implies that the timer ran out and wasn't cancelled. If it
+ was cancelled, then the handler that cancelled it should also close the
+ handle, if applicable */
+ uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ uv_tcp_connect_cleanup(connect);
+ }
+}
+
+static void uv_tc_on_connect(uv_connect_t *req, int status) {
+ grpc_uv_tcp_connect *connect = req->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_error *error = GRPC_ERROR_NONE;
+ int done;
+ grpc_closure *closure = connect->closure;
+ grpc_timer_cancel(&exec_ctx, &connect->alarm);
+ if (status == 0) {
+ *connect->endpoint =
+ grpc_tcp_create(connect->tcp_handle, connect->addr_name);
+ } else {
+ error = GRPC_ERROR_CREATE("Failed to connect to remote host");
+ error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ if (status == UV_ECANCELED) {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ "Timeout occurred");
+ // This should only happen if the handle is already closed
+ } else {
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ uv_strerror(status));
+ uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ }
+ }
+ done = (--connect->refs == 0);
+ if (done) {
+ uv_tcp_connect_cleanup(connect);
+ }
+ grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
+ grpc_closure *closure, grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
+ const grpc_resolved_address *resolved_addr,
+ gpr_timespec deadline) {
+ grpc_uv_tcp_connect *connect;
+ (void)interested_parties;
+ connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
+ memset(connect, 0, sizeof(grpc_uv_tcp_connect));
+ connect->closure = closure;
+ connect->endpoint = ep;
+ connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
+ connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
+ uv_tcp_init(uv_default_loop(), connect->tcp_handle);
+ connect->connect_req.data = connect;
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
+ (const struct sockaddr *)resolved_addr->addr,
+ uv_tc_on_connect);
+ grpc_timer_init(exec_ctx, &connect->alarm,
+ gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+ uv_tc_on_alarm, connect, gpr_now(GPR_CLOCK_MONOTONIC));
+}
+
+// overridden by api_fuzzer.c
+void (*grpc_tcp_client_connect_impl)(
+ grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties, const grpc_resolved_address *addr,
+ gpr_timespec deadline) = tcp_client_connect_impl;
+
+void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_endpoint **ep,
+ grpc_pollset_set *interested_parties,
+ const grpc_resolved_address *addr,
+ gpr_timespec deadline) {
+ grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
+ deadline);
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index 562cb9c6bf..30f7c66f15 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/sockaddr_windows.h"
@@ -43,6 +43,7 @@
#include <grpc/support/slice_buffer.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -61,13 +62,16 @@ typedef struct {
int refs;
grpc_closure on_connect;
grpc_endpoint **endpoint;
+ grpc_resource_quota *resource_quota;
} async_connect;
-static void async_connect_unlock_and_cleanup(async_connect *ac,
+static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
+ async_connect *ac,
grpc_winsocket *socket) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
+ grpc_resource_quota_internal_unref(exec_ctx, ac->resource_quota);
gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac);
@@ -83,7 +87,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (socket != NULL) {
grpc_winsocket_shutdown(socket);
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
}
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
@@ -113,12 +117,12 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
if (!wsa_success) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
} else {
- *ep = grpc_tcp_create(socket, ac->addr_name);
+ *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
socket = NULL;
}
}
- async_connect_unlock_and_cleanup(ac, socket);
+ async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
/* If the connection was aborted, the callback was already called when
the deadline was met. */
grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
@@ -129,13 +133,14 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties,
- const struct sockaddr *addr, size_t addr_len,
+ const grpc_channel_args *channel_args,
+ const grpc_resolved_address *addr,
gpr_timespec deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in6 local_address;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address local_address;
async_connect *ac;
grpc_winsocket *socket = NULL;
LPFN_CONNECTEX ConnectEx;
@@ -144,12 +149,22 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_winsocket_callback_info *info;
grpc_error *error = GRPC_ERROR_NONE;
+ grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ if (channel_args != NULL) {
+ for (size_t i = 0; i < channel_args->num_args; i++) {
+ if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
+ resource_quota = grpc_resource_quota_internal_ref(
+ channel_args->args[i].value.pointer.p);
+ }
+ }
+ }
+
*endpoint = NULL;
/* Use dualstack sockets where available. */
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
@@ -178,7 +193,8 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
grpc_sockaddr_make_wildcard6(0, &local_address);
- status = bind(sock, (struct sockaddr *)&local_address, sizeof(local_address));
+ status = bind(sock, (struct sockaddr *)&local_address.addr,
+ (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -186,8 +202,8 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- success =
- ConnectEx(sock, addr, (int)addr_len, NULL, 0, NULL, &info->overlapped);
+ success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len,
+ NULL, 0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */
@@ -206,6 +222,7 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_done,
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
+ ac->resource_quota = resource_quota;
grpc_closure_init(&ac->on_connect, on_connect, ac);
grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
@@ -225,7 +242,8 @@ failure:
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
}
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 92767721d5..880af93ee1 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/tcp_posix.h"
@@ -58,14 +58,14 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
-#ifdef GPR_HAVE_MSG_NOSIGNAL
+#ifdef GRPC_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif
-#ifdef GPR_MSG_IOVLEN_TYPE
-typedef GPR_MSG_IOVLEN_TYPE msg_iovlen_type;
+#ifdef GRPC_MSG_IOVLEN_TYPE
+typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
#else
typedef size_t msg_iovlen_type;
#endif
@@ -80,6 +80,7 @@ typedef struct {
msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
size_t slice_size;
gpr_refcount refcount;
+ gpr_atm shutdown_count;
/* garbage after the last read */
gpr_slice_buffer last_read_buffer;
@@ -100,15 +101,29 @@ typedef struct {
grpc_closure write_closure;
char *peer_string;
+
+ grpc_resource_user resource_user;
+ grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void tcp_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (gpr_atm_full_fetch_add(&tcp->shutdown_count, 1) == 0) {
+ grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user,
+ grpc_closure_create(tcp_unref_closure, tcp));
+ }
+}
static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
grpc_fd_shutdown(exec_ctx, tcp->em_fd);
}
@@ -116,6 +131,7 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
"tcp_unref_orphan");
gpr_slice_buffer_destroy(&tcp->last_read_buffer);
+ grpc_resource_user_destroy(exec_ctx, &tcp->resource_user);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
@@ -152,9 +168,16 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void tcp_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(exec_ctx, arg, "resource_user");
+}
+
static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
@@ -177,11 +200,11 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
tcp->read_cb = NULL;
tcp->incoming_buffer = NULL;
- grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ grpc_closure_run(exec_ctx, cb, error);
}
#define MAX_READ_IOVEC 4
-static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -192,10 +215,6 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
- while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
- gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
- gpr_slice_malloc(tcp->slice_size));
- }
for (i = 0; i < tcp->incoming_buffer->count; i++) {
iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
@@ -209,11 +228,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
- GPR_TIMER_BEGIN("recvmsg", 1);
+ GPR_TIMER_BEGIN("recvmsg", 0);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
- GPR_TIMER_END("recvmsg", 0);
+ GPR_TIMER_END("recvmsg", read_bytes >= 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@@ -232,7 +251,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
- call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("EOF"));
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("Socket closed"));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
@@ -252,6 +271,30 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_TIMER_END("tcp_continue_read", 0);
}
+static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
+ grpc_error *error) {
+ grpc_tcp *tcp = tcpp;
+ if (error != GRPC_ERROR_NONE) {
+ gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
+ call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
+ TCP_UNREF(exec_ctx, tcp, "read");
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
+static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+ grpc_resource_user_alloc_slices(
+ exec_ctx, &tcp->slice_allocator, tcp->slice_size,
+ (size_t)tcp->iov_size - tcp->incoming_buffer->count,
+ tcp->incoming_buffer);
+ } else {
+ tcp_do_read(exec_ctx, tcp);
+ }
+}
+
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
@@ -259,6 +302,7 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (error != GRPC_ERROR_NONE) {
gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
@@ -392,11 +436,8 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error_free_string(str);
}
- GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
- cb->cb(exec_ctx, cb->cb_arg, error);
- GPR_TIMER_END("tcp_handle_write.cb", 0);
+ grpc_closure_run(exec_ctx, cb, error);
TCP_UNREF(exec_ctx, tcp, "write");
- GRPC_ERROR_UNREF(error);
}
}
@@ -472,6 +513,11 @@ static grpc_workqueue *tcp_get_workqueue(grpc_endpoint *ep) {
return grpc_fd_get_workqueue(tcp->em_fd);
}
+static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_write,
tcp_get_workqueue,
@@ -479,10 +525,12 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_add_to_pollset_set,
tcp_shutdown,
tcp_destroy,
+ tcp_get_resource_user,
tcp_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
- const char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
+ grpc_resource_quota *resource_quota,
+ size_t slice_size, const char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
@@ -495,14 +543,20 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->slice_size = slice_size;
tcp->iov_size = 1;
tcp->finished_edge = true;
- /* paired with unref in grpc_tcp_destroy */
- gpr_ref_init(&tcp->refcount, 1);
+ /* paired with unref in grpc_tcp_destroy, and with the shutdown for our
+ * resource_user */
+ gpr_ref_init(&tcp->refcount, 2);
+ gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
tcp->read_closure.cb = tcp_handle_read;
tcp->read_closure.cb_arg = tcp;
tcp->write_closure.cb = tcp_handle_write;
tcp->write_closure.cb_arg = tcp;
gpr_slice_buffer_init(&tcp->last_read_buffer);
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
+ grpc_resource_user_slice_allocator_init(&tcp->slice_allocator,
+ &tcp->resource_user,
+ tcp_read_allocation_done, tcp);
/* Tell network status tracker about new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
@@ -517,10 +571,13 @@ int grpc_tcp_fd(grpc_endpoint *ep) {
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done) {
+ grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
+ tcp_maybe_shutdown_resource_user(exec_ctx, tcp);
+ gpr_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 99125836d6..1c0d13f96e 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -53,8 +53,8 @@ extern int grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
- const char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, grpc_resource_quota *resource_quota,
+ size_t read_slice_size, const char *peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
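
A minimal sketch of the updated grpc_tcp_create() signature with its new
resource_quota parameter; the quota name, fd, and peer string are illustrative
assumptions. Mirroring grpc_tcp_client_create_from_fd() above, the endpoint
keeps its own reference on the quota through its resource user, so the creator
drops its reference once the endpoint exists.

    static grpc_endpoint *wrap_fd_with_quota(grpc_exec_ctx *exec_ctx, int fd) {
      grpc_resource_quota *rq = grpc_resource_quota_create("example-quota");
      grpc_fd *emfd = grpc_fd_create(fd, "example-endpoint");
      grpc_endpoint *ep = grpc_tcp_create(
          emfd, rq, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "ipv4:10.0.0.1:443");
      grpc_resource_quota_internal_unref(exec_ctx, rq); /* ep holds its own ref */
      return ep;
    }
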
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 9a390699b4..6eba8c4057 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -38,6 +38,7 @@
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
@@ -60,7 +61,8 @@ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server);
@@ -78,8 +80,9 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *out_port);
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *out_port);
/* Number of fds at the given port_index, or 0 if port_index is out of
bounds. */
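
A minimal sketch of the resolved-address form of grpc_tcp_server_add_port()
introduced above. The use of grpc_blocking_resolve_address() and the
"0.0.0.0:50051" target are illustrative assumptions; any grpc_resolved_address
obtained from the resolver works the same way.

    static grpc_error *bind_example_port(grpc_tcp_server *server, int *out_port) {
      grpc_resolved_addresses *resolved = NULL;
      grpc_error *err =
          grpc_blocking_resolve_address("0.0.0.0:50051", "50051", &resolved);
      if (err != GRPC_ERROR_NONE) return err;
      GPR_ASSERT(resolved->naddrs > 0);
      err = grpc_tcp_server_add_port(server, &resolved->addrs[0], out_port);
      grpc_resolved_addresses_destroy(resolved);
      return err;
    }
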
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 73df5477e6..b6fc1e4ca2 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -36,9 +36,9 @@
#define _GNU_SOURCE
#endif
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/tcp_server.h"
@@ -62,6 +62,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
@@ -79,11 +80,7 @@ struct grpc_tcp_listener {
int fd;
grpc_fd *emfd;
grpc_tcp_server *server;
- union {
- uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
- struct sockaddr sockaddr;
- } addr;
- size_t addr_len;
+ grpc_resolved_address addr;
int port;
unsigned port_index;
unsigned fd_index;
@@ -137,6 +134,8 @@ struct grpc_tcp_server {
/* next pollset to assign a channel to */
gpr_atm next_pollset_to_assign;
+
+ grpc_resource_quota *resource_quota;
};
static gpr_once check_init = GPR_ONCE_INIT;
@@ -153,23 +152,37 @@ static void init(void) {
#endif
}
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
+ s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_INTEGER) {
s->so_reuseport =
has_so_reuseport && (args->args[i].value.integer != 0);
} else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
" must be an integer");
}
+ } else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a buffer pool");
+ }
}
}
gpr_ref_init(&s->refs, 1);
@@ -206,6 +219,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+
gpr_free(s);
}
@@ -238,7 +253,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->head) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
- grpc_unlink_if_unix_domain_socket(&sp->addr.sockaddr);
+ grpc_unlink_if_unix_domain_socket(&sp->addr);
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
@@ -304,11 +319,9 @@ static int get_max_accept_queue_size(void) {
}
/* Prepare a recently-created socket for listening. */
-static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
- size_t addr_len, bool so_reuseport,
- int *port) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static grpc_error *prepare_socket(int fd, const grpc_resolved_address *addr,
+ bool so_reuseport, int *port) {
+ grpc_resolved_address sockname_temp;
grpc_error *err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -331,8 +344,8 @@ static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (err != GRPC_ERROR_NONE) goto error;
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- if (bind(fd, addr, (socklen_t)addr_len) < 0) {
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) {
err = GRPC_OS_ERROR(errno, "bind");
goto error;
}
@@ -342,13 +355,15 @@ static grpc_error *prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+
+ if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len) < 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
goto error;
}
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ *port = grpc_sockaddr_get_port(&sockname_temp);
return GRPC_ERROR_NONE;
error:
@@ -382,13 +397,13 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
/* loop until accept4 returns EAGAIN, and then re-arm notification */
for (;;) {
- struct sockaddr_storage addr;
- socklen_t addrlen = sizeof(addr);
+ grpc_resolved_address addr;
char *addr_str;
char *name;
+ addr.len = sizeof(struct sockaddr_storage);
/* Note: If we ever decide to return this address to the user, remember to
strip off the ::ffff:0.0.0.0/96 prefix first. */
- int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
+ int fd = grpc_accept4(sp->fd, &addr, 1, 1);
if (fd < 0) {
switch (errno) {
case EINTR:
@@ -404,7 +419,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_set_socket_no_sigpipe_if_possible(fd);
- addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
+ addr_str = grpc_sockaddr_to_uri(&addr);
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace) {
@@ -422,7 +437,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
sp->server->on_accept_cb(
exec_ctx, sp->server->on_accept_cb_arg,
- grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
+ grpc_tcp_create(fdobj, sp->server->resource_quota,
+ GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
read_notifier_pollset, &acceptor);
gpr_free(name);
@@ -442,19 +458,18 @@ error:
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
- const struct sockaddr *addr,
- size_t addr_len, unsigned port_index,
- unsigned fd_index,
+ const grpc_resolved_address *addr,
+ unsigned port_index, unsigned fd_index,
grpc_tcp_listener **listener) {
grpc_tcp_listener *sp = NULL;
int port = -1;
char *addr_str;
char *name;
- grpc_error *err = prepare_socket(fd, addr, addr_len, s->so_reuseport, &port);
+ grpc_error *err = prepare_socket(fd, addr, s->so_reuseport, &port);
if (err == GRPC_ERROR_NONE) {
GPR_ASSERT(port > 0);
- grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
@@ -470,8 +485,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, addr, addr_len);
- sp->addr_len = addr_len;
+ memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
sp->port = port;
sp->port_index = port_index;
sp->fd_index = fd_index;
@@ -504,14 +518,13 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
int fd = -1;
int port = -1;
grpc_dualstack_mode dsmode;
- err = grpc_create_dualstack_socket(&listener->addr.sockaddr, SOCK_STREAM, 0,
- &dsmode, &fd);
+ err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode,
+ &fd);
if (err != GRPC_ERROR_NONE) return err;
- err = prepare_socket(fd, &listener->addr.sockaddr, listener->addr_len, true,
- &port);
+ err = prepare_socket(fd, &listener->addr, true, &port);
if (err != GRPC_ERROR_NONE) return err;
listener->server->nports++;
- grpc_sockaddr_to_string(&addr_str, &listener->addr.sockaddr, 1);
+ grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
@@ -524,8 +537,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
sp->server = listener->server;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, listener->addr.untyped, listener->addr_len);
- sp->addr_len = listener->addr_len;
+ memcpy(&sp->addr, &listener->addr, sizeof(grpc_resolved_address));
sp->port = port;
sp->port_index = listener->port_index;
sp->fd_index = listener->fd_index + count - i;
@@ -540,19 +552,19 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *out_port) {
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *out_port) {
grpc_tcp_listener *sp;
grpc_tcp_listener *sp2 = NULL;
int fd;
grpc_dualstack_mode dsmode;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in wild4;
- struct sockaddr_in6 wild6;
- struct sockaddr_in addr4_copy;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wild4;
+ grpc_resolved_address wild6;
+ grpc_resolved_address addr4_copy;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
int port;
unsigned port_index = 0;
unsigned fd_index = 0;
@@ -560,19 +572,19 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
if (s->tail != NULL) {
port_index = s->tail->port_index + 1;
}
- grpc_unlink_if_unix_domain_socket((struct sockaddr *)addr);
+ grpc_unlink_if_unix_domain_socket(addr);
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
- sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp,
- &sockname_len)) {
- port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len)) {
+ port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, addr->len);
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
break;
@@ -584,8 +596,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
sp = NULL;
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
@@ -593,12 +604,10 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
/* Try listening on IPv6 first. */
- addr = (struct sockaddr *)&wild6;
- addr_len = sizeof(wild6);
+ addr = &wild6;
errs[0] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (errs[0] == GRPC_ERROR_NONE) {
- errs[0] = add_socket_to_server(s, fd, addr, addr_len, port_index,
- fd_index, &sp);
+ errs[0] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
@@ -607,23 +616,20 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && sp != NULL) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
+ grpc_sockaddr_set_port(&wild4, sp->port);
}
}
- addr = (struct sockaddr *)&wild4;
- addr_len = sizeof(wild4);
+ addr = &wild4;
}
errs[1] = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
if (errs[1] == GRPC_ERROR_NONE) {
if (dsmode == GRPC_DSMODE_IPV4 &&
grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
sp2 = sp;
- errs[1] =
- add_socket_to_server(s, fd, addr, addr_len, port_index, fd_index, &sp);
+ errs[1] = add_socket_to_server(s, fd, addr, port_index, fd_index, &sp);
if (sp2 != NULL && sp != NULL) {
sp2->sibling = sp;
sp->is_sibling = 1;
@@ -704,7 +710,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->pollset_count = pollset_count;
sp = s->head;
while (sp != NULL) {
- if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr.sockaddr) &&
+ if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr) &&
pollset_count > 1) {
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
new file mode 100644
index 0000000000..73e4db3d65
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -0,0 +1,365 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+
+/* one listening port */
+typedef struct grpc_tcp_listener grpc_tcp_listener;
+struct grpc_tcp_listener {
+ uv_tcp_t *handle;
+ grpc_tcp_server *server;
+ unsigned port_index;
+ int port;
+ /* linked list */
+ struct grpc_tcp_listener *next;
+};
+
+struct grpc_tcp_server {
+ gpr_refcount refs;
+
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void *on_accept_cb_arg;
+
+ int open_ports;
+
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ grpc_tcp_listener *tail;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
+
+ /* shutdown callback */
+ grpc_closure *shutdown_complete;
+};
+
+grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+ const grpc_channel_args *args,
+ grpc_tcp_server **server) {
+ grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ (void)args;
+ gpr_ref_init(&s->refs, 1);
+ s->on_accept_cb = NULL;
+ s->on_accept_cb_arg = NULL;
+ s->open_ports = 0;
+ s->head = NULL;
+ s->tail = NULL;
+ s->shutdown_starting.head = NULL;
+ s->shutdown_starting.tail = NULL;
+ s->shutdown_complete = shutdown_complete;
+ *server = s;
+ return GRPC_ERROR_NONE;
+}
+
+grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+ gpr_ref(&s->refs);
+ return s;
+}
+
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
+ grpc_closure *shutdown_starting) {
+ grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
+ GRPC_ERROR_NONE);
+}
+
+static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ }
+
+ while (s->head) {
+ grpc_tcp_listener *sp = s->head;
+ s->head = sp->next;
+ sp->next = NULL;
+ gpr_free(sp->handle);
+ gpr_free(sp);
+ }
+ gpr_free(s);
+}
+
+static void handle_close_callback(uv_handle_t *handle) {
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ sp->server->open_ports--;
+ if (sp->server->open_ports == 0) {
+ finish_shutdown(&exec_ctx, sp->server);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ int immediately_done = 0;
+ grpc_tcp_listener *sp;
+
+ if (s->open_ports == 0) {
+ immediately_done = 1;
+ }
+ for (sp = s->head; sp; sp = sp->next) {
+ uv_close((uv_handle_t *)sp->handle, handle_close_callback);
+ }
+
+ if (immediately_done) {
+ finish_shutdown(exec_ctx, s);
+ }
+}
+
+void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+ if (gpr_unref(&s->refs)) {
+ /* Complete shutdown_starting work before destroying. */
+ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+ if (exec_ctx == NULL) {
+ grpc_exec_ctx_flush(&local_exec_ctx);
+ tcp_server_destroy(&local_exec_ctx, s);
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ } else {
+ grpc_exec_ctx_finish(&local_exec_ctx);
+ tcp_server_destroy(exec_ctx, s);
+ }
+ }
+}
+
+static void accepted_connection_close_cb(uv_handle_t *handle) {
+ gpr_free(handle);
+}
+
+static void on_connect(uv_stream_t *server, int status) {
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
+ grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
+ uv_tcp_t *client;
+ grpc_endpoint *ep = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_resolved_address peer_name;
+ char *peer_name_string;
+ int err;
+
+ if (status < 0) {
+ gpr_log(GPR_INFO, "Skipping on_accept due to error: %s",
+ uv_strerror(status));
+ return;
+ }
+ client = gpr_malloc(sizeof(uv_tcp_t));
+ uv_tcp_init(uv_default_loop(), client);
+ // UV documentation says this is guaranteed to succeed
+ uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
+ // If the server has not been started, we discard incoming connections
+ if (sp->server->on_accept_cb == NULL) {
+ uv_close((uv_handle_t *)client, accepted_connection_close_cb);
+ } else {
+ peer_name_string = NULL;
+ memset(&peer_name, 0, sizeof(grpc_resolved_address));
+ peer_name.len = sizeof(struct sockaddr_storage);
+ err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
+ (int *)&peer_name.len);
+ if (err == 0) {
+ peer_name_string = grpc_sockaddr_to_uri(&peer_name);
+ } else {
+ gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
+ }
+ ep = grpc_tcp_create(client, peer_name_string);
+ sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
+ &acceptor);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+}
+
+static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
+ const grpc_resolved_address *addr,
+ unsigned port_index,
+ grpc_tcp_listener **listener) {
+ grpc_tcp_listener *sp = NULL;
+ int port = -1;
+ int status;
+ grpc_error *error;
+ grpc_resolved_address sockname_temp;
+
+ // The last argument to uv_tcp_bind is flags
+ status = uv_tcp_bind(handle, (struct sockaddr *)addr->addr, 0);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("Failed to bind to port");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ status = uv_listen((uv_stream_t *)handle, SOMAXCONN, on_connect);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("Failed to listen to port");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ sockname_temp.len = (int)sizeof(struct sockaddr_storage);
+ status = uv_tcp_getsockname(handle, (struct sockaddr *)&sockname_temp.addr,
+ (int *)&sockname_temp.len);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("getsockname failed");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ return error;
+ }
+
+ port = grpc_sockaddr_get_port(&sockname_temp);
+
+ GPR_ASSERT(port >= 0);
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
+ }
+ s->tail = sp;
+ sp->server = s;
+ sp->handle = handle;
+ sp->port = port;
+ sp->port_index = port_index;
+ handle->data = sp;
+ s->open_ports++;
+ GPR_ASSERT(sp->handle);
+ *listener = sp;
+
+ return GRPC_ERROR_NONE;
+}
+
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *port) {
+ // This function is mostly copied from tcp_server_windows.c
+ grpc_tcp_listener *sp = NULL;
+ uv_tcp_t *handle;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wildcard;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
+ unsigned port_index = 0;
+ int status;
+ grpc_error *error = GRPC_ERROR_NONE;
+
+ if (s->tail != NULL) {
+ port_index = s->tail->port_index + 1;
+ }
+
+ /* Check if this is a wildcard port, and if so, try to keep the port the same
+ as some previously created listener. */
+ if (grpc_sockaddr_get_port(addr) == 0) {
+ for (sp = s->head; sp; sp = sp->next) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == uv_tcp_getsockname(sp->handle,
+ (struct sockaddr *)&sockname_temp.addr,
+ (int *)&sockname_temp.len)) {
+ *port = grpc_sockaddr_get_port(&sockname_temp);
+ if (*port > 0) {
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
+ grpc_sockaddr_set_port(allocated_addr, *port);
+ addr = allocated_addr;
+ break;
+ }
+ }
+ }
+ }
+
+ if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
+ addr = &addr6_v4mapped;
+ }
+
+ /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
+ if (grpc_sockaddr_is_wildcard(addr, port)) {
+ grpc_sockaddr_make_wildcard6(*port, &wildcard);
+
+ addr = &wildcard;
+ }
+
+ handle = gpr_malloc(sizeof(uv_tcp_t));
+ status = uv_tcp_init(uv_default_loop(), handle);
+ if (status == 0) {
+ error = add_socket_to_server(s, handle, addr, port_index, &sp);
+ } else {
+ error = GRPC_ERROR_CREATE("Failed to initialize UV tcp handle");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ }
+
+ gpr_free(allocated_addr);
+
+ if (error != GRPC_ERROR_NONE) {
+ grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING(
+ "Failed to add port to server", &error, 1);
+ GRPC_ERROR_UNREF(error);
+ error = error_out;
+ *port = -1;
+ } else {
+ GPR_ASSERT(sp != NULL);
+ *port = sp->port;
+ }
+ return error;
+}
+
+void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
+ grpc_pollset **pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void *cb_arg) {
+ grpc_tcp_listener *sp;
+ (void)pollsets;
+ (void)pollset_count;
+ GPR_ASSERT(on_accept_cb);
+ GPR_ASSERT(!server->on_accept_cb);
+ server->on_accept_cb = on_accept_cb;
+ server->on_accept_cb_arg = cb_arg;
+ for (sp = server->head; sp; sp = sp->next) {
+ GPR_ASSERT(uv_listen((uv_stream_t *)sp->handle, SOMAXCONN, on_connect) ==
+ 0);
+ }
+}
+
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
+ grpc_tcp_server *s) {}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index 4ff05601fa..ae54c70d2d 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -31,13 +31,13 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
-#include <io.h>
+#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include <io.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -48,6 +48,8 @@
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/pollset_windows.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_windows.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/iomgr/tcp_windows.h"
@@ -98,14 +100,32 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
+
+ grpc_resource_quota *resource_quota;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
+grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
+ grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ s->resource_quota = grpc_resource_quota_create(NULL);
+ for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
+ if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
+ if (args->args[i].type == GRPC_ARG_POINTER) {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ s->resource_quota =
+ grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
+ } else {
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
+ gpr_free(s);
+ return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
+ " must be a pointer to a resource quota");
+ }
+ }
+ }
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
s->active_ports = 0;
@@ -135,6 +155,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
grpc_winsocket_destroy(sp->socket);
gpr_free(sp);
}
+ grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
@@ -183,10 +204,10 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
/* Prepare (bind) a recently-created socket for listening. */
-static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
- size_t addr_len, int *port) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static grpc_error *prepare_socket(SOCKET sock,
+ const grpc_resolved_address *addr,
+ int *port) {
+ grpc_resolved_address sockname_temp;
grpc_error *error = GRPC_ERROR_NONE;
error = grpc_tcp_prepare_socket(sock);
@@ -194,7 +215,8 @@ static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
goto failure;
}
- if (bind(sock, addr, (int)addr_len) == SOCKET_ERROR) {
+ if (bind(sock, (const struct sockaddr *)addr->addr, (int)addr->len) ==
+ SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
}
@@ -204,14 +226,15 @@ static grpc_error *prepare_socket(SOCKET sock, const struct sockaddr *addr,
goto failure;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(sock, (struct sockaddr *)&sockname_temp, &sockname_len) ==
- SOCKET_ERROR) {
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
+ if (getsockname(sock, (struct sockaddr *)sockname_temp.addr,
+ &sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
goto failure;
}
+ sockname_temp.len = sockname_temp_len;
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ *port = grpc_sockaddr_get_port(&sockname_temp);
return GRPC_ERROR_NONE;
failure:
@@ -307,15 +330,16 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
- struct sockaddr_storage peer_name;
+ grpc_resolved_address peer_name;
char *peer_name_string;
char *fd_name;
- int peer_name_len = sizeof(peer_name);
DWORD transfered_bytes;
DWORD flags;
BOOL wsa_success;
int err;
+ peer_name.len = sizeof(struct sockaddr_storage);
+
/* The general mechanism for shutting down is to queue abortion calls. While
this is necessary in the read/write case, it's useless for the accept
case. We only need to adjust the pending callback count */
@@ -353,9 +377,12 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
gpr_free(utf8_message);
}
- err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
+ int peer_name_len = (int)peer_name.len;
+ err =
+ getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
+ peer_name.len = peer_name_len;
if (!err) {
- peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
+ peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
char *utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);
@@ -363,7 +390,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
- peer_name_string);
+ sp->server->resource_quota, peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} else {
@@ -385,8 +412,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
- const struct sockaddr *addr,
- size_t addr_len, unsigned port_index,
+ const grpc_resolved_address *addr,
+ unsigned port_index,
grpc_tcp_listener **listener) {
grpc_tcp_listener *sp = NULL;
int port = -1;
@@ -410,7 +437,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
return NULL;
}
- error = prepare_socket(sock, addr, addr_len, &port);
+ error = prepare_socket(sock, addr, &port);
if (error != GRPC_ERROR_NONE) {
return error;
}
@@ -441,15 +468,15 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
- size_t addr_len, int *port) {
+grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ int *port) {
grpc_tcp_listener *sp = NULL;
SOCKET sock;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in6 wildcard;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wildcard;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
unsigned port_index = 0;
grpc_error *error = GRPC_ERROR_NONE;
@@ -461,13 +488,15 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
- sockname_len = sizeof(sockname_temp);
+ int sockname_temp_len = sizeof(struct sockaddr_storage);
if (0 == getsockname(sp->socket->socket,
- (struct sockaddr *)&sockname_temp, &sockname_len)) {
- *port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ (struct sockaddr *)sockname_temp.addr,
+ &sockname_temp_len)) {
+ sockname_temp.len = sockname_temp_len;
+ *port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
break;
@@ -477,16 +506,14 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
}
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
if (grpc_sockaddr_is_wildcard(addr, port)) {
grpc_sockaddr_make_wildcard6(*port, &wildcard);
- addr = (struct sockaddr *)&wildcard;
- addr_len = sizeof(wildcard);
+ addr = &wildcard;
}
sock = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
@@ -496,7 +523,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
goto done;
}
- error = add_socket_to_server(s, sock, addr, addr_len, port_index, &sp);
+ error = add_socket_to_server(s, sock, addr, port_index, &sp);
done:
gpr_free(allocated_addr);
@@ -535,4 +562,4 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s) {}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
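
The Windows create path above now scans the channel args for GRPC_ARG_RESOURCE_QUOTA and takes a ref on the supplied quota. A hedged sketch of what a caller-supplied arg might look like, matching the GRPC_ARG_POINTER handling above; the pointer vtable wiring that fully formed channel args carry is elided, and the commented-out call only indicates where the args would be passed:

  grpc_resource_quota *rq = grpc_resource_quota_create(NULL);
  grpc_arg arg;
  memset(&arg, 0, sizeof(arg));
  arg.type = GRPC_ARG_POINTER;
  arg.key = GRPC_ARG_RESOURCE_QUOTA;
  arg.value.pointer.p = rq;
  grpc_channel_args args = {1, &arg};
  /* grpc_tcp_server_create(&exec_ctx, shutdown_closure, &args, &server); */
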
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
new file mode 100644
index 0000000000..3860fe3e9b
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -0,0 +1,338 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/slice_buffer.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/support/string.h"
+
+int grpc_tcp_trace = 0;
+
+typedef struct {
+ grpc_endpoint base;
+ gpr_refcount refcount;
+
+ uv_tcp_t *handle;
+
+ grpc_closure *read_cb;
+ grpc_closure *write_cb;
+
+ gpr_slice read_slice;
+ gpr_slice_buffer *read_slices;
+ gpr_slice_buffer *write_slices;
+ uv_buf_t *write_buffers;
+
+ bool shutting_down;
+ char *peer_string;
+ grpc_pollset *pollset;
+} grpc_tcp;
+
+static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); }
+
+/*#define GRPC_TCP_REFCOUNT_DEBUG*/
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count - 1);
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp);
+ }
+}
+
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+ int line) {
+ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+ reason, tcp->refcount.count, tcp->refcount.count + 1);
+ gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(grpc_tcp *tcp) {
+ if (gpr_unref(&tcp->refcount)) {
+ tcp_free(tcp);
+ }
+}
+
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
+ uv_buf_t *buf) {
+ grpc_tcp *tcp = handle->data;
+ (void)suggested_size;
+ tcp->read_slice = gpr_slice_malloc(GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+ buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
+ buf->len = GPR_SLICE_LENGTH(tcp->read_slice);
+}
+
+static void read_callback(uv_stream_t *stream, ssize_t nread,
+ const uv_buf_t *buf) {
+ gpr_slice sub;
+ grpc_error *error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_tcp *tcp = stream->data;
+ grpc_closure *cb = tcp->read_cb;
+ if (nread == 0) {
+ // Nothing happened. Wait for the next callback
+ return;
+ }
+ TCP_UNREF(tcp, "read");
+ tcp->read_cb = NULL;
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_read_stop(stream);
+ if (nread == UV_EOF) {
+ error = GRPC_ERROR_CREATE("EOF");
+ } else if (nread > 0) {
+ // Successful read
+ sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
+ gpr_slice_buffer_add(tcp->read_slices, sub);
+ error = GRPC_ERROR_NONE;
+ if (grpc_tcp_trace) {
+ size_t i;
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "read: error=%s", str);
+ grpc_error_free_string(str);
+ for (i = 0; i < tcp->read_slices->count; i++) {
+ char *dump = gpr_dump_slice(tcp->read_slices->slices[i],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+ dump);
+ gpr_free(dump);
+ }
+ }
+ } else {
+ // nread < 0: Error
+ error = GRPC_ERROR_CREATE("TCP Read failed");
+ }
+ grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *read_slices, grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ int status;
+ grpc_error *error = GRPC_ERROR_NONE;
+ GPR_ASSERT(tcp->read_cb == NULL);
+ tcp->read_cb = cb;
+ tcp->read_slices = read_slices;
+ gpr_slice_buffer_reset_and_unref(read_slices);
+ TCP_REF(tcp, "read");
+ // TODO(murgatroid99): figure out what the return value here means
+ status =
+ uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
+ if (status != 0) {
+ error = GRPC_ERROR_CREATE("TCP Read failed at start");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+ }
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
+ grpc_error_free_string(str);
+ }
+}
+
+static void write_callback(uv_write_t *req, int status) {
+ grpc_tcp *tcp = req->data;
+ grpc_error *error;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure *cb = tcp->write_cb;
+ tcp->write_cb = NULL;
+ TCP_UNREF(tcp, "write");
+ if (status == 0) {
+ error = GRPC_ERROR_NONE;
+ } else {
+ error = GRPC_ERROR_CREATE("TCP Write failed");
+ }
+ if (grpc_tcp_trace) {
+ const char *str = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
+ grpc_error_free_string(str);
+ }
+ gpr_free(tcp->write_buffers);
+ gpr_free(req);
+ grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ gpr_slice_buffer *write_slices,
+ grpc_closure *cb) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ uv_buf_t *buffers;
+ unsigned int buffer_count;
+ unsigned int i;
+ gpr_slice *slice;
+ uv_write_t *write_req;
+
+ if (grpc_tcp_trace) {
+ size_t j;
+
+ for (j = 0; j < write_slices->count; j++) {
+ char *data = gpr_dump_slice(write_slices->slices[j],
+ GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+ gpr_free(data);
+ }
+ }
+
+ if (tcp->shutting_down) {
+ grpc_exec_ctx_sched(exec_ctx, cb,
+ GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+ return;
+ }
+
+ GPR_ASSERT(tcp->write_cb == NULL);
+ tcp->write_slices = write_slices;
+ GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
+ if (tcp->write_slices->count == 0) {
+ // No slices means we don't have to do anything,
+ // and libuv doesn't like empty writes
+ grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+ return;
+ }
+
+ tcp->write_cb = cb;
+ buffer_count = (unsigned int)tcp->write_slices->count;
+ buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ for (i = 0; i < buffer_count; i++) {
+ slice = &tcp->write_slices->slices[i];
+ buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice);
+ buffers[i].len = GPR_SLICE_LENGTH(*slice);
+ }
+ write_req = gpr_malloc(sizeof(uv_write_t));
+ write_req->data = tcp;
+ TCP_REF(tcp, "write");
+ // TODO(murgatroid99): figure out what the return value here means
+ uv_write(write_req, (uv_stream_t *)tcp->handle, buffers, buffer_count,
+ write_callback);
+}
+
+static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset *pollset) {
+ // Pollsets are not otherwise used with libuv; just record the pollset
+ // on the endpoint for reference
+ (void)exec_ctx;
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ tcp->pollset = pollset;
+}
+
+static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
+ (void)ep;
+ (void)pollset;
+}
+
+static void shutdown_callback(uv_shutdown_t *req, int status) { gpr_free(req); }
+
+static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ if (!tcp->shutting_down) {
+ tcp->shutting_down = true;
+ uv_shutdown_t *req = gpr_malloc(sizeof(uv_shutdown_t));
+ uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
+ }
+}
+
+static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+ grpc_network_status_unregister_endpoint(ep);
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
+ TCP_UNREF(tcp, "destroy");
+}
+
+static char *uv_get_peer(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return gpr_strdup(tcp->peer_string);
+}
+
+static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
+
+static grpc_endpoint_vtable vtable = {uv_endpoint_read,
+ uv_endpoint_write,
+ uv_get_workqueue,
+ uv_add_to_pollset,
+ uv_add_to_pollset_set,
+ uv_endpoint_shutdown,
+ uv_destroy,
+ uv_get_peer};
+
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
+ grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
+
+ if (grpc_tcp_trace) {
+ gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
+ }
+
+ /* Disable Nagle's Algorithm */
+ uv_tcp_nodelay(handle, 1);
+
+ memset(tcp, 0, sizeof(grpc_tcp));
+ tcp->base.vtable = &vtable;
+ tcp->handle = handle;
+ handle->data = tcp;
+ gpr_ref_init(&tcp->refcount, 1);
+ tcp->peer_string = gpr_strdup(peer_string);
+ tcp->shutting_down = false;
+ /* Tell network status tracking code about the new endpoint */
+ grpc_network_status_register_endpoint(&tcp->base);
+
+#ifndef GRPC_UV_TCP_HOLD_LOOP
+ uv_unref((uv_handle_t *)handle);
+#endif
+
+ return &tcp->base;
+}
+
+#endif /* GRPC_UV */
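
The libuv endpoint above drives reads through the closure-based endpoint contract visible in uv_endpoint_read: the caller supplies a slice buffer and a closure, uv_read_start fills one slice per callback, and the closure is scheduled on an exec_ctx. A rough usage sketch, assuming grpc_endpoint_read from endpoint.h dispatches to the vtable above and that `ep` came from grpc_tcp_create; the callback name is hypothetical:

  static void on_read_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
    gpr_slice_buffer *incoming = arg;
    /* consume incoming->slices here */
  }

  /* elsewhere, with an endpoint `ep` obtained from grpc_tcp_create: */
  gpr_slice_buffer incoming;
  grpc_closure read_done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_slice_buffer_init(&incoming);
  grpc_closure_init(&read_done, on_read_done, &incoming);
  grpc_endpoint_read(&exec_ctx, ep, &incoming, &read_done);
  grpc_exec_ctx_finish(&exec_ctx);
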
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
new file mode 100644
index 0000000000..eed41151ea
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_UV_H
+#define GRPC_CORE_LIB_IOMGR_TCP_UV_H
+/*
+ Low level TCP "bottom half" implementation, for use by transports built on
+ top of a TCP connection.
+
+ Note that this file does not (yet) include APIs for creating the socket in
+ the first place.
+
+ All calls passing slice transfer ownership of a slice refcount unless
+ otherwise specified.
+*/
+
+#include "src/core/lib/iomgr/endpoint.h"
+
+#include <uv.h>
+
+extern int grpc_tcp_trace;
+
+#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
+
+grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
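
The accept path in tcp_server_uv.c above is the intended producer of these endpoints: a freshly accepted uv_tcp_t is handed straight to grpc_tcp_create. A condensed sketch of that hand-off, with error handling omitted; `server_handle` and `peer_string` are assumptions standing in for the listener's handle and the formatted peer address:

  uv_tcp_t *client = gpr_malloc(sizeof(uv_tcp_t));
  uv_tcp_init(uv_default_loop(), client);
  if (uv_accept((uv_stream_t *)server_handle, (uv_stream_t *)client) == 0) {
    grpc_endpoint *ep = grpc_tcp_create(client, peer_string);
    /* hand `ep` to the transport / on_accept callback */
  }
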
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 448a72671c..46f0491d10 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_WINSOCK_SOCKET
+#ifdef GRPC_WINSOCK_SOCKET
#include <limits.h>
@@ -109,14 +109,29 @@ typedef struct grpc_tcp {
gpr_slice_buffer *write_slices;
gpr_slice_buffer *read_slices;
+ grpc_resource_user resource_user;
+
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
gpr_mu mu;
int shutting_down;
+ gpr_atm resource_user_shutdown_count;
+
char *peer_string;
} grpc_tcp;
+static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void win_maybe_shutdown_resource_user(grpc_exec_ctx *exec_ctx,
+ grpc_tcp *tcp) {
+ if (gpr_atm_full_fetch_add(&tcp->resource_user_shutdown_count, 1) == 0) {
+ grpc_resource_user_shutdown(exec_ctx, &tcp->resource_user,
+ grpc_closure_create(win_unref_closure, tcp));
+ }
+}
+
static void tcp_free(grpc_tcp *tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
@@ -155,6 +170,11 @@ static void tcp_unref(grpc_tcp *tcp) {
static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
#endif
+static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ TCP_UNREF(arg, "resource_user");
+}
+
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
grpc_tcp *tcp = tcpp;
@@ -376,12 +396,14 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
callback. See the comments in on_read and on_write. */
tcp->shutting_down = 1;
grpc_winsocket_shutdown(tcp->socket);
+ win_maybe_shutdown_resource_user(exec_ctx, tcp);
gpr_mu_unlock(&tcp->mu);
}
static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
+ win_maybe_shutdown_resource_user(exec_ctx, tcp);
TCP_UNREF(tcp, "destroy");
}
@@ -392,6 +414,11 @@ static char *win_get_peer(grpc_endpoint *ep) {
static grpc_workqueue *win_get_workqueue(grpc_endpoint *ep) { return NULL; }
+static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ return &tcp->resource_user;
+}
+
static grpc_endpoint_vtable vtable = {win_read,
win_write,
win_get_workqueue,
@@ -399,22 +426,26 @@ static grpc_endpoint_vtable vtable = {win_read,
win_add_to_pollset_set,
win_shutdown,
win_destroy,
+ win_get_resource_user,
win_get_peer};
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->socket = socket;
gpr_mu_init(&tcp->mu);
- gpr_ref_init(&tcp->refcount, 1);
+ /* Init to 2 refs: one for the caller, one released by win_unref_closure once
+ the resource user has shut down */
+ gpr_ref_init(&tcp->refcount, 2);
grpc_closure_init(&tcp->on_read, on_read, tcp);
grpc_closure_init(&tcp->on_write, on_write, tcp);
tcp->peer_string = gpr_strdup(peer_string);
+ grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
return &tcp->base;
}
-#endif /* GPR_WINSOCK_SOCKET */
+#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 86d777235e..4402de1c38 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -50,7 +50,9 @@
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket,
+ grpc_resource_quota *resource_quota,
+ char *peer_string);
grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index a825d2a28b..20fe98c4a7 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -34,26 +34,27 @@
#ifndef GRPC_CORE_LIB_IOMGR_TIMER_H
#define GRPC_CORE_LIB_IOMGR_TIMER_H
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+#include "src/core/lib/iomgr/timer_uv.h"
+#else
+#include "src/core/lib/iomgr/timer_generic.h"
+#endif /* GRPC_UV */
+
#include <grpc/support/port_platform.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
-typedef struct grpc_timer {
- gpr_timespec deadline;
- uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
- int triggered;
- struct grpc_timer *next;
- struct grpc_timer *prev;
- grpc_closure closure;
-} grpc_timer;
+typedef struct grpc_timer grpc_timer;
/* Initialize *timer. When expired or canceled, timer_cb will be called with
- *timer_cb_arg and status to indicate if it expired (SUCCESS) or was
- canceled (CANCELLED). timer_cb is guaranteed to be called exactly once,
- and application code should check the status to determine how it was
- invoked. The application callback is also responsible for maintaining
- information about when to free up any user-level state. */
+ *timer_cb_arg and error set to indicate if it expired (GRPC_ERROR_NONE) or
+ was canceled (GRPC_ERROR_CANCELLED). timer_cb is guaranteed to be called
+ exactly once, and application code should check the error to determine
+ how it was invoked. The application callback is also responsible for
+ maintaining information about when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
void *timer_cb_arg, gpr_timespec now);
@@ -74,8 +75,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
In all of these cases, the cancellation is still considered successful.
They are essentially distinguished in that the timer_cb will be run
- exactly once from either the cancellation (with status CANCELLED)
- or from the activation (with status SUCCESS)
+ exactly once from either the cancellation (with error GRPC_ERROR_CANCELLED)
+ or from the activation (with error GRPC_ERROR_NONE).
Note carefully that the callback function MAY occur in the same callstack
as grpc_timer_cancel. It's expected that most timers will be cancelled (their
@@ -83,14 +84,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
that cancellation costs as little as possible. Making callbacks run inline
matches this aim.
- Requires: cancel() must happen after add() on a given timer */
+ Requires: cancel() must happen after init() on a given timer */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
/* iomgr internal api for dealing with timers */
/* Check for timers to be run, and run them.
Return true if timer callbacks were executed.
- Drops drop_mu if it is non-null before executing callbacks.
If next is non-null, TRY to update *next with the next running timer
IF that timer occurs before *next current value.
*next is never guaranteed to be updated on any given execution; however,
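
The timer API itself is unchanged by the split into generic and libuv backends; only the struct definition moves into the per-backend headers below. A minimal sketch of the init/cancel contract described above, with a hypothetical on_timer callback; the gpr_time helpers are from grpc/support/time.h:

  static void on_timer(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
    if (error == GRPC_ERROR_NONE) {
      /* deadline expired */
    } /* else GRPC_ERROR_CANCELLED: grpc_timer_cancel won the race */
  }

  grpc_timer timer;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  grpc_timer_init(&exec_ctx, &timer,
                  gpr_time_add(now, gpr_time_from_seconds(5, GPR_TIMESPAN)),
                  on_timer, NULL, now);
  /* ... later, at most once: */
  grpc_timer_cancel(&exec_ctx, &timer);
  grpc_exec_ctx_finish(&exec_ctx);
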
diff --git a/src/core/lib/iomgr/timer.c b/src/core/lib/iomgr/timer_generic.c
index 9975fa1671..00058f9d86 100644
--- a/src/core/lib/iomgr/timer.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -31,6 +31,10 @@
*
*/
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_TIMER_USE_GENERIC
+
#include "src/core/lib/iomgr/timer.h"
#include <grpc/support/log.h>
@@ -382,3 +386,5 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE("Shutting down timer system"));
}
+
+#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
new file mode 100644
index 0000000000..e4494adb5f
--- /dev/null
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H
+#define GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H
+
+#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+struct grpc_timer {
+ gpr_timespec deadline;
+ uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
+ int triggered;
+ struct grpc_timer *next;
+ struct grpc_timer *prev;
+ grpc_closure closure;
+};
+
+#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.c
index 2ad9bb9cd2..f736d335e6 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.c
@@ -31,6 +31,10 @@
*
*/
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_TIMER_USE_GENERIC
+
#include "src/core/lib/iomgr/timer_heap.h"
#include <string.h>
@@ -144,3 +148,5 @@ grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap) {
void grpc_timer_heap_pop(grpc_timer_heap *heap) {
grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap));
}
+
+#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
new file mode 100644
index 0000000000..cfcb89268b
--- /dev/null
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/iomgr/timer.h"
+
+#include <uv.h>
+
+static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+
+static void stop_uv_timer(uv_timer_t *handle) {
+ uv_timer_stop(handle);
+ uv_unref((uv_handle_t *)handle);
+ uv_close((uv_handle_t *)handle, timer_close_callback);
+}
+
+void run_expired_timer(uv_timer_t *handle) {
+ grpc_timer *timer = (grpc_timer *)handle->data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GPR_ASSERT(!timer->triggered);
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ stop_uv_timer(handle);
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
+ gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
+ void *timer_cb_arg, gpr_timespec now) {
+ uint64_t timeout;
+ uv_timer_t *uv_timer;
+ grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+ if (gpr_time_cmp(deadline, now) <= 0) {
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+ return;
+ }
+ timer->triggered = 0;
+ timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ uv_timer = gpr_malloc(sizeof(uv_timer_t));
+ uv_timer_init(uv_default_loop(), uv_timer);
+ uv_timer->data = timer;
+ timer->uv_timer = uv_timer;
+ uv_timer_start(uv_timer, run_expired_timer, timeout, 0);
+}
+
+void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+ if (!timer->triggered) {
+ timer->triggered = 1;
+ grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+ stop_uv_timer((uv_timer_t *)timer->uv_timer);
+ }
+}
+
+bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
+ gpr_timespec *next) {
+ return false;
+}
+
+void grpc_timer_list_init(gpr_timespec now) {}
+void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
new file mode 100644
index 0000000000..3de383ebd5
--- /dev/null
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TIMER_UV_H
+#define GRPC_CORE_LIB_IOMGR_TIMER_UV_H
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+struct grpc_timer {
+ grpc_closure closure;
+ /* This is actually a uv_timer_t*, but we want to keep platform-specific
+ types out of headers */
+ void *uv_timer;
+ int triggered;
+};
+
+#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index edf7b133e9..fd0c7a0f9d 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -36,9 +36,9 @@
#define _GNU_SOURCE
#endif
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET
#include "src/core/lib/iomgr/udp_server.h"
@@ -62,32 +62,30 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
+#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
-#define INIT_PORT_CAP 2
-
/* one listening port */
-typedef struct {
+typedef struct grpc_udp_listener grpc_udp_listener;
+struct grpc_udp_listener {
int fd;
grpc_fd *emfd;
grpc_udp_server *server;
- union {
- uint8_t untyped[GRPC_MAX_SOCKADDR_SIZE];
- struct sockaddr sockaddr;
- } addr;
- size_t addr_len;
+ grpc_resolved_address addr;
grpc_closure read_closure;
grpc_closure destroyed_closure;
grpc_udp_server_read_cb read_cb;
grpc_udp_server_orphan_cb orphan_cb;
-} server_port;
+
+ struct grpc_udp_listener *next;
+};
/* the overall server */
struct grpc_udp_server {
gpr_mu mu;
- gpr_cv cv;
/* active port count: how many ports are actually still listening */
size_t active_ports;
@@ -97,10 +95,10 @@ struct grpc_udp_server {
/* is this server shutting down? (boolean) */
int shutdown;
- /* all listening ports */
- server_port *ports;
- size_t nports;
- size_t port_capacity;
+ /* linked list of server ports */
+ grpc_udp_listener *head;
+ grpc_udp_listener *tail;
+ unsigned nports;
/* shutdown callback */
grpc_closure *shutdown_complete;
@@ -116,24 +114,29 @@ struct grpc_udp_server {
grpc_udp_server *grpc_udp_server_create(void) {
grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
- gpr_cv_init(&s->cv);
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
- s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
+ s->head = NULL;
+ s->tail = NULL;
s->nports = 0;
- s->port_capacity = INIT_PORT_CAP;
return s;
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
- grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ if (s->shutdown_complete != NULL) {
+ grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+ }
gpr_mu_destroy(&s->mu);
- gpr_cv_destroy(&s->cv);
- gpr_free(s->ports);
+ while (s->head) {
+ grpc_udp_listener *sp = s->head;
+ s->head = sp->next;
+ gpr_free(sp);
+ }
+
gpr_free(s);
}
@@ -154,8 +157,6 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
- size_t i;
-
/* delete ALL the things */
gpr_mu_lock(&s->mu);
@@ -164,9 +165,11 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
return;
}
- if (s->nports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
+ if (s->head) {
+ grpc_udp_listener *sp;
+ for (sp = s->head; sp; sp = sp->next) {
+ grpc_unlink_if_unix_domain_socket(&sp->addr);
+
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
@@ -187,7 +190,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_closure *on_done) {
- size_t i;
+ grpc_udp_listener *sp;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -197,14 +200,10 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
/* shutdown all fd's */
if (s->active_ports) {
- for (i = 0; i < s->nports; i++) {
- server_port *sp = &s->ports[i];
- /* Call the orphan_cb to signal that the FD is about to be closed and
- * should no longer be used. */
+ for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
sp->orphan_cb(sp->emfd);
-
- grpc_fd_shutdown(exec_ctx, s->ports[i].emfd);
+ grpc_fd_shutdown(exec_ctx, sp->emfd);
}
gpr_mu_unlock(&s->mu);
} else {
@@ -214,10 +213,9 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
}
/* Prepare a recently-created socket for listening. */
-static int prepare_socket(int fd, const struct sockaddr *addr,
- size_t addr_len) {
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+static int prepare_socket(int fd, const grpc_resolved_address *addr) {
+ grpc_resolved_address sockname_temp;
+ struct sockaddr *addr_ptr = (struct sockaddr *)addr->addr;
/* Set send/receive socket buffers to 1 MB */
int buffer_size_bytes = 1024 * 1024;
@@ -237,15 +235,15 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
if (grpc_set_socket_ip_pktinfo_if_possible(fd) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Unable to set ip_pktinfo.");
goto error;
- } else if (addr->sa_family == AF_INET6) {
+ } else if (addr_ptr->sa_family == AF_INET6) {
if (grpc_set_socket_ipv6_recvpktinfo_if_possible(fd) != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "Unable to set ipv6_recvpktinfo.");
goto error;
}
}
- GPR_ASSERT(addr_len < ~(socklen_t)0);
- if (bind(fd, addr, (socklen_t)addr_len) < 0) {
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ if (bind(fd, addr_ptr, (socklen_t)addr->len) < 0) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
@@ -253,8 +251,10 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- sockname_len = sizeof(sockname_temp);
- if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+
+ if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len) < 0) {
goto error;
}
@@ -270,7 +270,7 @@ static int prepare_socket(int fd, const struct sockaddr *addr,
goto error;
}
- return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ return grpc_sockaddr_get_port(&sockname_temp);
error:
if (fd >= 0) {
@@ -281,10 +281,10 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- server_port *sp = arg;
+ grpc_udp_listener *sp = arg;
+ gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
- gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(exec_ctx, sp->server);
@@ -300,34 +300,37 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+ gpr_mu_unlock(&sp->server->mu);
}
static int add_socket_to_server(grpc_udp_server *s, int fd,
- const struct sockaddr *addr, size_t addr_len,
+ const grpc_resolved_address *addr,
grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb) {
- server_port *sp;
+ grpc_udp_listener *sp;
int port;
char *addr_str;
char *name;
- port = prepare_socket(fd, addr, addr_len);
+ port = prepare_socket(fd, addr);
if (port >= 0) {
- grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+ grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
- /* append it to the list under a lock */
- if (s->nports == s->port_capacity) {
- s->port_capacity *= 2;
- s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
+ s->nports++;
+ sp = gpr_malloc(sizeof(grpc_udp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
}
- sp = &s->ports[s->nports++];
+ s->tail = sp;
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
- memcpy(sp->addr.untyped, addr, addr_len);
- sp->addr_len = addr_len;
+ memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
sp->read_cb = read_cb;
sp->orphan_cb = orphan_cb;
GPR_ASSERT(sp->emfd);
@@ -338,34 +341,34 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
return port;
}
-int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
- size_t addr_len, grpc_udp_server_read_cb read_cb,
+int grpc_udp_server_add_port(grpc_udp_server *s,
+ const grpc_resolved_address *addr,
+ grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb) {
+ grpc_udp_listener *sp;
int allocated_port1 = -1;
int allocated_port2 = -1;
- unsigned i;
int fd;
grpc_dualstack_mode dsmode;
- struct sockaddr_in6 addr6_v4mapped;
- struct sockaddr_in wild4;
- struct sockaddr_in6 wild6;
- struct sockaddr_in addr4_copy;
- struct sockaddr *allocated_addr = NULL;
- struct sockaddr_storage sockname_temp;
- socklen_t sockname_len;
+ grpc_resolved_address addr6_v4mapped;
+ grpc_resolved_address wild4;
+ grpc_resolved_address wild6;
+ grpc_resolved_address addr4_copy;
+ grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address sockname_temp;
int port;
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
- for (i = 0; i < s->nports; i++) {
- sockname_len = sizeof(sockname_temp);
- if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
- &sockname_len)) {
- port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
+ for (sp = s->head; sp; sp = sp->next) {
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+ if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len)) {
+ port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(addr_len);
- memcpy(allocated_addr, addr, addr_len);
+ allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
break;
@@ -375,8 +378,7 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
}
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
- addr = (const struct sockaddr *)&addr6_v4mapped;
- addr_len = sizeof(addr6_v4mapped);
+ addr = &addr6_v4mapped;
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
@@ -384,22 +386,19 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
/* Try listening on IPv6 first. */
- addr = (struct sockaddr *)&wild6;
- addr_len = sizeof(wild6);
+ addr = &wild6;
// TODO(rjshade): Test and propagate the returned grpc_error*:
grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd);
- allocated_port1 =
- add_socket_to_server(s, fd, addr, addr_len, read_cb, orphan_cb);
+ allocated_port1 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && allocated_port1 > 0) {
- grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
+ grpc_sockaddr_set_port(&wild4, allocated_port1);
}
- addr = (struct sockaddr *)&wild4;
- addr_len = sizeof(wild4);
+ addr = &wild4;
}
// TODO(rjshade): Test and propagate the returned grpc_error*:
@@ -409,11 +408,9 @@ int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
}
if (dsmode == GRPC_DSMODE_IPV4 &&
grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
- addr = (struct sockaddr *)&addr4_copy;
- addr_len = sizeof(addr4_copy);
+ addr = &addr4_copy;
}
- allocated_port2 =
- add_socket_to_server(s, fd, addr, addr_len, read_cb, orphan_cb);
+ allocated_port2 = add_socket_to_server(s, fd, addr, read_cb, orphan_cb);
done:
gpr_free(allocated_addr);
@@ -421,27 +418,40 @@ done:
}
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
- return (port_index < s->nports) ? s->ports[port_index].fd : -1;
+ grpc_udp_listener *sp;
+ if (port_index >= s->nports) {
+ return -1;
+ }
+
+ for (sp = s->head; sp && port_index != 0; sp = sp->next) {
+ --port_index;
+ }
+ return sp->fd;
}
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
grpc_pollset **pollsets, size_t pollset_count,
grpc_server *server) {
- size_t i, j;
+ size_t i;
gpr_mu_lock(&s->mu);
+ grpc_udp_listener *sp;
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
s->grpc_server = server;
- for (i = 0; i < s->nports; i++) {
- for (j = 0; j < pollset_count; j++) {
- grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
+
+ sp = s->head;
+ while (sp != NULL) {
+ for (i = 0; i < pollset_count; i++) {
+ grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
}
- s->ports[i].read_closure.cb = on_read;
- s->ports[i].read_closure.cb_arg = &s->ports[i];
- grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
- &s->ports[i].read_closure);
+ sp->read_closure.cb = on_read;
+ sp->read_closure.cb_arg = sp;
+ grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+
s->active_ports++;
+ sp = sp->next;
}
+
gpr_mu_unlock(&s->mu);
}
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index 33c5ce11cd..f3c466a031 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/resolve_address.h"
/* Forward decl of struct grpc_server */
/* This is not typedef'ed to avoid a typedef-redefinition error */
@@ -59,7 +60,7 @@ void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
grpc_pollset **pollsets, size_t pollset_count,
struct grpc_server *server);
-int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
+int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
/* Add a port to the server, returning port number on success, or negative
on failure.
@@ -71,8 +72,9 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
- size_t addr_len, grpc_udp_server_read_cb read_cb,
+int grpc_udp_server_add_port(grpc_udp_server *s,
+ const grpc_resolved_address *addr,
+ grpc_udp_server_read_cb read_cb,
grpc_udp_server_orphan_cb orphan_cb);
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
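
grpc_udp_server_add_port follows the same grpc_resolved_address convention as the TCP servers above. A rough caller-side sketch; my_read_cb and my_orphan_cb are placeholders for callbacks whose signatures come from the typedefs earlier in this header, and the port number is illustrative:

  grpc_resolved_address addr;
  struct sockaddr_in *sin = (struct sockaddr_in *)addr.addr;
  memset(&addr, 0, sizeof(addr));
  sin->sin_family = AF_INET;
  sin->sin_addr.s_addr = htonl(INADDR_ANY);
  sin->sin_port = htons(12345);
  addr.len = sizeof(struct sockaddr_in);
  int port = grpc_udp_server_add_port(udp_server, &addr, my_read_cb, my_orphan_cb);
  GPR_ASSERT(port > 0);
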
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c
index 0e7670e5a5..030acd9811 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.c
@@ -30,16 +30,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+#include "src/core/lib/iomgr/port.h"
-#include "src/core/lib/iomgr/unix_sockets_posix.h"
+#ifdef GRPC_HAVE_UNIX_SOCKET
-#ifdef GPR_HAVE_UNIX_SOCKET
+#include "src/core/lib/iomgr/sockaddr.h"
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
+#include "src/core/lib/iomgr/unix_sockets_posix.h"
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -61,15 +64,18 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
return GRPC_ERROR_NONE;
}
-int grpc_is_unix_socket(const struct sockaddr *addr) {
+int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
return addr->sa_family == AF_UNIX;
}
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {
+void grpc_unlink_if_unix_domain_socket(
+ const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return;
}
- struct sockaddr_un *un = (struct sockaddr_un *)addr;
+ struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
struct stat st;
if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
@@ -77,7 +83,9 @@ void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {
}
}
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address *resolved_addr) {
+ const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return NULL;
}
diff --git a/src/core/lib/iomgr/unix_sockets_posix.h b/src/core/lib/iomgr/unix_sockets_posix.h
index db0516d945..21afd3aa15 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.h
+++ b/src/core/lib/iomgr/unix_sockets_posix.h
@@ -34,22 +34,23 @@
#ifndef GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H
#define GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/sockaddr.h"
void grpc_create_socketpair_if_unix(int sv[2]);
grpc_error *grpc_resolve_unix_domain_address(
const char *name, grpc_resolved_addresses **addresses);
-int grpc_is_unix_socket(const struct sockaddr *addr);
+int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr);
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr);
+void grpc_unlink_if_unix_domain_socket(
+ const grpc_resolved_address *resolved_addr);
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr);
+char *grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address *resolved_addr);
#endif /* GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H */
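
The unix-socket helpers now take the same wrapper type, so a caller that used to pass a struct sockaddr_un directly wraps it first. A small sketch under that assumption (the socket path is purely illustrative):

  grpc_resolved_address resolved;
  struct sockaddr_un *un = (struct sockaddr_un *)resolved.addr;
  memset(&resolved, 0, sizeof(resolved));
  un->sun_family = AF_UNIX;
  strcpy(un->sun_path, "/tmp/example.sock");
  resolved.len = sizeof(struct sockaddr_un);
  if (grpc_is_unix_socket(&resolved)) {
    grpc_unlink_if_unix_domain_socket(&resolved);
  }
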
diff --git a/src/core/lib/iomgr/unix_sockets_posix_noop.c b/src/core/lib/iomgr/unix_sockets_posix_noop.c
index 56b47c3daf..1daf5152c1 100644
--- a/src/core/lib/iomgr/unix_sockets_posix_noop.c
+++ b/src/core/lib/iomgr/unix_sockets_posix_noop.c
@@ -33,7 +33,7 @@
#include "src/core/lib/iomgr/unix_sockets_posix.h"
-#ifndef GPR_HAVE_UNIX_SOCKET
+#ifndef GRPC_HAVE_UNIX_SOCKET
#include <grpc/support/log.h>
@@ -50,11 +50,11 @@ grpc_error *grpc_resolve_unix_domain_address(
return GRPC_ERROR_CREATE("Unix domain sockets are not supported on Windows");
}
-int grpc_is_unix_socket(const struct sockaddr *addr) { return false; }
+int grpc_is_unix_socket(const grpc_resolved_address *addr) { return false; }
-void grpc_unlink_if_unix_domain_socket(const struct sockaddr *addr) {}
+void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address *addr) {}
-char *grpc_sockaddr_to_uri_unix_if_possible(const struct sockaddr *addr) {
+char *grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address *addr) {
return NULL;
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.c
new file mode 100644
index 0000000000..da4c2870cd
--- /dev/null
+++ b/src/core/lib/iomgr/wakeup_fd_cv.c
@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_POSIX_WAKEUP_FD
+
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+
+#define MAX_TABLE_RESIZE 256
+
+extern cv_fd_table g_cvfds;
+
+static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
+ unsigned int i, newsize;
+ int idx;
+ gpr_mu_lock(&g_cvfds.mu);
+ if (!g_cvfds.free_fds) {
+ newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
+ g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+ for (i = g_cvfds.size; i < newsize; i++) {
+ g_cvfds.cvfds[i].is_set = 0;
+ g_cvfds.cvfds[i].cvs = NULL;
+ g_cvfds.cvfds[i].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[i];
+ }
+ g_cvfds.size = newsize;
+ }
+
+ idx = (int)(g_cvfds.free_fds - g_cvfds.cvfds);
+ g_cvfds.free_fds = g_cvfds.free_fds->next_free;
+ g_cvfds.cvfds[idx].cvs = NULL;
+ g_cvfds.cvfds[idx].is_set = 0;
+ fd_info->read_fd = IDX_TO_FD(idx);
+ fd_info->write_fd = -1;
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
+ cv_node* cvn;
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+ cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+ while (cvn) {
+ gpr_cv_signal(cvn->cv);
+ cvn = cvn->next;
+ }
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
+ gpr_mu_lock(&g_cvfds.mu);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+ gpr_mu_unlock(&g_cvfds.mu);
+ return GRPC_ERROR_NONE;
+}
+
+static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
+ if (fd_info->read_fd == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_cvfds.mu);
+ // Assert that there are no active pollers
+ GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
+ g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+ gpr_mu_unlock(&g_cvfds.mu);
+}
+
+static int cv_check_availability(void) { return 1; }
+
+const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable = {
+ cv_fd_init, cv_fd_consume, cv_fd_wakeup, cv_fd_destroy,
+ cv_check_availability};
+
+#endif /* GRPC_POSIX_WAKEUP_FD */
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
new file mode 100644
index 0000000000..ac16be1750
--- /dev/null
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * wakeup_fd_cv uses condition variables to implement wakeup fds.
+ *
+ * It is intended for use only in cases when eventfd() and pipe() are not
+ * available. It can only be used with the "poll" engine.
+ *
+ * Implementation:
+ * A global table of cv wakeup fds is maintained. A cv wakeup fd is a negative
+ * file descriptor. poll() is then run in a background thread with only the
+ * real socket fds while we wait on a condition variable triggered by either the
+ * poll() completion or a wakeup_fd() call.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
+#define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H
+
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/ev_posix.h"
+
+#define FD_TO_IDX(fd) (-(fd)-1)
+#define IDX_TO_FD(idx) (-(idx)-1)
+
+typedef struct cv_node {
+ gpr_cv* cv;
+ struct cv_node* next;
+} cv_node;
+
+typedef struct fd_node {
+ int is_set;
+ cv_node* cvs;
+ struct fd_node* next_free;
+} fd_node;
+
+typedef struct cv_fd_table {
+ gpr_mu mu;
+ int pollcount;
+ int shutdown;
+ gpr_cv shutdown_complete;
+ fd_node* cvfds;
+ fd_node* free_fds;
+ unsigned int size;
+ grpc_poll_function_type poll;
+} cv_fd_table;
+
+#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */
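The FD_TO_IDX / IDX_TO_FD macros are the core trick here: table index 0 maps to fd -1, index 1 to fd -2, and so on, so cv wakeup fds are always negative and can never collide with real kernel descriptors. A tiny standalone round-trip check, using nothing beyond the two macros above:

    #include <assert.h>

    #define FD_TO_IDX(fd) (-(fd)-1)
    #define IDX_TO_FD(idx) (-(idx)-1)

    int main(void) {
      int idx;
      /* Every table index maps to a strictly negative fd and back again. */
      for (idx = 0; idx < 256; idx++) {
        int fd = IDX_TO_FD(idx);
        assert(fd < 0);
        assert(FD_TO_IDX(fd) == idx);
      }
      return 0;
    }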
diff --git a/src/core/lib/iomgr/wakeup_fd_eventfd.c b/src/core/lib/iomgr/wakeup_fd_eventfd.c
index 95f6102330..373e21d3e1 100644
--- a/src/core/lib/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/lib/iomgr/wakeup_fd_eventfd.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_LINUX_EVENTFD
+#ifdef GRPC_LINUX_EVENTFD
#include <errno.h>
#include <sys/eventfd.h>
@@ -94,4 +94,4 @@ const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
eventfd_check_availability};
-#endif /* GPR_LINUX_EVENTFD */
+#endif /* GRPC_LINUX_EVENTFD */
diff --git a/src/core/lib/iomgr/wakeup_fd_nospecial.c b/src/core/lib/iomgr/wakeup_fd_nospecial.c
index cb2f707dc5..611bced029 100644
--- a/src/core/lib/iomgr/wakeup_fd_nospecial.c
+++ b/src/core/lib/iomgr/wakeup_fd_nospecial.c
@@ -36,9 +36,9 @@
* systems without anything better than pipe.
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_NO_SPECIAL_WAKEUP_FD
+#ifdef GRPC_POSIX_NO_SPECIAL_WAKEUP_FD
#include <stddef.h>
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
@@ -48,4 +48,4 @@ static int check_availability_invalid(void) { return 0; }
const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
NULL, NULL, NULL, NULL, check_availability_invalid};
-#endif /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */
+#endif /* GRPC_POSIX_NO_SPECIAL_WAKEUP_FD */
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.c b/src/core/lib/iomgr/wakeup_fd_pipe.c
index 4e5dbdcb73..183f0eb930 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.c
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.c
@@ -31,9 +31,9 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_WAKEUP_FD
+#ifdef GRPC_POSIX_WAKEUP_FD
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
@@ -47,11 +47,10 @@
static grpc_error* pipe_init(grpc_wakeup_fd* fd_info) {
int pipefd[2];
- /* TODO(klempner): Make this nonfatal */
int r = pipe(pipefd);
if (0 != r) {
gpr_log(GPR_ERROR, "pipe creation failed (%d): %s", errno, strerror(errno));
- abort();
+ return GRPC_OS_ERROR(errno, "pipe");
}
grpc_error* err;
err = grpc_set_socket_nonblocking(pipefd[0], 1);
@@ -95,8 +94,13 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) {
}
static int pipe_check_availability(void) {
- /* Assume that pipes are always available. */
- return 1;
+ grpc_wakeup_fd fd;
+ if (pipe_init(&fd) == GRPC_ERROR_NONE) {
+ pipe_destroy(&fd);
+ return 1;
+ } else {
+ return 0;
+ }
}
const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {
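pipe_check_availability now probes instead of assuming: it builds a throwaway pipe-based wakeup fd and tears it down again, so environments where pipe() is denied (for example, tight sandboxes) fall through to the next option. The same idea at the raw syscall level, shown here as a standalone sketch independent of the vtable:

    #include <unistd.h>

    /* Returns 1 if pipe() works in this environment, 0 otherwise. */
    static int raw_pipe_available(void) {
      int fds[2];
      if (pipe(fds) != 0) return 0;
      close(fds[0]);
      close(fds[1]);
      return 1;
    }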
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.c b/src/core/lib/iomgr/wakeup_fd_posix.c
index 046208abc8..85526402bd 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.c
+++ b/src/core/lib/iomgr/wakeup_fd_posix.c
@@ -31,42 +31,71 @@
*
*/
-#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
-#ifdef GPR_POSIX_WAKEUP_FD
+#ifdef GRPC_POSIX_WAKEUP_FD
#include <stddef.h>
+#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+extern grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
+
int grpc_allow_specialized_wakeup_fd = 1;
+int grpc_allow_pipe_wakeup_fd = 1;
+
+int has_real_wakeup_fd = 1;
+int cv_wakeup_fds_enabled = 0;
void grpc_wakeup_fd_global_init(void) {
if (grpc_allow_specialized_wakeup_fd &&
grpc_specialized_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
- } else {
+ } else if (grpc_allow_pipe_wakeup_fd &&
+ grpc_pipe_wakeup_fd_vtable.check_availability()) {
wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
+ } else {
+ has_real_wakeup_fd = 0;
}
}
void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; }
+int grpc_has_wakeup_fd(void) { return has_real_wakeup_fd; }
+
+int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; }
+
+void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; }
+
grpc_error *grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.init(fd_info);
+ }
return wakeup_fd_vtable->init(fd_info);
}
grpc_error *grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.consume(fd_info);
+ }
return wakeup_fd_vtable->consume(fd_info);
}
grpc_error *grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
+ if (cv_wakeup_fds_enabled) {
+ return grpc_cv_wakeup_fd_vtable.wakeup(fd_info);
+ }
return wakeup_fd_vtable->wakeup(fd_info);
}
void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
- wakeup_fd_vtable->destroy(fd_info);
+ if (cv_wakeup_fds_enabled) {
+ grpc_cv_wakeup_fd_vtable.destroy(fd_info);
+ } else {
+ wakeup_fd_vtable->destroy(fd_info);
+ }
}
-#endif /* GPR_POSIX_WAKEUP_FD */
+#endif /* GRPC_POSIX_WAKEUP_FD */
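With this change the selection order becomes: specialized (eventfd) wakeup fds, then pipe-based ones, and finally no real wakeup fd at all, with condition-variable wakeup fds as an explicit opt-in. A hedged sketch of how a poller engine might use the new hooks; the cv fd table (g_cvfds) is assumed to be initialized by the engine before cv fds are enabled, and pick_wakeup_strategy is an illustrative name:

    #include "src/core/lib/iomgr/wakeup_fd_posix.h"

    /* Sketch: fall back to condition-variable wakeup fds only when neither
       eventfd- nor pipe-based wakeup fds are usable on this system. */
    static void pick_wakeup_strategy(void) {
      grpc_wakeup_fd_global_init();
      if (!grpc_has_wakeup_fd()) {
        grpc_enable_cv_wakeup_fds(1);
      }
    }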
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.h b/src/core/lib/iomgr/wakeup_fd_posix.h
index e269f242d8..71d32d97ba 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.h
+++ b/src/core/lib/iomgr/wakeup_fd_posix.h
@@ -71,6 +71,10 @@ void grpc_wakeup_fd_global_destroy(void);
* purposes only.*/
void grpc_wakeup_fd_global_init_force_fallback(void);
+int grpc_has_wakeup_fd(void);
+int grpc_cv_wakeup_fds_enabled(void);
+void grpc_enable_cv_wakeup_fds(int enable);
+
typedef struct grpc_wakeup_fd grpc_wakeup_fd;
typedef struct grpc_wakeup_fd_vtable {
@@ -88,6 +92,7 @@ struct grpc_wakeup_fd {
};
extern int grpc_allow_specialized_wakeup_fd;
+extern int grpc_allow_pipe_wakeup_fd;
#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index b2805dc66c..73d9849843 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -39,10 +39,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
-
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/lib/iomgr/workqueue_posix.h"
-#endif
+#include "src/core/lib/iomgr/port.h"
#ifdef GPR_WINDOWS
#include "src/core/lib/iomgr/workqueue_windows.h"
@@ -58,20 +55,20 @@
string will be printed alongside the refcount. When it is not defined, the
string will be discarded at compilation time. */
-//#define GRPC_WORKQUEUE_REFCOUNT_DEBUG
+/*#define GRPC_WORKQUEUE_REFCOUNT_DEBUG*/
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
#define GRPC_WORKQUEUE_REF(p, r) \
- (grpc_workqueue_ref((p), __FILE__, __LINE__, (r)), (p))
+ grpc_workqueue_ref((p), __FILE__, __LINE__, (r))
#define GRPC_WORKQUEUE_UNREF(exec_ctx, p, r) \
grpc_workqueue_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason);
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason);
#else
-#define GRPC_WORKQUEUE_REF(p, r) (grpc_workqueue_ref((p)), (p))
+#define GRPC_WORKQUEUE_REF(p, r) grpc_workqueue_ref((p))
#define GRPC_WORKQUEUE_UNREF(cl, p, r) grpc_workqueue_unref((cl), (p))
-void grpc_workqueue_ref(grpc_workqueue *workqueue);
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue);
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
#endif
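Because grpc_workqueue_ref now returns the workqueue in both the debug and non-debug builds, GRPC_WORKQUEUE_REF can be used directly as an expression. A small sketch under that assumption (the function names and the "pending_work" reason string are illustrative):

    #include "src/core/lib/iomgr/workqueue.h"

    /* Take a ref and hand the same pointer back to the caller. */
    static grpc_workqueue *hold_workqueue(grpc_workqueue *wq) {
      return GRPC_WORKQUEUE_REF(wq, "pending_work");
    }

    static void drop_workqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *wq) {
      GRPC_WORKQUEUE_UNREF(exec_ctx, wq, "pending_work");
    }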
diff --git a/src/core/lib/iomgr/workqueue_posix.c b/src/core/lib/iomgr/workqueue_posix.c
deleted file mode 100644
index ecfea68f56..0000000000
--- a/src/core/lib/iomgr/workqueue_posix.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_SOCKET
-
-#include "src/core/lib/iomgr/workqueue.h"
-
-#include <stdio.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/profiling/timers.h"
-
-static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
-
-grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
- grpc_workqueue **workqueue) {
- char name[32];
- *workqueue = gpr_malloc(sizeof(grpc_workqueue));
- gpr_ref_init(&(*workqueue)->refs, 1);
- gpr_atm_no_barrier_store(&(*workqueue)->state, 1);
- grpc_error *err = grpc_wakeup_fd_init(&(*workqueue)->wakeup_fd);
- if (err != GRPC_ERROR_NONE) {
- gpr_free(*workqueue);
- return err;
- }
- sprintf(name, "workqueue:%p", (void *)(*workqueue));
- (*workqueue)->wakeup_read_fd = grpc_fd_create(
- GRPC_WAKEUP_FD_GET_READ_FD(&(*workqueue)->wakeup_fd), name);
- gpr_mpscq_init(&(*workqueue)->queue);
- grpc_closure_init(&(*workqueue)->read_closure, on_readable, *workqueue);
- grpc_fd_notify_on_read(exec_ctx, (*workqueue)->wakeup_read_fd,
- &(*workqueue)->read_closure);
- return GRPC_ERROR_NONE;
-}
-
-static void workqueue_destroy(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- grpc_fd_shutdown(exec_ctx, workqueue->wakeup_read_fd);
-}
-
-static void workqueue_orphan(grpc_exec_ctx *exec_ctx,
- grpc_workqueue *workqueue) {
- if (gpr_atm_full_fetch_add(&workqueue->state, -1) == 1) {
- workqueue_destroy(exec_ctx, workqueue);
- }
-}
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason) {
- if (workqueue == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p ref %d -> %d %s",
- workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count + 1,
- reason);
- gpr_ref(&workqueue->refs);
-}
-#else
-void grpc_workqueue_ref(grpc_workqueue *workqueue) {
- if (workqueue == NULL) return;
- gpr_ref(&workqueue->refs);
-}
-#endif
-
-#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- const char *file, int line, const char *reason) {
- if (workqueue == NULL) return;
- gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WORKQUEUE:%p unref %d -> %d %s",
- workqueue, (int)workqueue->refs.count, (int)workqueue->refs.count - 1,
- reason);
- if (gpr_unref(&workqueue->refs)) {
- workqueue_orphan(exec_ctx, workqueue);
- }
-}
-#else
-void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- if (workqueue == NULL) return;
- if (gpr_unref(&workqueue->refs)) {
- workqueue_orphan(exec_ctx, workqueue);
- }
-}
-#endif
-
-static void drain(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- abort();
-}
-
-static void wakeup(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
- GPR_TIMER_MARK("workqueue.wakeup", 0);
- grpc_error *err = grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
- if (!GRPC_LOG_IF_ERROR("wakeupfd_wakeup", err)) {
- drain(exec_ctx, workqueue);
- }
-}
-
-static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.on_readable", 0);
-
- grpc_workqueue *workqueue = arg;
-
- if (error != GRPC_ERROR_NONE) {
- /* HACK: let wakeup_fd code know that we stole the fd */
- workqueue->wakeup_fd.read_fd = 0;
- grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
- grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
- GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
- gpr_free(workqueue);
- } else {
- error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
- gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
- if (error == GRPC_ERROR_NONE) {
- grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
- &workqueue->read_closure);
- } else {
- /* recurse to get error handling */
- on_readable(exec_ctx, arg, error);
- }
- if (n == NULL) {
- /* try again - queue in an inconsistant state */
- wakeup(exec_ctx, workqueue);
- } else {
- switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
- case 3: // had one count, one unorphaned --> done, unorphaned
- break;
- case 2: // had one count, one orphaned --> done, orphaned
- workqueue_destroy(exec_ctx, workqueue);
- break;
- case 1:
- case 0:
- // these values are illegal - representing an already done or
- // deleted workqueue
- GPR_UNREACHABLE_CODE(break);
- default:
- // schedule a wakeup since there's more to do
- wakeup(exec_ctx, workqueue);
- }
- grpc_closure *cl = (grpc_closure *)n;
- grpc_error *clerr = cl->error;
- cl->cb(exec_ctx, cl->cb_arg, clerr);
- GRPC_ERROR_UNREF(clerr);
- }
- }
-
- GPR_TIMER_END("workqueue.on_readable", 0);
-}
-
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
- grpc_closure *closure, grpc_error *error) {
- GPR_TIMER_BEGIN("workqueue.enqueue", 0);
- gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
- GPR_ASSERT(last & 1);
- closure->error = error;
- gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next);
- if (last == 1) {
- wakeup(exec_ctx, workqueue);
- }
- GPR_TIMER_END("workqueue.enqueue", 0);
-}
-
-#endif /* GPR_POSIX_SOCKET */
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
new file mode 100644
index 0000000000..e58ca476cc
--- /dev/null
+++ b/src/core/lib/iomgr/workqueue_uv.c
@@ -0,0 +1,66 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_UV
+
+#include "src/core/lib/iomgr/workqueue.h"
+
+// Minimal implementation of grpc_workqueue for libuv
+// Works by directly enqueuing workqueue items onto the current execution
+// context, which is at least correct, if not performant or in the spirit of
+// workqueues.
+
+void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+
+#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return workqueue;
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ const char *file, int line, const char *reason) {}
+#else
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
+void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
+#endif
+
+void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
+ grpc_closure *closure, grpc_error *error) {
+ grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+}
+
+#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/workqueue_uv.h b/src/core/lib/iomgr/workqueue_uv.h
new file mode 100644
index 0000000000..be3f8e4d93
--- /dev/null
+++ b/src/core/lib/iomgr/workqueue_uv.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
+#define GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H
+
+#endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_UV_H */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index ee81dc248e..5c93d3c59e 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -43,12 +43,16 @@
// workqueues.
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
-void grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file, int line,
- const char *reason) {}
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue, const char *file,
+ int line, const char *reason) {
+ return workqueue;
+}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
const char *file, int line, const char *reason) {}
#else
-void grpc_workqueue_ref(grpc_workqueue *workqueue) {}
+grpc_workqueue *grpc_workqueue_ref(grpc_workqueue *workqueue) {
+ return workqueue;
+}
void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
#endif
diff --git a/src/core/lib/profiling/basic_timers.c b/src/core/lib/profiling/basic_timers.c
index 51813d0461..bdf9af2339 100644
--- a/src/core/lib/profiling/basic_timers.c
+++ b/src/core/lib/profiling/basic_timers.c
@@ -83,6 +83,7 @@ static int g_shutdown;
static gpr_thd_id g_writing_thread;
static __thread int g_thread_id;
static int g_next_thread_id;
+static int g_writing_enabled = 1;
static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
if (list->head == NULL) {
@@ -177,7 +178,7 @@ static void flush_logs(gpr_timer_log_list *list) {
}
}
-static void finish_writing() {
+static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
@@ -230,6 +231,10 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
+ if (!g_writing_enabled) {
+ return;
+ }
+
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
@@ -261,6 +266,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
gpr_timers_log_add(tagstr, END, important, file, line);
}
+void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
+
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
@@ -272,4 +279,6 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char *filename) {}
+
+void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index c8567e8137..621cdbf656 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -50,6 +50,8 @@ void gpr_timer_end(const char *tagstr, int important, const char *file,
void gpr_timers_set_log_filename(const char *filename);
+void gpr_timer_set_enabled(int enabled);
+
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
diff --git a/src/core/lib/security/credentials/google_default/credentials_posix.c b/src/core/lib/security/credentials/google_default/credentials_generic.c
index 42c9d7f997..d13d8c5200 100644
--- a/src/core/lib/security/credentials/google_default/credentials_posix.c
+++ b/src/core/lib/security/credentials/google_default/credentials_generic.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,6 @@
*
*/
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_FILE
-
#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
#include <grpc/support/alloc.h>
@@ -46,16 +42,13 @@
char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
- char *home = gpr_getenv("HOME");
- if (home == NULL) {
- gpr_log(GPR_ERROR, "Could not get HOME environment variable.");
+ char *base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
+ if (base == NULL) {
+ gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR
+ " environment variable.");
return NULL;
}
- gpr_asprintf(&result, "%s/.config/%s/%s", home,
- GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY,
- GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE);
- gpr_free(home);
+ gpr_asprintf(&result, "%s/%s", base, GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX);
+ gpr_free(base);
return result;
}
-
-#endif /* GPR_POSIX_FILE */
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index 312a3d4f90..cb5ba554b0 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -124,11 +124,14 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
- &exec_ctx, &context, &detector.pollent, &request,
+ &exec_ctx, &context, &detector.pollent, resource_quota, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
grpc_closure_create(on_compute_engine_detection_http_response, &detector),
&detector.response);
+ grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
grpc_exec_ctx_flush(&exec_ctx);
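The same create/pass/unref pattern around grpc_httpcli_get recurs in the jwt and oauth2 credentials changes below. A condensed sketch of the pattern, assuming the grpc_http_response type used for these response fields; fetch_with_quota and the "credentials_fetch" quota name are placeholders, not part of the patch:

    #include "src/core/lib/http/httpcli.h"
    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "src/core/lib/iomgr/resource_quota.h"

    /* Sketch of the quota plumbing added around each credentials HTTP fetch:
       create a per-request quota, hand it to the HTTP client (which takes its
       own reference), then drop ours immediately. */
    static void fetch_with_quota(grpc_exec_ctx *exec_ctx,
                                 grpc_httpcli_context *http_ctx,
                                 grpc_polling_entity *pollent,
                                 const grpc_httpcli_request *request,
                                 gpr_timespec deadline, grpc_closure *on_response,
                                 grpc_http_response *response) {
      grpc_resource_quota *resource_quota =
          grpc_resource_quota_create("credentials_fetch");
      grpc_httpcli_get(exec_ctx, http_ctx, pollent, resource_quota, request,
                       deadline, on_response, response);
      grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
    }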
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index fac4377e2c..b55546ded0 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -34,12 +34,26 @@
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H
#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/security/credentials/credentials.h"
#define GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY "gcloud"
#define GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE \
"application_default_credentials.json"
+#ifdef GPR_WINDOWS
+#define GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR "APPDATA"
+#define GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX \
+ GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY \
+ "/" GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE
+#else
+#define GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR "HOME"
+#define GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX \
+ ".config/" GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY \
+ "/" GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE
+#endif
+
void grpc_flush_cached_google_default_credentials(void);
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
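Concretely, the composed well-known path becomes $HOME/.config/gcloud/application_default_credentials.json on POSIX and %APPDATA%/gcloud/application_default_credentials.json on Windows. A small sketch mirroring what credentials_generic.c does with these macros; print_well_known_path is illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    #include "src/core/lib/security/credentials/google_default/google_default_credentials.h"

    /* Compose and print this platform's well-known credentials path. */
    static void print_well_known_path(void) {
      const char *base = getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
      if (base != NULL) {
        printf("%s/%s\n", base, GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX);
      }
    }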
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 73eb2e3258..43eb642515 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -657,11 +657,17 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
grpc_closure_create(on_keys_retrieved, ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
grpc_json_destroy(json);
gpr_free(req.host);
return;
@@ -764,10 +770,16 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
rsp_idx = HTTP_RESPONSE_OPENID;
}
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, &req,
+ exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
http_cb, &ctx->responses[rsp_idx]);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
return;
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index c22ea5c468..d980577c46 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -307,9 +307,15 @@ static void compute_engine_fetch_oauth2(
request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
- grpc_httpcli_get(exec_ctx, httpcli_context, pollent, &request, deadline,
- grpc_closure_create(response_cb, metadata_req),
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials");
+ grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
+ deadline, grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
}
grpc_call_credentials *grpc_google_compute_engine_credentials_create(
@@ -357,10 +363,16 @@ static void refresh_token_fetch_oauth2(
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
- grpc_httpcli_post(exec_ctx, httpcli_context, pollent, &request, body,
- strlen(body), deadline,
+ /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
+ channel. This would allow us to cancel an authentication query when under
+ extreme memory pressure. */
+ grpc_resource_quota *resource_quota =
+ grpc_resource_quota_create("oauth2_credentials_refresh");
+ grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
+ &request, body, strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
+ grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
gpr_free(body);
}
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index acb0113ea8..3924997d31 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -370,6 +370,12 @@ static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
}
+static grpc_resource_user *endpoint_get_resource_user(
+ grpc_endpoint *secure_ep) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ return grpc_endpoint_get_resource_user(ep->wrapped_ep);
+}
+
static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_get_workqueue,
@@ -377,6 +383,7 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_add_to_pollset_set,
endpoint_shutdown,
endpoint_destroy,
+ endpoint_get_resource_user,
endpoint_get_peer};
grpc_endpoint *grpc_secure_endpoint_create(
diff --git a/src/core/lib/support/log.c b/src/core/lib/support/log.c
index 899f1218b6..af1651dae5 100644
--- a/src/core/lib/support/log.c
+++ b/src/core/lib/support/log.c
@@ -60,8 +60,9 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
void gpr_log_message(const char *file, int line, gpr_log_severity severity,
const char *message) {
- if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print))
+ if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) {
return;
+ }
gpr_log_func_args lfargs;
memset(&lfargs, 0, sizeof(lfargs));
@@ -82,11 +83,11 @@ void gpr_log_verbosity_init() {
gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
if (verbosity != NULL) {
- if (strcmp(verbosity, "DEBUG") == 0) {
+ if (gpr_stricmp(verbosity, "DEBUG") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG;
- } else if (strcmp(verbosity, "INFO") == 0) {
+ } else if (gpr_stricmp(verbosity, "INFO") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO;
- } else if (strcmp(verbosity, "ERROR") == 0) {
+ } else if (gpr_stricmp(verbosity, "ERROR") == 0) {
min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR;
}
gpr_free(verbosity);
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index 30c1e67647..d17fb9da4b 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -304,3 +304,14 @@ void gpr_strvec_add(gpr_strvec *sv, char *str) {
char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
}
+
+int gpr_stricmp(const char *a, const char *b) {
+ int ca, cb;
+ do {
+ ca = tolower(*a);
+ cb = tolower(*b);
+ ++a;
+ ++b;
+ } while (ca == cb && ca && cb);
+ return ca - cb;
+}
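A small usage sketch of the new helper, matching how log.c above now parses GRPC_VERBOSITY regardless of case:

    #include <assert.h>

    #include "src/core/lib/support/string.h"

    int main(void) {
      /* gpr_stricmp lower-cases both sides character by character. */
      assert(gpr_stricmp("DEBUG", "debug") == 0);
      assert(gpr_stricmp("Info", "INFO") == 0);
      assert(gpr_stricmp("error", "warning") < 0); /* 'e' < 'w' */
      return 0;
    }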
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index 2b6bb3eec6..9a94e9471c 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -118,6 +118,10 @@ void gpr_strvec_add(gpr_strvec *strs, char *add);
total_length as per gpr_strjoin */
char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
+/** Case-insensitive string comparison: returns <0 if lower(a)<lower(b), 0 if
+ lower(a)==lower(b), >0 if lower(a)>lower(b) */
+int gpr_stricmp(const char *a, const char *b);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/support/thd.c b/src/core/lib/support/thd.c
index 41daeb5d0e..40f53a18e5 100644
--- a/src/core/lib/support/thd.c
+++ b/src/core/lib/support/thd.c
@@ -33,7 +33,7 @@
/* Posix implementation for gpr threads. */
-#include <memory.h>
+#include <string.h>
#include <grpc/support/thd.h>
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index b0f66f4f61..6c25952c0a 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -30,6 +30,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+
#include <assert.h>
#include <limits.h>
#include <stdio.h>
@@ -223,33 +224,37 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
-grpc_call *grpc_call_create(
- grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
- grpc_completion_queue *cq, grpc_pollset_set *pollset_set_alternative,
- const void *server_transport_data, grpc_mdelem **add_initial_metadata,
- size_t add_initial_metadata_count, gpr_timespec send_deadline) {
+grpc_error *grpc_call_create(const grpc_call_create_args *args,
+ grpc_call **out_call) {
size_t i, j;
- grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
+ grpc_channel_stack *channel_stack =
+ grpc_channel_get_channel_stack(args->channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ *out_call = call;
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
- call->channel = channel;
- call->cq = cq;
- call->parent = parent_call;
+ call->channel = args->channel;
+ call->cq = args->cq;
+ call->parent = args->parent_call;
/* Always support no compression */
GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
- call->is_client = server_transport_data == NULL;
+ call->is_client = args->server_transport_data == NULL;
+ grpc_mdstr *path = NULL;
if (call->is_client) {
- GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
- for (i = 0; i < add_initial_metadata_count; i++) {
- call->send_extra_metadata[i].md = add_initial_metadata[i];
+ GPR_ASSERT(args->add_initial_metadata_count <
+ MAX_SEND_EXTRA_METADATA_COUNT);
+ for (i = 0; i < args->add_initial_metadata_count; i++) {
+ call->send_extra_metadata[i].md = args->add_initial_metadata[i];
+ if (args->add_initial_metadata[i]->key == GRPC_MDSTR_PATH) {
+ path = GRPC_MDSTR_REF(args->add_initial_metadata[i]->value);
+ }
}
- call->send_extra_metadata_count = (int)add_initial_metadata_count;
+ call->send_extra_metadata_count = (int)args->add_initial_metadata_count;
} else {
- GPR_ASSERT(add_initial_metadata_count == 0);
+ GPR_ASSERT(args->add_initial_metadata_count == 0);
call->send_extra_metadata_count = 0;
}
for (i = 0; i < 2; i++) {
@@ -257,84 +262,87 @@ grpc_call *grpc_call_create(
call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
}
}
- send_deadline = gpr_convert_clock_type(send_deadline, GPR_CLOCK_MONOTONIC);
+ gpr_timespec send_deadline =
+ gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
- if (parent_call != NULL) {
- GRPC_CALL_INTERNAL_REF(parent_call, "child");
+ if (args->parent_call != NULL) {
+ GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!parent_call->is_client);
+ GPR_ASSERT(!args->parent_call->is_client);
- gpr_mu_lock(&parent_call->mu);
+ gpr_mu_lock(&args->parent_call->mu);
- if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
+ if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
send_deadline = gpr_time_min(
gpr_convert_clock_type(send_deadline,
- parent_call->send_deadline.clock_type),
- parent_call->send_deadline);
+ args->parent_call->send_deadline.clock_type),
+ args->parent_call->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
/* TODO(ctiller): This should change to use the appropriate census start_op
* call. */
- if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
- GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
- grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
- parent_call->context[GRPC_CONTEXT_TRACING].value,
- NULL);
+ if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
+ GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ grpc_call_context_set(
+ call, GRPC_CONTEXT_TRACING,
+ args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else {
- GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
+ GPR_ASSERT(args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
}
- if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
+ if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
}
- if (parent_call->first_child == NULL) {
- parent_call->first_child = call;
+ if (args->parent_call->first_child == NULL) {
+ args->parent_call->first_child = call;
call->sibling_next = call->sibling_prev = call;
} else {
- call->sibling_next = parent_call->first_child;
- call->sibling_prev = parent_call->first_child->sibling_prev;
+ call->sibling_next = args->parent_call->first_child;
+ call->sibling_prev = args->parent_call->first_child->sibling_prev;
call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
call;
}
- gpr_mu_unlock(&parent_call->mu);
+ gpr_mu_unlock(&args->parent_call->mu);
}
call->send_deadline = send_deadline;
- GRPC_CHANNEL_INTERNAL_REF(channel, "call");
+ GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
- grpc_error *error = grpc_call_stack_init(
- &exec_ctx, channel_stack, 1, destroy_call, call, call->context,
- server_transport_data, send_deadline, CALL_STACK_FROM_CALL(call));
+ grpc_error *error =
+ grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
+ call->context, args->server_transport_data, path,
+ send_deadline, CALL_STACK_FROM_CALL(call));
if (error != GRPC_ERROR_NONE) {
grpc_status_code status;
const char *error_str;
grpc_error_get_status(error, &status, &error_str);
close_with_status(&exec_ctx, call, status, error_str);
- GRPC_ERROR_UNREF(error);
}
- if (cq != NULL) {
+ if (args->cq != NULL) {
GPR_ASSERT(
- pollset_set_alternative == NULL &&
+ args->pollset_set_alternative == NULL &&
"Only one of 'cq' and 'pollset_set_alternative' should be non-NULL.");
- GRPC_CQ_INTERNAL_REF(cq, "bind");
+ GRPC_CQ_INTERNAL_REF(args->cq, "bind");
call->pollent =
- grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
+ grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq));
}
- if (pollset_set_alternative != NULL) {
- call->pollent =
- grpc_polling_entity_create_from_pollset_set(pollset_set_alternative);
+ if (args->pollset_set_alternative != NULL) {
+ call->pollent = grpc_polling_entity_create_from_pollset_set(
+ args->pollset_set_alternative);
}
if (!grpc_polling_entity_is_empty(&call->pollent)) {
grpc_call_stack_set_pollset_or_pollset_set(
&exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
}
+ if (path != NULL) GRPC_MDSTR_UNREF(path);
+
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
- return call;
+ return error;
}
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
@@ -1038,9 +1046,14 @@ static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
+ grpc_error *error = bctl->error;
+ if (bctl->recv_final_op) {
+ GRPC_ERROR_UNREF(error);
+ error = GRPC_ERROR_NONE;
+ }
if (bctl->is_notify_tag_closure) {
/* unrefs bctl->error */
- grpc_exec_ctx_sched(exec_ctx, bctl->notify_tag, bctl->error, NULL);
+ grpc_closure_run(exec_ctx, bctl->notify_tag, error);
gpr_mu_lock(&call->mu);
bctl->call->used_batches =
(uint8_t)(bctl->call->used_batches &
@@ -1049,7 +1062,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
- grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, bctl->error,
+ grpc_cq_end_op(exec_ctx, bctl->call->cq, bctl->notify_tag, error,
finish_batch_completion, bctl, &bctl->cq_completion);
}
}
@@ -1198,6 +1211,14 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
+static void add_batch_error(batch_control *bctl, grpc_error *error) {
+ if (error == GRPC_ERROR_NONE) return;
+ if (bctl->error == GRPC_ERROR_NONE) {
+ bctl->error = GRPC_ERROR_CREATE("Call batch operation failed");
+ }
+ bctl->error = grpc_error_add_child(bctl->error, error);
+}
+
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
batch_control *bctl = bctlp;
@@ -1205,9 +1226,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&call->mu);
- if (error != GRPC_ERROR_NONE) {
- bctl->error = GRPC_ERROR_REF(error);
- } else {
+ add_batch_error(bctl, GRPC_ERROR_REF(error));
+ if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
grpc_metadata_batch_filter(md, recv_initial_filter, call);
@@ -1304,8 +1324,7 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
- GRPC_ERROR_UNREF(bctl->error);
- bctl->error = GRPC_ERROR_REF(error);
+ add_batch_error(bctl, GRPC_ERROR_REF(error));
gpr_mu_unlock(&call->mu);
if (gpr_unref(&bctl->steps_to_complete)) {
post_batch_completion(exec_ctx, bctl);
@@ -1341,6 +1360,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *stream_op = &bctl->op;
memset(stream_op, 0, sizeof(*stream_op));
+ stream_op->covered_by_poller = true;
if (nops == 0) {
GRPC_CALL_INTERNAL_REF(call, "completion");
@@ -1496,8 +1516,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, STATUS_FROM_API_OVERRIDE,
GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
}
- set_status_code(call, STATUS_FROM_API_OVERRIDE,
- (uint32_t)op->data.send_status_from_server.status);
+ if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
+ set_status_code(call, STATUS_FROM_API_OVERRIDE,
+ (uint32_t)op->data.send_status_from_server.status);
+ }
if (!prepare_application_metadata(
call,
(int)op->data.send_status_from_server.trailing_metadata_count,
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 3a78fe3aa3..18af41b7fb 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -49,15 +49,29 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
grpc_call *call, int success,
void *user_data);
-grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask,
- grpc_completion_queue *cq,
- /* if not NULL, it'll be used in lieu of \a cq */
- grpc_pollset_set *pollset_set_alternative,
- const void *server_transport_data,
- grpc_mdelem **add_initial_metadata,
- size_t add_initial_metadata_count,
- gpr_timespec send_deadline);
+typedef struct grpc_call_create_args {
+ grpc_channel *channel;
+
+ grpc_call *parent_call;
+ uint32_t propagation_mask;
+
+ grpc_completion_queue *cq;
+ /* if not NULL, it'll be used in lieu of cq */
+ grpc_pollset_set *pollset_set_alternative;
+
+ const void *server_transport_data;
+
+ grpc_mdelem **add_initial_metadata;
+ size_t add_initial_metadata_count;
+
+ gpr_timespec send_deadline;
+} grpc_call_create_args;
+
+/* Create a new call based on \a args.
+ Regardless of success or failure, always sets *call to a valid new call
+ object. */
+grpc_error *grpc_call_create(const grpc_call_create_args *args,
+ grpc_call **call);
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index 6adb70a987..92d783b78d 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -193,9 +193,21 @@ static grpc_call *grpc_channel_create_call_internal(
send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority);
}
- return grpc_call_create(channel, parent_call, propagation_mask, cq,
- pollset_set_alternative, NULL, send_metadata,
- num_metadata, deadline);
+ grpc_call_create_args args;
+ memset(&args, 0, sizeof(args));
+ args.channel = channel;
+ args.parent_call = parent_call;
+ args.propagation_mask = propagation_mask;
+ args.cq = cq;
+ args.pollset_set_alternative = pollset_set_alternative;
+ args.server_transport_data = NULL;
+ args.add_initial_metadata = send_metadata;
+ args.add_initial_metadata_count = num_metadata;
+ args.send_deadline = deadline;
+
+ grpc_call *call;
+ GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
+ return call;
}
grpc_call *grpc_channel_create_call(grpc_channel *channel,
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 5978884db8..4e0feb56ac 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -39,6 +39,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/pollset.h"
@@ -50,6 +51,9 @@
#include "src/core/lib/surface/event_string.h"
int grpc_trace_operation_failures;
+#ifndef NDEBUG
+int grpc_trace_pending_tags;
+#endif
typedef struct {
grpc_pollset_worker **worker;
@@ -67,6 +71,9 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
+ /** counter of how many things have ever been queued on this completion
+ queue; useful for avoiding locks when checking the queue */
+ gpr_atm things_queued_ever;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
@@ -121,15 +128,6 @@ void grpc_cq_global_shutdown(void) {
}
}
-struct grpc_cq_alarm {
- grpc_timer alarm;
- grpc_cq_completion completion;
- /** completion queue where events about this alarm will be posted */
- grpc_completion_queue *cq;
- /** user supplied tag */
- void *tag;
-};
-
grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
grpc_completion_queue *cc;
GPR_ASSERT(!reserved);
@@ -166,6 +164,7 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
cc->is_server_cq = 0;
cc->is_non_listening_server_cq = 0;
cc->num_pluckers = 0;
+ gpr_atm_no_barrier_store(&cc->things_queued_ever, 0);
#ifndef NDEBUG
cc->outstanding_tag_count = 0;
#endif
@@ -276,6 +275,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(found);
#endif
shutdown = gpr_unref(&cc->pending_events);
+ gpr_atm_no_barrier_fetch_add(&cc->things_queued_ever, 1);
if (!shutdown) {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@@ -313,13 +313,66 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GRPC_ERROR_UNREF(error);
}
+typedef struct {
+ gpr_atm last_seen_things_queued_ever;
+ grpc_completion_queue *cq;
+ gpr_timespec deadline;
+ grpc_cq_completion *stolen_completion;
+ void *tag; /* for pluck */
+ bool first_loop;
+} cq_is_finished_arg;
+
+static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
+ cq_is_finished_arg *a = arg;
+ grpc_completion_queue *cq = a->cq;
+ GPR_ASSERT(a->stolen_completion == NULL);
+ gpr_atm current_last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
+ gpr_mu_lock(cq->mu);
+ a->last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (cq->completed_tail != &cq->completed_head) {
+ a->stolen_completion = (grpc_cq_completion *)cq->completed_head.next;
+ cq->completed_head.next = a->stolen_completion->next & ~(uintptr_t)1;
+ if (a->stolen_completion == cq->completed_tail) {
+ cq->completed_tail = &cq->completed_head;
+ }
+ gpr_mu_unlock(cq->mu);
+ return true;
+ }
+ gpr_mu_unlock(cq->mu);
+ }
+ return !a->first_loop &&
+ gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+}
+
+#ifndef NDEBUG
+static void dump_pending_tags(grpc_completion_queue *cc) {
+ if (!grpc_trace_pending_tags) return;
+
+ gpr_strvec v;
+ gpr_strvec_init(&v);
+ gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
+ for (size_t i = 0; i < cc->outstanding_tag_count; i++) {
+ char *s;
+ gpr_asprintf(&s, " %p", cc->outstanding_tags[i]);
+ gpr_strvec_add(&v, s);
+ }
+ char *out = gpr_strvec_flatten(&v, NULL);
+ gpr_strvec_destroy(&v);
+ gpr_log(GPR_DEBUG, "%s", out);
+ gpr_free(out);
+}
+#else
+static void dump_pending_tags(grpc_completion_queue *cc) {}
+#endif
+
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
grpc_pollset_worker *worker = NULL;
- int first_loop = 1;
gpr_timespec now;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -333,11 +386,33 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
reserved));
GPR_ASSERT(!reserved);
+ dump_pending_tags(cc);
+
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
+ cq_is_finished_arg is_finished_arg = {
+ .last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cc->things_queued_ever),
+ .cq = cc,
+ .deadline = deadline,
+ .stolen_completion = NULL,
+ .tag = NULL,
+ .first_loop = true};
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
+ cq_is_next_finished, &is_finished_arg);
for (;;) {
+ if (is_finished_arg.stolen_completion != NULL) {
+ gpr_mu_unlock(cc->mu);
+ grpc_cq_completion *c = is_finished_arg.stolen_completion;
+ is_finished_arg.stolen_completion = NULL;
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ break;
+ }
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
cc->completed_head.next = c->next & ~(uintptr_t)1;
@@ -358,13 +433,13 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
- first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -387,13 +462,16 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
}
+ is_finished_arg.first_loop = false;
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_next", 0);
@@ -424,6 +502,37 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
GPR_UNREACHABLE_CODE(return );
}
+static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
+ cq_is_finished_arg *a = arg;
+ grpc_completion_queue *cq = a->cq;
+ GPR_ASSERT(a->stolen_completion == NULL);
+ gpr_atm current_last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
+ gpr_mu_lock(cq->mu);
+ a->last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cq->things_queued_ever);
+ grpc_cq_completion *c;
+ grpc_cq_completion *prev = &cq->completed_head;
+ while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ &cq->completed_head) {
+ if (c->tag == a->tag) {
+ prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
+ if (c == cq->completed_tail) {
+ cq->completed_tail = prev;
+ }
+ gpr_mu_unlock(cq->mu);
+ a->stolen_completion = c;
+ return true;
+ }
+ prev = c;
+ }
+ gpr_mu_unlock(cq->mu);
+ }
+ return !a->first_loop &&
+ gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+}
+
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec deadline, void *reserved) {
grpc_event ret;
@@ -431,8 +540,6 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
gpr_timespec now;
- int first_loop = 1;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -448,11 +555,33 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
}
GPR_ASSERT(!reserved);
+ dump_pending_tags(cc);
+
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
+ cq_is_finished_arg is_finished_arg = {
+ .last_seen_things_queued_ever =
+ gpr_atm_no_barrier_load(&cc->things_queued_ever),
+ .cq = cc,
+ .deadline = deadline,
+ .stolen_completion = NULL,
+ .tag = tag,
+ .first_loop = true};
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(
+ cq_is_pluck_finished, &is_finished_arg);
for (;;) {
+ if (is_finished_arg.stolen_completion != NULL) {
+ gpr_mu_unlock(cc->mu);
+ c = is_finished_arg.stolen_completion;
+ is_finished_arg.stolen_completion = NULL;
+ ret.type = GRPC_OP_COMPLETE;
+ ret.success = c->next & 1u;
+ ret.tag = c->tag;
+ c->done(&exec_ctx, c->done_arg, c);
+ break;
+ }
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
&cc->completed_head) {
@@ -485,17 +614,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
- first_loop = 0;
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
@@ -518,15 +648,18 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
GRPC_ERROR_UNREF(err);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
+ dump_pending_tags(cc);
break;
}
}
+ is_finished_arg.first_loop = false;
del_plucker(cc, tag, &worker);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
+ GPR_ASSERT(is_finished_arg.stolen_completion == NULL);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);
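The new things_queued_ever counter lets cq_is_next_finished and cq_is_pluck_finished skip the completion-queue mutex unless something has actually been enqueued since their last look. A standalone sketch of that generation-counter fast path using C11 atomics and pthreads; the types and names are illustrative, not gRPC's (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_long things_queued_ever; /* monotonically increasing generation */
  pthread_mutex_t mu;
  void *head;                     /* protected by mu */
} queue;

typedef struct {
  long last_seen; /* generation snapshot taken by the waiting consumer */
} waiter;

/* Producer: publish the item under the lock, then bump the generation. */
static void enqueue(queue *q, void *item) {
  pthread_mutex_lock(&q->mu);
  q->head = item; /* real code would append to a list */
  pthread_mutex_unlock(&q->mu);
  atomic_fetch_add_explicit(&q->things_queued_ever, 1, memory_order_relaxed);
}

/* Consumer fast path: only take the lock if the generation moved.
   Missing a racing enqueue is fine; the slow path rechecks under the lock. */
static void *try_dequeue(queue *q, waiter *w) {
  long current =
      atomic_load_explicit(&q->things_queued_ever, memory_order_relaxed);
  if (current == w->last_seen) return NULL; /* nothing new: stay lock-free */
  pthread_mutex_lock(&q->mu);
  w->last_seen = current;
  void *item = q->head;
  q->head = NULL;
  pthread_mutex_unlock(&q->mu);
  return item;
}

int main(void) {
  queue q;
  q.head = NULL;
  atomic_init(&q.things_queued_ever, 0);
  pthread_mutex_init(&q.mu, NULL);
  waiter w = {0};
  enqueue(&q, "hello");
  void *item = try_dequeue(&q, &w);
  if (item != NULL) printf("got: %s\n", (const char *)item);
  return 0;
}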
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 4dbf3aae63..c1cafba5f2 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -44,6 +44,9 @@
extern int grpc_cq_pluck_trace;
extern int grpc_cq_event_timeout_trace;
extern int grpc_trace_operation_failures;
+#ifndef NDEBUG
+extern int grpc_trace_pending_tags;
+#endif
typedef struct grpc_cq_completion {
/** user supplied tag */
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c
index 289f4ce8e8..7903f57a68 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.c
@@ -52,6 +52,7 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -184,12 +185,17 @@ void grpc_init(void) {
grpc_register_tracer("compression", &grpc_compression_trace);
grpc_register_tracer("queue_pluck", &grpc_cq_pluck_trace);
grpc_register_tracer("combiner", &grpc_combiner_trace);
+ grpc_register_tracer("server_channel", &grpc_server_channel_trace);
// Default pluck trace to 1
grpc_cq_pluck_trace = 1;
grpc_register_tracer("queue_timeout", &grpc_cq_event_timeout_trace);
// Default timeout trace to 1
grpc_cq_event_timeout_trace = 1;
grpc_register_tracer("op_failure", &grpc_trace_operation_failures);
+ grpc_register_tracer("resource_quota", &grpc_resource_quota_trace);
+#ifndef NDEBUG
+ grpc_register_tracer("pending_tags", &grpc_trace_pending_tags);
+#endif
grpc_security_pre_init();
grpc_iomgr_init();
grpc_executor_init();
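grpc_init now registers server_channel, resource_quota and, in debug builds, pending_tags tracers; in gRPC the registered names are normally switched on through the GRPC_TRACE environment variable. The sketch below is a toy, self-contained version of the name-to-flag registry idea, with a made-up MY_TRACE variable and illustrative identifiers, not gRPC's own machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { const char *name; int *flag; } tracer;
static tracer tracers[32];
static int num_tracers = 0;

/* Each module exposes an int flag and registers it by name at startup. */
static void register_tracer(const char *name, int *flag) {
  tracers[num_tracers].name = name;
  tracers[num_tracers].flag = flag;
  ++num_tracers;
}

/* Turn on any tracer named in a comma-separated environment variable. */
static void enable_tracers_from_env(const char *env_name) {
  const char *value = getenv(env_name);
  if (value == NULL) return;
  char *copy = strdup(value);
  for (char *tok = strtok(copy, ","); tok != NULL; tok = strtok(NULL, ",")) {
    for (int i = 0; i < num_tracers; i++) {
      if (strcmp(tok, tracers[i].name) == 0) *tracers[i].flag = 1;
    }
  }
  free(copy);
}

int pending_tags_trace = 0;
int server_channel_trace = 0;

int main(void) {
  register_tracer("pending_tags", &pending_tags_trace);
  register_tracer("server_channel", &server_channel_trace);
  enable_tracers_from_env("MY_TRACE"); /* e.g. MY_TRACE=pending_tags */
  if (pending_tags_trace) printf("pending_tags tracing enabled\n");
  return 0;
}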
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 7300d79b9f..3a90308058 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -71,6 +71,8 @@ typedef struct registered_method registered_method;
typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
+int grpc_server_channel_trace = 0;
+
typedef struct requested_call {
requested_call_type type;
size_t cq_idx;
@@ -280,6 +282,7 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->send_goaway = send_goaway;
+ op->set_accept_stream = true;
sc->slice = gpr_slice_from_copied_string("Server shutdown");
op->goaway_message = &sc->slice;
op->goaway_status = GRPC_STATUS_OK;
@@ -439,6 +442,13 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
chand->finish_destroy_channel_closure.cb_arg = chand;
+ if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(error);
+ gpr_log(GPR_INFO, "Disconnected client: %s", msg);
+ grpc_error_free_string(msg);
+ }
+ GRPC_ERROR_UNREF(error);
+
grpc_transport_op *op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
@@ -446,13 +456,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_channel_stack_element(
grpc_channel_get_channel_stack(chand->channel), 0),
op);
-
- if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
- gpr_log(GPR_INFO, "Disconnected client: %s", msg);
- grpc_error_free_string(msg);
- }
- GRPC_ERROR_UNREF(error);
}
static void cpstr(char **dest, size_t *capacity, grpc_mdstr *value) {
@@ -773,8 +776,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
}
- grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv_initial_metadata, error,
- NULL);
+ grpc_closure_run(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
static void server_mutate_op(grpc_call_element *elem,
@@ -829,11 +831,20 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
const void *transport_server_data) {
channel_data *chand = cd;
/* create a call */
- grpc_call *call = grpc_call_create(chand->channel, NULL, 0, NULL, NULL,
- transport_server_data, NULL, 0,
- gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_call_create_args args;
+ memset(&args, 0, sizeof(args));
+ args.channel = chand->channel;
+ args.server_transport_data = transport_server_data;
+ args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ grpc_call *call;
+ grpc_error *error = grpc_call_create(&args, &call);
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
+ if (error != GRPC_ERROR_NONE) {
+ got_initial_metadata(exec_ctx, elem, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
call_data *calld = elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index 551a40a4ff..a85d9f4964 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -40,6 +40,9 @@
extern const grpc_channel_filter grpc_server_top_filter;
+/** Lightweight tracing of server channel state */
+extern int grpc_server_channel_trace;
+
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
void grpc_server_add_listener(
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 68d05e3a85..fdb5307814 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -180,7 +180,8 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
*w->current = tracker->current_state;
tracker->watchers = w->next;
if (grpc_connectivity_state_trace) {
- gpr_log(GPR_DEBUG, "NOTIFY: %p", w->notify);
+ gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
+ w->notify);
}
grpc_exec_ctx_sched(exec_ctx, w->notify,
GRPC_ERROR_REF(tracker->current_error), NULL);
diff --git a/src/core/lib/transport/mdstr_hash_table.c b/src/core/lib/transport/mdstr_hash_table.c
new file mode 100644
index 0000000000..8e914c420b
--- /dev/null
+++ b/src/core/lib/transport/mdstr_hash_table.c
@@ -0,0 +1,157 @@
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/transport/metadata.h"
+
+struct grpc_mdstr_hash_table {
+ gpr_refcount refs;
+ size_t num_entries;
+ size_t size;
+ grpc_mdstr_hash_table_entry* entries;
+};
+
+// Helper function for insert and get operations that performs quadratic
+// probing (https://en.wikipedia.org/wiki/Quadratic_probing).
+static size_t grpc_mdstr_hash_table_find_index(
+ const grpc_mdstr_hash_table* table, const grpc_mdstr* key,
+ bool find_empty) {
+ for (size_t i = 0; i < table->size; ++i) {
+ const size_t idx = (key->hash + i * i) % table->size;
+ if (table->entries[idx].key == NULL) return find_empty ? idx : table->size;
+ if (table->entries[idx].key == key) return idx;
+ }
+ return table->size; // Not found.
+}
+
+static void grpc_mdstr_hash_table_add(
+ grpc_mdstr_hash_table* table, grpc_mdstr* key, void* value,
+ const grpc_mdstr_hash_table_vtable* vtable) {
+ GPR_ASSERT(value != NULL);
+ const size_t idx =
+ grpc_mdstr_hash_table_find_index(table, key, true /* find_empty */);
+ GPR_ASSERT(idx != table->size); // Table should never be full.
+ grpc_mdstr_hash_table_entry* entry = &table->entries[idx];
+ entry->key = GRPC_MDSTR_REF(key);
+ entry->value = vtable->copy_value(value);
+ entry->vtable = vtable;
+}
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
+ size_t num_entries, grpc_mdstr_hash_table_entry* entries) {
+ grpc_mdstr_hash_table* table = gpr_malloc(sizeof(*table));
+ memset(table, 0, sizeof(*table));
+ gpr_ref_init(&table->refs, 1);
+ table->num_entries = num_entries;
+ // Quadratic probing gets best performance when the table is no more
+ // than half full.
+ table->size = num_entries * 2;
+ const size_t entry_size = sizeof(grpc_mdstr_hash_table_entry) * table->size;
+ table->entries = gpr_malloc(entry_size);
+ memset(table->entries, 0, entry_size);
+ for (size_t i = 0; i < num_entries; ++i) {
+ grpc_mdstr_hash_table_entry* entry = &entries[i];
+ grpc_mdstr_hash_table_add(table, entry->key, entry->value, entry->vtable);
+ }
+ return table;
+}
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table) {
+ if (table != NULL) gpr_ref(&table->refs);
+ return table;
+}
+
+int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table) {
+ if (table != NULL && gpr_unref(&table->refs)) {
+ for (size_t i = 0; i < table->size; ++i) {
+ grpc_mdstr_hash_table_entry* entry = &table->entries[i];
+ if (entry->key != NULL) {
+ GRPC_MDSTR_UNREF(entry->key);
+ entry->vtable->destroy_value(entry->value);
+ }
+ }
+ gpr_free(table->entries);
+ gpr_free(table);
+ return 1;
+ }
+ return 0;
+}
+
+size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table) {
+ return table->num_entries;
+}
+
+void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* key) {
+ const size_t idx =
+ grpc_mdstr_hash_table_find_index(table, key, false /* find_empty */);
+ if (idx == table->size) return NULL; // Not found.
+ return table->entries[idx].value;
+}
+
+int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
+ const grpc_mdstr_hash_table* table2) {
+ // Compare by num_entries.
+ if (table1->num_entries < table2->num_entries) return -1;
+ if (table1->num_entries > table2->num_entries) return 1;
+ for (size_t i = 0; i < table1->num_entries; ++i) {
+ grpc_mdstr_hash_table_entry* e1 = &table1->entries[i];
+ grpc_mdstr_hash_table_entry* e2 = &table2->entries[i];
+ // Compare keys by hash value.
+ if (e1->key->hash < e2->key->hash) return -1;
+ if (e1->key->hash > e2->key->hash) return 1;
+ // Compare by vtable (pointer equality).
+ if (e1->vtable < e2->vtable) return -1;
+ if (e1->vtable > e2->vtable) return 1;
+ // Compare values via vtable.
+ const int value_result = e1->vtable->compare_value(e1->value, e2->value);
+ if (value_result != 0) return value_result;
+ }
+ return 0;
+}
+
+void grpc_mdstr_hash_table_iterate(
+ const grpc_mdstr_hash_table* table,
+ void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
+ void* user_data) {
+ for (size_t i = 0; i < table->size; ++i) {
+ if (table->entries[i].key != NULL) {
+ func(&table->entries[i], user_data);
+ }
+ }
+}
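The table above uses open addressing with quadratic probing and is sized to twice the number of entries so it stays at most half full. A standalone sketch of the probe sequence with plain string keys instead of grpc_mdstr, just to show the indexing:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 16 /* kept at most half full, as in the diff */

typedef struct { const char *key; int value; int used; } slot;

static size_t hash_str(const char *s) {
  size_t h = 5381;
  while (*s) h = h * 33 + (unsigned char)*s++;
  return h;
}

/* Quadratic probing: try (h + i*i) % size for i = 0, 1, 2, ... */
static size_t find_index(slot *table, const char *key, int find_empty) {
  size_t h = hash_str(key);
  for (size_t i = 0; i < TABLE_SIZE; ++i) {
    size_t idx = (h + i * i) % TABLE_SIZE;
    if (!table[idx].used) return find_empty ? idx : TABLE_SIZE;
    if (strcmp(table[idx].key, key) == 0) return idx;
  }
  return TABLE_SIZE; /* not found / no free slot reached */
}

int main(void) {
  slot table[TABLE_SIZE] = {{0}};
  size_t idx = find_index(table, "/svc/methodA", 1 /* find_empty */);
  table[idx] = (slot){"/svc/methodA", 42, 1};
  idx = find_index(table, "/svc/methodA", 0);
  printf("found value %d\n", idx == TABLE_SIZE ? -1 : table[idx].value);
  return 0;
}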
diff --git a/src/core/lib/transport/mdstr_hash_table.h b/src/core/lib/transport/mdstr_hash_table.h
new file mode 100644
index 0000000000..bceb4df93d
--- /dev/null
+++ b/src/core/lib/transport/mdstr_hash_table.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
+#define GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H
+
+#include "src/core/lib/transport/metadata.h"
+
+/** Hash table implementation.
+ *
+ * This implementation uses open addressing
+ * (https://en.wikipedia.org/wiki/Open_addressing) with quadratic
+ * probing (https://en.wikipedia.org/wiki/Quadratic_probing).
+ *
+ * The keys are \a grpc_mdstr objects. The values are arbitrary pointers
+ * with a common vtable.
+ *
+ * Hash tables are intentionally immutable, to avoid the need for locking.
+ */
+
+typedef struct grpc_mdstr_hash_table grpc_mdstr_hash_table;
+
+typedef struct grpc_mdstr_hash_table_vtable {
+ void (*destroy_value)(void* value);
+ void* (*copy_value)(void* value);
+ int (*compare_value)(void* value1, void* value2);
+} grpc_mdstr_hash_table_vtable;
+
+typedef struct grpc_mdstr_hash_table_entry {
+ grpc_mdstr* key;
+ void* value; /* Must not be NULL. */
+ const grpc_mdstr_hash_table_vtable* vtable;
+} grpc_mdstr_hash_table_entry;
+
+/** Creates a new hash table containing \a entries, which is an array
+ of length \a num_entries.
+ Creates its own copy of all keys and values from \a entries. */
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_create(
+ size_t num_entries, grpc_mdstr_hash_table_entry* entries);
+
+grpc_mdstr_hash_table* grpc_mdstr_hash_table_ref(grpc_mdstr_hash_table* table);
+/** Returns 1 when \a table is destroyed. */
+int grpc_mdstr_hash_table_unref(grpc_mdstr_hash_table* table);
+
+/** Returns the number of entries in \a table. */
+size_t grpc_mdstr_hash_table_num_entries(const grpc_mdstr_hash_table* table);
+
+/** Returns the value from \a table associated with \a key.
+ Returns NULL if \a key is not found. */
+void* grpc_mdstr_hash_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* key);
+
+/** Compares two hash tables.
+ The sort order is stable but undefined. */
+int grpc_mdstr_hash_table_cmp(const grpc_mdstr_hash_table* table1,
+ const grpc_mdstr_hash_table* table2);
+
+/** Iterates over the entries in \a table, calling \a func for each entry. */
+void grpc_mdstr_hash_table_iterate(
+ const grpc_mdstr_hash_table* table,
+ void (*func)(const grpc_mdstr_hash_table_entry* entry, void* user_data),
+ void* user_data);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_MDSTR_HASH_TABLE_H */
diff --git a/src/core/lib/transport/method_config.c b/src/core/lib/transport/method_config.c
new file mode 100644
index 0000000000..57d97700bf
--- /dev/null
+++ b/src/core/lib/transport/method_config.c
@@ -0,0 +1,340 @@
+//
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "src/core/lib/transport/method_config.h"
+
+#include <string.h>
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/transport/metadata.h"
+
+//
+// grpc_method_config
+//
+
+// bool vtable
+
+static void* bool_copy(void* valuep) {
+ bool value = *(bool*)valuep;
+ bool* new_value = gpr_malloc(sizeof(bool));
+ *new_value = value;
+ return new_value;
+}
+
+static int bool_cmp(void* v1, void* v2) {
+ bool b1 = *(bool*)v1;
+ bool b2 = *(bool*)v2;
+ if (!b1 && b2) return -1;
+ if (b1 && !b2) return 1;
+ return 0;
+}
+
+static grpc_mdstr_hash_table_vtable bool_vtable = {gpr_free, bool_copy,
+ bool_cmp};
+
+// timespec vtable
+
+static void* timespec_copy(void* valuep) {
+ gpr_timespec value = *(gpr_timespec*)valuep;
+ gpr_timespec* new_value = gpr_malloc(sizeof(gpr_timespec));
+ *new_value = value;
+ return new_value;
+}
+
+static int timespec_cmp(void* v1, void* v2) {
+ return gpr_time_cmp(*(gpr_timespec*)v1, *(gpr_timespec*)v2);
+}
+
+static grpc_mdstr_hash_table_vtable timespec_vtable = {gpr_free, timespec_copy,
+ timespec_cmp};
+
+// int32 vtable
+
+static void* int32_copy(void* valuep) {
+ int32_t value = *(int32_t*)valuep;
+ int32_t* new_value = gpr_malloc(sizeof(int32_t));
+ *new_value = value;
+ return new_value;
+}
+
+static int int32_cmp(void* v1, void* v2) {
+ int32_t i1 = *(int32_t*)v1;
+ int32_t i2 = *(int32_t*)v2;
+ if (i1 < i2) return -1;
+ if (i1 > i2) return 1;
+ return 0;
+}
+
+static grpc_mdstr_hash_table_vtable int32_vtable = {gpr_free, int32_copy,
+ int32_cmp};
+
+// Hash table keys.
+#define GRPC_METHOD_CONFIG_WAIT_FOR_READY "grpc.wait_for_ready" // bool
+#define GRPC_METHOD_CONFIG_TIMEOUT "grpc.timeout" // gpr_timespec
+#define GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES \
+ "grpc.max_request_message_bytes" // int32
+#define GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES \
+ "grpc.max_response_message_bytes" // int32
+
+struct grpc_method_config {
+ grpc_mdstr_hash_table* table;
+ grpc_mdstr* wait_for_ready_key;
+ grpc_mdstr* timeout_key;
+ grpc_mdstr* max_request_message_bytes_key;
+ grpc_mdstr* max_response_message_bytes_key;
+};
+
+grpc_method_config* grpc_method_config_create(
+ bool* wait_for_ready, gpr_timespec* timeout,
+ int32_t* max_request_message_bytes, int32_t* max_response_message_bytes) {
+ grpc_method_config* method_config = gpr_malloc(sizeof(grpc_method_config));
+ memset(method_config, 0, sizeof(grpc_method_config));
+ method_config->wait_for_ready_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_WAIT_FOR_READY);
+ method_config->timeout_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_TIMEOUT);
+ method_config->max_request_message_bytes_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_REQUEST_MESSAGE_BYTES);
+ method_config->max_response_message_bytes_key =
+ grpc_mdstr_from_string(GRPC_METHOD_CONFIG_MAX_RESPONSE_MESSAGE_BYTES);
+ grpc_mdstr_hash_table_entry entries[4];
+ size_t num_entries = 0;
+ if (wait_for_ready != NULL) {
+ entries[num_entries].key = method_config->wait_for_ready_key;
+ entries[num_entries].value = wait_for_ready;
+ entries[num_entries].vtable = &bool_vtable;
+ ++num_entries;
+ }
+ if (timeout != NULL) {
+ entries[num_entries].key = method_config->timeout_key;
+ entries[num_entries].value = timeout;
+ entries[num_entries].vtable = &timespec_vtable;
+ ++num_entries;
+ }
+ if (max_request_message_bytes != NULL) {
+ entries[num_entries].key = method_config->max_request_message_bytes_key;
+ entries[num_entries].value = max_request_message_bytes;
+ entries[num_entries].vtable = &int32_vtable;
+ ++num_entries;
+ }
+ if (max_response_message_bytes != NULL) {
+ entries[num_entries].key = method_config->max_response_message_bytes_key;
+ entries[num_entries].value = max_response_message_bytes;
+ entries[num_entries].vtable = &int32_vtable;
+ ++num_entries;
+ }
+ method_config->table = grpc_mdstr_hash_table_create(num_entries, entries);
+ return method_config;
+}
+
+grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config) {
+ grpc_mdstr_hash_table_ref(method_config->table);
+ return method_config;
+}
+
+void grpc_method_config_unref(grpc_method_config* method_config) {
+ if (grpc_mdstr_hash_table_unref(method_config->table)) {
+ GRPC_MDSTR_UNREF(method_config->wait_for_ready_key);
+ GRPC_MDSTR_UNREF(method_config->timeout_key);
+ GRPC_MDSTR_UNREF(method_config->max_request_message_bytes_key);
+ GRPC_MDSTR_UNREF(method_config->max_response_message_bytes_key);
+ gpr_free(method_config);
+ }
+}
+
+int grpc_method_config_cmp(const grpc_method_config* method_config1,
+ const grpc_method_config* method_config2) {
+ return grpc_mdstr_hash_table_cmp(method_config1->table,
+ method_config2->table);
+}
+
+const bool* grpc_method_config_get_wait_for_ready(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(method_config->table,
+ method_config->wait_for_ready_key);
+}
+
+const gpr_timespec* grpc_method_config_get_timeout(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(method_config->table,
+ method_config->timeout_key);
+}
+
+const int32_t* grpc_method_config_get_max_request_message_bytes(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(
+ method_config->table, method_config->max_request_message_bytes_key);
+}
+
+const int32_t* grpc_method_config_get_max_response_message_bytes(
+ const grpc_method_config* method_config) {
+ return grpc_mdstr_hash_table_get(
+ method_config->table, method_config->max_response_message_bytes_key);
+}
+
+//
+// grpc_method_config_table
+//
+
+static void method_config_unref(void* valuep) {
+ grpc_method_config_unref(valuep);
+}
+
+static void* method_config_ref(void* valuep) {
+ return grpc_method_config_ref(valuep);
+}
+
+static int method_config_cmp(void* valuep1, void* valuep2) {
+ return grpc_method_config_cmp(valuep1, valuep2);
+}
+
+static const grpc_mdstr_hash_table_vtable method_config_table_vtable = {
+ method_config_unref, method_config_ref, method_config_cmp};
+
+grpc_method_config_table* grpc_method_config_table_create(
+ size_t num_entries, grpc_method_config_table_entry* entries) {
+ grpc_mdstr_hash_table_entry* hash_table_entries =
+ gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) * num_entries);
+ for (size_t i = 0; i < num_entries; ++i) {
+ hash_table_entries[i].key = entries[i].method_name;
+ hash_table_entries[i].value = entries[i].method_config;
+ hash_table_entries[i].vtable = &method_config_table_vtable;
+ }
+ grpc_method_config_table* method_config_table =
+ grpc_mdstr_hash_table_create(num_entries, hash_table_entries);
+ gpr_free(hash_table_entries);
+ return method_config_table;
+}
+
+grpc_method_config_table* grpc_method_config_table_ref(
+ grpc_method_config_table* table) {
+ return grpc_mdstr_hash_table_ref(table);
+}
+
+void grpc_method_config_table_unref(grpc_method_config_table* table) {
+ grpc_mdstr_hash_table_unref(table);
+}
+
+int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
+ const grpc_method_config_table* table2) {
+ return grpc_mdstr_hash_table_cmp(table1, table2);
+}
+
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path) {
+ void* value = grpc_mdstr_hash_table_get(table, path);
+ // If we didn't find a match for the path, try looking for a wildcard
+ // entry (i.e., change "/service/method" to "/service/*").
+ if (value == NULL) {
+ const char* path_str = grpc_mdstr_as_c_string(path);
+ const char* sep = strrchr(path_str, '/') + 1;
+ const size_t len = (size_t)(sep - path_str);
+ char* buf = gpr_malloc(len + 2); // '*' and NUL
+ memcpy(buf, path_str, len);
+ buf[len] = '*';
+ buf[len + 1] = '\0';
+ grpc_mdstr* wildcard_path = grpc_mdstr_from_string(buf);
+ gpr_free(buf);
+ value = grpc_mdstr_hash_table_get(table, wildcard_path);
+ GRPC_MDSTR_UNREF(wildcard_path);
+ }
+ return value;
+}
+
+static void* copy_arg(void* p) { return grpc_method_config_table_ref(p); }
+
+static void destroy_arg(void* p) { grpc_method_config_table_unref(p); }
+
+static int cmp_arg(void* p1, void* p2) {
+ return grpc_method_config_table_cmp(p1, p2);
+}
+
+static grpc_arg_pointer_vtable arg_vtable = {copy_arg, destroy_arg, cmp_arg};
+
+grpc_arg grpc_method_config_table_create_channel_arg(
+ grpc_method_config_table* table) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_SERVICE_CONFIG;
+ arg.value.pointer.p = table;
+ arg.value.pointer.vtable = &arg_vtable;
+ return arg;
+}
+
+// State used by convert_entry() below.
+typedef struct conversion_state {
+ void* (*convert_value)(const grpc_method_config* method_config);
+ const grpc_mdstr_hash_table_vtable* vtable;
+ size_t num_entries;
+ grpc_mdstr_hash_table_entry* entries;
+} conversion_state;
+
+// A function to be passed to grpc_mdstr_hash_table_iterate() to create
+// a copy of the entries.
+static void convert_entry(const grpc_mdstr_hash_table_entry* entry,
+ void* user_data) {
+ conversion_state* state = user_data;
+ state->entries[state->num_entries].key = GRPC_MDSTR_REF(entry->key);
+ state->entries[state->num_entries].value = state->convert_value(entry->value);
+ state->entries[state->num_entries].vtable = state->vtable;
+ ++state->num_entries;
+}
+
+grpc_mdstr_hash_table* grpc_method_config_table_convert(
+ const grpc_method_config_table* table,
+ void* (*convert_value)(const grpc_method_config* method_config),
+ const grpc_mdstr_hash_table_vtable* vtable) {
+ // Create an array of the entries in the table with converted values.
+ conversion_state state;
+ state.convert_value = convert_value;
+ state.vtable = vtable;
+ state.num_entries = 0;
+ state.entries = gpr_malloc(sizeof(grpc_mdstr_hash_table_entry) *
+ grpc_mdstr_hash_table_num_entries(table));
+ grpc_mdstr_hash_table_iterate(table, convert_entry, &state);
+ // Create a new table based on the array we just constructed.
+ grpc_mdstr_hash_table* new_table =
+ grpc_mdstr_hash_table_create(state.num_entries, state.entries);
+ // Clean up the array.
+ for (size_t i = 0; i < state.num_entries; ++i) {
+ GRPC_MDSTR_UNREF(state.entries[i].key);
+ vtable->destroy_value(state.entries[i].value);
+ }
+ gpr_free(state.entries);
+ // Return the new table.
+ return new_table;
+}
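grpc_method_config_table_get falls back to a wildcard key by rewriting "/service/method" as "/service/*" before the second lookup. A standalone sketch of just that rewrite:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Given "/service/method", return a newly allocated "/service/*".
   Caller frees the result. Mirrors the fallback key built above. */
static char *wildcard_for_path(const char *path) {
  const char *sep = strrchr(path, '/');
  if (sep == NULL) return NULL;          /* not a /service/method path */
  size_t len = (size_t)(sep - path) + 1; /* keep the trailing '/' */
  char *buf = malloc(len + 2);           /* '*' plus NUL */
  memcpy(buf, path, len);
  buf[len] = '*';
  buf[len + 1] = '\0';
  return buf;
}

int main(void) {
  char *w = wildcard_for_path("/grpc.testing.EchoService/Echo");
  printf("%s\n", w); /* prints /grpc.testing.EchoService/* */
  free(w);
  return 0;
}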
diff --git a/src/core/lib/transport/method_config.h b/src/core/lib/transport/method_config.h
new file mode 100644
index 0000000000..58fedd9436
--- /dev/null
+++ b/src/core/lib/transport/method_config.h
@@ -0,0 +1,136 @@
+//
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
+#define GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H
+
+#include <stdbool.h>
+
+#include <grpc/impl/codegen/gpr_types.h>
+#include <grpc/impl/codegen/grpc_types.h>
+
+#include "src/core/lib/transport/mdstr_hash_table.h"
+#include "src/core/lib/transport/metadata.h"
+
+/// Per-method configuration.
+typedef struct grpc_method_config grpc_method_config;
+
+/// Creates a grpc_method_config with the specified parameters.
+/// Any parameter may be NULL to indicate that the value is unset.
+///
+/// \a wait_for_ready indicates whether the client should wait until the
+/// request deadline for the channel to become ready, even if there is a
+/// temporary failure before the deadline while attempting to connect.
+///
+/// \a timeout indicates the timeout for calls.
+///
+/// \a max_request_message_bytes and \a max_response_message_bytes
+/// indicate the maximum sizes of the request (checked when sending) and
+/// response (checked when receiving) messages.
+grpc_method_config* grpc_method_config_create(
+ bool* wait_for_ready, gpr_timespec* timeout,
+ int32_t* max_request_message_bytes, int32_t* max_response_message_bytes);
+
+grpc_method_config* grpc_method_config_ref(grpc_method_config* method_config);
+void grpc_method_config_unref(grpc_method_config* method_config);
+
+/// Compares two grpc_method_configs.
+/// The sort order is stable but undefined.
+int grpc_method_config_cmp(const grpc_method_config* method_config1,
+ const grpc_method_config* method_config2);
+
+/// These methods return NULL if the requested field is unset.
+/// The caller does NOT take ownership of the result.
+const bool* grpc_method_config_get_wait_for_ready(
+ const grpc_method_config* method_config);
+const gpr_timespec* grpc_method_config_get_timeout(
+ const grpc_method_config* method_config);
+const int32_t* grpc_method_config_get_max_request_message_bytes(
+ const grpc_method_config* method_config);
+const int32_t* grpc_method_config_get_max_response_message_bytes(
+ const grpc_method_config* method_config);
+
+/// A table of method configs.
+typedef grpc_mdstr_hash_table grpc_method_config_table;
+
+typedef struct grpc_method_config_table_entry {
+ /// The name is of one of the following forms:
+ /// service/method -- specifies exact service and method name
+ /// service/* -- matches all methods for the specified service
+ grpc_mdstr* method_name;
+ grpc_method_config* method_config;
+} grpc_method_config_table_entry;
+
+/// Takes new references to all keys and values in \a entries.
+grpc_method_config_table* grpc_method_config_table_create(
+ size_t num_entries, grpc_method_config_table_entry* entries);
+
+grpc_method_config_table* grpc_method_config_table_ref(
+ grpc_method_config_table* table);
+void grpc_method_config_table_unref(grpc_method_config_table* table);
+
+/// Compares two grpc_method_config_tables.
+/// The sort order is stable but undefined.
+int grpc_method_config_table_cmp(const grpc_method_config_table* table1,
+ const grpc_method_config_table* table2);
+
+/// Gets the method config for the specified \a path, which should be of
+/// the form "/service/method".
+/// Returns NULL if the method has no config.
+/// Caller does NOT own a reference to the result.
+///
+/// Note: This returns a void* instead of a grpc_method_config* so that
+/// it can also be used for tables constructed via
+/// grpc_method_config_table_convert().
+void* grpc_method_config_table_get(const grpc_mdstr_hash_table* table,
+ const grpc_mdstr* path);
+
+/// Returns a channel arg containing \a table.
+grpc_arg grpc_method_config_table_create_channel_arg(
+ grpc_method_config_table* table);
+
+/// Generates a new table from \a table whose values are converted to a
+/// new form via the \a convert_value function. The new table will use
+/// \a vtable for its values.
+///
+/// This is generally used to convert the table's value type from
+/// grpc_method_config to a simple struct containing only the parameters
+/// relevant to a particular filter, thus avoiding the need for a hash
+/// table lookup on the fast path. In that scenario, \a convert_value
+/// will return a new instance of the struct containing the values from
+/// the grpc_method_config, and \a vtable provides the methods for
+/// operating on the struct type.
+grpc_mdstr_hash_table* grpc_method_config_table_convert(
+ const grpc_method_config_table* table,
+ void* (*convert_value)(const grpc_method_config* method_config),
+ const grpc_mdstr_hash_table_vtable* vtable);
+
+#endif /* GRPC_CORE_LIB_TRANSPORT_METHOD_CONFIG_H */
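A hedged sketch of how these declarations might be wired into a channel arg. It assumes grpc_init() has been called, that the internal headers are reachable from the caller, and that the caller keeps the table alive (and eventually unrefs it via the arg's pointer) until the channel args have been copied:

#include <grpc/grpc.h>
#include <grpc/support/time.h>
#include "src/core/lib/transport/method_config.h"

static grpc_arg make_service_config_arg(void) {
  /* 10-second timeout, wait_for_ready enabled, size limits left unset. */
  bool wait_for_ready = true;
  gpr_timespec timeout = gpr_time_from_seconds(10, GPR_TIMESPAN);
  grpc_method_config *config =
      grpc_method_config_create(&wait_for_ready, &timeout, NULL, NULL);

  grpc_method_config_table_entry entry;
  entry.method_name = grpc_mdstr_from_string("/myservice/*"); /* wildcard */
  entry.method_config = config;
  grpc_method_config_table *table =
      grpc_method_config_table_create(1, &entry);

  /* The table took its own references; drop ours. */
  GRPC_MDSTR_UNREF(entry.method_name);
  grpc_method_config_unref(config);

  /* The arg's pointer vtable refs/unrefs the table when channel args are
     copied or destroyed; the original table still needs an unref once the
     arg has been consumed. */
  return grpc_method_config_table_create_channel_arg(table);
}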
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
index 5e0352a467..8b22592b45 100644
--- a/src/core/lib/transport/static_metadata.c
+++ b/src/core/lib/transport/static_metadata.c
@@ -126,9 +126,9 @@ const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {
"if-range",
"if-unmodified-since",
"last-modified",
+ "lb-cost-bin",
+ "lb-token",
"link",
- "load-reporting-initial",
- "load-reporting-trailing",
"location",
"max-forwards",
":method",
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 5b9ee1a60a..28ad6f2961 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -175,12 +175,12 @@ extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (&grpc_static_mdstr_table[62])
/* "last-modified" */
#define GRPC_MDSTR_LAST_MODIFIED (&grpc_static_mdstr_table[63])
+/* "lb-cost-bin" */
+#define GRPC_MDSTR_LB_COST_BIN (&grpc_static_mdstr_table[64])
+/* "lb-token" */
+#define GRPC_MDSTR_LB_TOKEN (&grpc_static_mdstr_table[65])
/* "link" */
-#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[64])
-/* "load-reporting-initial" */
-#define GRPC_MDSTR_LOAD_REPORTING_INITIAL (&grpc_static_mdstr_table[65])
-/* "load-reporting-trailing" */
-#define GRPC_MDSTR_LOAD_REPORTING_TRAILING (&grpc_static_mdstr_table[66])
+#define GRPC_MDSTR_LINK (&grpc_static_mdstr_table[66])
/* "location" */
#define GRPC_MDSTR_LOCATION (&grpc_static_mdstr_table[67])
/* "max-forwards" */
@@ -337,13 +337,12 @@ extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];
#define GRPC_MDELEM_IF_UNMODIFIED_SINCE_EMPTY (&grpc_static_mdelem_table[44])
/* "last-modified": "" */
#define GRPC_MDELEM_LAST_MODIFIED_EMPTY (&grpc_static_mdelem_table[45])
+/* "lb-cost-bin": "" */
+#define GRPC_MDELEM_LB_COST_BIN_EMPTY (&grpc_static_mdelem_table[46])
+/* "lb-token": "" */
+#define GRPC_MDELEM_LB_TOKEN_EMPTY (&grpc_static_mdelem_table[47])
/* "link": "" */
-#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[46])
-/* "load-reporting-initial": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_INITIAL_EMPTY (&grpc_static_mdelem_table[47])
-/* "load-reporting-trailing": "" */
-#define GRPC_MDELEM_LOAD_REPORTING_TRAILING_EMPTY \
- (&grpc_static_mdelem_table[48])
+#define GRPC_MDELEM_LINK_EMPTY (&grpc_static_mdelem_table[48])
/* "location": "" */
#define GRPC_MDELEM_LOCATION_EMPTY (&grpc_static_mdelem_table[49])
/* "max-forwards": "" */
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 82fc605218..75aec7a5b4 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -46,8 +46,9 @@
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p REF %d->%d %s", refcount->object_type,
- refcount, refcount->destroy.cb_arg, (int)val, (int)val + 1, reason);
+ gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
@@ -58,8 +59,9 @@ void grpc_stream_ref(grpc_stream_refcount *refcount) {
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
const char *reason) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
- gpr_log(GPR_DEBUG, "%s %p:%p UNREF %d->%d %s", refcount->object_type,
- refcount, refcount->destroy.cb_arg, (int)val, (int)val - 1, reason);
+ gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
+ refcount->object_type, refcount, refcount->destroy.cb_arg, val,
+ val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
grpc_stream_refcount *refcount) {
@@ -274,3 +276,28 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
op->op.on_consumed = &op->outer_on_complete;
return &op->op;
}
+
+typedef struct {
+ grpc_closure outer_on_complete;
+ grpc_closure *inner_on_complete;
+ grpc_transport_stream_op op;
+} made_transport_stream_op;
+
+static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ made_transport_stream_op *op = arg;
+ grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
+ NULL);
+ gpr_free(op);
+}
+
+grpc_transport_stream_op *grpc_make_transport_stream_op(
+ grpc_closure *on_complete) {
+ made_transport_stream_op *op = gpr_malloc(sizeof(*op));
+ grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
+ op);
+ op->inner_on_complete = on_complete;
+ memset(&op->op, 0, sizeof(op->op));
+ op->op.on_complete = &op->outer_on_complete;
+ return &op->op;
+}
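grpc_make_transport_stream_op allocates the op together with a wrapper closure that forwards to the caller's closure and then frees the allocation. A standalone sketch of that self-freeing wrapper pattern with plain function pointers, no gRPC types:

#include <stdio.h>
#include <stdlib.h>

typedef void (*callback_fn)(void *arg, int status);
typedef struct { callback_fn cb; void *cb_arg; } closure;

/* The "operation" owns a wrapper closure plus the user's inner closure. */
typedef struct {
  closure outer_on_complete; /* what the machinery actually invokes */
  closure inner_on_complete; /* what the caller asked to be run */
  int payload;               /* stand-in for the real op fields */
} made_op;

/* Wrapper callback: run the caller's closure, then free the whole op. */
static void destroy_made_op(void *arg, int status) {
  made_op *op = arg;
  op->inner_on_complete.cb(op->inner_on_complete.cb_arg, status);
  free(op);
}

static made_op *make_op(callback_fn cb, void *cb_arg) {
  made_op *op = malloc(sizeof(*op));
  op->outer_on_complete.cb = destroy_made_op;
  op->outer_on_complete.cb_arg = op;
  op->inner_on_complete.cb = cb;
  op->inner_on_complete.cb_arg = cb_arg;
  op->payload = 0;
  return op;
}

static void my_done(void *arg, int status) {
  printf("done: %s, status=%d\n", (const char *)arg, status);
}

int main(void) {
  made_op *op = make_op(my_done, "stream op finished");
  /* ... a transport would fill in op->payload and process it ... */
  op->outer_on_complete.cb(op->outer_on_complete.cb_arg, 0); /* completes */
  return 0;
}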
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 8dc393fd61..50253ebad1 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -113,6 +113,10 @@ typedef struct grpc_transport_stream_op {
have been completed. */
grpc_closure *on_complete;
+  /** Is the completion of this op covered by a poller? If false, the op should
+      complete independently of any pollset being polled. */
+ bool covered_by_poller;
+
/** Send initial metadata to the peer, from the provided metadata batch.
idempotent_request MUST be set if this is non-null */
grpc_metadata_batch *send_initial_metadata;
@@ -252,6 +256,7 @@ void grpc_transport_stream_op_add_close(grpc_transport_stream_op *op,
gpr_slice *optional_message);
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op);
+char *grpc_transport_op_string(grpc_transport_op *op);
/* Send a batch of operations on a transport
@@ -293,6 +298,10 @@ char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
+/* Allocate a grpc_transport_stream_op, and preconfigure its on_complete closure
+   to call \a on_consumed and then delete the returned transport stream op */
+grpc_transport_stream_op *grpc_make_transport_stream_op(
+ grpc_closure *on_consumed);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c
index 138591db2a..533ec52077 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.c
@@ -41,6 +41,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
+#include "src/core/lib/transport/connectivity_state.h"
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
@@ -72,56 +73,51 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
char *tmp;
char *out;
- int first = 1;
gpr_strvec b;
gpr_strvec_init(&b);
+ gpr_strvec_add(
+ &b, gpr_strdup(op->covered_by_poller ? "[COVERED]" : "[UNCOVERED]"));
+
if (op->send_initial_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_INITIAL_METADATA{"));
put_metadata_list(&b, *op->send_initial_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->send_message != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d",
op->send_message->flags, op->send_message->length);
gpr_strvec_add(&b, tmp);
}
if (op->send_trailing_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("SEND_TRAILING_METADATA{"));
put_metadata_list(&b, *op->send_trailing_metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
}
if (op->recv_initial_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_INITIAL_METADATA"));
}
if (op->recv_message != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_MESSAGE"));
}
if (op->recv_trailing_metadata != NULL) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
gpr_strvec_add(&b, gpr_strdup("RECV_TRAILING_METADATA"));
}
if (op->cancel_error != GRPC_ERROR_NONE) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
grpc_error_free_string(msg);
@@ -129,8 +125,7 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
}
if (op->close_error != GRPC_ERROR_NONE) {
- if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
- first = 0;
+ gpr_strvec_add(&b, gpr_strdup(" "));
const char *msg = grpc_error_string(op->close_error);
gpr_asprintf(&tmp, "CLOSE:%s", msg);
grpc_error_free_string(msg);
@@ -143,6 +138,82 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
return out;
}
+char *grpc_transport_op_string(grpc_transport_op *op) {
+ char *tmp;
+ char *out;
+ bool first = true;
+
+ gpr_strvec b;
+ gpr_strvec_init(&b);
+
+ if (op->on_connectivity_state_change != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ if (op->connectivity_state != NULL) {
+ gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s",
+ op->on_connectivity_state_change,
+ grpc_connectivity_state_name(*op->connectivity_state));
+ gpr_strvec_add(&b, tmp);
+ } else {
+ gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:unsubscribe",
+ op->on_connectivity_state_change);
+ gpr_strvec_add(&b, tmp);
+ }
+ }
+
+ if (op->disconnect_with_error != GRPC_ERROR_NONE) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ const char *err = grpc_error_string(op->disconnect_with_error);
+ gpr_asprintf(&tmp, "DISCONNECT:%s", err);
+ gpr_strvec_add(&b, tmp);
+ grpc_error_free_string(err);
+ }
+
+ if (op->send_goaway) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ char *msg = op->goaway_message == NULL
+ ? "null"
+ : gpr_dump_slice(*op->goaway_message,
+ GPR_DUMP_ASCII | GPR_DUMP_HEX);
+ gpr_asprintf(&tmp, "SEND_GOAWAY:status=%d:msg=%s", op->goaway_status, msg);
+ if (op->goaway_message != NULL) gpr_free(msg);
+ gpr_strvec_add(&b, tmp);
+ }
+
+ if (op->set_accept_stream) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_asprintf(&tmp, "SET_ACCEPT_STREAM:%p(%p,...)", op->set_accept_stream_fn,
+ op->set_accept_stream_user_data);
+ gpr_strvec_add(&b, tmp);
+ }
+
+ if (op->bind_pollset != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET"));
+ }
+
+ if (op->bind_pollset_set != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
+ }
+
+ if (op->send_ping != NULL) {
+ if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
+ first = false;
+ gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
+ }
+
+ out = gpr_strvec_flatten(&b, NULL);
+ gpr_strvec_destroy(&b);
+
+ return out;
+}
+
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_stream_op *op) {
char *str = grpc_transport_stream_op_string(op);
diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c
index e91c6316e7..749b46e19f 100644
--- a/src/core/lib/tsi/ssl_transport_security.c
+++ b/src/core/lib/tsi/ssl_transport_security.c
@@ -31,6 +31,9 @@
*
*/
+#include "src/core/lib/iomgr/sockaddr.h"
+
+#include "src/core/lib/iomgr/socket_utils.h"
#include "src/core/lib/tsi/ssl_transport_security.h"
#include <grpc/support/port_platform.h>
@@ -38,13 +41,6 @@
#include <limits.h>
#include <string.h>
-/* TODO(jboeuf): refactor inet_ntop into a portability header. */
-#ifdef GPR_WINSOCK_SOCKET
-#include <ws2tcpip.h>
-#else
-#include <arpa/inet.h>
-#endif
-
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
@@ -353,8 +349,8 @@ static tsi_result add_subject_alt_names_properties_to_peer(
result = TSI_INTERNAL_ERROR;
break;
}
- const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data,
- ntop_buf, INET6_ADDRSTRLEN);
+ const char *name = grpc_inet_ntop(af, subject_alt_name->d.iPAddress->data,
+ ntop_buf, INET6_ADDRSTRLEN);
if (name == NULL) {
gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet.");
result = TSI_INTERNAL_ERROR;
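The TSI change above routes inet_ntop through the grpc_inet_ntop portability wrapper so the Windows/POSIX spellings live in one header. For reference, a standalone POSIX example of the underlying call being wrapped:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void) {
  /* Raw 4-byte IPv4 address, as it would appear in an ASN.1 octet string. */
  unsigned char addr[4] = {192, 0, 2, 10};
  char buf[INET6_ADDRSTRLEN];
  const char *name = inet_ntop(AF_INET, addr, buf, sizeof(buf));
  if (name == NULL) {
    perror("inet_ntop");
    return 1;
  }
  printf("%s\n", name); /* prints 192.0.2.10 */
  return 0;
}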
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
index d0b5f5c702..d339ed327f 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.c
@@ -35,12 +35,12 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
}
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.c
index 7a7a9ce477..2efd9cd1ad 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.c
@@ -35,8 +35,8 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
extern void grpc_lb_policy_grpclb_init(void);
extern void grpc_lb_policy_grpclb_shutdown(void);
extern void grpc_lb_policy_pick_first_init(void);
@@ -55,8 +55,8 @@ extern void census_grpc_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
grpc_register_plugin(grpc_lb_policy_grpclb_init,
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
index ad4ddf0ff4..8b18af699d 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
@@ -35,8 +35,8 @@
extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_client_config_init(void);
-extern void grpc_client_config_shutdown(void);
+extern void grpc_client_channel_init(void);
+extern void grpc_client_channel_shutdown(void);
extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
@@ -55,8 +55,8 @@ extern void census_grpc_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_chttp2_plugin_init,
grpc_chttp2_plugin_shutdown);
- grpc_register_plugin(grpc_client_config_init,
- grpc_client_config_shutdown);
+ grpc_register_plugin(grpc_client_channel_init,
+ grpc_client_channel_shutdown);
grpc_register_plugin(grpc_resolver_dns_native_init,
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
diff --git a/src/cpp/client/cronet_credentials.cc b/src/cpp/client/cronet_credentials.cc
new file mode 100644
index 0000000000..60cad097db
--- /dev/null
+++ b/src/cpp/client/cronet_credentials.cc
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/security/credentials.h>
+
+#include <grpc++/channel.h>
+#include <grpc++/support/channel_arguments.h>
+#include <grpc/grpc_cronet.h>
+#include "src/cpp/client/create_channel_internal.h"
+
+namespace grpc {
+
+class CronetChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
+ public:
+ CronetChannelCredentialsImpl(void* engine) : engine_(engine) {}
+
+ std::shared_ptr<grpc::Channel> CreateChannel(
+ const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
+ grpc_channel_args channel_args;
+ args.SetChannelArgs(&channel_args);
+ return CreateChannelInternal(
+ "", grpc_cronet_secure_channel_create(engine_, target.c_str(),
+ &channel_args, nullptr));
+ }
+
+ SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE {
+ return nullptr;
+ }
+
+ private:
+ void* engine_;
+};
+
+std::shared_ptr<ChannelCredentials> CronetChannelCredentials(void* engine) {
+ return std::shared_ptr<ChannelCredentials>(
+ new CronetChannelCredentialsImpl(engine));
+}
+
+} // namespace grpc
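
For reference, a minimal sketch of how the new CronetChannelCredentials() factory could be used from application code; the engine handle, the target address, the declaring header, and the use of grpc::CreateCustomChannel() are illustrative assumptions rather than part of this change:

    #include <grpc++/create_channel.h>
    #include <grpc++/security/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    std::shared_ptr<grpc::Channel> MakeCronetChannel(void* cronet_engine) {
      // cronet_engine is assumed to be a valid Cronet engine handle obtained
      // from the platform-specific Cronet API.
      auto creds = grpc::CronetChannelCredentials(cronet_engine);
      grpc::ChannelArguments args;
      return grpc::CreateCustomChannel("example.com:443", creds, args);
    }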
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index f297ae8587..d136d49c89 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -34,6 +34,7 @@
#include <sstream>
+#include <grpc++/resource_quota.h>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_args.h"
@@ -113,6 +114,13 @@ void ChannelArguments::SetUserAgentPrefix(
}
}
+void ChannelArguments::SetResourceQuota(
+ const grpc::ResourceQuota& resource_quota) {
+ SetPointerWithVtable(GRPC_ARG_RESOURCE_QUOTA,
+ resource_quota.c_resource_quota(),
+ grpc_resource_quota_arg_vtable());
+}
+
void ChannelArguments::SetInt(const grpc::string& key, int value) {
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
@@ -127,12 +135,18 @@ void ChannelArguments::SetPointer(const grpc::string& key, void* value) {
static const grpc_arg_pointer_vtable vtable = {
&PointerVtableMembers::Copy, &PointerVtableMembers::Destroy,
&PointerVtableMembers::Compare};
+ SetPointerWithVtable(key, value, &vtable);
+}
+
+void ChannelArguments::SetPointerWithVtable(
+ const grpc::string& key, void* value,
+ const grpc_arg_pointer_vtable* vtable) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
strings_.push_back(key);
arg.key = const_cast<char*>(strings_.back().c_str());
arg.value.pointer.p = value;
- arg.value.pointer.vtable = &vtable;
+ arg.value.pointer.vtable = vtable;
args_.push_back(arg);
}
diff --git a/src/cpp/common/resource_quota_cc.cc b/src/cpp/common/resource_quota_cc.cc
new file mode 100644
index 0000000000..335896ab91
--- /dev/null
+++ b/src/cpp/common/resource_quota_cc.cc
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/resource_quota.h>
+#include <grpc/grpc.h>
+
+namespace grpc {
+
+ResourceQuota::ResourceQuota() : impl_(grpc_resource_quota_create(nullptr)) {}
+
+ResourceQuota::ResourceQuota(const grpc::string& name)
+ : impl_(grpc_resource_quota_create(name.c_str())) {}
+
+ResourceQuota::~ResourceQuota() { grpc_resource_quota_unref(impl_); }
+
+ResourceQuota& ResourceQuota::Resize(size_t new_size) {
+ grpc_resource_quota_resize(impl_, new_size);
+ return *this;
+}
+
+} // namespace grpc
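
A short sketch of how the new ResourceQuota wrapper and ChannelArguments::SetResourceQuota() might be combined on the client side; the quota name, the 64 MiB size, and the channel target are illustrative assumptions:

    #include <grpc++/create_channel.h>
    #include <grpc++/resource_quota.h>
    #include <grpc++/security/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    std::shared_ptr<grpc::Channel> MakeBoundedChannel() {
      grpc::ResourceQuota quota("client-quota");
      quota.Resize(64 * 1024 * 1024);  // cap buffer memory at roughly 64 MiB

      grpc::ChannelArguments args;
      args.SetResourceQuota(quota);  // takes a ref on the underlying C quota

      return grpc::CreateCustomChannel("localhost:50051",
                                       grpc::InsecureChannelCredentials(), args);
    }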
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index 2980b16c56..00a90bb184 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -34,7 +34,9 @@
#include <grpc++/server_builder.h>
#include <grpc++/impl/service_type.h>
+#include <grpc++/resource_quota.h>
#include <grpc++/server.h>
+#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
@@ -54,6 +56,8 @@ static void do_plugin_list_init(void) {
ServerBuilder::ServerBuilder()
: max_receive_message_size_(-1),
max_send_message_size_(-1),
+ sync_server_settings_(SyncServerSettings()),
+ resource_quota_(nullptr),
generic_service_(nullptr) {
gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
for (auto it = g_plugin_factory_list->begin();
@@ -61,6 +65,7 @@ ServerBuilder::ServerBuilder()
auto& factory = *it;
plugins_.emplace_back(factory());
}
+
// all compression algorithms enabled by default.
enabled_compression_algorithms_bitset_ =
(1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
@@ -70,6 +75,12 @@ ServerBuilder::ServerBuilder()
sizeof(maybe_default_compression_algorithm_));
}
+ServerBuilder::~ServerBuilder() {
+ if (resource_quota_ != nullptr) {
+ grpc_resource_quota_unref(resource_quota_);
+ }
+}
+
std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
bool is_frequently_polled) {
ServerCompletionQueue* cq = new ServerCompletionQueue(is_frequently_polled);
@@ -94,7 +105,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
gpr_log(GPR_ERROR,
"Adding multiple AsyncGenericService is unsupported for now. "
"Dropping the service %p",
- service);
+ (void*)service);
} else {
generic_service_ = service;
}
@@ -107,6 +118,25 @@ ServerBuilder& ServerBuilder::SetOption(
return *this;
}
+ServerBuilder& ServerBuilder::SetSyncServerOption(
+ ServerBuilder::SyncServerOption option, int val) {
+ switch (option) {
+ case NUM_CQS:
+ sync_server_settings_.num_cqs = val;
+ break;
+ case MIN_POLLERS:
+ sync_server_settings_.min_pollers = val;
+ break;
+ case MAX_POLLERS:
+ sync_server_settings_.max_pollers = val;
+ break;
+ case CQ_TIMEOUT_MSEC:
+ sync_server_settings_.cq_timeout_msec = val;
+ break;
+ }
+ return *this;
+}
+
ServerBuilder& ServerBuilder::SetCompressionAlgorithmSupportStatus(
grpc_compression_algorithm algorithm, bool enabled) {
if (enabled) {
@@ -130,6 +160,16 @@ ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm(
return *this;
}
+ServerBuilder& ServerBuilder::SetResourceQuota(
+ const grpc::ResourceQuota& resource_quota) {
+ if (resource_quota_ != nullptr) {
+ grpc_resource_quota_unref(resource_quota_);
+ }
+ resource_quota_ = resource_quota.c_resource_quota();
+ grpc_resource_quota_ref(resource_quota_);
+ return *this;
+}
+
ServerBuilder& ServerBuilder::AddListeningPort(
const grpc::string& addr, std::shared_ptr<ServerCredentials> creds,
int* selected_port) {
@@ -139,35 +179,24 @@ ServerBuilder& ServerBuilder::AddListeningPort(
}
std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
- std::unique_ptr<ThreadPoolInterface> thread_pool;
- bool has_sync_methods = false;
- for (auto it = services_.begin(); it != services_.end(); ++it) {
- if ((*it)->service->has_synchronous_methods()) {
- if (!thread_pool) {
- thread_pool.reset(CreateDefaultThreadPool());
- has_sync_methods = true;
- break;
- }
- }
- }
ChannelArguments args;
for (auto option = options_.begin(); option != options_.end(); ++option) {
(*option)->UpdateArguments(&args);
(*option)->UpdatePlugins(&plugins_);
}
+
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
- if (!thread_pool && (*plugin)->has_sync_methods()) {
- thread_pool.reset(CreateDefaultThreadPool());
- has_sync_methods = true;
- }
(*plugin)->UpdateChannelArguments(&args);
}
+
if (max_receive_message_size_ >= 0) {
args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_);
}
+
if (max_send_message_size_ >= 0) {
args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, max_send_message_size_);
}
+
args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
enabled_compression_algorithms_bitset_);
if (maybe_default_compression_level_.is_set) {
@@ -178,27 +207,89 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
maybe_default_compression_algorithm_.algorithm);
}
- std::unique_ptr<Server> server(new Server(thread_pool.release(), true,
- max_receive_message_size_, &args));
+
+ if (resource_quota_ != nullptr) {
+ args.SetPointerWithVtable(GRPC_ARG_RESOURCE_QUOTA, resource_quota_,
+ grpc_resource_quota_arg_vtable());
+ }
+
+  // == Determine if the server has any synchronous methods ==
+ bool has_sync_methods = false;
+ for (auto it = services_.begin(); it != services_.end(); ++it) {
+ if ((*it)->service->has_synchronous_methods()) {
+ has_sync_methods = true;
+ break;
+ }
+ }
+
+ if (!has_sync_methods) {
+ for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
+ if ((*plugin)->has_sync_methods()) {
+ has_sync_methods = true;
+ break;
+ }
+ }
+ }
+
+  // If this is a Sync server, i.e. a server exposing the sync API, then the server
+  // needs to create some completion queues to listen for incoming requests.
+ // 'sync_server_cqs' are those internal completion queues.
+ //
+ // This is different from the completion queues added to the server via
+ // ServerBuilder's AddCompletionQueue() method (those completion queues
+  // are in the 'cqs_' member variable of the ServerBuilder object)
+ std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
+ sync_server_cqs(std::make_shared<
+ std::vector<std::unique_ptr<ServerCompletionQueue>>>());
+
+ if (has_sync_methods) {
+ // This is a Sync server
+ gpr_log(GPR_INFO,
+ "Synchronous server. Num CQs: %d, Min pollers: %d, Max Pollers: "
+ "%d, CQ timeout (msec): %d",
+ sync_server_settings_.num_cqs, sync_server_settings_.min_pollers,
+ sync_server_settings_.max_pollers,
+ sync_server_settings_.cq_timeout_msec);
+
+ // Create completion queues to listen to incoming rpc requests
+ for (int i = 0; i < sync_server_settings_.num_cqs; i++) {
+ sync_server_cqs->emplace_back(new ServerCompletionQueue());
+ }
+ }
+
+ std::unique_ptr<Server> server(new Server(
+ max_receive_message_size_, &args, sync_server_cqs,
+ sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
+ sync_server_settings_.cq_timeout_msec));
+
ServerInitializer* initializer = server->initializer();
- // If the server has atleast one sync methods, we know that this is a Sync
- // server or a Hybrid server and the completion queue (server->cq_) would be
- // frequently polled.
- int num_frequently_polled_cqs = has_sync_methods ? 1 : 0;
-
- for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) {
- // A completion queue that is not polled frequently (by calling Next() or
- // AsyncNext()) is not safe to use for listening to incoming channels.
- // Register all such completion queues as non-listening completion queues
- // with the GRPC core library.
- if ((*cq)->IsFrequentlyPolled()) {
- grpc_server_register_completion_queue(server->server_, (*cq)->cq(),
+  // Register all the completion queues with the server, i.e.:
+ // 1. sync_server_cqs: internal completion queues created IF this is a sync
+ // server
+ // 2. cqs_: Completion queues added via AddCompletionQueue() call
+
+ // All sync cqs (if any) are frequently polled by ThreadManager
+ int num_frequently_polled_cqs = sync_server_cqs->size();
+
+ for (auto it = sync_server_cqs->begin(); it != sync_server_cqs->end(); ++it) {
+ grpc_server_register_completion_queue(server->server_, (*it)->cq(),
+ nullptr);
+ }
+
+  // cqs_ contains the completion queues added by calling the ServerBuilder's
+  // AddCompletionQueue() API. Some of them may not be frequently polled (i.e. by
+  // calling Next() or AsyncNext()) and hence are not safe to be used for
+  // listening to incoming channels. Such completion queues must be registered
+  // as non-listening queues.
+ for (auto it = cqs_.begin(); it != cqs_.end(); ++it) {
+ if ((*it)->IsFrequentlyPolled()) {
+ grpc_server_register_completion_queue(server->server_, (*it)->cq(),
nullptr);
num_frequently_polled_cqs++;
} else {
grpc_server_register_non_listening_completion_queue(server->server_,
- (*cq)->cq(), nullptr);
+ (*it)->cq(), nullptr);
}
}
@@ -214,9 +305,11 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
return nullptr;
}
}
+
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
(*plugin)->InitServer(initializer);
}
+
if (generic_service_) {
server->RegisterAsyncGenericService(generic_service_);
} else {
@@ -229,6 +322,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
}
}
}
+
for (auto port = ports_.begin(); port != ports_.end(); port++) {
int r = server->AddListeningPort(port->addr, port->creds.get());
if (!r) return nullptr;
@@ -236,13 +330,16 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
*port->selected_port = r;
}
}
+
auto cqs_data = cqs_.empty() ? nullptr : &cqs_[0];
if (!server->Start(cqs_data, cqs_.size())) {
return nullptr;
}
+
for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) {
(*plugin)->Finish(initializer);
}
+
return server;
}
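
A sketch of how a server could exercise the new ServerBuilder knobs introduced above, SetResourceQuota() and SetSyncServerOption(); the port, quota size, and poller counts are illustrative assumptions:

    #include <grpc++/resource_quota.h>
    #include <grpc++/security/server_credentials.h>
    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>

    std::unique_ptr<grpc::Server> BuildSyncServer(grpc::Service* service) {
      grpc::ResourceQuota quota("server-quota");
      quota.Resize(256 * 1024 * 1024);

      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      builder.RegisterService(service);
      builder.SetResourceQuota(quota);
      // Tune the synchronous server's internal completion queues and pollers.
      builder.SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, 2);
      builder.SetSyncServerOption(grpc::ServerBuilder::MIN_POLLERS, 1);
      builder.SetSyncServerOption(grpc::ServerBuilder::MAX_POLLERS, 4);
      return builder.BuildAndStart();
    }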
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 3f89275370..d46942d257 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -1,5 +1,4 @@
/*
- *
* Copyright 2015, Google Inc.
* All rights reserved.
*
@@ -52,7 +51,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
-#include "src/cpp/server/thread_pool_interface.h"
+#include "src/cpp/thread_manager/thread_manager.h"
namespace grpc {
@@ -118,12 +117,9 @@ class Server::UnimplementedAsyncResponse GRPC_FINAL
UnimplementedAsyncRequest* const request_;
};
-class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag {
+class ShutdownTag : public CompletionQueueTag {
public:
- bool FinalizeResult(void** tag, bool* status) {
- delete this;
- return false;
- }
+ bool FinalizeResult(void** tag, bool* status) { return false; }
};
class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
@@ -147,36 +143,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
grpc_metadata_array_destroy(&request_metadata_);
}
- static SyncRequest* Wait(CompletionQueue* cq, bool* ok) {
- void* tag = nullptr;
- *ok = false;
- if (!cq->Next(&tag, ok)) {
- return nullptr;
- }
- auto* mrd = static_cast<SyncRequest*>(tag);
- GPR_ASSERT(mrd->in_flight_);
- return mrd;
- }
-
- static bool AsyncWait(CompletionQueue* cq, SyncRequest** req, bool* ok,
- gpr_timespec deadline) {
- void* tag = nullptr;
- *ok = false;
- switch (cq->AsyncNext(&tag, ok, deadline)) {
- case CompletionQueue::TIMEOUT:
- *req = nullptr;
- return true;
- case CompletionQueue::SHUTDOWN:
- *req = nullptr;
- return false;
- case CompletionQueue::GOT_EVENT:
- *req = static_cast<SyncRequest*>(tag);
- GPR_ASSERT((*req)->in_flight_);
- return true;
- }
- GPR_UNREACHABLE_CODE(return false);
- }
-
void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); }
void TeardownRequest() {
@@ -266,7 +232,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
void* const tag_;
bool in_flight_;
const bool has_request_payload_;
- uint32_t incoming_flags_;
grpc_call* call_;
grpc_call_details* call_details_;
gpr_timespec deadline_;
@@ -275,33 +240,141 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
grpc_completion_queue* cq_;
};
+// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
+// manages a pool of threads that poll for incoming Sync RPCs and call the
+// appropriate RPC handlers
+class Server::SyncRequestThreadManager : public ThreadManager {
+ public:
+ SyncRequestThreadManager(Server* server, CompletionQueue* server_cq,
+ std::shared_ptr<GlobalCallbacks> global_callbacks,
+ int min_pollers, int max_pollers,
+ int cq_timeout_msec)
+ : ThreadManager(min_pollers, max_pollers),
+ server_(server),
+ server_cq_(server_cq),
+ cq_timeout_msec_(cq_timeout_msec),
+ global_callbacks_(global_callbacks) {}
+
+ WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
+ *tag = nullptr;
+ gpr_timespec deadline =
+ gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN);
+
+ switch (server_cq_->AsyncNext(tag, ok, deadline)) {
+ case CompletionQueue::TIMEOUT:
+ return TIMEOUT;
+ case CompletionQueue::SHUTDOWN:
+ return SHUTDOWN;
+ case CompletionQueue::GOT_EVENT:
+ return WORK_FOUND;
+ }
+
+ GPR_UNREACHABLE_CODE(return TIMEOUT);
+ }
+
+ void DoWork(void* tag, bool ok) GRPC_OVERRIDE {
+ SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
+
+ if (!sync_req) {
+      // No tag. Nothing to work on. This is an unlikely scenario and possibly a
+ // bug in RPC Manager implementation.
+ gpr_log(GPR_ERROR, "Sync server. DoWork() was called with NULL tag");
+ return;
+ }
+
+ if (ok) {
+ // Calldata takes ownership of the completion queue inside sync_req
+ SyncRequest::CallData cd(server_, sync_req);
+ {
+ // Prepare for the next request
+ if (!IsShutdown()) {
+ sync_req->SetupRequest(); // Create new completion queue for sync_req
+ sync_req->Request(server_->c_server(), server_cq_->cq());
+ }
+ }
+
+ GPR_TIMER_SCOPE("cd.Run()", 0);
+ cd.Run(global_callbacks_);
+ }
+ // TODO (sreek) If ok is false here (which it isn't in case of
+ // grpc_request_registered_call), we should still re-queue the request
+ // object
+ }
+
+ void AddSyncMethod(RpcServiceMethod* method, void* tag) {
+ sync_requests_.emplace_back(new SyncRequest(method, tag));
+ }
+
+ void AddUnknownSyncMethod() {
+ if (!sync_requests_.empty()) {
+ unknown_method_.reset(new RpcServiceMethod(
+ "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
+ sync_requests_.emplace_back(
+ new SyncRequest(unknown_method_.get(), nullptr));
+ }
+ }
+
+ void ShutdownAndDrainCompletionQueue() {
+ server_cq_->Shutdown();
+
+ // Drain any pending items from the queue
+ void* tag;
+ bool ok;
+ while (server_cq_->Next(&tag, &ok)) {
+ // Nothing to be done here
+ }
+ }
+
+ void Start() {
+ if (!sync_requests_.empty()) {
+ for (auto m = sync_requests_.begin(); m != sync_requests_.end(); m++) {
+ (*m)->SetupRequest();
+ (*m)->Request(server_->c_server(), server_cq_->cq());
+ }
+
+ Initialize(); // ThreadManager's Initialize()
+ }
+ }
+
+ private:
+ Server* server_;
+ CompletionQueue* server_cq_;
+ int cq_timeout_msec_;
+ std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
+ std::unique_ptr<RpcServiceMethod> unknown_method_;
+ std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
+};
+
static internal::GrpcLibraryInitializer g_gli_initializer;
-Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
- int max_receive_message_size, ChannelArguments* args)
+Server::Server(
+ int max_receive_message_size, ChannelArguments* args,
+ std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
+ sync_server_cqs,
+ int min_pollers, int max_pollers, int sync_cq_timeout_msec)
: max_receive_message_size_(max_receive_message_size),
+ sync_server_cqs_(sync_server_cqs),
started_(false),
shutdown_(false),
shutdown_notified_(false),
- num_running_cb_(0),
- sync_methods_(new std::list<SyncRequest>),
has_generic_service_(false),
server_(nullptr),
- thread_pool_(thread_pool),
- thread_pool_owned_(thread_pool_owned),
server_initializer_(new ServerInitializer(this)) {
g_gli_initializer.summon();
gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks);
global_callbacks_ = g_callbacks;
global_callbacks_->UpdateArguments(args);
+
+ for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end();
+ it++) {
+ sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
+ this, (*it).get(), global_callbacks_, min_pollers, max_pollers,
+ sync_cq_timeout_msec));
+ }
+
grpc_channel_args channel_args;
args->SetChannelArgs(&channel_args);
+
server_ = grpc_server_create(&channel_args, nullptr);
- if (thread_pool_ == nullptr) {
- grpc_server_register_non_listening_completion_queue(server_, cq_.cq(),
- nullptr);
- } else {
- grpc_server_register_completion_queue(server_, cq_.cq(), nullptr);
- }
}
Server::~Server() {
@@ -311,17 +384,14 @@ Server::~Server() {
lock.unlock();
Shutdown();
} else if (!started_) {
- cq_.Shutdown();
+ // Shutdown the completion queues
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->ShutdownAndDrainCompletionQueue();
+ }
}
}
- void* got_tag;
- bool ok;
- GPR_ASSERT(!cq_.Next(&got_tag, &ok));
+
grpc_server_destroy(server_);
- if (thread_pool_owned_) {
- delete thread_pool_;
- }
- delete sync_methods_;
}
void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
@@ -352,12 +422,14 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
"Can only register an asynchronous service against one server.");
service->server_ = this;
}
+
const char* method_name = nullptr;
for (auto it = service->methods_.begin(); it != service->methods_.end();
++it) {
if (it->get() == nullptr) { // Handled by generic service if any.
continue;
}
+
RpcServiceMethod* method = it->get();
void* tag = grpc_server_register_method(
server_, method->name(), host ? host->c_str() : nullptr,
@@ -367,11 +439,15 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
method->name());
return false;
}
- if (method->handler() == nullptr) {
+
+ if (method->handler() == nullptr) { // Async method
method->set_server_tag(tag);
} else {
- sync_methods_->emplace_back(method, tag);
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->AddSyncMethod(method, tag);
+ }
}
+
method_name = method->name();
}
@@ -406,28 +482,19 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
grpc_server_start(server_);
if (!has_generic_service_) {
- if (!sync_methods_->empty()) {
- unknown_method_.reset(new RpcServiceMethod(
- "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
- // Use of emplace_back with just constructor arguments is not accepted
- // here by gcc-4.4 because it can't match the anonymous nullptr with a
- // proper constructor implicitly. Construct the object and use push_back.
- sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr));
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->AddUnknownSyncMethod();
}
+
for (size_t i = 0; i < num_cqs; i++) {
if (cqs[i]->IsFrequentlyPolled()) {
new UnimplementedAsyncRequest(this, cqs[i]);
}
}
}
- // Start processing rpcs.
- if (!sync_methods_->empty()) {
- for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) {
- m->SetupRequest();
- m->Request(server_, cq_.cq());
- }
- ScheduleCallback();
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->Start();
}
return true;
@@ -437,29 +504,43 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
grpc::unique_lock<grpc::mutex> lock(mu_);
if (started_ && !shutdown_) {
shutdown_ = true;
- grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest());
- cq_.Shutdown();
- lock.unlock();
- // Spin, eating requests until the completion queue is completely shutdown.
- // If the deadline expires then cancel anything that's pending and keep
- // spinning forever until the work is actually drained.
- // Since nothing else needs to touch state guarded by mu_, holding it
- // through this loop is fine.
- SyncRequest* request;
+
+ /// The completion queue to use for server shutdown completion notification
+ CompletionQueue shutdown_cq;
+ ShutdownTag shutdown_tag; // Dummy shutdown tag
+ grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);
+
+ // Shutdown all ThreadManagers. This will try to gracefully stop all the
+ // threads in the ThreadManagers (once they process any inflight requests)
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->Shutdown(); // ThreadManager's Shutdown()
+ }
+
+ shutdown_cq.Shutdown();
+
+ void* tag;
bool ok;
- while (SyncRequest::AsyncWait(&cq_, &request, &ok, deadline)) {
- if (request == NULL) { // deadline expired
- grpc_server_cancel_all_calls(server_);
- deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- } else if (ok) {
- SyncRequest::CallData call_data(this, request);
- }
+ CompletionQueue::NextStatus status =
+ shutdown_cq.AsyncNext(&tag, &ok, deadline);
+
+ // If this timed out, it means we are done with the grace period for a clean
+ // shutdown. We should force a shutdown now by cancelling all inflight calls
+ if (status == CompletionQueue::NextStatus::TIMEOUT) {
+ grpc_server_cancel_all_calls(server_);
}
- lock.lock();
+    // Else, in case of SHUTDOWN or GOT_EVENT, the server has successfully
+    // shut down.
- // Wait for running callbacks to finish.
- while (num_running_cb_ != 0) {
- callback_cv_.wait(lock);
+ // Wait for threads in all ThreadManagers to terminate
+ for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+ (*it)->Wait();
+ (*it)->ShutdownAndDrainCompletionQueue();
+ }
+
+ // Drain the shutdown queue (if the previous call to AsyncNext() timed out
+ // and we didn't remove the tag from the queue yet)
+ while (shutdown_cq.Next(&tag, &ok)) {
+ // Nothing to be done here. Just ignore ok and tag values
}
shutdown_notified_ = true;
@@ -585,47 +666,6 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
request_->stream()->call_.PerformOps(this);
}
-void Server::ScheduleCallback() {
- {
- grpc::unique_lock<grpc::mutex> lock(mu_);
- num_running_cb_++;
- }
- thread_pool_->Add(std::bind(&Server::RunRpc, this));
-}
-
-void Server::RunRpc() {
- // Wait for one more incoming rpc.
- bool ok;
- GPR_TIMER_SCOPE("Server::RunRpc", 0);
- auto* mrd = SyncRequest::Wait(&cq_, &ok);
- if (mrd) {
- ScheduleCallback();
- if (ok) {
- SyncRequest::CallData cd(this, mrd);
- {
- mrd->SetupRequest();
- grpc::unique_lock<grpc::mutex> lock(mu_);
- if (!shutdown_) {
- mrd->Request(server_, cq_.cq());
- } else {
- // destroy the structure that was created
- mrd->TeardownRequest();
- }
- }
- GPR_TIMER_SCOPE("cd.Run()", 0);
- cd.Run(global_callbacks_);
- }
- }
-
- {
- grpc::unique_lock<grpc::mutex> lock(mu_);
- num_running_cb_--;
- if (shutdown_) {
- callback_cv_.notify_all();
- }
- }
-}
-
ServerInitializer* Server::initializer() { return server_initializer_.get(); }
} // namespace grpc
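
With ShutdownInternal() reworked as above, a caller-side sketch of the graceful-shutdown behavior; the five-second grace period is chosen purely for illustration:

    #include <chrono>
    #include <grpc++/server.h>

    void StopServer(grpc::Server* server) {
      // Give in-flight RPCs up to 5 seconds to finish; once the deadline expires
      // the server cancels all remaining calls and then waits for the
      // ThreadManager threads to drain.
      server->Shutdown(std::chrono::system_clock::now() + std::chrono::seconds(5));
      server->Wait();
    }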
diff --git a/src/cpp/test/server_context_test_spouse.cc b/src/cpp/test/server_context_test_spouse.cc
new file mode 100644
index 0000000000..b93152eea0
--- /dev/null
+++ b/src/cpp/test/server_context_test_spouse.cc
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/test/server_context_test_spouse.h>
+
+namespace grpc {
+namespace testing {
+
+void ServerContextTestSpouse::AddClientMetadata(const grpc::string& key,
+ const grpc::string& value) {
+ client_metadata_storage_.insert(
+ std::pair<grpc::string, grpc::string>(key, value));
+ ctx_->client_metadata_.clear();
+ for (auto iter = client_metadata_storage_.begin();
+ iter != client_metadata_storage_.end(); ++iter) {
+ ctx_->client_metadata_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
+ iter->first.c_str(), iter->second.c_str()));
+ }
+}
+
+} // namespace testing
+} // namespace grpc
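
A small usage sketch for the new test helper, assuming the spouse is constructed around the ServerContext under test; the metadata keys and values are illustrative:

    #include <grpc++/server_context.h>
    #include <grpc++/test/server_context_test_spouse.h>

    void PopulateTestContext(grpc::ServerContext* ctx) {
      grpc::testing::ServerContextTestSpouse spouse(ctx);
      spouse.AddClientMetadata("key1", "val1");
      spouse.AddClientMetadata("key2", "val2");
      // ctx->client_metadata() now reflects the injected key/value pairs.
    }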
diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc
new file mode 100644
index 0000000000..caae4c457d
--- /dev/null
+++ b/src/cpp/thread_manager/thread_manager.cc
@@ -0,0 +1,181 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc++/impl/sync.h>
+#include <grpc++/impl/thd.h>
+#include <grpc/support/log.h>
+#include <climits>
+
+#include "src/cpp/thread_manager/thread_manager.h"
+
+namespace grpc {
+
+ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr)
+ : thd_mgr_(thd_mgr), thd_(&ThreadManager::WorkerThread::Run, this) {}
+
+void ThreadManager::WorkerThread::Run() {
+ thd_mgr_->MainWorkLoop();
+ thd_mgr_->MarkAsCompleted(this);
+}
+
+ThreadManager::WorkerThread::~WorkerThread() { thd_.join(); }
+
+ThreadManager::ThreadManager(int min_pollers, int max_pollers)
+ : shutdown_(false),
+ num_pollers_(0),
+ min_pollers_(min_pollers),
+ max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers),
+ num_threads_(0) {}
+
+ThreadManager::~ThreadManager() {
+ {
+ std::unique_lock<grpc::mutex> lock(mu_);
+ GPR_ASSERT(num_threads_ == 0);
+ }
+
+ CleanupCompletedThreads();
+}
+
+void ThreadManager::Wait() {
+ std::unique_lock<grpc::mutex> lock(mu_);
+ while (num_threads_ != 0) {
+ shutdown_cv_.wait(lock);
+ }
+}
+
+void ThreadManager::Shutdown() {
+ std::unique_lock<grpc::mutex> lock(mu_);
+ shutdown_ = true;
+}
+
+bool ThreadManager::IsShutdown() {
+ std::unique_lock<grpc::mutex> lock(mu_);
+ return shutdown_;
+}
+
+void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
+ {
+ std::unique_lock<grpc::mutex> list_lock(list_mu_);
+ completed_threads_.push_back(thd);
+ }
+
+ grpc::unique_lock<grpc::mutex> lock(mu_);
+ num_threads_--;
+ if (num_threads_ == 0) {
+ shutdown_cv_.notify_one();
+ }
+}
+
+void ThreadManager::CleanupCompletedThreads() {
+ std::unique_lock<grpc::mutex> lock(list_mu_);
+ for (auto thd = completed_threads_.begin(); thd != completed_threads_.end();
+ thd = completed_threads_.erase(thd)) {
+ delete *thd;
+ }
+}
+
+void ThreadManager::Initialize() {
+ for (int i = 0; i < min_pollers_; i++) {
+ MaybeCreatePoller();
+ }
+}
+
+// If the number of pollers (i.e. threads currently blocked in PollForWork()) is
+// less than the max threshold (i.e. max_pollers_), we can let the current
+// thread continue as a poller
+bool ThreadManager::MaybeContinueAsPoller() {
+ std::unique_lock<grpc::mutex> lock(mu_);
+ if (shutdown_ || num_pollers_ > max_pollers_) {
+ return false;
+ }
+
+ num_pollers_++;
+ return true;
+}
+
+// Create a new poller if the current number of pollers, i.e. num_pollers_ (the
+// threads currently blocked in PollForWork()), is below the minimum threshold
+// (i.e. min_pollers_) and the ThreadManager is not shutting down
+void ThreadManager::MaybeCreatePoller() {
+ grpc::unique_lock<grpc::mutex> lock(mu_);
+ if (!shutdown_ && num_pollers_ < min_pollers_) {
+ num_pollers_++;
+ num_threads_++;
+
+      // Create a new thread (which ends up calling the MainWorkLoop() function)
+ new WorkerThread(this);
+ }
+}
+
+void ThreadManager::MainWorkLoop() {
+ void* tag;
+ bool ok;
+
+ /*
+ 1. Poll for work (i.e PollForWork())
+ 2. After returning from PollForWork, reduce the number of pollers by 1. If
+ PollForWork() returned a TIMEOUT, then it may indicate that we have more
+ polling threads than needed. Check if the number of pollers is greater
+ than min_pollers and if so, terminate the thread.
+ 3. Since we are short of one poller now, see if a new poller has to be
+ created (i.e see MaybeCreatePoller() for more details)
+ 4. Do the actual work (DoWork())
+   5. After doing the work, see if this thread can resume polling work (i.e
+ see MaybeContinueAsPoller() for more details) */
+ do {
+ WorkStatus work_status = PollForWork(&tag, &ok);
+
+ {
+ grpc::unique_lock<grpc::mutex> lock(mu_);
+ num_pollers_--;
+
+ if (work_status == TIMEOUT && num_pollers_ > min_pollers_) {
+ break;
+ }
+ }
+
+ // Note that MaybeCreatePoller does check for shutdown and creates a new
+ // thread only if ThreadManager is not shutdown
+ if (work_status == WORK_FOUND) {
+ MaybeCreatePoller();
+ DoWork(tag, ok);
+ }
+ } while (MaybeContinueAsPoller());
+
+ CleanupCompletedThreads();
+
+ // If we are here, either ThreadManager is shutting down or it already has
+ // enough threads.
+}
+
+} // namespace grpc
diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h
new file mode 100644
index 0000000000..9cfdb8af25
--- /dev/null
+++ b/src/cpp/thread_manager/thread_manager.h
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H
+#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
+
+#include <list>
+#include <memory>
+
+#include <grpc++/impl/sync.h>
+#include <grpc++/impl/thd.h>
+#include <grpc++/support/config.h>
+
+namespace grpc {
+
+class ThreadManager {
+ public:
+ explicit ThreadManager(int min_pollers, int max_pollers);
+ virtual ~ThreadManager();
+
+ // Initializes and Starts the Rpc Manager threads
+ void Initialize();
+
+ // The return type of PollForWork() function
+ enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT };
+
+ // "Polls" for new work.
+ // If the return value is WORK_FOUND:
+  //  - The implementation of PollForWork() MAY set some opaque identifier
+  //    (to identify the work item found) via the '*tag' parameter
+  //  - The implementation MUST set the value of 'ok' to 'true' or 'false'. A
+  //    value of 'false' indicates some implementation-specific error (that is
+  //    neither SHUTDOWN nor TIMEOUT)
+  //  - ThreadManager does not interpret the values of 'tag' and 'ok'
+  //  - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to
+  //    DoWork()
+  //
+  // If the return value is SHUTDOWN:
+  //  - ThreadManager WILL NOT call DoWork() and terminates the thread
+  //
+  // If the return value is TIMEOUT:
+  //  - ThreadManager WILL NOT call DoWork()
+  //  - ThreadManager MAY terminate the thread depending on the current number
+  //    of active poller threads and min_pollers/max_pollers settings
+ // - Also, the value of timeout is specific to the derived class
+ // implementation
+ virtual WorkStatus PollForWork(void** tag, bool* ok) = 0;
+
+ // The implementation of DoWork() is supposed to perform the work found by
+ // PollForWork(). The tag and ok parameters are the same as returned by
+ // PollForWork()
+ //
+ // The implementation of DoWork() should also do any setup needed to ensure
+ // that the next call to PollForWork() (not necessarily by the current thread)
+ // actually finds some work
+ virtual void DoWork(void* tag, bool ok) = 0;
+
+ // Mark the ThreadManager as shutdown and begin draining the work. This is a
+ // non-blocking call and the caller should call Wait(), a blocking call which
+ // returns only once the shutdown is complete
+ void Shutdown();
+
+ // Has Shutdown() been called
+ bool IsShutdown();
+
+ // A blocking call that returns only after the ThreadManager has shutdown and
+ // all the threads have drained all the outstanding work
+ void Wait();
+
+ private:
+ // Helper wrapper class around std::thread. This takes a ThreadManager object
+  // and starts a new std::thread that calls the Run() function.
+ //
+  // The Run() function calls the ThreadManager::MainWorkLoop() function and,
+  // once that completes, marks the WorkerThread as completed by calling
+  // ThreadManager::MarkAsCompleted()
+ class WorkerThread {
+ public:
+ WorkerThread(ThreadManager* thd_mgr);
+ ~WorkerThread();
+
+ private:
+ // Calls thd_mgr_->MainWorkLoop() and once that completes, calls
+    // thd_mgr_->MarkAsCompleted(this) to mark the thread as completed
+ void Run();
+
+ ThreadManager* thd_mgr_;
+ grpc::thread thd_;
+ };
+
+  // The main function in ThreadManager
+ void MainWorkLoop();
+
+ // Create a new poller if the number of current pollers is less than the
+  // minimum number of pollers needed (i.e. min_pollers).
+ void MaybeCreatePoller();
+
+  // Returns true if the current thread can resume as a poller, i.e. if the
+  // current number of pollers is less than max_pollers.
+ bool MaybeContinueAsPoller();
+
+ void MarkAsCompleted(WorkerThread* thd);
+ void CleanupCompletedThreads();
+
+ // Protects shutdown_, num_pollers_ and num_threads_
+ // TODO: sreek - Change num_pollers and num_threads_ to atomics
+ grpc::mutex mu_;
+
+ bool shutdown_;
+ grpc::condition_variable shutdown_cv_;
+
+ // Number of threads doing polling
+ int num_pollers_;
+
+ // The minimum and maximum number of threads that should be doing polling
+ int min_pollers_;
+ int max_pollers_;
+
+  // The total number of threads (including the threads that are currently
+  // polling, i.e. num_pollers_)
+ int num_threads_;
+
+ grpc::mutex list_mu_;
+ std::list<WorkerThread*> completed_threads_;
+};
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_THREAD_MANAGER_H
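
To make the PollForWork()/DoWork() contract described in this header concrete, here is a minimal hypothetical ThreadManager subclass backed by an in-memory queue; this is internal API, and the QueueThreadManager type with its poller counts is an assumption for illustration, not part of this change:

    #include <queue>

    #include <grpc++/impl/sync.h>
    #include "src/cpp/thread_manager/thread_manager.h"

    namespace grpc {

    // Toy ThreadManager that drains an in-memory queue of integer "work items".
    class QueueThreadManager GRPC_FINAL : public ThreadManager {
     public:
      QueueThreadManager() : ThreadManager(1 /* min_pollers */, 4 /* max_pollers */) {}

      void Add(int item) {
        grpc::unique_lock<grpc::mutex> lock(queue_mu_);
        items_.push(item);
      }

      WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
        grpc::unique_lock<grpc::mutex> lock(queue_mu_);
        if (IsShutdown()) return SHUTDOWN;
        if (items_.empty()) return TIMEOUT;  // nothing to do right now
        *tag = new int(items_.front());      // opaque identifier handed to DoWork()
        items_.pop();
        *ok = true;
        return WORK_FOUND;
      }

      void DoWork(void* tag, bool ok) GRPC_OVERRIDE {
        int* item = static_cast<int*>(tag);
        if (ok) { /* process *item here */ }
        delete item;
      }

     private:
      grpc::mutex queue_mu_;
      std::queue<int> items_;
    };

    }  // namespace grpc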
diff --git a/src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs b/src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs
index 96d6ee87ae..c5bca444b4 100644
--- a/src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs
+++ b/src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs
@@ -33,6 +33,7 @@
using System;
using System.Threading;
+using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2;
using Grpc.Core;
@@ -72,9 +73,10 @@ namespace Grpc.Auth
public static AsyncAuthInterceptor FromAccessToken(string accessToken)
{
GrpcPreconditions.CheckNotNull(accessToken);
- return new AsyncAuthInterceptor(async (context, metadata) =>
+ return new AsyncAuthInterceptor((context, metadata) =>
{
metadata.Add(CreateBearerTokenHeader(accessToken));
+ return TaskUtils.CompletedTask;
});
}
diff --git a/src/csharp/Grpc.Core/DefaultCallInvoker.cs b/src/csharp/Grpc.Core/DefaultCallInvoker.cs
index 1a99e41153..b15aaefcd6 100644
--- a/src/csharp/Grpc.Core/DefaultCallInvoker.cs
+++ b/src/csharp/Grpc.Core/DefaultCallInvoker.cs
@@ -102,6 +102,7 @@ namespace Grpc.Core
return Calls.AsyncDuplexStreamingCall(call);
}
+ /// <summary>Creates call invocation details for given method.</summary>
protected virtual CallInvocationDetails<TRequest, TResponse> CreateCall<TRequest, TResponse>(Method<TRequest, TResponse> method, string host, CallOptions options)
where TRequest : class
where TResponse : class
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index 622813fb38..d315e6d667 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -138,6 +138,8 @@
<Compile Include="Internal\CallError.cs" />
<Compile Include="Logging\LogLevel.cs" />
<Compile Include="Logging\LogLevelFilterLogger.cs" />
+ <Compile Include="Internal\RequestCallContextSafeHandle.cs" />
+ <Compile Include="Utils\TaskUtils.cs" />
</ItemGroup>
<ItemGroup>
<None Include="Grpc.Core.nuspec" />
diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs
index 3ed2df203d..c57c904eb9 100644
--- a/src/csharp/Grpc.Core/GrpcEnvironment.cs
+++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs
@@ -59,7 +59,6 @@ namespace Grpc.Core
static ILogger logger = new NullLogger();
- readonly object myLock = new object();
readonly GrpcThreadPool threadPool;
readonly DebugStats debugStats = new DebugStats();
readonly AtomicCounter cqPickerCounter = new AtomicCounter();
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index 9abaf1120f..5e61e9ec12 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -52,9 +52,8 @@ namespace Grpc.Core.Internal
// Completion of a pending unary response if not null.
TaskCompletionSource<TResponse> unaryResponseTcs;
- // TODO(jtattermusch): this field doesn't need to be initialized for unary response calls.
- // Indicates that response streaming call has finished.
- TaskCompletionSource<object> streamingCallFinishedTcs = new TaskCompletionSource<object>();
+ // Completion of a streaming response call if not null.
+ TaskCompletionSource<object> streamingResponseCallFinishedTcs;
// TODO(jtattermusch): this field could be lazy-initialized (only if someone requests the response headers).
// Response headers set here once received.
@@ -198,6 +197,7 @@ namespace Grpc.Core.Internal
byte[] payload = UnsafeSerialize(msg);
+ streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartServerStreaming(HandleFinished, payload, metadataArray, GetWriteFlagsForCall());
@@ -219,6 +219,7 @@ namespace Grpc.Core.Internal
Initialize(details.Channel.CompletionQueue);
+ streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartDuplexStreaming(HandleFinished, metadataArray);
@@ -265,7 +266,7 @@ namespace Grpc.Core.Internal
// the halfclose has already been done implicitly, so just return
// completed task here.
halfcloseRequested = true;
- return Task.FromResult<object>(null);
+ return TaskUtils.CompletedTask;
}
call.StartSendCloseFromClient(HandleSendFinished);
@@ -276,13 +277,13 @@ namespace Grpc.Core.Internal
}
/// <summary>
- /// Get the task that completes once if streaming call finishes with ok status and throws RpcException with given status otherwise.
+ /// Get the task that completes once if streaming response call finishes with ok status and throws RpcException with given status otherwise.
/// </summary>
- public Task StreamingCallFinishedTask
+ public Task StreamingResponseCallFinishedTask
{
get
{
- return streamingCallFinishedTcs.Task;
+ return streamingResponseCallFinishedTcs.Task;
}
}
@@ -529,11 +530,11 @@ namespace Grpc.Core.Internal
var status = receivedStatus.Status;
if (status.StatusCode != StatusCode.OK)
{
- streamingCallFinishedTcs.SetException(new RpcException(status));
+ streamingResponseCallFinishedTcs.SetException(new RpcException(status));
return;
}
- streamingCallFinishedTcs.SetResult(null);
+ streamingResponseCallFinishedTcs.SetResult(null);
}
}
}
diff --git a/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs b/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
index c28a6f64d3..26449ee539 100644
--- a/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs
@@ -93,21 +93,6 @@ namespace Grpc.Core.Internal
return data;
}
- // Gets data of server_rpc_new completion.
- public ServerRpcNew GetServerRpcNew(Server server)
- {
- var call = Native.grpcsharp_batch_context_server_rpc_new_call(this);
-
- var method = Marshal.PtrToStringAnsi(Native.grpcsharp_batch_context_server_rpc_new_method(this));
- var host = Marshal.PtrToStringAnsi(Native.grpcsharp_batch_context_server_rpc_new_host(this));
- var deadline = Native.grpcsharp_batch_context_server_rpc_new_deadline(this);
-
- IntPtr metadataArrayPtr = Native.grpcsharp_batch_context_server_rpc_new_request_metadata(this);
- var metadata = MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(metadataArrayPtr);
-
- return new ServerRpcNew(server, call, method, host, deadline, metadata);
- }
-
// Gets data of receive_close_on_server completion.
public bool GetReceivedCloseOnServerCancelled()
{
diff --git a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
index ad9423ff58..65bf60269a 100644
--- a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
@@ -73,7 +73,7 @@ namespace Grpc.Core.Internal
if (result == null)
{
- await call.StreamingCallFinishedTask.ConfigureAwait(false);
+ await call.StreamingResponseCallFinishedTask.ConfigureAwait(false);
return false;
}
return true;
diff --git a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
index 628844f242..7e2f0e9c6c 100644
--- a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
+++ b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
@@ -44,6 +44,8 @@ namespace Grpc.Core.Internal
internal delegate void BatchCompletionDelegate(bool success, BatchContextSafeHandle ctx);
+ internal delegate void RequestCallCompletionDelegate(bool success, RequestCallContextSafeHandle ctx);
+
internal class CompletionRegistry
{
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<CompletionRegistry>();
@@ -68,6 +70,12 @@ namespace Grpc.Core.Internal
Register(ctx.Handle, opCallback);
}
+ public void RegisterRequestCallCompletion(RequestCallContextSafeHandle ctx, RequestCallCompletionDelegate callback)
+ {
+ OpCompletionDelegate opCallback = ((success) => HandleRequestCallCompletion(success, ctx, callback));
+ Register(ctx.Handle, opCallback);
+ }
+
public OpCompletionDelegate Extract(IntPtr key)
{
OpCompletionDelegate value;
@@ -84,7 +92,26 @@ namespace Grpc.Core.Internal
}
catch (Exception e)
{
- Logger.Error(e, "Exception occured while invoking completion delegate.");
+ Logger.Error(e, "Exception occured while invoking batch completion delegate.");
+ }
+ finally
+ {
+ if (ctx != null)
+ {
+ ctx.Dispose();
+ }
+ }
+ }
+
+ private static void HandleRequestCallCompletion(bool success, RequestCallContextSafeHandle ctx, RequestCallCompletionDelegate callback)
+ {
+ try
+ {
+ callback(success, ctx);
+ }
+ catch (Exception e)
+ {
+ Logger.Error(e, "Exception occured while invoking request call completion delegate.");
}
finally
{
diff --git a/src/csharp/Grpc.Core/Internal/InterceptingCallInvoker.cs b/src/csharp/Grpc.Core/Internal/InterceptingCallInvoker.cs
index ef48dc7121..0c63e2092a 100644
--- a/src/csharp/Grpc.Core/Internal/InterceptingCallInvoker.cs
+++ b/src/csharp/Grpc.Core/Internal/InterceptingCallInvoker.cs
@@ -48,7 +48,7 @@ namespace Grpc.Core.Internal
readonly Func<CallOptions, CallOptions> callOptionsInterceptor;
/// <summary>
- /// Initializes a new instance of the <see cref="Grpc.Core.InterceptingCallInvoker"/> class.
+ /// Initializes a new instance of the <see cref="Grpc.Core.Internal.InterceptingCallInvoker"/> class.
/// </summary>
public InterceptingCallInvoker(CallInvoker callInvoker,
Func<string, string> hostInterceptor = null,
diff --git a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
index 26af6311d5..b3714481eb 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
@@ -78,7 +78,10 @@ namespace Grpc.Core.Internal
{
var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr),
Marshal.PtrToStringAnsi(methodNamePtr));
- StartGetMetadata(context, callbackPtr, userDataPtr);
+                // Don't await; we are in a native callback and need to return.
+ #pragma warning disable 4014
+ GetMetadataAsync(context, callbackPtr, userDataPtr);
+ #pragma warning restore 4014
}
catch (Exception e)
{
@@ -87,7 +90,7 @@ namespace Grpc.Core.Internal
}
}
- private async Task StartGetMetadata(AuthInterceptorContext context, IntPtr callbackPtr, IntPtr userDataPtr)
+ private async Task GetMetadataAsync(AuthInterceptorContext context, IntPtr callbackPtr, IntPtr userDataPtr)
{
try
{
diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
index f457c9dbf1..40ba7e30cb 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
@@ -64,14 +64,17 @@ namespace Grpc.Core.Internal
public readonly Delegates.grpcsharp_batch_context_recv_status_on_client_status_delegate grpcsharp_batch_context_recv_status_on_client_status;
public readonly Delegates.grpcsharp_batch_context_recv_status_on_client_details_delegate grpcsharp_batch_context_recv_status_on_client_details;
public readonly Delegates.grpcsharp_batch_context_recv_status_on_client_trailing_metadata_delegate grpcsharp_batch_context_recv_status_on_client_trailing_metadata;
- public readonly Delegates.grpcsharp_batch_context_server_rpc_new_call_delegate grpcsharp_batch_context_server_rpc_new_call;
- public readonly Delegates.grpcsharp_batch_context_server_rpc_new_method_delegate grpcsharp_batch_context_server_rpc_new_method;
- public readonly Delegates.grpcsharp_batch_context_server_rpc_new_host_delegate grpcsharp_batch_context_server_rpc_new_host;
- public readonly Delegates.grpcsharp_batch_context_server_rpc_new_deadline_delegate grpcsharp_batch_context_server_rpc_new_deadline;
- public readonly Delegates.grpcsharp_batch_context_server_rpc_new_request_metadata_delegate grpcsharp_batch_context_server_rpc_new_request_metadata;
public readonly Delegates.grpcsharp_batch_context_recv_close_on_server_cancelled_delegate grpcsharp_batch_context_recv_close_on_server_cancelled;
public readonly Delegates.grpcsharp_batch_context_destroy_delegate grpcsharp_batch_context_destroy;
+ public readonly Delegates.grpcsharp_request_call_context_create_delegate grpcsharp_request_call_context_create;
+ public readonly Delegates.grpcsharp_request_call_context_call_delegate grpcsharp_request_call_context_call;
+ public readonly Delegates.grpcsharp_request_call_context_method_delegate grpcsharp_request_call_context_method;
+ public readonly Delegates.grpcsharp_request_call_context_host_delegate grpcsharp_request_call_context_host;
+ public readonly Delegates.grpcsharp_request_call_context_deadline_delegate grpcsharp_request_call_context_deadline;
+ public readonly Delegates.grpcsharp_request_call_context_request_metadata_delegate grpcsharp_request_call_context_request_metadata;
+ public readonly Delegates.grpcsharp_request_call_context_destroy_delegate grpcsharp_request_call_context_destroy;
+
public readonly Delegates.grpcsharp_composite_call_credentials_create_delegate grpcsharp_composite_call_credentials_create;
public readonly Delegates.grpcsharp_call_credentials_release_delegate grpcsharp_call_credentials_release;
@@ -170,14 +173,17 @@ namespace Grpc.Core.Internal
this.grpcsharp_batch_context_recv_status_on_client_status = GetMethodDelegate<Delegates.grpcsharp_batch_context_recv_status_on_client_status_delegate>(library);
this.grpcsharp_batch_context_recv_status_on_client_details = GetMethodDelegate<Delegates.grpcsharp_batch_context_recv_status_on_client_details_delegate>(library);
this.grpcsharp_batch_context_recv_status_on_client_trailing_metadata = GetMethodDelegate<Delegates.grpcsharp_batch_context_recv_status_on_client_trailing_metadata_delegate>(library);
- this.grpcsharp_batch_context_server_rpc_new_call = GetMethodDelegate<Delegates.grpcsharp_batch_context_server_rpc_new_call_delegate>(library);
- this.grpcsharp_batch_context_server_rpc_new_method = GetMethodDelegate<Delegates.grpcsharp_batch_context_server_rpc_new_method_delegate>(library);
- this.grpcsharp_batch_context_server_rpc_new_host = GetMethodDelegate<Delegates.grpcsharp_batch_context_server_rpc_new_host_delegate>(library);
- this.grpcsharp_batch_context_server_rpc_new_deadline = GetMethodDelegate<Delegates.grpcsharp_batch_context_server_rpc_new_deadline_delegate>(library);
- this.grpcsharp_batch_context_server_rpc_new_request_metadata = GetMethodDelegate<Delegates.grpcsharp_batch_context_server_rpc_new_request_metadata_delegate>(library);
this.grpcsharp_batch_context_recv_close_on_server_cancelled = GetMethodDelegate<Delegates.grpcsharp_batch_context_recv_close_on_server_cancelled_delegate>(library);
this.grpcsharp_batch_context_destroy = GetMethodDelegate<Delegates.grpcsharp_batch_context_destroy_delegate>(library);
+ this.grpcsharp_request_call_context_create = GetMethodDelegate<Delegates.grpcsharp_request_call_context_create_delegate>(library);
+ this.grpcsharp_request_call_context_call = GetMethodDelegate<Delegates.grpcsharp_request_call_context_call_delegate>(library);
+ this.grpcsharp_request_call_context_method = GetMethodDelegate<Delegates.grpcsharp_request_call_context_method_delegate>(library);
+ this.grpcsharp_request_call_context_host = GetMethodDelegate<Delegates.grpcsharp_request_call_context_host_delegate>(library);
+ this.grpcsharp_request_call_context_deadline = GetMethodDelegate<Delegates.grpcsharp_request_call_context_deadline_delegate>(library);
+ this.grpcsharp_request_call_context_request_metadata = GetMethodDelegate<Delegates.grpcsharp_request_call_context_request_metadata_delegate>(library);
+ this.grpcsharp_request_call_context_destroy = GetMethodDelegate<Delegates.grpcsharp_request_call_context_destroy_delegate>(library);
+
this.grpcsharp_composite_call_credentials_create = GetMethodDelegate<Delegates.grpcsharp_composite_call_credentials_create_delegate>(library);
this.grpcsharp_call_credentials_release = GetMethodDelegate<Delegates.grpcsharp_call_credentials_release_delegate>(library);
@@ -302,14 +308,17 @@ namespace Grpc.Core.Internal
public delegate StatusCode grpcsharp_batch_context_recv_status_on_client_status_delegate(BatchContextSafeHandle ctx);
public delegate IntPtr grpcsharp_batch_context_recv_status_on_client_details_delegate(BatchContextSafeHandle ctx); // returns const char*
public delegate IntPtr grpcsharp_batch_context_recv_status_on_client_trailing_metadata_delegate(BatchContextSafeHandle ctx);
- public delegate CallSafeHandle grpcsharp_batch_context_server_rpc_new_call_delegate(BatchContextSafeHandle ctx);
- public delegate IntPtr grpcsharp_batch_context_server_rpc_new_method_delegate(BatchContextSafeHandle ctx); // returns const char*
- public delegate IntPtr grpcsharp_batch_context_server_rpc_new_host_delegate(BatchContextSafeHandle ctx); // returns const char*
- public delegate Timespec grpcsharp_batch_context_server_rpc_new_deadline_delegate(BatchContextSafeHandle ctx);
- public delegate IntPtr grpcsharp_batch_context_server_rpc_new_request_metadata_delegate(BatchContextSafeHandle ctx);
public delegate int grpcsharp_batch_context_recv_close_on_server_cancelled_delegate(BatchContextSafeHandle ctx);
public delegate void grpcsharp_batch_context_destroy_delegate(IntPtr ctx);
+ public delegate RequestCallContextSafeHandle grpcsharp_request_call_context_create_delegate();
+ public delegate CallSafeHandle grpcsharp_request_call_context_call_delegate(RequestCallContextSafeHandle ctx);
+ public delegate IntPtr grpcsharp_request_call_context_method_delegate(RequestCallContextSafeHandle ctx); // returns const char*
+ public delegate IntPtr grpcsharp_request_call_context_host_delegate(RequestCallContextSafeHandle ctx); // returns const char*
+ public delegate Timespec grpcsharp_request_call_context_deadline_delegate(RequestCallContextSafeHandle ctx);
+ public delegate IntPtr grpcsharp_request_call_context_request_metadata_delegate(RequestCallContextSafeHandle ctx);
+ public delegate void grpcsharp_request_call_context_destroy_delegate(IntPtr ctx);
+
public delegate CallCredentialsSafeHandle grpcsharp_composite_call_credentials_create_delegate(CallCredentialsSafeHandle creds1, CallCredentialsSafeHandle creds2);
public delegate void grpcsharp_call_credentials_release_delegate(IntPtr credentials);
@@ -393,7 +402,7 @@ namespace Grpc.Core.Internal
public delegate int grpcsharp_server_add_insecure_http2_port_delegate(ServerSafeHandle server, string addr);
public delegate int grpcsharp_server_add_secure_http2_port_delegate(ServerSafeHandle server, string addr, ServerCredentialsSafeHandle creds);
public delegate void grpcsharp_server_start_delegate(ServerSafeHandle server);
- public delegate CallError grpcsharp_server_request_call_delegate(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
+ public delegate CallError grpcsharp_server_request_call_delegate(ServerSafeHandle server, CompletionQueueSafeHandle cq, RequestCallContextSafeHandle ctx);
public delegate void grpcsharp_server_cancel_all_calls_delegate(ServerSafeHandle server);
public delegate void grpcsharp_server_shutdown_and_notify_callback_delegate(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
public delegate void grpcsharp_server_destroy_delegate(IntPtr server);
diff --git a/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
new file mode 100644
index 0000000000..ea7819d7b1
--- /dev/null
+++ b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
@@ -0,0 +1,85 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Runtime.InteropServices;
+using Grpc.Core;
+
+namespace Grpc.Core.Internal
+{
+ /// <summary>
+ /// grpcsharp_request_call_context
+ /// </summary>
+ internal class RequestCallContextSafeHandle : SafeHandleZeroIsInvalid
+ {
+ static readonly NativeMethods Native = NativeMethods.Get();
+
+ private RequestCallContextSafeHandle()
+ {
+ }
+
+ public static RequestCallContextSafeHandle Create()
+ {
+ return Native.grpcsharp_request_call_context_create();
+ }
+
+ public IntPtr Handle
+ {
+ get
+ {
+ return handle;
+ }
+ }
+
+ // Gets data of server_rpc_new completion.
+ public ServerRpcNew GetServerRpcNew(Server server)
+ {
+ var call = Native.grpcsharp_request_call_context_call(this);
+
+ var method = Marshal.PtrToStringAnsi(Native.grpcsharp_request_call_context_method(this));
+ var host = Marshal.PtrToStringAnsi(Native.grpcsharp_request_call_context_host(this));
+ var deadline = Native.grpcsharp_request_call_context_deadline(this);
+
+ IntPtr metadataArrayPtr = Native.grpcsharp_request_call_context_request_metadata(this);
+ var metadata = MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(metadataArrayPtr);
+
+ return new ServerRpcNew(server, call, method, host, deadline, metadata);
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ Native.grpcsharp_request_call_context_destroy(handle);
+ return true;
+ }
+ }
+}
diff --git a/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs b/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
index 230faacff6..a637a54f58 100644
--- a/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
+++ b/src/csharp/Grpc.Core/Internal/SafeHandleZeroIsInvalid.cs
@@ -45,10 +45,6 @@ namespace Grpc.Core.Internal
{
}
- public SafeHandleZeroIsInvalid(bool ownsHandle) : base(IntPtr.Zero, ownsHandle)
- {
- }
-
public override bool IsInvalid
{
get
@@ -56,11 +52,5 @@ namespace Grpc.Core.Internal
return handle == IntPtr.Zero;
}
}
-
- protected override bool ReleaseHandle()
- {
- // handle is not owned.
- return true;
- }
}
}
diff --git a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
index 6a2f520163..600811ddd6 100644
--- a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
@@ -76,7 +76,7 @@ namespace Grpc.Core.Internal
Status status;
Tuple<TResponse,WriteFlags> responseTuple = null;
- var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
+ var context = HandlerUtils.NewContext(newRpc, responseStream, asyncCall.CancellationToken);
try
{
GrpcPreconditions.CheckArgument(await requestStream.MoveNext().ConfigureAwait(false));
@@ -134,7 +134,7 @@ namespace Grpc.Core.Internal
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
Status status;
- var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
+ var context = HandlerUtils.NewContext(newRpc, responseStream, asyncCall.CancellationToken);
try
{
GrpcPreconditions.CheckArgument(await requestStream.MoveNext().ConfigureAwait(false));
@@ -193,7 +193,7 @@ namespace Grpc.Core.Internal
Status status;
Tuple<TResponse,WriteFlags> responseTuple = null;
- var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
+ var context = HandlerUtils.NewContext(newRpc, responseStream, asyncCall.CancellationToken);
try
{
var response = await handler(requestStream, context).ConfigureAwait(false);
@@ -250,7 +250,7 @@ namespace Grpc.Core.Internal
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
Status status;
- var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
+ var context = HandlerUtils.NewContext(newRpc, responseStream, asyncCall.CancellationToken);
try
{
await handler(requestStream, responseStream, context).ConfigureAwait(false);
@@ -277,20 +277,31 @@ namespace Grpc.Core.Internal
}
}
- internal class NoSuchMethodCallHandler : IServerCallHandler
+ internal class UnimplementedMethodCallHandler : IServerCallHandler
{
- public static readonly NoSuchMethodCallHandler Instance = new NoSuchMethodCallHandler();
+ public static readonly UnimplementedMethodCallHandler Instance = new UnimplementedMethodCallHandler();
- public async Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
+ DuplexStreamingServerCallHandler<byte[], byte[]> callHandlerImpl;
+
+ public UnimplementedMethodCallHandler()
{
- // We don't care about the payload type here.
- var asyncCall = new AsyncCallServer<byte[], byte[]>(
- (payload) => payload, (payload) => payload, newRpc.Server);
-
- asyncCall.Initialize(newRpc.Call, cq);
- var finishedTask = asyncCall.ServerSideCallAsync();
- await asyncCall.SendStatusFromServerAsync(new Status(StatusCode.Unimplemented, ""), Metadata.Empty, null).ConfigureAwait(false);
- await finishedTask.ConfigureAwait(false);
+ var marshaller = new Marshaller<byte[]>((payload) => payload, (payload) => payload);
+ var method = new Method<byte[], byte[]>(MethodType.DuplexStreaming, "", "", marshaller, marshaller);
+ this.callHandlerImpl = new DuplexStreamingServerCallHandler<byte[], byte[]>(method, new DuplexStreamingServerMethod<byte[], byte[]>(UnimplementedMethod));
+ }
+
+ /// <summary>
+ /// Handler used for unimplemented method.
+ /// </summary>
+ private Task UnimplementedMethod(IAsyncStreamReader<byte[]> requestStream, IServerStreamWriter<byte[]> responseStream, ServerCallContext ctx)
+ {
+ ctx.Status = new Status(StatusCode.Unimplemented, "");
+ return TaskUtils.CompletedTask;
+ }
+
+ public Task HandleCall(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
+ {
+ return callHandlerImpl.HandleCall(newRpc, cq);
}
}
@@ -313,13 +324,13 @@ namespace Grpc.Core.Internal
return writeOptions != null ? writeOptions.Flags : default(WriteFlags);
}
- public static ServerCallContext NewContext<TRequest, TResponse>(ServerRpcNew newRpc, string peer, ServerResponseStream<TRequest, TResponse> serverResponseStream, CancellationToken cancellationToken)
+ public static ServerCallContext NewContext<TRequest, TResponse>(ServerRpcNew newRpc, ServerResponseStream<TRequest, TResponse> serverResponseStream, CancellationToken cancellationToken)
where TRequest : class
where TResponse : class
{
DateTime realtimeDeadline = newRpc.Deadline.ToClockType(ClockType.Realtime).ToDateTime();
- return new ServerCallContext(newRpc.Call, newRpc.Method, newRpc.Host, peer, realtimeDeadline,
+ return new ServerCallContext(newRpc.Call, newRpc.Method, newRpc.Host, realtimeDeadline,
newRpc.RequestMetadata, cancellationToken, serverResponseStream.WriteResponseHeadersAsync, serverResponseStream);
}
}
diff --git a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
index 014a8db78f..7d7b838616 100644
--- a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
@@ -85,12 +85,12 @@ namespace Grpc.Core.Internal
}
}
- public void RequestCall(BatchCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
+ public void RequestCall(RequestCallCompletionDelegate callback, CompletionQueueSafeHandle completionQueue)
{
using (completionQueue.NewScope())
{
- var ctx = BatchContextSafeHandle.Create();
- completionQueue.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
+ var ctx = RequestCallContextSafeHandle.Create();
+ completionQueue.CompletionRegistry.RegisterRequestCallCompletion(ctx, callback);
Native.grpcsharp_server_request_call(this, completionQueue, ctx).CheckOk();
}
}
diff --git a/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs b/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs
index 31e1402849..1553bdd687 100644
--- a/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs
+++ b/src/csharp/Grpc.Core/Internal/UnmanagedLibrary.cs
@@ -134,7 +134,11 @@ namespace Grpc.Core.Internal
{
throw new MissingMethodException(string.Format("The native method \"{0}\" does not exist", methodName));
}
- return Marshal.GetDelegateForFunctionPointer(ptr, typeof(T)) as T;
+#if NETSTANDARD1_5
+ return Marshal.GetDelegateForFunctionPointer<T>(ptr); // non-generic version is obsolete
+#else
+ return Marshal.GetDelegateForFunctionPointer(ptr, typeof(T)) as T; // generic version not available in .NET45
+#endif
}
/// <summary>
diff --git a/src/csharp/Grpc.Core/Metadata.cs b/src/csharp/Grpc.Core/Metadata.cs
index 915bec146c..6fc715d6ee 100644
--- a/src/csharp/Grpc.Core/Metadata.cs
+++ b/src/csharp/Grpc.Core/Metadata.cs
@@ -95,11 +95,18 @@ namespace Grpc.Core
#region IList members
+
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public int IndexOf(Metadata.Entry item)
{
return entries.IndexOf(item);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void Insert(int index, Metadata.Entry item)
{
GrpcPreconditions.CheckNotNull(item);
@@ -107,12 +114,18 @@ namespace Grpc.Core
entries.Insert(index, item);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void RemoveAt(int index)
{
CheckWriteable();
entries.RemoveAt(index);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public Metadata.Entry this[int index]
{
get
@@ -128,6 +141,9 @@ namespace Grpc.Core
}
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void Add(Metadata.Entry item)
{
GrpcPreconditions.CheckNotNull(item);
@@ -135,48 +151,75 @@ namespace Grpc.Core
entries.Add(item);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void Add(string key, string value)
{
Add(new Entry(key, value));
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void Add(string key, byte[] valueBytes)
{
Add(new Entry(key, valueBytes));
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void Clear()
{
CheckWriteable();
entries.Clear();
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public bool Contains(Metadata.Entry item)
{
return entries.Contains(item);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public void CopyTo(Metadata.Entry[] array, int arrayIndex)
{
entries.CopyTo(array, arrayIndex);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public int Count
{
get { return entries.Count; }
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public bool IsReadOnly
{
get { return readOnly; }
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public bool Remove(Metadata.Entry item)
{
CheckWriteable();
return entries.Remove(item);
}
+ /// <summary>
+ /// <see cref="T:IList`1"/>
+ /// </summary>
public IEnumerator<Metadata.Entry> GetEnumerator()
{
return entries.GetEnumerator();
@@ -221,7 +264,7 @@ namespace Grpc.Core
public Entry(string key, byte[] valueBytes)
{
this.key = NormalizeKey(key);
- GrpcPreconditions.CheckArgument(this.key.EndsWith(BinaryHeaderSuffix),
+ GrpcPreconditions.CheckArgument(HasBinaryHeaderSuffix(this.key),
"Key for binary valued metadata entry needs to have suffix indicating binary value.");
this.value = null;
GrpcPreconditions.CheckNotNull(valueBytes, "valueBytes");
@@ -237,7 +280,7 @@ namespace Grpc.Core
public Entry(string key, string value)
{
this.key = NormalizeKey(key);
- GrpcPreconditions.CheckArgument(!this.key.EndsWith(BinaryHeaderSuffix),
+ GrpcPreconditions.CheckArgument(!HasBinaryHeaderSuffix(this.key),
"Key for ASCII valued metadata entry cannot have suffix indicating binary value.");
this.value = GrpcPreconditions.CheckNotNull(value, "value");
this.valueBytes = null;
@@ -324,7 +367,7 @@ namespace Grpc.Core
/// </summary>
internal static Entry CreateUnsafe(string key, byte[] valueBytes)
{
- if (key.EndsWith(BinaryHeaderSuffix))
+ if (HasBinaryHeaderSuffix(key))
{
return new Entry(key, null, valueBytes);
}
@@ -338,6 +381,27 @@ namespace Grpc.Core
"Metadata entry key not valid. Keys can only contain lowercase alphanumeric characters, underscores and hyphens.");
return normalized;
}
+
+ /// <summary>
+ /// Returns <c>true</c> if the key has "-bin" binary header suffix.
+ /// </summary>
+ private static bool HasBinaryHeaderSuffix(string key)
+ {
+ // We don't use just string.EndsWith because its implementation is extremely slow
+ // on CoreCLR and we've seen significant differences in gRPC benchmarks caused by it.
+ // See https://github.com/dotnet/coreclr/issues/5612
+
+ int len = key.Length;
+ if (len >= 4 &&
+ key[len - 4] == '-' &&
+ key[len - 3] == 'b' &&
+ key[len - 2] == 'i' &&
+ key[len - 1] == 'n')
+ {
+ return true;
+ }
+ return false;
+ }
}
}
}
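The Metadata.Entry changes above swap string.EndsWith("-bin") for the hand-rolled HasBinaryHeaderSuffix check purely for speed on CoreCLR; the "-bin" convention itself is unchanged: binary-valued entries must use a key ending in "-bin", ASCII-valued entries must not. A short usage sketch, assuming the Grpc.Core package and hypothetical key names:

    using Grpc.Core;

    static class MetadataSketch
    {
        public static Metadata BuildHeaders()
        {
            return new Metadata
            {
                { "custom-header", "ascii value" },               // ASCII entry: key must not end with "-bin"
                { "custom-data-bin", new byte[] { 0x01, 0x02 } }  // binary entry: key must end with "-bin"
            };
        }
    }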
diff --git a/src/csharp/Grpc.Core/Server.cs b/src/csharp/Grpc.Core/Server.cs
index 3b554e5e87..63c1d9cd00 100644
--- a/src/csharp/Grpc.Core/Server.cs
+++ b/src/csharp/Grpc.Core/Server.cs
@@ -47,7 +47,7 @@ namespace Grpc.Core
/// </summary>
public class Server
{
- const int InitialAllowRpcTokenCountPerCq = 10;
+ const int DefaultRequestCallTokensPerCq = 2000;
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<Server>();
readonly AtomicCounter activeCallCounter = new AtomicCounter();
@@ -66,7 +66,7 @@ namespace Grpc.Core
bool startRequested;
volatile bool shutdownRequested;
-
+ int requestCallTokensPerCq = DefaultRequestCallTokensPerCq;
/// <summary>
/// Creates a new server.
@@ -133,6 +133,27 @@ namespace Grpc.Core
}
/// <summary>
+ /// Experimental API. Might change at any time without prior notice.
+ /// Number of calls requested via grpc_server_request_call at any given time for each completion queue.
+ /// </summary>
+ public int RequestCallTokensPerCompletionQueue
+ {
+ get
+ {
+ return requestCallTokensPerCq;
+ }
+ set
+ {
+ lock (myLock)
+ {
+ GrpcPreconditions.CheckState(!startRequested);
+ GrpcPreconditions.CheckArgument(value > 0);
+ requestCallTokensPerCq = value;
+ }
+ }
+ }
+
+ /// <summary>
/// Starts the server.
/// </summary>
public void Start()
@@ -145,9 +166,7 @@ namespace Grpc.Core
handle.Start();
- // Starting with more than one AllowOneRpc tokens can significantly increase
- // unary RPC throughput.
- for (int i = 0; i < InitialAllowRpcTokenCountPerCq; i++)
+ for (int i = 0; i < requestCallTokensPerCq; i++)
{
foreach (var cq in environment.CompletionQueues)
{
@@ -310,14 +329,14 @@ namespace Grpc.Core
/// <summary>
/// Selects corresponding handler for given call and handles the call.
/// </summary>
- private async Task HandleCallAsync(ServerRpcNew newRpc, CompletionQueueSafeHandle cq)
+ private async Task HandleCallAsync(ServerRpcNew newRpc, CompletionQueueSafeHandle cq, Action continuation)
{
try
{
IServerCallHandler callHandler;
if (!callHandlers.TryGetValue(newRpc.Method, out callHandler))
{
- callHandler = NoSuchMethodCallHandler.Instance;
+ callHandler = UnimplementedMethodCallHandler.Instance;
}
await callHandler.HandleCall(newRpc, cq).ConfigureAwait(false);
}
@@ -325,25 +344,40 @@ namespace Grpc.Core
{
Logger.Warning(e, "Exception while handling RPC.");
}
+ finally
+ {
+ continuation();
+ }
}
/// <summary>
/// Handles the native callback.
/// </summary>
- private void HandleNewServerRpc(bool success, BatchContextSafeHandle ctx, CompletionQueueSafeHandle cq)
+ private void HandleNewServerRpc(bool success, RequestCallContextSafeHandle ctx, CompletionQueueSafeHandle cq)
{
- Task.Run(() => AllowOneRpc(cq));
-
+ bool nextRpcRequested = false;
if (success)
{
- ServerRpcNew newRpc = ctx.GetServerRpcNew(this);
+ var newRpc = ctx.GetServerRpcNew(this);
// after server shutdown, the callback returns with null call
if (!newRpc.Call.IsInvalid)
{
- HandleCallAsync(newRpc, cq); // we don't need to await.
+ nextRpcRequested = true;
+
+ // Start the asynchronous handler for the call.
+ // Don't await; the continuations will run on the gRPC thread pool once triggered
+ // by cq.Next().
+ #pragma warning disable 4014
+ HandleCallAsync(newRpc, cq, () => AllowOneRpc(cq));
+ #pragma warning restore 4014
}
}
+
+ if (!nextRpcRequested)
+ {
+ AllowOneRpc(cq);
+ }
}
/// <summary>
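The Server.cs change above replaces the fixed InitialAllowRpcTokenCountPerCq with a configurable RequestCallTokensPerCompletionQueue property (default 2000), which has to be set to a positive value before Start() is called. A hypothetical configuration sketch, with placeholder host and port and no services registered:

    using Grpc.Core;

    var server = new Server
    {
        Ports = { new ServerPort("localhost", 50051, ServerCredentials.Insecure) },
        // Experimental API: number of outstanding grpc_server_request_call tokens
        // per completion queue; must be set before Start().
        RequestCallTokensPerCompletionQueue = 500
    };
    server.Start();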
diff --git a/src/csharp/Grpc.Core/ServerCallContext.cs b/src/csharp/Grpc.Core/ServerCallContext.cs
index 09a6b882a6..8f28fbc045 100644
--- a/src/csharp/Grpc.Core/ServerCallContext.cs
+++ b/src/csharp/Grpc.Core/ServerCallContext.cs
@@ -48,7 +48,6 @@ namespace Grpc.Core
private readonly CallSafeHandle callHandle;
private readonly string method;
private readonly string host;
- private readonly string peer;
private readonly DateTime deadline;
private readonly Metadata requestHeaders;
private readonly CancellationToken cancellationToken;
@@ -58,13 +57,12 @@ namespace Grpc.Core
private Func<Metadata, Task> writeHeadersFunc;
private IHasWriteOptions writeOptionsHolder;
- internal ServerCallContext(CallSafeHandle callHandle, string method, string host, string peer, DateTime deadline, Metadata requestHeaders, CancellationToken cancellationToken,
+ internal ServerCallContext(CallSafeHandle callHandle, string method, string host, DateTime deadline, Metadata requestHeaders, CancellationToken cancellationToken,
Func<Metadata, Task> writeHeadersFunc, IHasWriteOptions writeOptionsHolder)
{
this.callHandle = callHandle;
this.method = method;
this.host = host;
- this.peer = peer;
this.deadline = deadline;
this.requestHeaders = requestHeaders;
this.cancellationToken = cancellationToken;
@@ -115,7 +113,10 @@ namespace Grpc.Core
{
get
{
- return this.peer;
+ // Getting the peer lazily is fine as the native call is guaranteed
+ // not to be disposed before the user-supplied server-side handler returns.
+ // Most users won't need to read this field anyway.
+ return this.callHandle.GetPeer();
}
}
diff --git a/src/csharp/Grpc.Core/Utils/TaskUtils.cs b/src/csharp/Grpc.Core/Utils/TaskUtils.cs
new file mode 100644
index 0000000000..2cf1144143
--- /dev/null
+++ b/src/csharp/Grpc.Core/Utils/TaskUtils.cs
@@ -0,0 +1,59 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Threading.Tasks;
+
+namespace Grpc.Core.Utils
+{
+ /// <summary>
+ /// Utility methods for task parallel library.
+ /// </summary>
+ public static class TaskUtils
+ {
+ /// <summary>
+ /// Framework independent equivalent of <c>Task.CompletedTask</c>.
+ /// </summary>
+ public static Task CompletedTask
+ {
+ get
+ {
+#if NETSTANDARD1_5
+ return Task.CompletedTask;
+#else
+ return Task.FromResult<object>(null); // for .NET45, emulate the functionality
+#endif
+ }
+ }
+ }
+}
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json
index 1b900c8af3..fe200f8d44 100644
--- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/project.json
@@ -67,5 +67,10 @@
}
}
}
+ },
+ "runtimeOptions": {
+ "configProperties": {
+ "System.GC.Server": true
+ }
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/InterarrivalTimers.cs b/src/csharp/Grpc.IntegrationTesting/InterarrivalTimers.cs
index 6492d34890..8bea083bc2 100644
--- a/src/csharp/Grpc.IntegrationTesting/InterarrivalTimers.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InterarrivalTimers.cs
@@ -69,7 +69,7 @@ namespace Grpc.IntegrationTesting
public Task WaitForNextAsync()
{
- return Task.FromResult<object>(null);
+ return TaskUtils.CompletedTask;
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
index 79fd18b6d5..5ba83b143e 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs
@@ -195,8 +195,11 @@ namespace Grpc.IntegrationTesting
case "status_code_and_message":
await RunStatusCodeAndMessageAsync(client);
break;
+ case "unimplemented_service":
+ RunUnimplementedService(new UnimplementedService.UnimplementedServiceClient(channel));
+ break;
case "unimplemented_method":
- RunUnimplementedMethod(new UnimplementedService.UnimplementedServiceClient(channel));
+ RunUnimplementedMethod(client);
break;
case "client_compressed_unary":
RunClientCompressedUnary(client);
@@ -520,12 +523,12 @@ namespace Grpc.IntegrationTesting
};
var call = client.FullDuplexCall(headers: CreateTestMetadata());
- var responseHeaders = await call.ResponseHeadersAsync;
await call.RequestStream.WriteAsync(request);
await call.RequestStream.CompleteAsync();
await call.ResponseStream.ToListAsync();
+ var responseHeaders = await call.ResponseHeadersAsync;
var responseTrailers = call.GetTrailers();
Assert.AreEqual("test_initial_metadata_value", responseHeaders.First((entry) => entry.Key == "x-grpc-test-echo-initial").Value);
@@ -577,13 +580,21 @@ namespace Grpc.IntegrationTesting
Console.WriteLine("Passed!");
}
- public static void RunUnimplementedMethod(UnimplementedService.UnimplementedServiceClient client)
+ public static void RunUnimplementedService(UnimplementedService.UnimplementedServiceClient client)
+ {
+ Console.WriteLine("running unimplemented_service");
+ var e = Assert.Throws<RpcException>(() => client.UnimplementedCall(new Empty()));
+
+ Assert.AreEqual(StatusCode.Unimplemented, e.Status.StatusCode);
+ Console.WriteLine("Passed!");
+ }
+
+ public static void RunUnimplementedMethod(TestService.TestServiceClient client)
{
Console.WriteLine("running unimplemented_method");
var e = Assert.Throws<RpcException>(() => client.UnimplementedCall(new Empty()));
Assert.AreEqual(StatusCode.Unimplemented, e.Status.StatusCode);
- Assert.AreEqual("", e.Status.Detail);
Console.WriteLine("Passed!");
}
diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
index f907f630da..4960a53f92 100644
--- a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs
@@ -146,9 +146,15 @@ namespace Grpc.IntegrationTesting
}
[Test]
+ public void UnimplementedService()
+ {
+ InteropClient.RunUnimplementedService(new UnimplementedService.UnimplementedServiceClient(channel));
+ }
+
+ [Test]
public void UnimplementedMethod()
{
- InteropClient.RunUnimplementedMethod(new UnimplementedService.UnimplementedServiceClient(channel));
+ InteropClient.RunUnimplementedMethod(client);
}
}
}
diff --git a/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs b/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
index 865556c242..62a7347d42 100644
--- a/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
+++ b/src/csharp/Grpc.IntegrationTesting/QpsWorker.cs
@@ -76,6 +76,11 @@ namespace Grpc.IntegrationTesting
private async Task RunAsync()
{
+ // (ThreadPoolSize == ProcessorCount) gives best throughput in benchmarks
+ // and doesn't seem to harm performance even when server and client
+ // are running on the same machine.
+ GrpcEnvironment.SetThreadPoolSize(Environment.ProcessorCount);
+
string host = "0.0.0.0";
int port = options.DriverPort;
diff --git a/src/csharp/Grpc.IntegrationTesting/Test.cs b/src/csharp/Grpc.IntegrationTesting/Test.cs
index 88c2b8a921..d2fa9f8013 100644
--- a/src/csharp/Grpc.IntegrationTesting/Test.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Test.cs
@@ -24,25 +24,28 @@ namespace Grpc.Testing {
string.Concat(
"CiFzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3Rlc3QucHJvdG8SDGdycGMudGVz",
"dGluZxoic3JjL3Byb3RvL2dycGMvdGVzdGluZy9lbXB0eS5wcm90bxolc3Jj",
- "L3Byb3RvL2dycGMvdGVzdGluZy9tZXNzYWdlcy5wcm90bzK7BAoLVGVzdFNl",
+ "L3Byb3RvL2dycGMvdGVzdGluZy9tZXNzYWdlcy5wcm90bzLLBQoLVGVzdFNl",
"cnZpY2USNQoJRW1wdHlDYWxsEhMuZ3JwYy50ZXN0aW5nLkVtcHR5GhMuZ3Jw",
"Yy50ZXN0aW5nLkVtcHR5EkYKCVVuYXJ5Q2FsbBIbLmdycGMudGVzdGluZy5T",
- "aW1wbGVSZXF1ZXN0GhwuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlc3BvbnNlEmwK",
- "E1N0cmVhbWluZ091dHB1dENhbGwSKC5ncnBjLnRlc3RpbmcuU3RyZWFtaW5n",
- "T3V0cHV0Q2FsbFJlcXVlc3QaKS5ncnBjLnRlc3RpbmcuU3RyZWFtaW5nT3V0",
- "cHV0Q2FsbFJlc3BvbnNlMAESaQoSU3RyZWFtaW5nSW5wdXRDYWxsEicuZ3Jw",
- "Yy50ZXN0aW5nLlN0cmVhbWluZ0lucHV0Q2FsbFJlcXVlc3QaKC5ncnBjLnRl",
- "c3RpbmcuU3RyZWFtaW5nSW5wdXRDYWxsUmVzcG9uc2UoARJpCg5GdWxsRHVw",
- "bGV4Q2FsbBIoLmdycGMudGVzdGluZy5TdHJlYW1pbmdPdXRwdXRDYWxsUmVx",
- "dWVzdBopLmdycGMudGVzdGluZy5TdHJlYW1pbmdPdXRwdXRDYWxsUmVzcG9u",
- "c2UoATABEmkKDkhhbGZEdXBsZXhDYWxsEiguZ3JwYy50ZXN0aW5nLlN0cmVh",
- "bWluZ091dHB1dENhbGxSZXF1ZXN0GikuZ3JwYy50ZXN0aW5nLlN0cmVhbWlu",
- "Z091dHB1dENhbGxSZXNwb25zZSgBMAEyVQoUVW5pbXBsZW1lbnRlZFNlcnZp",
- "Y2USPQoRVW5pbXBsZW1lbnRlZENhbGwSEy5ncnBjLnRlc3RpbmcuRW1wdHka",
- "Ey5ncnBjLnRlc3RpbmcuRW1wdHkyiQEKEFJlY29ubmVjdFNlcnZpY2USOwoF",
- "U3RhcnQSHS5ncnBjLnRlc3RpbmcuUmVjb25uZWN0UGFyYW1zGhMuZ3JwYy50",
- "ZXN0aW5nLkVtcHR5EjgKBFN0b3ASEy5ncnBjLnRlc3RpbmcuRW1wdHkaGy5n",
- "cnBjLnRlc3RpbmcuUmVjb25uZWN0SW5mb2IGcHJvdG8z"));
+ "aW1wbGVSZXF1ZXN0GhwuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlc3BvbnNlEk8K",
+ "EkNhY2hlYWJsZVVuYXJ5Q2FsbBIbLmdycGMudGVzdGluZy5TaW1wbGVSZXF1",
+ "ZXN0GhwuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlc3BvbnNlEmwKE1N0cmVhbWlu",
+ "Z091dHB1dENhbGwSKC5ncnBjLnRlc3RpbmcuU3RyZWFtaW5nT3V0cHV0Q2Fs",
+ "bFJlcXVlc3QaKS5ncnBjLnRlc3RpbmcuU3RyZWFtaW5nT3V0cHV0Q2FsbFJl",
+ "c3BvbnNlMAESaQoSU3RyZWFtaW5nSW5wdXRDYWxsEicuZ3JwYy50ZXN0aW5n",
+ "LlN0cmVhbWluZ0lucHV0Q2FsbFJlcXVlc3QaKC5ncnBjLnRlc3RpbmcuU3Ry",
+ "ZWFtaW5nSW5wdXRDYWxsUmVzcG9uc2UoARJpCg5GdWxsRHVwbGV4Q2FsbBIo",
+ "LmdycGMudGVzdGluZy5TdHJlYW1pbmdPdXRwdXRDYWxsUmVxdWVzdBopLmdy",
+ "cGMudGVzdGluZy5TdHJlYW1pbmdPdXRwdXRDYWxsUmVzcG9uc2UoATABEmkK",
+ "DkhhbGZEdXBsZXhDYWxsEiguZ3JwYy50ZXN0aW5nLlN0cmVhbWluZ091dHB1",
+ "dENhbGxSZXF1ZXN0GikuZ3JwYy50ZXN0aW5nLlN0cmVhbWluZ091dHB1dENh",
+ "bGxSZXNwb25zZSgBMAESPQoRVW5pbXBsZW1lbnRlZENhbGwSEy5ncnBjLnRl",
+ "c3RpbmcuRW1wdHkaEy5ncnBjLnRlc3RpbmcuRW1wdHkyVQoUVW5pbXBsZW1l",
+ "bnRlZFNlcnZpY2USPQoRVW5pbXBsZW1lbnRlZENhbGwSEy5ncnBjLnRlc3Rp",
+ "bmcuRW1wdHkaEy5ncnBjLnRlc3RpbmcuRW1wdHkyiQEKEFJlY29ubmVjdFNl",
+ "cnZpY2USOwoFU3RhcnQSHS5ncnBjLnRlc3RpbmcuUmVjb25uZWN0UGFyYW1z",
+ "GhMuZ3JwYy50ZXN0aW5nLkVtcHR5EjgKBFN0b3ASEy5ncnBjLnRlc3Rpbmcu",
+ "RW1wdHkaGy5ncnBjLnRlc3RpbmcuUmVjb25uZWN0SW5mb2IGcHJvdG8z"));
descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
new pbr::FileDescriptor[] { global::Grpc.Testing.EmptyReflection.Descriptor, global::Grpc.Testing.MessagesReflection.Descriptor, },
new pbr::GeneratedClrTypeInfo(null, null));
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index 61f2ed4015..8d649bf5c5 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -71,6 +71,13 @@ namespace Grpc.Testing {
__Marshaller_SimpleRequest,
__Marshaller_SimpleResponse);
+ static readonly Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_CacheableUnaryCall = new Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ MethodType.Unary,
+ __ServiceName,
+ "CacheableUnaryCall",
+ __Marshaller_SimpleRequest,
+ __Marshaller_SimpleResponse);
+
static readonly Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_StreamingOutputCall = new Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
MethodType.ServerStreaming,
__ServiceName,
@@ -99,6 +106,13 @@ namespace Grpc.Testing {
__Marshaller_StreamingOutputCallRequest,
__Marshaller_StreamingOutputCallResponse);
+ static readonly Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_UnimplementedCall = new Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
+ MethodType.Unary,
+ __ServiceName,
+ "UnimplementedCall",
+ __Marshaller_Empty,
+ __Marshaller_Empty);
+
/// <summary>Service descriptor</summary>
public static global::Google.Protobuf.Reflection.ServiceDescriptor Descriptor
{
@@ -125,6 +139,16 @@ namespace Grpc.Testing {
}
/// <summary>
+ /// One request followed by one response. Response has cache control
+ /// headers set such that a caching HTTP proxy (such as GFE) can
+ /// satisfy subsequent requests.
+ /// </summary>
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context)
+ {
+ throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ }
+
+ /// <summary>
/// One request followed by a sequence of responses (streamed download).
/// The server returns the payload with client desired type and sizes.
/// </summary>
@@ -163,6 +187,15 @@ namespace Grpc.Testing {
throw new RpcException(new Status(StatusCode.Unimplemented, ""));
}
+ /// <summary>
+ /// The test server will not implement this method. It will be used
+ /// to test the behavior when clients call unimplemented methods.
+ /// </summary>
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context)
+ {
+ throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ }
+
}
/// <summary>Client for TestService</summary>
@@ -245,6 +278,42 @@ namespace Grpc.Testing {
return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request);
}
/// <summary>
+ /// One request followed by one response. Response has cache control
+ /// headers set such that a caching HTTP proxy (such as GFE) can
+ /// satisfy subsequent requests.
+ /// </summary>
+ public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return CacheableUnaryCall(request, new CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// One request followed by one response. Response has cache control
+ /// headers set such that a caching HTTP proxy (such as GFE) can
+ /// satisfy subsequent requests.
+ /// </summary>
+ public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ {
+ return CallInvoker.BlockingUnaryCall(__Method_CacheableUnaryCall, null, options, request);
+ }
+ /// <summary>
+ /// One request followed by one response. Response has cache control
+ /// headers set such that a caching HTTP proxy (such as GFE) can
+ /// satisfy subsequent requests.
+ /// </summary>
+ public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return CacheableUnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// One request followed by one response. Response has cache control
+ /// headers set such that a caching HTTP proxy (such as GFE) can
+ /// satisfy subsequent requests.
+ /// </summary>
+ public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ {
+ return CallInvoker.AsyncUnaryCall(__Method_CacheableUnaryCall, null, options, request);
+ }
+ /// <summary>
/// One request followed by a sequence of responses (streamed download).
/// The server returns the payload with client desired type and sizes.
/// </summary>
@@ -314,6 +383,38 @@ namespace Grpc.Testing {
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_HalfDuplexCall, null, options);
}
+ /// <summary>
+ /// The test server will not implement this method. It will be used
+ /// to test the behavior when clients call unimplemented methods.
+ /// </summary>
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// The test server will not implement this method. It will be used
+ /// to test the behavior when clients call unimplemented methods.
+ /// </summary>
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options)
+ {
+ return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request);
+ }
+ /// <summary>
+ /// The test server will not implement this method. It will be used
+ /// to test the behavior when clients call unimplemented methods.
+ /// </summary>
+ public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ {
+ return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ }
+ /// <summary>
+ /// The test server will not implement this method. It will be used
+ /// to test the behavior when clients call unimplemented methods.
+ /// </summary>
+ public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options)
+ {
+ return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request);
+ }
/// <summary>Creates a new instance of client from given <c>ClientBaseConfiguration</c>.</summary>
protected override TestServiceClient NewInstance(ClientBaseConfiguration configuration)
{
@@ -327,10 +428,12 @@ namespace Grpc.Testing {
return ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_EmptyCall, serviceImpl.EmptyCall)
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
+ .AddMethod(__Method_CacheableUnaryCall, serviceImpl.CacheableUnaryCall)
.AddMethod(__Method_StreamingOutputCall, serviceImpl.StreamingOutputCall)
.AddMethod(__Method_StreamingInputCall, serviceImpl.StreamingInputCall)
.AddMethod(__Method_FullDuplexCall, serviceImpl.FullDuplexCall)
- .AddMethod(__Method_HalfDuplexCall, serviceImpl.HalfDuplexCall).Build();
+ .AddMethod(__Method_HalfDuplexCall, serviceImpl.HalfDuplexCall)
+ .AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
}
}
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index 068bf709b8..9a5d7869d3 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -84,11 +84,6 @@ typedef struct grpcsharp_batch_context {
size_t status_details_capacity;
} recv_status_on_client;
int recv_close_on_server_cancelled;
- struct {
- grpc_call *call;
- grpc_call_details call_details;
- grpc_metadata_array request_metadata;
- } server_rpc_new;
} grpcsharp_batch_context;
GPR_EXPORT grpcsharp_batch_context *GPR_CALLTYPE grpcsharp_batch_context_create() {
@@ -97,6 +92,18 @@ GPR_EXPORT grpcsharp_batch_context *GPR_CALLTYPE grpcsharp_batch_context_create(
return ctx;
}
+typedef struct {
+ grpc_call *call;
+ grpc_call_details call_details;
+ grpc_metadata_array request_metadata;
+} grpcsharp_request_call_context;
+
+GPR_EXPORT grpcsharp_request_call_context *GPR_CALLTYPE grpcsharp_request_call_context_create() {
+ grpcsharp_request_call_context *ctx = gpr_malloc(sizeof(grpcsharp_request_call_context));
+ memset(ctx, 0, sizeof(grpcsharp_request_call_context));
+ return ctx;
+}
+
/*
* Destroys array->metadata.
* The array pointer itself is not freed.
@@ -230,13 +237,20 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_con
&(ctx->recv_status_on_client.trailing_metadata));
gpr_free((void *)ctx->recv_status_on_client.status_details);
+ gpr_free(ctx);
+}
+
+GPR_EXPORT void GPR_CALLTYPE grpcsharp_request_call_context_destroy(grpcsharp_request_call_context *ctx) {
+ if (!ctx) {
+ return;
+ }
/* NOTE: ctx->server_rpc_new.call is not destroyed because callback handler is
supposed
to take its ownership. */
- grpc_call_details_destroy(&(ctx->server_rpc_new.call_details));
+ grpc_call_details_destroy(&(ctx->call_details));
grpcsharp_metadata_array_destroy_metadata_only(
- &(ctx->server_rpc_new.request_metadata));
+ &(ctx->request_metadata));
gpr_free(ctx);
}
@@ -303,32 +317,32 @@ grpcsharp_batch_context_recv_status_on_client_trailing_metadata(
return &(ctx->recv_status_on_client.trailing_metadata);
}
-GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_batch_context_server_rpc_new_call(
- const grpcsharp_batch_context *ctx) {
- return ctx->server_rpc_new.call;
+GPR_EXPORT grpc_call *GPR_CALLTYPE grpcsharp_request_call_context_call(
+ const grpcsharp_request_call_context *ctx) {
+ return ctx->call;
}
GPR_EXPORT const char *GPR_CALLTYPE
-grpcsharp_batch_context_server_rpc_new_method(
- const grpcsharp_batch_context *ctx) {
- return ctx->server_rpc_new.call_details.method;
+grpcsharp_request_call_context_method(
+ const grpcsharp_request_call_context *ctx) {
+ return ctx->call_details.method;
}
-GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_batch_context_server_rpc_new_host(
- const grpcsharp_batch_context *ctx) {
- return ctx->server_rpc_new.call_details.host;
+GPR_EXPORT const char *GPR_CALLTYPE grpcsharp_request_call_context_host(
+ const grpcsharp_request_call_context *ctx) {
+ return ctx->call_details.host;
}
GPR_EXPORT gpr_timespec GPR_CALLTYPE
-grpcsharp_batch_context_server_rpc_new_deadline(
- const grpcsharp_batch_context *ctx) {
- return ctx->server_rpc_new.call_details.deadline;
+grpcsharp_request_call_context_deadline(
+ const grpcsharp_request_call_context *ctx) {
+ return ctx->call_details.deadline;
}
GPR_EXPORT const grpc_metadata_array *GPR_CALLTYPE
-grpcsharp_batch_context_server_rpc_new_request_metadata(
- const grpcsharp_batch_context *ctx) {
- return &(ctx->server_rpc_new.request_metadata);
+grpcsharp_request_call_context_request_metadata(
+ const grpcsharp_request_call_context *ctx) {
+ return &(ctx->request_metadata);
}
GPR_EXPORT int32_t GPR_CALLTYPE
@@ -853,10 +867,10 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_server_request_call(grpc_server *server, grpc_completion_queue *cq,
- grpcsharp_batch_context *ctx) {
+ grpcsharp_request_call_context *ctx) {
return grpc_server_request_call(
- server, &(ctx->server_rpc_new.call), &(ctx->server_rpc_new.call_details),
- &(ctx->server_rpc_new.request_metadata), cq, cq, ctx);
+ server, &(ctx->call), &(ctx->call_details),
+ &(ctx->request_metadata), cq, cq, ctx);
}
/* Security */
diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index a3f678f32c..ad7d0ec8c8 100644
--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -44,8 +44,8 @@
namespace grpc {
namespace node {
+using Nan::MaybeLocal;
-using v8::Context;
using v8::Function;
using v8::Local;
using v8::Object;
@@ -89,15 +89,19 @@ Local<Value> ByteBufferToBuffer(grpc_byte_buffer *buffer) {
Local<Value> MakeFastBuffer(Local<Value> slowBuffer) {
Nan::EscapableHandleScope scope;
Local<Object> globalObj = Nan::GetCurrentContext()->Global();
+ MaybeLocal<Value> constructorValue = Nan::Get(
+ globalObj, Nan::New("Buffer").ToLocalChecked());
Local<Function> bufferConstructor = Local<Function>::Cast(
- globalObj->Get(Nan::New("Buffer").ToLocalChecked()));
- Local<Value> consArgs[3] = {
+ constructorValue.ToLocalChecked());
+ const int argc = 3;
+ Local<Value> consArgs[argc] = {
slowBuffer,
Nan::New<Number>(::node::Buffer::Length(slowBuffer)),
Nan::New<Number>(0)
};
- Local<Object> fastBuffer = bufferConstructor->NewInstance(3, consArgs);
- return scope.Escape(fastBuffer);
+ MaybeLocal<Object> fastBuffer = Nan::NewInstance(bufferConstructor,
+ argc, consArgs);
+ return scope.Escape(fastBuffer.ToLocalChecked());
}
} // namespace node
} // namespace grpc
diff --git a/src/node/ext/call.cc b/src/node/ext/call.cc
index 9f023b5883..191e763e0e 100644
--- a/src/node/ext/call.cc
+++ b/src/node/ext/call.cc
@@ -45,6 +45,7 @@
#include "byte_buffer.h"
#include "call.h"
#include "channel.h"
+#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "call_credentials.h"
#include "timeval.h"
@@ -222,6 +223,9 @@ class SendMetadataOp : public Op {
out->data.send_initial_metadata.metadata = array.metadata;
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
return "send_metadata";
@@ -263,6 +267,9 @@ class SendMessageOp : public Op {
resources->handles.push_back(unique_ptr<PersistentValue>(handle));
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
return "send_message";
@@ -281,6 +288,9 @@ class SendClientCloseOp : public Op {
shared_ptr<Resources> resources) {
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
return "client_close";
@@ -341,6 +351,9 @@ class SendServerStatusOp : public Op {
out->data.send_status_from_server.status_details = **str;
return true;
}
+ bool IsFinalOp() {
+ return true;
+ }
protected:
std::string GetTypeString() const {
return "send_status";
@@ -367,6 +380,9 @@ class GetMetadataOp : public Op {
out->data.recv_initial_metadata = &recv_metadata;
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
@@ -397,6 +413,9 @@ class ReadMessageOp : public Op {
out->data.recv_message = &recv_message;
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
@@ -442,6 +461,9 @@ class ClientStatusOp : public Op {
ParseMetadata(&metadata_array));
return scope.Escape(status_obj);
}
+ bool IsFinalOp() {
+ return true;
+ }
protected:
std::string GetTypeString() const {
return "status";
@@ -465,6 +487,9 @@ class ServerCloseResponseOp : public Op {
out->data.recv_close_on_server.cancelled = &cancelled;
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
protected:
std::string GetTypeString() const {
@@ -476,8 +501,8 @@ class ServerCloseResponseOp : public Op {
};
tag::tag(Callback *callback, OpVec *ops,
- shared_ptr<Resources> resources) :
- callback(callback), ops(ops), resources(resources){
+ shared_ptr<Resources> resources, Call *call) :
+ callback(callback), ops(ops), resources(resources), call(call){
}
tag::~tag() {
@@ -502,16 +527,36 @@ Callback *GetTagCallback(void *tag) {
return tag_struct->callback;
}
+void CompleteTag(void *tag) {
+ struct tag *tag_struct = reinterpret_cast<struct tag *>(tag);
+ bool is_final_op = false;
+ if (tag_struct->call == NULL) {
+ return;
+ }
+ for (vector<unique_ptr<Op> >::iterator it = tag_struct->ops->begin();
+ it != tag_struct->ops->end(); ++it) {
+ Op *op_ptr = it->get();
+ if (op_ptr->IsFinalOp()) {
+ is_final_op = true;
+ }
+ }
+ tag_struct->call->CompleteBatch(is_final_op);
+}
+
void DestroyTag(void *tag) {
struct tag *tag_struct = reinterpret_cast<struct tag *>(tag);
delete tag_struct;
}
-Call::Call(grpc_call *call) : wrapped_call(call) {
+Call::Call(grpc_call *call) : wrapped_call(call),
+ pending_batches(0),
+ has_final_op_completed(false) {
}
Call::~Call() {
- grpc_call_destroy(wrapped_call);
+ if (wrapped_call != NULL) {
+ grpc_call_destroy(wrapped_call);
+ }
}
void Call::Init(Local<Object> exports) {
@@ -552,6 +597,17 @@ Local<Value> Call::WrapStruct(grpc_call *call) {
}
}
+void Call::CompleteBatch(bool is_final_op) {
+ if (is_final_op) {
+ this->has_final_op_completed = true;
+ }
+ this->pending_batches--;
+ if (this->has_final_op_completed && this->pending_batches == 0) {
+ grpc_call_destroy(this->wrapped_call);
+ this->wrapped_call = NULL;
+ }
+}
+
NAN_METHOD(Call::New) {
if (info.IsConstructCall()) {
Call *call;
@@ -602,27 +658,27 @@ NAN_METHOD(Call::New) {
Utf8String host_override(info[3]);
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
- CompletionQueueAsyncWorker::GetQueue(), *method,
+ GetCompletionQueue(), *method,
*host_override, MillisecondsToTimespec(deadline), NULL);
} else if (info[3]->IsUndefined() || info[3]->IsNull()) {
wrapped_call = grpc_channel_create_call(
wrapped_channel, parent_call, propagate_flags,
- CompletionQueueAsyncWorker::GetQueue(), *method,
+ GetCompletionQueue(), *method,
NULL, MillisecondsToTimespec(deadline), NULL);
} else {
return Nan::ThrowTypeError("Call's fourth argument must be a string");
}
call = new Call(wrapped_call);
- info.This()->SetHiddenValue(Nan::New("channel_").ToLocalChecked(),
- channel_object);
+ Nan::Set(info.This(), Nan::New("channel_").ToLocalChecked(),
+ channel_object);
}
call->Wrap(info.This());
info.GetReturnValue().Set(info.This());
} else {
const int argc = 4;
Local<Value> argv[argc] = {info[0], info[1], info[2], info[3]};
- MaybeLocal<Object> maybe_instance = constructor->GetFunction()->NewInstance(
- argc, argv);
+ MaybeLocal<Object> maybe_instance = Nan::NewInstance(
+ constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@@ -697,11 +753,12 @@ NAN_METHOD(Call::StartBatch) {
Callback *callback = new Callback(callback_func);
grpc_call_error error = grpc_call_start_batch(
call->wrapped_call, &ops[0], nops, new struct tag(
- callback, op_vector.release(), resources), NULL);
+ callback, op_vector.release(), resources, call), NULL);
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("startBatch failed", error));
}
- CompletionQueueAsyncWorker::Next();
+ call->pending_batches++;
+ CompletionQueueNext();
}
NAN_METHOD(Call::Cancel) {
diff --git a/src/node/ext/call.h b/src/node/ext/call.h
index 1e3c3ba18d..31c6566d14 100644
--- a/src/node/ext/call.h
+++ b/src/node/ext/call.h
@@ -66,34 +66,6 @@ bool CreateMetadataArray(v8::Local<v8::Object> metadata,
grpc_metadata_array *array,
shared_ptr<Resources> resources);
-class Op {
- public:
- virtual v8::Local<v8::Value> GetNodeValue() const = 0;
- virtual bool ParseOp(v8::Local<v8::Value> value, grpc_op *out,
- shared_ptr<Resources> resources) = 0;
- virtual ~Op();
- v8::Local<v8::Value> GetOpType() const;
-
- protected:
- virtual std::string GetTypeString() const = 0;
-};
-
-typedef std::vector<unique_ptr<Op>> OpVec;
-struct tag {
- tag(Nan::Callback *callback, OpVec *ops,
- shared_ptr<Resources> resources);
- ~tag();
- Nan::Callback *callback;
- OpVec *ops;
- shared_ptr<Resources> resources;
-};
-
-v8::Local<v8::Value> GetTagNodeValue(void *tag);
-
-Nan::Callback *GetTagCallback(void *tag);
-
-void DestroyTag(void *tag);
-
/* Wrapper class for grpc_call structs. */
class Call : public Nan::ObjectWrap {
public:
@@ -102,6 +74,8 @@ class Call : public Nan::ObjectWrap {
/* Wrap a grpc_call struct in a javascript object */
static v8::Local<v8::Value> WrapStruct(grpc_call *call);
+ void CompleteBatch(bool is_final_op);
+
private:
explicit Call(grpc_call *call);
~Call();
@@ -121,8 +95,46 @@ class Call : public Nan::ObjectWrap {
static Nan::Persistent<v8::FunctionTemplate> fun_tpl;
grpc_call *wrapped_call;
+ // The number of ops that were started but not completed on this call
+ int pending_batches;
+ /* Indicates whether the "final" op on a call has completed. For a client
+ call, this is GRPC_OP_RECV_STATUS_ON_CLIENT and for a server call, this
+ is GRPC_OP_SEND_STATUS_FROM_SERVER */
+ bool has_final_op_completed;
};
+class Op {
+ public:
+ virtual v8::Local<v8::Value> GetNodeValue() const = 0;
+ virtual bool ParseOp(v8::Local<v8::Value> value, grpc_op *out,
+ shared_ptr<Resources> resources) = 0;
+ virtual ~Op();
+ v8::Local<v8::Value> GetOpType() const;
+ virtual bool IsFinalOp() = 0;
+
+ protected:
+ virtual std::string GetTypeString() const = 0;
+};
+
+typedef std::vector<unique_ptr<Op>> OpVec;
+struct tag {
+ tag(Nan::Callback *callback, OpVec *ops,
+ shared_ptr<Resources> resources, Call *call);
+ ~tag();
+ Nan::Callback *callback;
+ OpVec *ops;
+ shared_ptr<Resources> resources;
+ Call *call;
+};
+
+v8::Local<v8::Value> GetTagNodeValue(void *tag);
+
+Nan::Callback *GetTagCallback(void *tag);
+
+void DestroyTag(void *tag);
+
+void CompleteTag(void *tag);
+
} // namespace node
} // namespace grpc
diff --git a/src/node/ext/channel.cc b/src/node/ext/channel.cc
index 00fcca6dc8..5bc58b9b32 100644
--- a/src/node/ext/channel.cc
+++ b/src/node/ext/channel.cc
@@ -41,6 +41,7 @@
#include "grpc/grpc_security.h"
#include "call.h"
#include "channel.h"
+#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "channel_credentials.h"
#include "timeval.h"
@@ -140,6 +141,7 @@ void DeallocateChannelArgs(grpc_channel_args *channel_args) {
Channel::Channel(grpc_channel *channel) : wrapped_channel(channel) {}
Channel::~Channel() {
+ gpr_log(GPR_DEBUG, "Destroying channel");
if (wrapped_channel != NULL) {
grpc_channel_destroy(wrapped_channel);
}
@@ -206,8 +208,8 @@ NAN_METHOD(Channel::New) {
} else {
const int argc = 3;
Local<Value> argv[argc] = {info[0], info[1], info[2]};
- MaybeLocal<Object> maybe_instance = constructor->GetFunction()->NewInstance(
- argc, argv);
+ MaybeLocal<Object> maybe_instance = Nan::NewInstance(
+ constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@@ -276,11 +278,11 @@ NAN_METHOD(Channel::WatchConnectivityState) {
unique_ptr<OpVec> ops(new OpVec());
grpc_channel_watch_connectivity_state(
channel->wrapped_channel, last_state, MillisecondsToTimespec(deadline),
- CompletionQueueAsyncWorker::GetQueue(),
+ GetCompletionQueue(),
new struct tag(callback,
ops.release(),
- shared_ptr<Resources>(nullptr)));
- CompletionQueueAsyncWorker::Next();
+ shared_ptr<Resources>(nullptr), NULL));
+ CompletionQueueNext();
}
} // namespace node
diff --git a/src/node/ext/completion_queue.cc b/src/node/ext/completion_queue.cc
new file mode 100644
index 0000000000..fcfa77b39c
--- /dev/null
+++ b/src/node/ext/completion_queue.cc
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <uv.h>
+#include <node.h>
+#include <v8.h>
+#include <grpc/grpc.h>
+
+#include "call.h"
+#include "completion_queue.h"
+#include "completion_queue_async_worker.h"
+
+namespace grpc {
+namespace node {
+
+using v8::Local;
+using v8::Object;
+using v8::Value;
+
+grpc_completion_queue *queue;
+uv_prepare_t prepare;
+int pending_batches;
+
+void drain_completion_queue(uv_prepare_t *handle) {
+ Nan::HandleScope scope;
+ grpc_event event;
+ (void)handle;
+ do {
+ event = grpc_completion_queue_next(
+ queue, gpr_inf_past(GPR_CLOCK_MONOTONIC), NULL);
+
+ if (event.type == GRPC_OP_COMPLETE) {
+ Nan::Callback *callback = grpc::node::GetTagCallback(event.tag);
+ if (event.success) {
+ Local<Value> argv[] = {Nan::Null(),
+ grpc::node::GetTagNodeValue(event.tag)};
+ callback->Call(2, argv);
+ } else {
+ Local<Value> argv[] = {Nan::Error(
+ "The async function encountered an error")};
+ callback->Call(1, argv);
+ }
+ grpc::node::CompleteTag(event.tag);
+ grpc::node::DestroyTag(event.tag);
+ pending_batches--;
+ if (pending_batches == 0) {
+ uv_prepare_stop(&prepare);
+ }
+ }
+ } while (event.type != GRPC_QUEUE_TIMEOUT);
+}
+
+grpc_completion_queue *GetCompletionQueue() {
+#ifdef GRPC_UV
+ return queue;
+#else
+ return CompletionQueueAsyncWorker::GetQueue();
+#endif
+}
+
+void CompletionQueueNext() {
+#ifdef GRPC_UV
+ if (pending_batches == 0) {
+ GPR_ASSERT(!uv_is_active((uv_handle_t *)&prepare));
+ uv_prepare_start(&prepare, drain_completion_queue);
+ }
+ pending_batches++;
+#else
+ CompletionQueueAsyncWorker::Next();
+#endif
+}
+
+void CompletionQueueInit(Local<Object> exports) {
+#ifdef GRPC_UV
+ queue = grpc_completion_queue_create(NULL);
+ uv_prepare_init(uv_default_loop(), &prepare);
+ pending_batches = 0;
+#else
+ CompletionQueueAsyncWorker::Init(exports);
+#endif
+}
+
+} // namespace node
+} // namespace grpc
diff --git a/src/node/ext/completion_queue.h b/src/node/ext/completion_queue.h
new file mode 100644
index 0000000000..bf280f768b
--- /dev/null
+++ b/src/node/ext/completion_queue.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <v8.h>
+
+namespace grpc {
+namespace node {
+
+grpc_completion_queue *GetCompletionQueue();
+
+void CompletionQueueNext();
+
+void CompletionQueueInit(v8::Local<v8::Object> exports);
+
+} // namespace node
+} // namespace grpc
diff --git a/src/node/ext/completion_queue_async_worker.cc b/src/node/ext/completion_queue_async_worker.cc
index 619ea41515..f5e03b277b 100644
--- a/src/node/ext/completion_queue_async_worker.cc
+++ b/src/node/ext/completion_queue_async_worker.cc
@@ -74,6 +74,7 @@ void CompletionQueueAsyncWorker::Execute() {
grpc_completion_queue *CompletionQueueAsyncWorker::GetQueue() { return queue; }
void CompletionQueueAsyncWorker::Next() {
+#ifndef GRPC_UV
Nan::HandleScope scope;
if (current_threads < max_queue_threads) {
current_threads += 1;
@@ -85,6 +86,7 @@ void CompletionQueueAsyncWorker::Next() {
GPR_ASSERT(current_threads <= max_queue_threads);
GPR_ASSERT((current_threads == max_queue_threads) ||
(waiting_next_calls == 0));
+#endif
}
void CompletionQueueAsyncWorker::Init(Local<Object> exports) {
diff --git a/src/node/ext/node_grpc.cc b/src/node/ext/node_grpc.cc
index 745b5023d5..9b9eee85b7 100644
--- a/src/node/ext/node_grpc.cc
+++ b/src/node/ext/node_grpc.cc
@@ -42,6 +42,13 @@
#include "grpc/support/log.h"
#include "grpc/support/time.h"
+// TODO(murgatroid99): Remove this when the endpoint API becomes public
+#ifdef GRPC_UV
+extern "C" {
+#include "src/core/lib/iomgr/pollset_uv.h"
+}
+#endif
+
#include "call.h"
#include "call_credentials.h"
#include "channel.h"
@@ -50,6 +57,7 @@
#include "completion_queue_async_worker.h"
#include "server_credentials.h"
#include "timeval.h"
+#include "completion_queue.h"
using v8::FunctionTemplate;
using v8::Local;
@@ -261,10 +269,10 @@ void InitLogConstants(Local<Object> exports) {
Nan::HandleScope scope;
Local<Object> log_verbosity = Nan::New<Object>();
Nan::Set(exports, Nan::New("logVerbosity").ToLocalChecked(), log_verbosity);
- Local<Value> DEBUG(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_DEBUG));
- Nan::Set(log_verbosity, Nan::New("DEBUG").ToLocalChecked(), DEBUG);
- Local<Value> INFO(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_INFO));
- Nan::Set(log_verbosity, Nan::New("INFO").ToLocalChecked(), INFO);
+ Local<Value> LOG_DEBUG(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_DEBUG));
+ Nan::Set(log_verbosity, Nan::New("DEBUG").ToLocalChecked(), LOG_DEBUG);
+ Local<Value> LOG_INFO(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_INFO));
+ Nan::Set(log_verbosity, Nan::New("INFO").ToLocalChecked(), LOG_INFO);
Local<Value> LOG_ERROR(Nan::New<Uint32, uint32_t>(GPR_LOG_SEVERITY_ERROR));
Nan::Set(log_verbosity, Nan::New("ERROR").ToLocalChecked(), LOG_ERROR);
}
@@ -428,14 +436,19 @@ void init(Local<Object> exports) {
InitWriteFlags(exports);
InitLogConstants(exports);
+#ifdef GRPC_UV
+ grpc_pollset_work_run_loop = 0;
+#endif
+
grpc::node::Call::Init(exports);
grpc::node::CallCredentials::Init(exports);
grpc::node::Channel::Init(exports);
grpc::node::ChannelCredentials::Init(exports);
grpc::node::Server::Init(exports);
- grpc::node::CompletionQueueAsyncWorker::Init(exports);
grpc::node::ServerCredentials::Init(exports);
+ grpc::node::CompletionQueueInit(exports);
+
// Attach a few utility functions directly to the module
Nan::Set(exports, Nan::New("metadataKeyIsLegal").ToLocalChecked(),
Nan::GetFunction(
diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc
index dd1b777ac8..70d5b96f39 100644
--- a/src/node/ext/server.cc
+++ b/src/node/ext/server.cc
@@ -40,6 +40,7 @@
#include <vector>
#include "call.h"
+#include "completion_queue.h"
#include "completion_queue_async_worker.h"
#include "grpc/grpc.h"
#include "grpc/grpc_security.h"
@@ -64,6 +65,7 @@ using v8::Array;
using v8::Boolean;
using v8::Date;
using v8::Exception;
+using v8::External;
using v8::Function;
using v8::FunctionTemplate;
using v8::Local;
@@ -75,6 +77,8 @@ using v8::Value;
Nan::Callback *Server::constructor;
Persistent<FunctionTemplate> Server::fun_tpl;
+static Callback *shutdown_callback;
+
class NewCallOp : public Op {
public:
NewCallOp() {
@@ -111,6 +115,9 @@ class NewCallOp : public Op {
shared_ptr<Resources> resources) {
return true;
}
+ bool IsFinalOp() {
+ return false;
+ }
grpc_call *call;
grpc_call_details details;
@@ -120,17 +127,50 @@ class NewCallOp : public Op {
std::string GetTypeString() const { return "new_call"; }
};
+class ServerShutdownOp : public Op {
+ public:
+ ServerShutdownOp(grpc_server *server): server(server) {
+ }
+
+ ~ServerShutdownOp() {
+ }
+
+ Local<Value> GetNodeValue() const {
+ return Nan::New<External>(reinterpret_cast<void *>(server));
+ }
+
+ bool ParseOp(Local<Value> value, grpc_op *out,
+ shared_ptr<Resources> resources) {
+ return true;
+ }
+ bool IsFinalOp() {
+ return false;
+ }
+
+ grpc_server *server;
+
+ protected:
+ std::string GetTypeString() const { return "shutdown"; }
+};
+
+NAN_METHOD(ServerShutdownCallback) {
+ if (!info[0]->IsNull()) {
+ return Nan::ThrowError("forceShutdown failed somehow");
+ }
+ MaybeLocal<Object> maybe_result = Nan::To<Object>(info[1]);
+ Local<Object> result = maybe_result.ToLocalChecked();
+ Local<Value> server_val = Nan::Get(
+ result, Nan::New("shutdown").ToLocalChecked()).ToLocalChecked();
+ Local<External> server_extern = server_val.As<External>();
+ grpc_server *server = reinterpret_cast<grpc_server *>(server_extern->Value());
+ grpc_server_destroy(server);
+}
+
Server::Server(grpc_server *server) : wrapped_server(server) {
- shutdown_queue = grpc_completion_queue_create(NULL);
- grpc_server_register_non_listening_completion_queue(server, shutdown_queue,
- NULL);
}
Server::~Server() {
this->ShutdownServer();
- grpc_completion_queue_shutdown(this->shutdown_queue);
- grpc_server_destroy(this->wrapped_server);
- grpc_completion_queue_destroy(this->shutdown_queue);
}
void Server::Init(Local<Object> exports) {
@@ -147,6 +187,11 @@ void Server::Init(Local<Object> exports) {
Local<Function> ctr = Nan::GetFunction(tpl).ToLocalChecked();
Nan::Set(exports, Nan::New("Server").ToLocalChecked(), ctr);
constructor = new Callback(ctr);
+
+ Local<FunctionTemplate>callback_tpl =
+ Nan::New<FunctionTemplate>(ServerShutdownCallback);
+ shutdown_callback = new Callback(
+ Nan::GetFunction(callback_tpl).ToLocalChecked());
}
bool Server::HasInstance(Local<Value> val) {
@@ -155,11 +200,19 @@ bool Server::HasInstance(Local<Value> val) {
}
void Server::ShutdownServer() {
- grpc_server_shutdown_and_notify(this->wrapped_server, this->shutdown_queue,
- NULL);
- grpc_server_cancel_all_calls(this->wrapped_server);
- grpc_completion_queue_pluck(this->shutdown_queue, NULL,
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ if (this->wrapped_server != NULL) {
+ ServerShutdownOp *op = new ServerShutdownOp(this->wrapped_server);
+ unique_ptr<OpVec> ops(new OpVec());
+ ops->push_back(unique_ptr<Op>(op));
+
+ grpc_server_shutdown_and_notify(
+ this->wrapped_server, GetCompletionQueue(),
+ new struct tag(new Callback(**shutdown_callback), ops.release(),
+ shared_ptr<Resources>(nullptr), NULL));
+ grpc_server_cancel_all_calls(this->wrapped_server);
+ CompletionQueueNext();
+ this->wrapped_server = NULL;
+ }
}
NAN_METHOD(Server::New) {
@@ -169,7 +222,7 @@ NAN_METHOD(Server::New) {
const int argc = 1;
Local<Value> argv[argc] = {info[0]};
MaybeLocal<Object> maybe_instance =
- constructor->GetFunction()->NewInstance(argc, argv);
+ Nan::NewInstance(constructor->GetFunction(), argc, argv);
if (maybe_instance.IsEmpty()) {
// There's probably a pending exception
return;
@@ -179,7 +232,7 @@ NAN_METHOD(Server::New) {
}
}
grpc_server *wrapped_server;
- grpc_completion_queue *queue = CompletionQueueAsyncWorker::GetQueue();
+ grpc_completion_queue *queue = GetCompletionQueue();
grpc_channel_args *channel_args;
if (!ParseChannelArgs(info[0], &channel_args)) {
DeallocateChannelArgs(channel_args);
@@ -205,14 +258,14 @@ NAN_METHOD(Server::RequestCall) {
ops->push_back(unique_ptr<Op>(op));
grpc_call_error error = grpc_server_request_call(
server->wrapped_server, &op->call, &op->details, &op->request_metadata,
- CompletionQueueAsyncWorker::GetQueue(),
- CompletionQueueAsyncWorker::GetQueue(),
+ GetCompletionQueue(),
+ GetCompletionQueue(),
new struct tag(new Callback(info[0].As<Function>()), ops.release(),
- shared_ptr<Resources>(nullptr)));
+ shared_ptr<Resources>(nullptr), NULL));
if (error != GRPC_CALL_OK) {
return Nan::ThrowError(nanErrorWithCode("requestCall failed", error));
}
- CompletionQueueAsyncWorker::Next();
+ CompletionQueueNext();
}
NAN_METHOD(Server::AddHttp2Port) {
@@ -259,10 +312,10 @@ NAN_METHOD(Server::TryShutdown) {
Server *server = ObjectWrap::Unwrap<Server>(info.This());
unique_ptr<OpVec> ops(new OpVec());
grpc_server_shutdown_and_notify(
- server->wrapped_server, CompletionQueueAsyncWorker::GetQueue(),
+ server->wrapped_server, GetCompletionQueue(),
new struct tag(new Nan::Callback(info[0].As<Function>()), ops.release(),
- shared_ptr<Resources>(nullptr)));
- CompletionQueueAsyncWorker::Next();
+ shared_ptr<Resources>(nullptr), NULL));
+ CompletionQueueNext();
}
NAN_METHOD(Server::ForceShutdown) {
diff --git a/src/node/ext/server.h b/src/node/ext/server.h
index ab5fc210e8..9e6a7bd1e0 100644
--- a/src/node/ext/server.h
+++ b/src/node/ext/server.h
@@ -73,7 +73,6 @@ class Server : public Nan::ObjectWrap {
static Nan::Persistent<v8::FunctionTemplate> fun_tpl;
grpc_server *wrapped_server;
- grpc_completion_queue *shutdown_queue;
};
} // namespace node
diff --git a/src/node/index.js b/src/node/index.js
index 9fb6faa5d7..a294aad8ee 100644
--- a/src/node/index.js
+++ b/src/node/index.js
@@ -219,3 +219,7 @@ exports.getClientChannel = client.getClientChannel;
* @see module:src/client.waitForClientReady
*/
exports.waitForClientReady = client.waitForClientReady;
+
+exports.closeClient = function closeClient(client_obj) {
+ client.getClientChannel(client_obj).close();
+};
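A minimal usage sketch of the new closeClient export; the proto path, package, service name, and address below are illustrative placeholders, not part of this change:

var grpc = require('grpc');
// Load a service definition; 'math.proto' and the 'Math' service are hypothetical.
var math_proto = grpc.load(__dirname + '/math.proto').math;
var math_client = new math_proto.Math('localhost:50051',
                                      grpc.credentials.createInsecure());
// ... issue RPCs with math_client ...
// Release the client's underlying channel so the process can exit cleanly.
grpc.closeClient(math_client);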
diff --git a/src/node/interop/interop_client.js b/src/node/interop/interop_client.js
index e8f2d37bd8..46ddecfb1f 100644
--- a/src/node/interop/interop_client.js
+++ b/src/node/interop/interop_client.js
@@ -375,11 +375,20 @@ function statusCodeAndMessage(client, done) {
duplex.end();
}
+// NOTE: the client param to this function is from UnimplementedService
+function unimplementedService(client, done) {
+ client.unimplementedCall({}, function(err, resp) {
+ assert(err);
+ assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED);
+ done();
+ });
+}
+
+// NOTE: the client param to this function is from TestService
function unimplementedMethod(client, done) {
client.unimplementedCall({}, function(err, resp) {
assert(err);
assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED);
- assert(!err.message);
done();
});
}
@@ -527,8 +536,10 @@ var test_cases = {
Client: testProto.TestService},
status_code_and_message: {run: statusCodeAndMessage,
Client: testProto.TestService},
- unimplemented_method: {run: unimplementedMethod,
+ unimplemented_service: {run: unimplementedService,
Client: testProto.UnimplementedService},
+ unimplemented_method: {run: unimplementedMethod,
+ Client: testProto.TestService},
compute_engine_creds: {run: computeEngineCreds,
Client: testProto.TestService,
getCreds: getApplicationCreds},
diff --git a/src/node/performance/benchmark_client_express.js b/src/node/performance/benchmark_client_express.js
new file mode 100644
index 0000000000..675eb5f288
--- /dev/null
+++ b/src/node/performance/benchmark_client_express.js
@@ -0,0 +1,291 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * Benchmark client module
+ * @module
+ */
+
+'use strict';
+
+var fs = require('fs');
+var path = require('path');
+var util = require('util');
+var EventEmitter = require('events');
+var http = require('http');
+var https = require('https');
+
+var async = require('async');
+var _ = require('lodash');
+var PoissonProcess = require('poisson-process');
+var Histogram = require('./histogram');
+
+/**
+ * Convert a time difference, as returned by process.hrtime, to a number of
+ * nanoseconds.
+ * @param {Array.<number>} time_diff The time diff, represented as
+ * [seconds, nanoseconds]
+ * @return {number} The total number of nanoseconds
+ */
+function timeDiffToNanos(time_diff) {
+ return time_diff[0] * 1e9 + time_diff[1];
+}
+
+function BenchmarkClient(server_targets, channels, histogram_params,
+ security_params) {
+ var options = {
+ method: 'PUT',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ };
+ var protocol;
+ if (security_params) {
+ var ca_path;
+ protocol = https;
+ this.request = _.bind(https.request, https);
+ if (security_params.use_test_ca) {
+ ca_path = path.join(__dirname, '../test/data/ca.pem');
+ var ca_data = fs.readFileSync(ca_path);
+ options.ca = ca_data;
+ }
+ if (security_params.server_host_override) {
+ var host_override = security_params.server_host_override;
+ options.servername = host_override;
+ }
+ } else {
+ protocol = http;
+ }
+
+ this.request = _.bind(protocol.request, protocol);
+
+ this.client_options = [];
+
+ for (var i = 0; i < channels; i++) {
+ var host_port;
+ host_port = server_targets[i % server_targets.length].split(':')
+ var new_options = _.assign({hostname: host_port[0], port: +host_port[1]}, options);
+ new_options.agent = new protocol.Agent(new_options);
+ this.client_options[i] = new_options;
+ }
+
+ this.histogram = new Histogram(histogram_params.resolution,
+ histogram_params.max_possible);
+
+ this.running = false;
+
+ this.pending_calls = 0;
+}
+
+util.inherits(BenchmarkClient, EventEmitter);
+
+function startAllClients(client_options_list, outstanding_rpcs_per_channel,
+ makeCall, emitter) {
+ _.each(client_options_list, function(client_options) {
+ _.times(outstanding_rpcs_per_channel, function() {
+ makeCall(client_options);
+ });
+ });
+}
+
+BenchmarkClient.prototype.startClosedLoop = function(
+ outstanding_rpcs_per_channel, rpc_type, req_size, resp_size, generic) {
+ var self = this;
+
+ var options = {};
+
+ self.running = true;
+
+ if (rpc_type == 'UNARY') {
+ options.path = '/serviceProto.BenchmarkService.service/unaryCall';
+ } else {
+ self.emit('error', new Error('Unsupported rpc_type: ' + rpc_type));
+ }
+
+ if (generic) {
+ self.emit('error', new Error('Generic client not supported'));
+ }
+
+ self.last_wall_time = process.hrtime();
+
+ var argument = {
+ response_size: resp_size,
+ payload: {
+ body: '0'.repeat(req_size)
+ }
+ };
+
+ function makeCall(client_options) {
+ if (self.running) {
+ self.pending_calls++;
+ var start_time = process.hrtime();
+ var req = self.request(client_options, function(res) {
+ var res_data = '';
+ res.on('data', function(data) {
+ res_data += data;
+ });
+ res.on('end', function() {
+ JSON.parse(res_data);
+ var time_diff = process.hrtime(start_time);
+ self.histogram.add(timeDiffToNanos(time_diff));
+ makeCall(client_options);
+ self.pending_calls--;
+ if ((!self.running) && self.pending_calls == 0) {
+ self.emit('finished');
+ }
+ });
+ });
+ req.write(JSON.stringify(argument));
+ req.end();
+ req.on('error', function(error) {
+ self.emit('error', new Error('Client error: ' + error.message));
+ self.running = false;
+ });
+ }
+ }
+
+ startAllClients(_.map(self.client_options, _.partial(_.assign, options)),
+ outstanding_rpcs_per_channel, makeCall, self);
+};
+
+BenchmarkClient.prototype.startPoisson = function(
+ outstanding_rpcs_per_channel, rpc_type, req_size, resp_size, offered_load,
+ generic) {
+ var self = this;
+
+ var options = {};
+
+ self.running = true;
+
+ if (rpc_type == 'UNARY') {
+ options.path = '/serviceProto.BenchmarkService.service/unaryCall';
+ } else {
+ self.emit('error', new Error('Unsupported rpc_type: ' + rpc_type));
+ }
+
+ if (generic) {
+ self.emit('error', new Error('Generic client not supported'));
+ }
+
+ self.last_wall_time = process.hrtime();
+
+ var argument = {
+ response_size: resp_size,
+ payload: {
+ body: '0'.repeat(req_size)
+ }
+ };
+
+ function makeCall(client_options, poisson) {
+ if (self.running) {
+ self.pending_calls++;
+ var start_time = process.hrtime();
+ var req = self.request(client_options, function(res) {
+ var res_data = '';
+ res.on('data', function(data) {
+ res_data += data;
+ });
+ res.on('end', function() {
+ JSON.parse(res_data);
+ var time_diff = process.hrtime(start_time);
+ self.histogram.add(timeDiffToNanos(time_diff));
+ self.pending_calls--;
+ if ((!self.running) && self.pending_calls == 0) {
+ self.emit('finished');
+ }
+ });
+ });
+ req.write(JSON.stringify(argument));
+ req.end();
+ req.on('error', function(error) {
+ self.emit('error', new Error('Client error: ' + error.message));
+ self.running = false;
+ });
+ } else {
+ poisson.stop();
+ }
+ }
+
+ var averageIntervalMs = (1 / offered_load) * 1000;
+
+ startAllClients(_.map(self.client_options, _.partial(_.assign, options)),
+ outstanding_rpcs_per_channel, function(opts){
+ var p = PoissonProcess.create(averageIntervalMs, function() {
+ makeCall(opts, p);
+ });
+ p.start();
+ }, self);
+};
+
+/**
+ * Return current statistics for the client. If reset is set, restart
+ * statistic collection.
+ * @param {boolean} reset Indicates that statistics should be reset
+ * @return {object} Client statistics
+ */
+BenchmarkClient.prototype.mark = function(reset) {
+ var wall_time_diff = process.hrtime(this.last_wall_time);
+ var histogram = this.histogram;
+ if (reset) {
+ this.last_wall_time = process.hrtime();
+ this.histogram = new Histogram(histogram.resolution,
+ histogram.max_possible);
+ }
+
+ return {
+ latencies: {
+ bucket: histogram.getContents(),
+ min_seen: histogram.minimum(),
+ max_seen: histogram.maximum(),
+ sum: histogram.getSum(),
+ sum_of_squares: histogram.sumOfSquares(),
+ count: histogram.getCount()
+ },
+ time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
+ // Not sure how to measure these values
+ time_user: 0,
+ time_system: 0
+ };
+};
+
+/**
+ * Stop the clients.
+ * @param {function} callback Called when the clients have finished shutting
+ * down
+ */
+BenchmarkClient.prototype.stop = function(callback) {
+ this.running = false;
+ this.on('finished', callback);
+};
+
+module.exports = BenchmarkClient;
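A rough usage sketch of the Express-based benchmark client, mirroring how the gRPC BenchmarkClient is driven from worker_service_impl.js; the target address, histogram parameters, and durations are made up for illustration:

var BenchmarkClient = require('./benchmark_client_express');
var client = new BenchmarkClient(['localhost:8080'], 1,
                                 {resolution: 0.01, max_possible: 60e9},
                                 null /* no TLS */);
client.on('error', function(err) { console.error(err); });
// One outstanding unary call per channel, 100-byte requests and responses.
client.startClosedLoop(1, 'UNARY', 100, 100, false);
setTimeout(function() {
  console.log(client.mark());  // snapshot of the latency histogram
  client.stop(function() { console.log('client stopped'); });
}, 10000);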
diff --git a/src/node/performance/benchmark_server.js b/src/node/performance/benchmark_server.js
index 70cee9979b..6abde2e17a 100644
--- a/src/node/performance/benchmark_server.js
+++ b/src/node/performance/benchmark_server.js
@@ -40,6 +40,8 @@
var fs = require('fs');
var path = require('path');
+var EventEmitter = require('events');
+var util = require('util');
var genericService = require('./generic_service');
@@ -138,12 +140,15 @@ function BenchmarkServer(host, port, tls, generic, response_size) {
this.server = server;
}
+util.inherits(BenchmarkServer, EventEmitter);
+
/**
* Start the benchmark server.
*/
BenchmarkServer.prototype.start = function() {
this.server.start();
this.last_wall_time = process.hrtime();
+ this.emit('started');
};
/**
diff --git a/src/node/performance/benchmark_server_express.js b/src/node/performance/benchmark_server_express.js
new file mode 100644
index 0000000000..065bcf660b
--- /dev/null
+++ b/src/node/performance/benchmark_server_express.js
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * Benchmark server module
+ * @module
+ */
+
+'use strict';
+
+var fs = require('fs');
+var path = require('path');
+var http = require('http');
+var https = require('https');
+var EventEmitter = require('events');
+var util = require('util');
+
+var express = require('express');
+var bodyParser = require('body-parser')
+
+function unaryCall(req, res) {
+ var reqObj = req.body;
+ var payload = {body: '0'.repeat(reqObj.response_size)};
+ res.json(payload);
+}
+
+function BenchmarkServer(host, port, tls, generic, response_size) {
+ var app = express();
+ app.use(bodyParser.json())
+ app.put('/serviceProto.BenchmarkService.service/unaryCall', unaryCall);
+ this.input_host = host;
+ this.input_port = port;
+ if (tls) {
+ var credentials = {};
+ var key_path = path.join(__dirname, '../test/data/server1.key');
+ var pem_path = path.join(__dirname, '../test/data/server1.pem');
+
+ var key_data = fs.readFileSync(key_path);
+ var pem_data = fs.readFileSync(pem_path);
+ credentials['key'] = key_data;
+ credentials['cert'] = pem_data;
+ this.server = https.createServer(credentials, app);
+ } else {
+ this.server = http.createServer(app);
+ }
+}
+
+util.inherits(BenchmarkServer, EventEmitter);
+
+BenchmarkServer.prototype.start = function() {
+ var self = this;
+ this.server.listen(this.input_port, this.input_hostname, function() {
+ self.last_wall_time = process.hrtime();
+ self.emit('started');
+ });
+};
+
+BenchmarkServer.prototype.getPort = function() {
+ return this.server.address().port;
+};
+
+BenchmarkServer.prototype.mark = function(reset) {
+ var wall_time_diff = process.hrtime(this.last_wall_time);
+ if (reset) {
+ this.last_wall_time = process.hrtime();
+ }
+ return {
+ time_elapsed: wall_time_diff[0] + wall_time_diff[1] / 1e9,
+ // Not sure how to measure these values
+ time_user: 0,
+ time_system: 0
+ };
+};
+
+BenchmarkServer.prototype.stop = function(callback) {
+ this.server.close(callback);
+};
+
+module.exports = BenchmarkServer;
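A minimal sketch of standing up the Express-based benchmark server on its own; the port is arbitrary, and the generic/response_size constructor arguments are omitted since this implementation does not use them:

var BenchmarkServer = require('./benchmark_server_express');
var server = new BenchmarkServer('[::]', 8080, false /* no TLS */);
server.on('started', function() {
  console.log('Express benchmark server listening on port ' +
              server.getPort());
  // Report elapsed wall time, then shut down.
  console.log(server.mark());
  server.stop(function() { console.log('server stopped'); });
});
server.start();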
diff --git a/src/node/performance/worker.js b/src/node/performance/worker.js
index 7ef9b84fe7..030bf7d7ba 100644
--- a/src/node/performance/worker.js
+++ b/src/node/performance/worker.js
@@ -34,18 +34,18 @@
'use strict';
var console = require('console');
-var worker_service_impl = require('./worker_service_impl');
+var WorkerServiceImpl = require('./worker_service_impl');
var grpc = require('../../../');
var serviceProto = grpc.load({
root: __dirname + '/../../..',
file: 'src/proto/grpc/testing/services.proto'}).grpc.testing;
-function runServer(port) {
+function runServer(port, benchmark_impl) {
var server_creds = grpc.ServerCredentials.createInsecure();
var server = new grpc.Server();
server.addProtoService(serviceProto.WorkerService.service,
- worker_service_impl);
+ new WorkerServiceImpl(benchmark_impl, server));
var address = '0.0.0.0:' + port;
server.bind(address, server_creds);
server.start();
@@ -57,9 +57,9 @@ if (require.main === module) {
Error.stackTraceLimit = Infinity;
var parseArgs = require('minimist');
var argv = parseArgs(process.argv, {
- string: ['driver_port']
+ string: ['driver_port', 'benchmark_impl']
});
- runServer(argv.driver_port);
+ runServer(argv.driver_port, argv.benchmark_impl);
}
exports.runServer = runServer;
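With the new benchmark_impl argument, the worker can be pointed at either backend. A hedged programmatic sketch (port numbers are arbitrary):

var worker = require('./worker');
// Drive the gRPC-based benchmark client/server (the 'grpc' case in
// worker_service_impl.js):
worker.runServer(7070, 'grpc');
// Or exercise the express/HTTP implementations added in this change:
// worker.runServer(7071, 'express');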
diff --git a/src/node/performance/worker_service_impl.js b/src/node/performance/worker_service_impl.js
index 4b5cb8f9c2..3f317f6429 100644
--- a/src/node/performance/worker_service_impl.js
+++ b/src/node/performance/worker_service_impl.js
@@ -38,121 +38,141 @@ var console = require('console');
var BenchmarkClient = require('./benchmark_client');
var BenchmarkServer = require('./benchmark_server');
-exports.quitWorker = function quitWorker(call, callback) {
- callback(null, {});
- process.exit(0);
-}
+module.exports = function WorkerServiceImpl(benchmark_impl, server) {
+ var BenchmarkClient;
+ var BenchmarkServer;
+ switch (benchmark_impl) {
+ case 'grpc':
+ BenchmarkClient = require('./benchmark_client');
+ BenchmarkServer = require('./benchmark_server');
+ break;
+ case 'express':
+ BenchmarkClient = require('./benchmark_client_express');
+ BenchmarkServer = require('./benchmark_server_express');
+ break;
+ default:
+ throw new Error('Unrecognized benchmark impl: ' + benchmark_impl);
+ }
-exports.runClient = function runClient(call) {
- var client;
- call.on('data', function(request) {
- var stats;
- switch (request.argtype) {
- case 'setup':
- var setup = request.setup;
- console.log('ClientConfig %j', setup);
- client = new BenchmarkClient(setup.server_targets,
- setup.client_channels,
- setup.histogram_params,
- setup.security_params);
- client.on('error', function(error) {
- call.emit('error', error);
- });
- var req_size, resp_size, generic;
- switch (setup.payload_config.payload) {
- case 'bytebuf_params':
- req_size = setup.payload_config.bytebuf_params.req_size;
- resp_size = setup.payload_config.bytebuf_params.resp_size;
- generic = true;
+ this.quitWorker = function quitWorker(call, callback) {
+ server.tryShutdown(function() {
+ callback(null, {});
+ });
+ };
+
+ this.runClient = function runClient(call) {
+ var client;
+ call.on('data', function(request) {
+ var stats;
+ switch (request.argtype) {
+ case 'setup':
+ var setup = request.setup;
+ console.log('ClientConfig %j', setup);
+ client = new BenchmarkClient(setup.server_targets,
+ setup.client_channels,
+ setup.histogram_params,
+ setup.security_params);
+ client.on('error', function(error) {
+ call.emit('error', error);
+ });
+ var req_size, resp_size, generic;
+ switch (setup.payload_config.payload) {
+ case 'bytebuf_params':
+ req_size = setup.payload_config.bytebuf_params.req_size;
+ resp_size = setup.payload_config.bytebuf_params.resp_size;
+ generic = true;
+ break;
+ case 'simple_params':
+ req_size = setup.payload_config.simple_params.req_size;
+ resp_size = setup.payload_config.simple_params.resp_size;
+ generic = false;
+ break;
+ default:
+ call.emit('error', new Error('Unsupported PayloadConfig type' +
+ setup.payload_config.payload));
+ }
+ switch (setup.load_params.load) {
+ case 'closed_loop':
+ client.startClosedLoop(setup.outstanding_rpcs_per_channel,
+ setup.rpc_type, req_size, resp_size, generic);
+ break;
+ case 'poisson':
+ client.startPoisson(setup.outstanding_rpcs_per_channel,
+ setup.rpc_type, req_size, resp_size,
+ setup.load_params.poisson.offered_load, generic);
+ break;
+ default:
+ call.emit('error', new Error('Unsupported LoadParams type' +
+ setup.load_params.load));
+ }
+ stats = client.mark();
+ call.write({
+ stats: stats
+ });
break;
- case 'simple_params':
- req_size = setup.payload_config.simple_params.req_size;
- resp_size = setup.payload_config.simple_params.resp_size;
- generic = false;
+ case 'mark':
+ if (client) {
+ stats = client.mark(request.mark.reset);
+ call.write({
+ stats: stats
+ });
+ } else {
+ call.emit('error', new Error('Got Mark before ClientConfig'));
+ }
break;
default:
- call.emit('error', new Error('Unsupported PayloadConfig type' +
- setup.payload_config.payload));
+ throw new Error('Nonexistent client argtype option: ' + request.argtype);
}
- switch (setup.load_params.load) {
- case 'closed_loop':
- client.startClosedLoop(setup.outstanding_rpcs_per_channel,
- setup.rpc_type, req_size, resp_size, generic);
+ });
+ call.on('end', function() {
+ client.stop(function() {
+ call.end();
+ });
+ });
+ };
+
+ this.runServer = function runServer(call) {
+ var server;
+ call.on('data', function(request) {
+ var stats;
+ switch (request.argtype) {
+ case 'setup':
+ console.log('ServerConfig %j', request.setup);
+ server = new BenchmarkServer('[::]', request.setup.port,
+ request.setup.security_params);
+ server.on('started', function() {
+ stats = server.mark();
+ call.write({
+ stats: stats,
+ port: server.getPort()
+ });
+ });
+ server.start();
break;
- case 'poisson':
- client.startPoisson(setup.outstanding_rpcs_per_channel,
- setup.rpc_type, req_size, resp_size,
- setup.load_params.poisson.offered_load, generic);
+ case 'mark':
+ if (server) {
+ stats = server.mark(request.mark.reset);
+ call.write({
+ stats: stats,
+ port: server.getPort(),
+ cores: 1
+ });
+ } else {
+ call.emit('error', new Error('Got Mark before ServerConfig'));
+ }
break;
default:
- call.emit('error', new Error('Unsupported LoadParams type' +
- setup.load_params.load));
+ throw new Error('Nonexistent server argtype option');
}
- stats = client.mark();
- call.write({
- stats: stats
- });
- break;
- case 'mark':
- if (client) {
- stats = client.mark(request.mark.reset);
- call.write({
- stats: stats
- });
- } else {
- call.emit('error', new Error('Got Mark before ClientConfig'));
- }
- break;
- default:
- throw new Error('Nonexistent client argtype option: ' + request.argtype);
- }
- });
- call.on('end', function() {
- client.stop(function() {
- call.end();
});
- });
-};
-
-exports.runServer = function runServer(call) {
- var server;
- call.on('data', function(request) {
- var stats;
- switch (request.argtype) {
- case 'setup':
- console.log('ServerConfig %j', request.setup);
- server = new BenchmarkServer('[::]', request.setup.port,
- request.setup.security_params);
- server.start();
- stats = server.mark();
- call.write({
- stats: stats,
- port: server.getPort()
+ call.on('end', function() {
+ server.stop(function() {
+ call.end();
});
- break;
- case 'mark':
- if (server) {
- stats = server.mark(request.mark.reset);
- call.write({
- stats: stats,
- port: server.getPort(),
- cores: 1
- });
- } else {
- call.emit('error', new Error('Got Mark before ServerConfig'));
- }
- break;
- default:
- throw new Error('Nonexistent server argtype option');
- }
- });
- call.on('end', function() {
- server.stop(function() {
- call.end();
});
- });
-};
+ };
-exports.coreCount = function coreCount(call, callback) {
- callback(null, {cores: os.cpus().length});
+ this.coreCount = function coreCount(call, callback) {
+ callback(null, {cores: os.cpus().length});
+ };
};
diff --git a/src/node/src/client.js b/src/node/src/client.js
index f75f951eb8..9c1562e8b8 100644
--- a/src/node/src/client.js
+++ b/src/node/src/client.js
@@ -382,6 +382,7 @@ function makeUnaryRequestFunction(method, serialize, deserialize) {
if (args.options) {
message.grpcWriteFlags = args.options.flags;
}
+
client_batch[grpc.opType.SEND_INITIAL_METADATA] =
metadata._getCoreRepresentation();
client_batch[grpc.opType.SEND_MESSAGE] = message;
diff --git a/src/node/src/common.js b/src/node/src/common.js
index 22159dd39f..c6c6d597a8 100644
--- a/src/node/src/common.js
+++ b/src/node/src/common.js
@@ -141,7 +141,7 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service,
binaryAsBase64 = options.binaryAsBase64;
longsAsStrings = options.longsAsStrings;
}
- return _.object(_.map(service.children, function(method) {
+ return _.fromPairs(_.map(service.children, function(method) {
return [_.camelCase(method.name), {
path: prefix + method.name,
requestStream: method.requestStream,
diff --git a/src/node/src/grpc_extension.js b/src/node/src/grpc_extension.js
index 6a8fe2c03c..63a281ddbc 100644
--- a/src/node/src/grpc_extension.js
+++ b/src/node/src/grpc_extension.js
@@ -31,7 +31,7 @@
*
*/
-var binary = require('node-pre-gyp');
+var binary = require('node-pre-gyp/lib/pre-binding');
var path = require('path');
var binding_path =
binary.find(path.resolve(path.join(__dirname, '../../../package.json')));
diff --git a/src/node/test/async_test.js b/src/node/test/async_test.js
index c46e745116..7b467e5475 100644
--- a/src/node/test/async_test.js
+++ b/src/node/test/async_test.js
@@ -61,6 +61,7 @@ describe('Async functionality', function() {
done();
});
after(function() {
+ grpc.closeClient(math_client);
server.forceShutdown();
});
it('should not hang', function(done) {
diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js
index f008a87585..58f8842c0d 100644
--- a/src/node/test/interop_sanity_test.js
+++ b/src/node/test/interop_sanity_test.js
@@ -98,6 +98,10 @@ describe('Interop tests', function() {
interop_client.runTest(port, name_override, 'status_code_and_message',
true, true, done);
});
+ it('should pass unimplemented_service', function(done) {
+ interop_client.runTest(port, name_override, 'unimplemented_service',
+ true, true, done);
+ });
it('should pass unimplemented_method', function(done) {
interop_client.runTest(port, name_override, 'unimplemented_method',
true, true, done);
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
index 0c3c3216ab..6e594fd3ed 100644
--- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -36,7 +36,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler-gRPCPlugin'
- v = '1.0.0'
+ v = '1.0.1'
s.version = v
s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
s.description = <<-DESC
@@ -95,7 +95,7 @@ Pod::Spec.new do |s|
s.preserve_paths = plugin
# Restrict the protoc version to the one supported by this plugin.
- s.dependency '!ProtoCompiler', '3.0.0'
+ s.dependency '!ProtoCompiler', '3.0.2'
# For the Protobuf dependency not to complain:
s.ios.deployment_target = '7.1'
s.osx.deployment_target = '10.9'
diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec
index 5018dedc06..b55f6c93c6 100644
--- a/src/objective-c/!ProtoCompiler.podspec
+++ b/src/objective-c/!ProtoCompiler.podspec
@@ -36,7 +36,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler'
- v = '3.0.0'
+ v = '3.0.2'
s.version = v
s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
s.description = <<-DESC
diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec
index e14f39b898..47b5b1a2e7 100644
--- a/src/objective-c/BoringSSL.podspec
+++ b/src/objective-c/BoringSSL.podspec
@@ -31,7 +31,7 @@
Pod::Spec.new do |s|
s.name = 'BoringSSL'
- version = '6.0'
+ version = '7.0'
s.version = version
s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
# Adapted from the homepage:
@@ -70,7 +70,7 @@ Pod::Spec.new do |s|
s.source = {
:git => 'https://boringssl.googlesource.com/boringssl',
:tag => "version_for_cocoapods_#{version}",
- # :commit => '4ac2dc4c0d48ca45da4f66c40e60d6b425fa94a3',
+ # :commit => '4fec04b48406111cb88fdd8d196253adc54f7a31',
}
name = 'openssl'
@@ -388,42 +388,42 @@ Pod::Spec.new do |s|
0x28340c19,
0x283480ac,
0x283500ea,
- 0x2c322843,
- 0x2c32a851,
- 0x2c332863,
- 0x2c33a875,
- 0x2c342889,
- 0x2c34a89b,
- 0x2c3528b6,
- 0x2c35a8c8,
- 0x2c3628db,
+ 0x2c322910,
+ 0x2c32a91e,
+ 0x2c332930,
+ 0x2c33a942,
+ 0x2c342956,
+ 0x2c34a968,
+ 0x2c352983,
+ 0x2c35a995,
+ 0x2c3629a8,
0x2c36832d,
- 0x2c3728e8,
- 0x2c37a8fa,
- 0x2c38290d,
- 0x2c38a924,
- 0x2c392932,
- 0x2c39a942,
- 0x2c3a2954,
- 0x2c3aa968,
- 0x2c3b2979,
- 0x2c3ba998,
- 0x2c3c29ac,
- 0x2c3ca9c2,
- 0x2c3d29db,
- 0x2c3da9f8,
- 0x2c3e2a09,
- 0x2c3eaa17,
- 0x2c3f2a2f,
- 0x2c3faa47,
- 0x2c402a54,
+ 0x2c3729b5,
+ 0x2c37a9c7,
+ 0x2c3829da,
+ 0x2c38a9f1,
+ 0x2c3929ff,
+ 0x2c39aa0f,
+ 0x2c3a2a21,
+ 0x2c3aaa35,
+ 0x2c3b2a46,
+ 0x2c3baa65,
+ 0x2c3c2a79,
+ 0x2c3caa8f,
+ 0x2c3d2aa8,
+ 0x2c3daac5,
+ 0x2c3e2ad6,
+ 0x2c3eaae4,
+ 0x2c3f2afc,
+ 0x2c3fab14,
+ 0x2c402b21,
0x2c4090e7,
- 0x2c412a65,
- 0x2c41aa78,
+ 0x2c412b32,
+ 0x2c41ab45,
0x2c4210c0,
- 0x2c42aa89,
+ 0x2c42ab56,
0x2c430720,
- 0x2c43a98a,
+ 0x2c43aa57,
0x30320000,
0x30328015,
0x3033001f,
@@ -576,174 +576,183 @@ Pod::Spec.new do |s|
0x403b9861,
0x403c0064,
0x403c8083,
- 0x403d1890,
- 0x403d98a6,
- 0x403e18b5,
- 0x403e98c8,
- 0x403f18e2,
- 0x403f98f0,
- 0x40401905,
- 0x40409919,
- 0x40411936,
- 0x40419951,
- 0x4042196a,
- 0x4042997d,
- 0x40431991,
- 0x404399a9,
- 0x404419c0,
+ 0x403d18aa,
+ 0x403d98c0,
+ 0x403e18cf,
+ 0x403e98e2,
+ 0x403f18fc,
+ 0x403f990a,
+ 0x4040191f,
+ 0x40409933,
+ 0x40411950,
+ 0x4041996b,
+ 0x40421984,
+ 0x40429997,
+ 0x404319ab,
+ 0x404399c3,
+ 0x404419da,
0x404480ac,
- 0x404519d5,
- 0x404599e7,
- 0x40461a0b,
- 0x40469a2b,
- 0x40471a39,
- 0x40479a60,
- 0x40481a89,
- 0x40489aa2,
- 0x40491ab9,
- 0x40499ad3,
- 0x404a1aea,
- 0x404a9b08,
- 0x404b1b20,
- 0x404b9b37,
- 0x404c1b4d,
- 0x404c9b5f,
- 0x404d1b80,
- 0x404d9ba2,
- 0x404e1bb6,
- 0x404e9bc3,
- 0x404f1bf0,
- 0x404f9c19,
- 0x40501c43,
- 0x40509c57,
- 0x40511c72,
- 0x40519c82,
- 0x40521c99,
- 0x40529cbd,
- 0x40531cd5,
- 0x40539ce8,
- 0x40541cfd,
- 0x40549d20,
- 0x40551d2e,
- 0x40559d4b,
- 0x40561d58,
- 0x40569d71,
- 0x40571d89,
- 0x40579d9c,
- 0x40581db1,
- 0x40589dc3,
- 0x40591df2,
- 0x40599e0b,
- 0x405a1e1f,
- 0x405a9e2f,
- 0x405b1e47,
- 0x405b9e58,
- 0x405c1e6b,
- 0x405c9e7c,
- 0x405d1e89,
- 0x405d9ea0,
- 0x405e1ec0,
+ 0x404519ef,
+ 0x40459a01,
+ 0x40461a25,
+ 0x40469a45,
+ 0x40471a53,
+ 0x40479a7a,
+ 0x40481ab7,
+ 0x40489ad0,
+ 0x40491ae7,
+ 0x40499b01,
+ 0x404a1b18,
+ 0x404a9b36,
+ 0x404b1b4e,
+ 0x404b9b65,
+ 0x404c1b7b,
+ 0x404c9b8d,
+ 0x404d1bae,
+ 0x404d9bd0,
+ 0x404e1be4,
+ 0x404e9bf1,
+ 0x404f1c1e,
+ 0x404f9c47,
+ 0x40501c71,
+ 0x40509c85,
+ 0x40511ca0,
+ 0x40519cb0,
+ 0x40521cc7,
+ 0x40529ceb,
+ 0x40531d03,
+ 0x40539d16,
+ 0x40541d2b,
+ 0x40549d4e,
+ 0x40551d5c,
+ 0x40559d79,
+ 0x40561d86,
+ 0x40569d9f,
+ 0x40571db7,
+ 0x40579dca,
+ 0x40581ddf,
+ 0x40589e06,
+ 0x40591e35,
+ 0x40599e62,
+ 0x405a1e76,
+ 0x405a9e86,
+ 0x405b1e9e,
+ 0x405b9eaf,
+ 0x405c1ec2,
+ 0x405c9ee3,
+ 0x405d1ef0,
+ 0x405d9f07,
+ 0x405e1f27,
0x405e8a95,
- 0x405f1ee1,
- 0x405f9eee,
- 0x40601efc,
- 0x40609f1e,
- 0x40611f46,
- 0x40619f5b,
- 0x40621f72,
- 0x40629f83,
- 0x40631f94,
- 0x40639fa9,
- 0x40641fc0,
- 0x40649fd1,
- 0x40651fec,
- 0x4065a003,
- 0x4066201b,
- 0x4066a045,
- 0x40672070,
- 0x4067a091,
- 0x406820a4,
- 0x4068a0c5,
- 0x406920f7,
- 0x4069a125,
- 0x406a2146,
- 0x406aa166,
- 0x406b22ee,
- 0x406ba311,
- 0x406c2327,
- 0x406ca553,
- 0x406d2582,
- 0x406da5aa,
- 0x406e25c3,
- 0x406ea5db,
- 0x406f25fa,
- 0x406fa60f,
- 0x40702622,
- 0x4070a63f,
+ 0x405f1f48,
+ 0x405f9f55,
+ 0x40601f63,
+ 0x40609f85,
+ 0x40611fad,
+ 0x40619fc2,
+ 0x40621fd9,
+ 0x40629fea,
+ 0x40631ffb,
+ 0x4063a010,
+ 0x40642027,
+ 0x4064a053,
+ 0x4065206e,
+ 0x4065a085,
+ 0x4066209d,
+ 0x4066a0c7,
+ 0x406720f2,
+ 0x4067a113,
+ 0x40682126,
+ 0x4068a147,
+ 0x40692179,
+ 0x4069a1a7,
+ 0x406a21c8,
+ 0x406aa1e8,
+ 0x406b2370,
+ 0x406ba393,
+ 0x406c23a9,
+ 0x406ca60b,
+ 0x406d263a,
+ 0x406da662,
+ 0x406e2690,
+ 0x406ea6a8,
+ 0x406f26c7,
+ 0x406fa6dc,
+ 0x407026ef,
+ 0x4070a70c,
0x40710800,
- 0x4071a651,
- 0x40722664,
- 0x4072a67d,
- 0x40732695,
+ 0x4071a71e,
+ 0x40722731,
+ 0x4072a74a,
+ 0x40732762,
0x4073936d,
- 0x407426a9,
- 0x4074a6c3,
- 0x407526d4,
- 0x4075a6e8,
- 0x407626f6,
+ 0x40742776,
+ 0x4074a790,
+ 0x407527a1,
+ 0x4075a7b5,
+ 0x407627c3,
0x407691aa,
- 0x4077271b,
- 0x4077a73d,
- 0x40782758,
- 0x4078a791,
- 0x407927a8,
- 0x4079a7be,
- 0x407a27ca,
- 0x407aa7dd,
- 0x407b27f2,
- 0x407ba804,
- 0x407c2819,
- 0x407ca822,
- 0x407d20e0,
- 0x407d9c29,
- 0x407e276d,
- 0x407e9dd3,
- 0x407f1a4d,
- 0x407f986d,
- 0x40801c00,
- 0x40809a75,
- 0x40811cab,
- 0x40819bda,
- 0x41f42219,
- 0x41f922ab,
- 0x41fe219e,
- 0x41fea37a,
- 0x41ff246b,
- 0x42032232,
- 0x42082254,
- 0x4208a290,
- 0x42092182,
- 0x4209a2ca,
- 0x420a21d9,
- 0x420aa1b9,
- 0x420b21f9,
- 0x420ba272,
- 0x420c2487,
- 0x420ca347,
- 0x420d2361,
- 0x420da398,
- 0x421223b2,
- 0x4217244e,
- 0x4217a3f4,
- 0x421c2416,
- 0x421f23d1,
- 0x4221249e,
- 0x42262431,
- 0x422b2537,
- 0x422ba500,
- 0x422c251f,
- 0x422ca4da,
- 0x422d24b9,
+ 0x407727e8,
+ 0x4077a80a,
+ 0x40782825,
+ 0x4078a85e,
+ 0x40792875,
+ 0x4079a88b,
+ 0x407a2897,
+ 0x407aa8aa,
+ 0x407b28bf,
+ 0x407ba8d1,
+ 0x407c28e6,
+ 0x407ca8ef,
+ 0x407d2162,
+ 0x407d9c57,
+ 0x407e283a,
+ 0x407e9e16,
+ 0x407f1a67,
+ 0x407f9887,
+ 0x40801c2e,
+ 0x40809a8f,
+ 0x40811cd9,
+ 0x40819c08,
+ 0x4082267b,
+ 0x4082986d,
+ 0x40831df1,
+ 0x4083a038,
+ 0x40841aa3,
+ 0x40849e4e,
+ 0x40851ed3,
+ 0x41f4229b,
+ 0x41f9232d,
+ 0x41fe2220,
+ 0x41fea3fc,
+ 0x41ff24ed,
+ 0x420322b4,
+ 0x420822d6,
+ 0x4208a312,
+ 0x42092204,
+ 0x4209a34c,
+ 0x420a225b,
+ 0x420aa23b,
+ 0x420b227b,
+ 0x420ba2f4,
+ 0x420c2509,
+ 0x420ca3c9,
+ 0x420d23e3,
+ 0x420da41a,
+ 0x42122434,
+ 0x421724d0,
+ 0x4217a476,
+ 0x421c2498,
+ 0x421f2453,
+ 0x42212520,
+ 0x422624b3,
+ 0x422b25ef,
+ 0x422ba59d,
+ 0x422c25d7,
+ 0x422ca55c,
+ 0x422d253b,
+ 0x422da5bc,
+ 0x422e2582,
0x4432072b,
0x4432873a,
0x44330746,
@@ -786,69 +795,69 @@ Pod::Spec.new do |s|
0x4c3d136d,
0x4c3d937c,
0x4c3e1389,
- 0x50322a9b,
- 0x5032aaaa,
- 0x50332ab5,
- 0x5033aac5,
- 0x50342ade,
- 0x5034aaf8,
- 0x50352b06,
- 0x5035ab1c,
- 0x50362b2e,
- 0x5036ab44,
- 0x50372b5d,
- 0x5037ab70,
- 0x50382b88,
- 0x5038ab99,
- 0x50392bae,
- 0x5039abc2,
- 0x503a2be2,
- 0x503aabf8,
- 0x503b2c10,
- 0x503bac22,
- 0x503c2c3e,
- 0x503cac55,
- 0x503d2c6e,
- 0x503dac84,
- 0x503e2c91,
- 0x503eaca7,
- 0x503f2cb9,
+ 0x50322b68,
+ 0x5032ab77,
+ 0x50332b82,
+ 0x5033ab92,
+ 0x50342bab,
+ 0x5034abc5,
+ 0x50352bd3,
+ 0x5035abe9,
+ 0x50362bfb,
+ 0x5036ac11,
+ 0x50372c2a,
+ 0x5037ac3d,
+ 0x50382c55,
+ 0x5038ac66,
+ 0x50392c7b,
+ 0x5039ac8f,
+ 0x503a2caf,
+ 0x503aacc5,
+ 0x503b2cdd,
+ 0x503bacef,
+ 0x503c2d0b,
+ 0x503cad22,
+ 0x503d2d3b,
+ 0x503dad51,
+ 0x503e2d5e,
+ 0x503ead74,
+ 0x503f2d86,
0x503f8382,
- 0x50402ccc,
- 0x5040acdc,
- 0x50412cf6,
- 0x5041ad05,
- 0x50422d1f,
- 0x5042ad3c,
- 0x50432d4c,
- 0x5043ad5c,
- 0x50442d6b,
+ 0x50402d99,
+ 0x5040ada9,
+ 0x50412dc3,
+ 0x5041add2,
+ 0x50422dec,
+ 0x5042ae09,
+ 0x50432e19,
+ 0x5043ae29,
+ 0x50442e38,
0x5044843f,
- 0x50452d7f,
- 0x5045ad9d,
- 0x50462db0,
- 0x5046adc6,
- 0x50472dd8,
- 0x5047aded,
- 0x50482e13,
- 0x5048ae21,
- 0x50492e34,
- 0x5049ae49,
- 0x504a2e5f,
- 0x504aae6f,
- 0x504b2e8f,
- 0x504baea2,
- 0x504c2ec5,
- 0x504caef3,
- 0x504d2f05,
- 0x504daf22,
- 0x504e2f3d,
- 0x504eaf59,
- 0x504f2f6b,
- 0x504faf82,
- 0x50502f91,
+ 0x50452e4c,
+ 0x5045ae6a,
+ 0x50462e7d,
+ 0x5046ae93,
+ 0x50472ea5,
+ 0x5047aeba,
+ 0x50482ee0,
+ 0x5048aeee,
+ 0x50492f01,
+ 0x5049af16,
+ 0x504a2f2c,
+ 0x504aaf3c,
+ 0x504b2f5c,
+ 0x504baf6f,
+ 0x504c2f92,
+ 0x504cafc0,
+ 0x504d2fd2,
+ 0x504dafef,
+ 0x504e300a,
+ 0x504eb026,
+ 0x504f3038,
+ 0x504fb04f,
+ 0x5050305e,
0x505086ef,
- 0x50512fa4,
+ 0x50513071,
0x58320ec9,
0x68320e8b,
0x68328c25,
@@ -1209,6 +1218,7 @@ Pod::Spec.new do |s|
"BAD_SSL_FILETYPE\\0"
"BAD_WRITE_RETRY\\0"
"BIO_NOT_SET\\0"
+ "BLOCK_CIPHER_PAD_IS_WRONG\\0"
"BUFFERED_MESSAGES_ON_CIPHER_CHANGE\\0"
"CA_DN_LENGTH_MISMATCH\\0"
"CA_DN_TOO_LONG\\0"
@@ -1233,6 +1243,7 @@ Pod::Spec.new do |s|
"DOWNGRADE_DETECTED\\0"
"DTLS_MESSAGE_TOO_BIG\\0"
"DUPLICATE_EXTENSION\\0"
+ "DUPLICATE_KEY_SHARE\\0"
"ECC_CERT_NOT_FOR_SIGNING\\0"
"EMS_STATE_INCONSISTENT\\0"
"ENCRYPTED_LENGTH_TOO_LONG\\0"
@@ -1270,15 +1281,18 @@ Pod::Spec.new do |s|
"NO_CERTIFICATE_SET\\0"
"NO_CIPHERS_AVAILABLE\\0"
"NO_CIPHERS_PASSED\\0"
+ "NO_CIPHERS_SPECIFIED\\0"
"NO_CIPHER_MATCH\\0"
"NO_COMMON_SIGNATURE_ALGORITHMS\\0"
"NO_COMPRESSION_SPECIFIED\\0"
+ "NO_GROUPS_SPECIFIED\\0"
"NO_METHOD_SPECIFIED\\0"
"NO_P256_SUPPORT\\0"
"NO_PRIVATE_KEY_ASSIGNED\\0"
"NO_RENEGOTIATION\\0"
"NO_REQUIRED_DIGEST\\0"
"NO_SHARED_CIPHER\\0"
+ "NO_SHARED_GROUP\\0"
"NULL_SSL_CTX\\0"
"NULL_SSL_METHOD_PASSED\\0"
"OLD_SESSION_CIPHER_NOT_RETURNED\\0"
@@ -1294,6 +1308,7 @@ Pod::Spec.new do |s|
"READ_TIMEOUT_EXPIRED\\0"
"RECORD_LENGTH_MISMATCH\\0"
"RECORD_TOO_LARGE\\0"
+ "RENEGOTIATION_EMS_MISMATCH\\0"
"RENEGOTIATION_ENCODING_ERR\\0"
"RENEGOTIATION_MISMATCH\\0"
"REQUIRED_CIPHER_MISSING\\0"
@@ -1338,12 +1353,15 @@ Pod::Spec.new do |s|
"TLSV1_ALERT_USER_CANCELLED\\0"
"TLSV1_BAD_CERTIFICATE_HASH_VALUE\\0"
"TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\\0"
+ "TLSV1_CERTIFICATE_REQUIRED\\0"
"TLSV1_CERTIFICATE_UNOBTAINABLE\\0"
+ "TLSV1_UNKNOWN_PSK_IDENTITY\\0"
"TLSV1_UNRECOGNIZED_NAME\\0"
"TLSV1_UNSUPPORTED_EXTENSION\\0"
"TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\\0"
"TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\\0"
"TOO_MANY_EMPTY_FRAGMENTS\\0"
+ "TOO_MANY_KEY_UPDATES\\0"
"TOO_MANY_WARNING_ALERTS\\0"
"UNABLE_TO_FIND_ECDH_PARAMETERS\\0"
"UNEXPECTED_EXTENSION\\0"
diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec
index 3ebcacf055..2f47b02c0c 100644
--- a/src/objective-c/CronetFramework.podspec
+++ b/src/objective-c/CronetFramework.podspec
@@ -30,14 +30,47 @@
Pod::Spec.new do |s|
s.name = "CronetFramework"
- s.version = "0.0.2"
+ s.version = "0.0.3"
s.summary = "Cronet, precompiled and used as a framework."
s.homepage = "http://chromium.org"
- s.license = { :type => 'BSD' }
+ s.license = {
+ :type => 'BSD',
+ :text => <<-LICENSE
+ Copyright 2015, Google Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ LICENSE
+ }
s.vendored_framework = "Cronet.framework"
s.author = "The Chromium Authors"
- s.ios.deployment_target = "7.1"
+ s.ios.deployment_target = "8.0"
s.source = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' }
s.preserve_paths = "Cronet.framework"
s.public_header_files = "Cronet.framework/Headers/**/*{.h}"
+ s.source_files = "Cronet.framework/Headers/**/*{.h}"
end
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
index 4a3f3fa4a1..c1623a0068 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
@@ -40,15 +40,18 @@
@interface GRPCCall (ChannelArg)
/**
- * Use the provided @c userAgentPrefix at the beginning of the HTTP User Agent string for all calls
- * to the specified @c host.
+ * Use the provided @c userAgentPrefix at the beginning of the HTTP User Agent
+ * string for all calls to the specified @c host.
*/
-+ (void)setUserAgentPrefix:(nonnull NSString *)userAgentPrefix forHost:(nonnull NSString *)host;
++ (void)setUserAgentPrefix:(nonnull NSString *)userAgentPrefix
+ forHost:(nonnull NSString *)host;
-/** The default response size limit is 4MB. Set this to override that default. */
+/** The default response size limit is 4MB. Set this to override that default.
+ */
+ (void)setResponseSizeLimit:(NSUInteger)limit forHost:(nonnull NSString *)host;
-+ (void)closeOpenConnections DEPRECATED_MSG_ATTRIBUTE("The API for this feature is experimental, "
- "and might be removed or modified at any "
- "time.");
++ (void)closeOpenConnections DEPRECATED_MSG_ATTRIBUTE(
+ "The API for this feature is experimental, "
+ "and might be removed or modified at any "
+ "time.");
@end
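For reference, a minimal usage sketch of the two per-host setters documented above; the host string and prefix below are illustrative and not part of this change:

  #import <GRPCClient/GRPCCall+ChannelArg.h>

  // Prepend an app-specific token to the User-Agent sent to this host.
  [GRPCCall setUserAgentPrefix:@"MyApp/1.2.3" forHost:@"example.com"];
  // Override the default 4MB response size limit for this host only.
  [GRPCCall setResponseSizeLimit:8 * 1024 * 1024 forHost:@"example.com"];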
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h b/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
index ac2a37d75f..beae0d11a2 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelCredentials.h
@@ -33,24 +33,26 @@
#import "GRPCCall.h"
-/** Helpers for setting TLS Trusted Roots, Client Certificates, and Private Key */
+/** Helpers for setting TLS Trusted Roots, Client Certificates, and Private Key
+ */
@interface GRPCCall (ChannelCredentials)
/**
- * Use the provided @c pemRootCert as the set of trusted root Certificate Authorities for @c host.
+ * Use the provided @c pemRootCert as the set of trusted root Certificate
+ * Authorities for @c host.
*/
+ (BOOL)setTLSPEMRootCerts:(nullable NSString *)pemRootCert
forHost:(nonnull NSString *)host
- error:(NSError * _Nullable * _Nullable)errorPtr;
+ error:(NSError *_Nullable *_Nullable)errorPtr;
/**
- * Configures @c host with TLS/SSL Client Credentials and optionally trusted root Certificate
- * Authorities. If @c pemRootCerts is nil, the default CA Certificates bundled with gRPC will be
- * used.
+ * Configures @c host with TLS/SSL Client Credentials and optionally trusted
+ * root Certificate Authorities. If @c pemRootCerts is nil, the default CA
+ * Certificates bundled with gRPC will be used.
*/
+ (BOOL)setTLSPEMRootCerts:(nullable NSString *)pemRootCerts
withPrivateKey:(nullable NSString *)pemPrivateKey
withCertChain:(nullable NSString *)pemCertChain
forHost:(nonnull NSString *)host
- error:(NSError * _Nullable * _Nullable)errorPtr;
+ error:(NSError *_Nullable *_Nullable)errorPtr;
@end
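A hedged sketch of how these two class methods are called; the PEM strings, host, and error handling are placeholders rather than part of this diff:

  #import <GRPCClient/GRPCCall+ChannelCredentials.h>

  NSString *clientKeyPEM = @"<PEM-encoded private key>";   // placeholder
  NSString *clientCertPEM = @"<PEM-encoded cert chain>";   // placeholder
  NSError *error = nil;
  // Passing nil root certs keeps the default CA bundle shipped with gRPC.
  BOOL ok = [GRPCCall setTLSPEMRootCerts:nil
                          withPrivateKey:clientKeyPEM
                           withCertChain:clientCertPEM
                                 forHost:@"example.com"
                                   error:&error];
  if (!ok) {
    NSLog(@"TLS configuration failed: %@", error);
  }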
diff --git a/src/objective-c/GRPCClient/GRPCCall+OAuth2.h b/src/objective-c/GRPCClient/GRPCCall+OAuth2.h
index 6b443877e9..467c6332c1 100644
--- a/src/objective-c/GRPCClient/GRPCCall+OAuth2.h
+++ b/src/objective-c/GRPCClient/GRPCCall+OAuth2.h
@@ -37,15 +37,17 @@
@interface GRPCCall (OAuth2)
/**
- * Setting this property is equivalent to setting "Bearer <passed token>" as the value of the
- * request header with key "authorization" (the authorization header). Setting it to nil removes the
- * authorization header from the request.
- * The value obtained by getting the property is the OAuth2 bearer token if the authorization header
- * of the request has the form "Bearer <token>", or nil otherwise.
+ * Setting this property is equivalent to setting "Bearer <passed token>" as the
+ * value of the request header with key "authorization" (the authorization
+ * header). Setting it to nil removes the authorization header from the request.
+ * The value obtained by getting the property is the OAuth2 bearer token if the
+ * authorization header of the request has the form "Bearer <token>", or nil
+ * otherwise.
*/
@property(atomic, copy) NSString *oauth2AccessToken;
-/** Returns the value (if any) of the "www-authenticate" response header (the challenge header). */
+/** Returns the value (if any) of the "www-authenticate" response header (the
+ * challenge header). */
@property(atomic, readonly) NSString *oauth2ChallengeHeader;
@end
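As the comment above states, setting the property is equivalent to setting the authorization request header; a short sketch, assuming 'call' is a GRPCCall created elsewhere and the token is a placeholder:

  #import <GRPCClient/GRPCCall+OAuth2.h>

  call.oauth2AccessToken = @"ya29.example-token";
  // Equivalent to:
  call.requestHeaders[@"authorization"] = @"Bearer ya29.example-token";
  // After an authentication failure, the server challenge (if any) is exposed:
  NSLog(@"www-authenticate: %@", call.oauth2ChallengeHeader);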
diff --git a/src/objective-c/GRPCClient/GRPCCall+Tests.h b/src/objective-c/GRPCClient/GRPCCall+Tests.h
index 184ad09c5c..f517f3aac8 100644
--- a/src/objective-c/GRPCClient/GRPCCall+Tests.h
+++ b/src/objective-c/GRPCClient/GRPCCall+Tests.h
@@ -34,33 +34,36 @@
#import "GRPCCall.h"
/**
- * Methods to let tune down the security of gRPC connections for specific hosts. These shouldn't be
- * used in releases, but are sometimes needed for testing.
+ * Methods to tune down the security of gRPC connections for specific hosts.
+ * These shouldn't be used in releases, but are sometimes needed for testing.
*/
@interface GRPCCall (Tests)
/**
- * Establish all SSL connections to the provided host using the passed SSL target name and the root
- * certificates found in the file at |certsPath|.
+ * Establish all SSL connections to the provided host using the passed SSL
+ * target name and the root certificates found in the file at |certsPath|.
*
- * Must be called before any gRPC call to that host is made. It's illegal to pass the same host to
- * more than one invocation of the methods of this category.
+ * Must be called before any gRPC call to that host is made. It's illegal to
+ * pass the same host to more than one invocation of the methods of this
+ * category.
*/
+ (void)useTestCertsPath:(NSString *)certsPath
testName:(NSString *)testName
forHost:(NSString *)host;
/**
- * Establish all connections to the provided host using cleartext instead of SSL.
+ * Establish all connections to the provided host using cleartext instead of
+ * SSL.
*
- * Must be called before any gRPC call to that host is made. It's illegal to pass the same host to
- * more than one invocation of the methods of this category.
+ * Must be called before any gRPC call to that host is made. It's illegal to
+ * pass the same host to more than one invocation of the methods of this
+ * category.
*/
+ (void)useInsecureConnectionsForHost:(NSString *)host;
/**
- * Resets all host configurations to their default values, and flushes all connections from the
- * cache.
+ * Resets all host configurations to their default values, and flushes all
+ * connections from the cache.
*/
+ (void)resetHostSettings;
@end
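A short sketch of the intended test-only flow for this category; the host value is illustrative and follows the @"localhost:5050" example used in GRPCCall.h:

  #import <GRPCClient/GRPCCall+Tests.h>

  // Test setup: talk to a local server over cleartext instead of TLS.
  [GRPCCall useInsecureConnectionsForHost:@"localhost:5050"];
  // ... run RPCs against localhost:5050 ...
  // Test teardown: restore defaults and flush cached connections.
  [GRPCCall resetHostSettings];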
diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h
index 7645bb1d34..5ed160d7a0 100644
--- a/src/objective-c/GRPCClient/GRPCCall.h
+++ b/src/objective-c/GRPCClient/GRPCCall.h
@@ -34,17 +34,18 @@
/**
* The gRPC protocol is an RPC protocol on top of HTTP2.
*
- * While the most common type of RPC receives only one request message and returns only one response
- * message, the protocol also supports RPCs that return multiple individual messages in a streaming
- * fashion, RPCs that accept a stream of request messages, or RPCs with both streaming requests and
+ * While the most common type of RPC receives only one request message and
+ * returns only one response message, the protocol also supports RPCs that
+ * return multiple individual messages in a streaming fashion, RPCs that accept
+ * a stream of request messages, or RPCs with both streaming requests and
* responses.
*
- * Conceptually, each gRPC call consists of a bidirectional stream of binary messages, with RPCs of
- * the "non-streaming type" sending only one message in the corresponding direction (the protocol
- * doesn't make any distinction).
+ * Conceptually, each gRPC call consists of a bidirectional stream of binary
+ * messages, with RPCs of the "non-streaming type" sending only one message in
+ * the corresponding direction (the protocol doesn't make any distinction).
*
- * Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs can be multiplexed
- * transparently on the same TCP connection.
+ * Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs
+ * can be multiplexed transparently on the same TCP connection.
*/
#import <Foundation/Foundation.h>
@@ -59,51 +60,56 @@ extern NSString *const kGRPCErrorDomain;
/**
* gRPC error codes.
- * Note that a few of these are never produced by the gRPC libraries, but are of general utility for
- * server applications to produce.
+ * Note that a few of these are never produced by the gRPC libraries, but are of
+ * general utility for server applications to produce.
*/
typedef NS_ENUM(NSUInteger, GRPCErrorCode) {
/** The operation was cancelled (typically by the caller). */
GRPCErrorCodeCancelled = 1,
/**
- * Unknown error. Errors raised by APIs that do not return enough error information may be
+ * Unknown error. Errors raised by APIs that do not return enough error
+ * information may be
* converted to this error.
*/
GRPCErrorCodeUnknown = 2,
/**
- * The client specified an invalid argument. Note that this differs from FAILED_PRECONDITION.
- * INVALID_ARGUMENT indicates arguments that are problematic regardless of the state of the
- * server (e.g., a malformed file name).
+ * The client specified an invalid argument. Note that this differs from
+ * FAILED_PRECONDITION.
+ * INVALID_ARGUMENT indicates arguments that are problematic regardless of the
+ * state of the server (e.g., a malformed file name).
*/
GRPCErrorCodeInvalidArgument = 3,
/**
- * Deadline expired before operation could complete. For operations that change the state of the
- * server, this error may be returned even if the operation has completed successfully. For
- * example, a successful response from the server could have been delayed long enough for the
- * deadline to expire.
+ * Deadline expired before operation could complete. For operations that
+ * change the state of the server, this error may be returned even if the
+ * operation has completed successfully. For example, a successful response
+ * from the server could have been delayed long enough for the deadline to
+ * expire.
*/
GRPCErrorCodeDeadlineExceeded = 4,
/** Some requested entity (e.g., file or directory) was not found. */
GRPCErrorCodeNotFound = 5,
- /** Some entity that we attempted to create (e.g., file or directory) already exists. */
+ /** Some entity that we attempted to create (e.g., file or directory) already
+ exists. */
GRPCErrorCodeAlreadyExists = 6,
/**
- * The caller does not have permission to execute the specified operation. PERMISSION_DENIED isn't
- * used for rejections caused by exhausting some resource (RESOURCE_EXHAUSTED is used instead for
- * those errors). PERMISSION_DENIED doesn't indicate a failure to identify the caller
+ * The caller does not have permission to execute the specified operation.
+ * PERMISSION_DENIED isn't used for rejections caused by exhausting some
+ * resource (RESOURCE_EXHAUSTED is used instead for those errors).
+ * PERMISSION_DENIED doesn't indicate a failure to identify the caller
* (UNAUTHENTICATED is used instead for those errors).
*/
GRPCErrorCodePermissionDenied = 7,
/**
- * The request does not have valid authentication credentials for the operation (e.g. the caller's
- * identity can't be verified).
+ * The request does not have valid authentication credentials for the
+ * operation (e.g. the caller's identity can't be verified).
*/
GRPCErrorCodeUnauthenticated = 16,
@@ -111,42 +117,47 @@ typedef NS_ENUM(NSUInteger, GRPCErrorCode) {
GRPCErrorCodeResourceExhausted = 8,
/**
- * The RPC was rejected because the server is not in a state required for the procedure's
+ * The RPC was rejected because the server is not in a state required for the
+ * procedure's
* execution. For example, a directory to be deleted may be non-empty, etc.
- * The client should not retry until the server state has been explicitly fixed (e.g. by
- * performing another RPC). The details depend on the service being called, and should be found in
- * the NSError's userInfo.
+ * The client should not retry until the server state has been explicitly
+ * fixed (e.g. by
+ * performing another RPC). The details depend on the service being called,
+ * and should be found in the NSError's userInfo.
*/
GRPCErrorCodeFailedPrecondition = 9,
/**
- * The RPC was aborted, typically due to a concurrency issue like sequencer check failures,
- * transaction aborts, etc. The client should retry at a higher-level (e.g., restarting a read-
- * modify-write sequence).
+ * The RPC was aborted, typically due to a concurrency issue like sequencer
+ * check failures, transaction aborts, etc. The client should retry at a
+ * higher-level (e.g., restarting a read-modify-write sequence).
*/
GRPCErrorCodeAborted = 10,
/**
- * The RPC was attempted past the valid range. E.g., enumerating past the end of a list.
- * Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed if the system state
- * changes. For example, an RPC to get elements of a list will generate INVALID_ARGUMENT if asked
- * to return the element at a negative index, but it will generate OUT_OF_RANGE if asked to return
- * the element at an index past the current size of the list.
+ * The RPC was attempted past the valid range. E.g., enumerating past the end
+ * of a list.
+ * Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
+ * if the system state changes. For example, an RPC to get elements of a list
+ * will generate INVALID_ARGUMENT if asked to return the element at a negative
+ * index, but it will generate OUT_OF_RANGE if asked to return the element at
+ * an index past the current size of the list.
*/
GRPCErrorCodeOutOfRange = 11,
- /** The procedure is not implemented or not supported/enabled in this server. */
+ /** The procedure is not implemented or not supported/enabled in this server.
+ */
GRPCErrorCodeUnimplemented = 12,
/**
- * Internal error. Means some invariant expected by the server application or the gRPC library has
- * been broken.
+ * Internal error. Means some invariant expected by the server application or
+ * the gRPC library has been broken.
*/
GRPCErrorCodeInternal = 13,
/**
- * The server is currently unavailable. This is most likely a transient condition and may be
- * corrected by retrying with a backoff.
+ * The server is currently unavailable. This is most likely a transient
+ * condition and may be corrected by retrying with a backoff.
*/
GRPCErrorCodeUnavailable = 14,
@@ -158,17 +169,19 @@ typedef NS_ENUM(NSUInteger, GRPCErrorCode) {
* Safety remark of a gRPC method as defined in RFC 2616 Section 9.1
*/
typedef NS_ENUM(NSUInteger, GRPCCallSafety) {
- /** Signal that there is no guarantees on how the call affects the server state. */
+ /** Signal that there are no guarantees on how the call affects the server
+ state. */
GRPCCallSafetyDefault = 0,
/** Signal that the call is idempotent. gRPC is free to use PUT verb. */
GRPCCallSafetyIdempotentRequest = 1,
- /** Signal that the call is cacheable and will not affect server state. gRPC is free to use GET verb. */
+ /** Signal that the call is cacheable and will not affect server state. gRPC
+ is free to use GET verb. */
GRPCCallSafetyCacheableRequest = 2,
};
/**
- * Keys used in |NSError|'s |userInfo| dictionary to store the response headers and trailers sent by
- * the server.
+ * Keys used in |NSError|'s |userInfo| dictionary to store the response headers
+ * and trailers sent by the server.
*/
extern id const kGRPCHeadersKey;
extern id const kGRPCTrailersKey;
@@ -179,20 +192,24 @@ extern id const kGRPCTrailersKey;
@interface GRPCCall : GRXWriter
/**
- * The container of the request headers of an RPC conforms to this protocol, which is a subset of
- * NSMutableDictionary's interface. It will become a NSMutableDictionary later on.
- * The keys of this container are the header names, which per the HTTP standard are case-
- * insensitive. They are stored in lowercase (which is how HTTP/2 mandates them on the wire), and
- * can only consist of ASCII characters.
- * A header value is a NSString object (with only ASCII characters), unless the header name has the
- * suffix "-bin", in which case the value has to be a NSData object.
+ * The container of the request headers of an RPC conforms to this protocol,
+ * which is a subset of NSMutableDictionary's interface. It will become a
+ * NSMutableDictionary later on. The keys of this container are the header
+ * names, which per the HTTP standard are case-insensitive. They are stored in
+ * lowercase (which is how HTTP/2 mandates them on the wire), and can only
+ * consist of ASCII characters.
+ * A header value is a NSString object (with only ASCII characters), unless the
+ * header name has the suffix "-bin", in which case the value has to be a NSData
+ * object.
*/
/**
- * These HTTP headers will be passed to the server as part of this call. Each HTTP header is a
- * name-value pair with string names and either string or binary values.
+ * These HTTP headers will be passed to the server as part of this call. Each
+ * HTTP header is a name-value pair with string names and either string or
+ * binary values.
*
- * The passed dictionary has to use NSString keys, corresponding to the header names. The value
- * associated to each can be a NSString object or a NSData object. E.g.:
+ * The passed dictionary has to use NSString keys, corresponding to the header
+ * names. The value associated to each can be a NSString object or a NSData
+ * object. E.g.:
*
* call.requestHeaders = @{@"authorization": @"Bearer ..."};
*
@@ -205,53 +222,61 @@ extern id const kGRPCTrailersKey;
@property(atomic, readonly) NSMutableDictionary *requestHeaders;
/**
- * This dictionary is populated with the HTTP headers received from the server. This happens before
- * any response message is received from the server. It has the same structure as the request
- * headers dictionary: Keys are NSString header names; names ending with the suffix "-bin" have a
- * NSData value; the others have a NSString value.
+ * This dictionary is populated with the HTTP headers received from the server.
+ * This happens before any response message is received from the server. It has
+ * the same structure as the request headers dictionary: Keys are NSString
+ * header names; names ending with the suffix "-bin" have a NSData value; the
+ * others have a NSString value.
*
- * The value of this property is nil until all response headers are received, and will change before
- * any of -writeValue: or -writesFinishedWithError: are sent to the writeable.
+ * The value of this property is nil until all response headers are received,
+ * and will change before any of -writeValue: or -writesFinishedWithError: are
+ * sent to the writeable.
*/
@property(atomic, readonly) NSDictionary *responseHeaders;
/**
- * Same as responseHeaders, but populated with the HTTP trailers received from the server before the
- * call finishes.
+ * Same as responseHeaders, but populated with the HTTP trailers received from
+ * the server before the call finishes.
*
- * The value of this property is nil until all response trailers are received, and will change
- * before -writesFinishedWithError: is sent to the writeable.
+ * The value of this property is nil until all response trailers are received,
+ * and will change before -writesFinishedWithError: is sent to the writeable.
*/
@property(atomic, readonly) NSDictionary *responseTrailers;
/**
- * The request writer has to write NSData objects into the provided Writeable. The server will
- * receive each of those separately and in order as distinct messages.
- * A gRPC call might not complete until the request writer finishes. On the other hand, the request
- * finishing doesn't necessarily make the call to finish, as the server might continue sending
- * messages to the response side of the call indefinitely (depending on the semantics of the
- * specific remote method called).
+ * The request writer has to write NSData objects into the provided Writeable.
+ * The server will receive each of those separately and in order as distinct
+ * messages.
+ * A gRPC call might not complete until the request writer finishes. On the
+ * other hand, the request finishing doesn't necessarily make the call
+ * finish, as the server might continue sending messages to the response side of
+ * the call indefinitely (depending on the semantics of the specific remote
+ * method called).
* To finish a call right away, invoke cancel.
- * host parameter should not contain the scheme (http:// or https://), only the name or IP addr
- * and the port number, for example @"localhost:5050".
+ * host parameter should not contain the scheme (http:// or https://), only the
+ * name or IP addr and the port number, for example @"localhost:5050".
*/
- (instancetype)initWithHost:(NSString *)host
path:(NSString *)path
- requestsWriter:(GRXWriter *)requestsWriter NS_DESIGNATED_INITIALIZER;
+ requestsWriter:(GRXWriter *)requestsWriter
+ NS_DESIGNATED_INITIALIZER;
/**
- * Finishes the request side of this call, notifies the server that the RPC should be cancelled, and
- * finishes the response side of the call with an error of code CANCELED.
+ * Finishes the request side of this call, notifies the server that the RPC
+ * should be cancelled, and finishes the response side of the call with an error
+ * of code CANCELED.
*/
- (void)cancel;
/**
* Set the call flag for a specific host path.
*
- * Host parameter should not contain the scheme (http:// or https://), only the name or IP addr
- * and the port number, for example @"localhost:5050".
+ * Host parameter should not contain the scheme (http:// or https://), only the
+ * name or IP addr and the port number, for example @"localhost:5050".
*/
-+ (void)setCallSafety:(GRPCCallSafety)callSafety host:(NSString *)host path:(NSString *)path;
++ (void)setCallSafety:(GRPCCallSafety)callSafety
+ host:(NSString *)host
+ path:(NSString *)path;
// TODO(jcanizales): Let specify a deadline. As a category of GRXWriter?
@end
@@ -260,7 +285,7 @@ extern id const kGRPCTrailersKey;
/** This protocol is kept for backwards compatibility with existing code. */
DEPRECATED_MSG_ATTRIBUTE("Use NSDictionary or NSMutableDictionary instead.")
-@protocol GRPCRequestHeaders <NSObject>
+@protocol GRPCRequestHeaders<NSObject>
@property(nonatomic, readonly) NSUInteger count;
- (id)objectForKeyedSubscript:(id)key;
@@ -273,6 +298,6 @@ DEPRECATED_MSG_ATTRIBUTE("Use NSDictionary or NSMutableDictionary instead.")
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated"
/** This is only needed for backwards-compatibility. */
-@interface NSMutableDictionary (GRPCRequestHeaders) <GRPCRequestHeaders>
+@interface NSMutableDictionary (GRPCRequestHeaders)<GRPCRequestHeaders>
@end
#pragma clang diagnostic pop
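Putting the pieces of this header together, a hedged end-to-end sketch; the path, host, request data, and the responses writeable are placeholders, and GRXWriter+Immediate (writerWithValue:) is an RxLibrary helper assumed here rather than shown in this diff:

  #import <GRPCClient/GRPCCall.h>
  #import <RxLibrary/GRXWriter+Immediate.h>

  NSData *requestData = [NSData data];  // placeholder for a serialized request message
  GRXWriter *requestsWriter = [GRXWriter writerWithValue:requestData];
  GRPCCall *call = [[GRPCCall alloc] initWithHost:@"localhost:5050"
                                             path:@"/helloworld.Greeter/SayHello"
                                   requestsWriter:requestsWriter];
  // Request header names are lowercase; names ending in "-bin" take NSData values.
  call.requestHeaders[@"authorization"] = @"Bearer ...";
  // Mark this method as cacheable, so gRPC is free to use the GET verb.
  [GRPCCall setCallSafety:GRPCCallSafetyCacheableRequest
                     host:@"localhost:5050"
                     path:@"/helloworld.Greeter/SayHello"];
  [call startWithWriteable:responsesWriteable];  // a GRXWriteable supplied by the caller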
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index 43204345f5..85d141aa09 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -33,9 +33,9 @@
#import "GRPCCall.h"
+#import <RxLibrary/GRXConcurrentWriteable.h>
#include <grpc/grpc.h>
#include <grpc/support/time.h>
-#import <RxLibrary/GRXConcurrentWriteable.h>
#import "private/GRPCConnectivityMonitor.h"
#import "private/GRPCHost.h"
@@ -45,11 +45,11 @@
#import "private/NSDictionary+GRPC.h"
#import "private/NSError+GRPC.h"
-NSString * const kGRPCHeadersKey = @"io.grpc.HeadersKey";
-NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
+NSString *const kGRPCHeadersKey = @"io.grpc.HeadersKey";
+NSString *const kGRPCTrailersKey = @"io.grpc.TrailersKey";
static NSMutableDictionary *callFlags;
-@interface GRPCCall () <GRXWriteable>
+@interface GRPCCall ()<GRXWriteable>
// Make them read-write.
@property(atomic, strong) NSDictionary *responseHeaders;
@property(atomic, strong) NSDictionary *responseTrailers;
@@ -85,17 +85,21 @@ static NSMutableDictionary *callFlags;
// correct ordering.
GRXConcurrentWriteable *_responseWriteable;
- // The network thread wants the requestWriter to resume (when the server is ready for more input),
- // or to stop (on errors), concurrently with user threads that want to start it, pause it or stop
- // it. Because a writer isn't thread-safe, we'll synchronize those operations on it.
- // We don't use a dispatch queue for that purpose, because the writer can call writeValue: or
- // writesFinishedWithError: on this GRPCCall as part of those operations. We want to be able to
- // pause the writer immediately on writeValue:, so we need our locking to be recursive.
+ // The network thread wants the requestWriter to resume (when the server is
+ // ready for more input), or to stop (on errors), concurrently with user
+ // threads that want to start it, pause it or stop it. Because a writer isn't
+ // thread-safe, we'll synchronize those operations on it.
+ // We don't use a dispatch queue for that purpose, because the writer can call
+ // writeValue: or writesFinishedWithError: on this GRPCCall as part of those
+ // operations. We want to be able to pause the writer immediately on
+ // writeValue:, so we need our locking to be recursive.
GRXWriter *_requestWriter;
// To create a retain cycle when a call is started, up until it finishes. See
- // |startWithWriteable:| and |finishWithError:|. This saves users from having to retain a
- // reference to the call object if all they're interested in is the handler being executed when
+ // |startWithWriteable:| and |finishWithError:|. This saves users from having
+ // to retain a
+ // reference to the call object if all they're interested in is the handler
+ // being executed when
// the response arrives.
GRPCCall *_retainSelf;
@@ -104,13 +108,16 @@ static NSMutableDictionary *callFlags;
@synthesize state = _state;
-// TODO(jcanizales): If grpc_init is idempotent, this should be changed from load to initialize.
+// TODO(jcanizales): If grpc_init is idempotent, this should be changed from
+// load to initialize.
+ (void)load {
grpc_init();
callFlags = [NSMutableDictionary dictionary];
}
-+ (void)setCallSafety:(GRPCCallSafety)callSafety host:(NSString *)host path:(NSString *)path {
++ (void)setCallSafety:(GRPCCallSafety)callSafety
+ host:(NSString *)host
+ path:(NSString *)path {
NSString *hostAndPath = [NSString stringWithFormat:@"%@/%@", host, path];
switch (callSafety) {
case GRPCCallSafetyDefault:
@@ -141,7 +148,8 @@ static NSMutableDictionary *callFlags;
path:(NSString *)path
requestsWriter:(GRXWriter *)requestWriter {
if (!host || !path) {
- [NSException raise:NSInvalidArgumentException format:@"Neither host nor path can be nil."];
+ [NSException raise:NSInvalidArgumentException
+ format:@"Neither host nor path can be nil."];
}
if (requestWriter.state != GRXWriterStateNotStarted) {
[NSException raise:NSInvalidArgumentException
@@ -191,7 +199,10 @@ static NSMutableDictionary *callFlags;
- (void)cancel {
[self finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
code:GRPCErrorCodeCancelled
- userInfo:@{NSLocalizedDescriptionKey: @"Canceled by app"}]];
+ userInfo:@{
+ NSLocalizedDescriptionKey :
+ @"Canceled by app"
+ }]];
[self cancelCall];
}
@@ -206,15 +217,18 @@ static NSMutableDictionary *callFlags;
// Only called from the call queue.
// The handler will be called from the network queue.
-- (void)startReadWithHandler:(void(^)(grpc_byte_buffer *))handler {
+- (void)startReadWithHandler:(void (^)(grpc_byte_buffer *))handler {
// TODO(jcanizales): Add error handlers for async failures
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMessage alloc] initWithHandler:handler]]];
+ [_wrappedCall startBatchWithOperations:@[ [[GRPCOpRecvMessage alloc]
+ initWithHandler:handler] ]];
}
// Called initially from the network queue once response headers are received,
-// then "recursively" from the responseWriteable queue after each response from the
+// then "recursively" from the responseWriteable queue after each response from
+// the
// server has been written.
-// If the call is currently paused, this is a noop. Restarting the call will invoke this
+// If the call is currently paused, this is a noop. Restarting the call will
+// invoke this
// method.
// TODO(jcanizales): Rename to readResponseIfNotPaused.
- (void)startNextRead {
@@ -237,15 +251,23 @@ static NSMutableDictionary *callFlags;
// don't want to throw, because the app shouldn't crash for a behavior
// that's in the hands of any server to have. Instead we finish and ask
// the server to cancel.
- [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeResourceExhausted
- userInfo:@{NSLocalizedDescriptionKey: @"Client does not have enough memory to hold the server response."}]];
+ [weakSelf
+ finishWithError:[NSError
+ errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeResourceExhausted
+ userInfo:@{
+ NSLocalizedDescriptionKey :
+ @"Client does not have enough "
+ @"memory to hold the server "
+ @"response."
+ }]];
[weakSelf cancelCall];
return;
}
- [weakWriteable enqueueValue:data completionHandler:^{
- [weakSelf startNextRead];
- }];
+ [weakWriteable enqueueValue:data
+ completionHandler:^{
+ [weakSelf startNextRead];
+ }];
}];
});
}
@@ -254,19 +276,22 @@ static NSMutableDictionary *callFlags;
- (void)sendHeaders:(NSDictionary *)headers {
// TODO(jcanizales): Add error handlers for async failures
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpSendMetadata alloc] initWithMetadata:headers
- flags:[GRPCCall callFlagsForHost:_host path:_path]
- handler:nil]]];
+ [_wrappedCall startBatchWithOperations:@[
+ [[GRPCOpSendMetadata alloc]
+ initWithMetadata:headers
+ flags:[GRPCCall callFlagsForHost:_host path:_path]
+ handler:nil]
+ ]];
}
#pragma mark GRXWriteable implementation
// Only called from the call queue. The error handler will be called from the
// network queue if the write didn't succeed.
-- (void)writeMessage:(NSData *)message withErrorHandler:(void (^)())errorHandler {
-
+- (void)writeMessage:(NSData *)message
+ withErrorHandler:(void (^)())errorHandler {
__weak GRPCCall *weakSelf = self;
- void(^resumingHandler)(void) = ^{
+ void (^resumingHandler)(void) = ^{
// Resume the request writer.
GRPCCall *strongSelf = weakSelf;
if (strongSelf) {
@@ -275,8 +300,9 @@ static NSMutableDictionary *callFlags;
}
}
};
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpSendMessage alloc] initWithMessage:message
- handler:resumingHandler]]
+ [_wrappedCall startBatchWithOperations:@[ [[GRPCOpSendMessage alloc]
+ initWithMessage:message
+ handler:resumingHandler] ]
errorHandler:errorHandler];
}
@@ -291,18 +317,20 @@ static NSMutableDictionary *callFlags;
__weak GRPCCall *weakSelf = self;
dispatch_async(_callQueue, ^{
- [weakSelf writeMessage:value withErrorHandler:^{
- [weakSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ [weakSelf writeMessage:value
+ withErrorHandler:^{
+ [weakSelf
+ finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
code:GRPCErrorCodeInternal
userInfo:nil]];
- }];
+ }];
});
}
// Only called from the call queue. The error handler will be called from the
// network queue if the requests stream couldn't be closed successfully.
- (void)finishRequestWithErrorHandler:(void (^)())errorHandler {
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpSendClose alloc] init]]
+ [_wrappedCall startBatchWithOperations:@[ [[GRPCOpSendClose alloc] init] ]
errorHandler:errorHandler];
}
@@ -323,17 +351,19 @@ static NSMutableDictionary *callFlags;
#pragma mark Invoke
-// Both handlers will eventually be called, from the network queue. Writes can start immediately
-// after this.
+// Both handlers will eventually be called, from the network queue. Writes can
+// start immediately after this.
// The first one (headersHandler), when the response headers are received.
// The second one (completionHandler), whenever the RPC finishes for any reason.
-- (void)invokeCallWithHeadersHandler:(void(^)(NSDictionary *))headersHandler
- completionHandler:(void(^)(NSError *, NSDictionary *))completionHandler {
+- (void)invokeCallWithHeadersHandler:(void (^)(NSDictionary *))headersHandler
+ completionHandler:
+ (void (^)(NSError *, NSDictionary *))completionHandler {
// TODO(jcanizales): Add error handlers for async failures
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMetadata alloc]
- initWithHandler:headersHandler]]];
- [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvStatus alloc]
- initWithHandler:completionHandler]]];
+ [_wrappedCall startBatchWithOperations:@[ [[GRPCOpRecvMetadata alloc]
+ initWithHandler:headersHandler] ]];
+ [_wrappedCall
+ startBatchWithOperations:@[ [[GRPCOpRecvStatus alloc]
+ initWithHandler:completionHandler] ]];
}
- (void)invokeCall {
@@ -341,27 +371,31 @@ static NSMutableDictionary *callFlags;
// Response headers received.
self.responseHeaders = headers;
[self startNextRead];
- } completionHandler:^(NSError *error, NSDictionary *trailers) {
- self.responseTrailers = trailers;
-
- if (error) {
- NSMutableDictionary *userInfo = [NSMutableDictionary dictionary];
- if (error.userInfo) {
- [userInfo addEntriesFromDictionary:error.userInfo];
- }
- userInfo[kGRPCTrailersKey] = self.responseTrailers;
- // TODO(jcanizales): The C gRPC library doesn't guarantee that the headers block will be
- // called before this one, so an error might end up with trailers but no headers. We
- // shouldn't call finishWithError until ater both blocks are called. It is also when this is
- // done that we can provide a merged view of response headers and trailers in a thread-safe
- // way.
- if (self.responseHeaders) {
- userInfo[kGRPCHeadersKey] = self.responseHeaders;
- }
- error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
- }
- [self finishWithError:error];
- }];
+ }
+ completionHandler:^(NSError *error, NSDictionary *trailers) {
+ self.responseTrailers = trailers;
+
+ if (error) {
+ NSMutableDictionary *userInfo = [NSMutableDictionary dictionary];
+ if (error.userInfo) {
+ [userInfo addEntriesFromDictionary:error.userInfo];
+ }
+ userInfo[kGRPCTrailersKey] = self.responseTrailers;
+ // TODO(jcanizales): The C gRPC library doesn't guarantee that the
+ // headers block will be called before this one, so an error might end
+ // up with trailers but no headers. We shouldn't call finishWithError
+ // until after both blocks are called. It is also when this is done
+ // that we can provide a merged view of response headers and trailers
+ // in a thread-safe way.
+ if (self.responseHeaders) {
+ userInfo[kGRPCHeadersKey] = self.responseHeaders;
+ }
+ error = [NSError errorWithDomain:error.domain
+ code:error.code
+ userInfo:userInfo];
+ }
+ [self finishWithError:error];
+ }];
// Now that the RPC has been initiated, request writes can start.
@synchronized(_requestWriter) {
[_requestWriter startWithWriteable:self];
@@ -375,43 +409,55 @@ static NSMutableDictionary *callFlags;
_state = GRXWriterStateStarted;
}
- // Create a retain cycle so that this instance lives until the RPC finishes (or is cancelled).
- // This makes RPCs in which the call isn't externally retained possible (as long as it is started
- // before being autoreleased).
- // Care is taken not to retain self strongly in any of the blocks used in this implementation, so
- // that the life of the instance is determined by this retain cycle.
+ // Create a retain cycle so that this instance lives until the RPC finishes
+ // (or is cancelled). This makes RPCs in which the call isn't externally
+ // retained possible (as long as it is started before being autoreleased).
+ // Care is taken not to retain self strongly in any of the blocks used in this
+ // implementation, so that the life of the instance is determined by this
+ // retain cycle.
_retainSelf = self;
- _responseWriteable = [[GRXConcurrentWriteable alloc] initWithWriteable:writeable];
+ _responseWriteable =
+ [[GRXConcurrentWriteable alloc] initWithWriteable:writeable];
_wrappedCall = [[GRPCWrappedCall alloc] initWithHost:_host path:_path];
NSAssert(_wrappedCall, @"Error allocating RPC objects. Low memory?");
[self sendHeaders:_requestHeaders];
[self invokeCall];
+
// TODO(jcanizales): Extract this logic somewhere common.
- NSString *host = [NSURL URLWithString:[@"https://" stringByAppendingString:_host]].host;
+ NSString *host =
+ [NSURL URLWithString:[@"https://" stringByAppendingString:_host]].host;
if (!host) {
// TODO(jcanizales): Check this on init.
- [NSException raise:NSInvalidArgumentException format:@"host of %@ is nil", _host];
+ [NSException raise:NSInvalidArgumentException
+ format:@"host of %@ is nil", _host];
}
__weak typeof(self) weakSelf = self;
_connectivityMonitor = [GRPCConnectivityMonitor monitorWithHost:host];
- [_connectivityMonitor handleLossWithHandler:^{
+ void (^handler)() = ^{
typeof(self) strongSelf = weakSelf;
if (strongSelf) {
- [strongSelf finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
- code:GRPCErrorCodeUnavailable
- userInfo:@{NSLocalizedDescriptionKey: @"Connectivity lost."}]];
- [[GRPCHost hostWithAddress:strongSelf->_host] disconnect];
+ [strongSelf
+ finishWithError:[NSError errorWithDomain:kGRPCErrorDomain
+ code:GRPCErrorCodeUnavailable
+ userInfo:@{
+ NSLocalizedDescriptionKey :
+ @"Connectivity lost."
+ }]];
}
- }];
+ };
+ [_connectivityMonitor handleLossWithHandler:handler
+ wifiStatusChangeHandler:^{
+ }];
}
- (void)setState:(GRXWriterState)newState {
@synchronized(self) {
// Manual transitions are only allowed from the started or paused states.
- if (_state == GRXWriterStateNotStarted || _state == GRXWriterStateFinished) {
+ if (_state == GRXWriterStateNotStarted ||
+ _state == GRXWriterStateFinished) {
return;
}
diff --git a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.h b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.h
index fe3b8f39d1..c0bbf22c74 100644
--- a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.h
+++ b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.h
@@ -34,18 +34,19 @@
#import <Foundation/Foundation.h>
#include <grpc/grpc.h>
-typedef void(^GRPCQueueCompletionHandler)(bool success);
+typedef void (^GRPCQueueCompletionHandler)(bool success);
/**
- * This class lets one more easily use |grpc_completion_queue|. To use it, pass the value of the
- * |unmanagedQueue| property of an instance of this class to |grpc_channel_create_call|. Then for
- * every |grpc_call_*| method that accepts a tag, you can pass a block of type
- * |GRPCQueueCompletionHandler| (remembering to cast it using |__bridge_retained|). The block is
- * guaranteed to eventually be called, by a concurrent queue, and then released. Each such block is
+ * This class lets one more easily use |grpc_completion_queue|. To use it, pass
+ * the value of the |unmanagedQueue| property of an instance of this class to
+ * |grpc_channel_create_call|. Then for every |grpc_call_*| method that accepts
+ * a tag, you can pass a block of type |GRPCQueueCompletionHandler| (remembering
+ * to cast it using |__bridge_retained|). The block is guaranteed to eventually
+ * be called, by a concurrent queue, and then released. Each such block is
* passed a |bool| that tells if the operation was successful.
*
- * Release the GRPCCompletionQueue object only after you are not going to pass any more blocks to
- * the |grpc_call| that's using it.
+ * Release the GRPCCompletionQueue object only after you are not going to pass
+ * any more blocks to the |grpc_call| that's using it.
*/
@interface GRPCCompletionQueue : NSObject
@property(nonatomic, readonly) grpc_completion_queue *unmanagedQueue;
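A minimal sketch of the tag convention the comment above describes; the code that drains the completion queue and releases each block lives inside this class and is omitted:

  // Handler to run when the corresponding batch of core operations completes.
  GRPCQueueCompletionHandler handler = ^(bool success) {
    NSLog(@"batch finished, success: %d", success);
  };
  // Transfer ownership of the block to the core library; it is released again
  // when the completion queue reports this tag back.
  void *tag = (__bridge_retained void *)handler;
  // ... pass 'tag', together with unmanagedQueue, to the grpc_call_* API ...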
diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
index 2fae410331..941b596d2c 100644
--- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
+++ b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h
@@ -45,7 +45,7 @@
*/
#define GRPC_XMACRO_ITEM(methodName, FlagName) \
-@property(nonatomic, readonly) BOOL methodName;
+ @property(nonatomic, readonly) BOOL methodName;
#include "GRPCReachabilityFlagNames.xmacro.h"
#undef GRPC_XMACRO_ITEM
@@ -53,7 +53,6 @@
@property(nonatomic, readonly) BOOL isHostReachable;
@end
-
@interface GRPCConnectivityMonitor : NSObject
+ (nullable instancetype)monitorWithHost:(nonnull NSString *)hostName;
@@ -61,17 +60,19 @@
- (nonnull instancetype)init NS_UNAVAILABLE;
/**
- * Queue on which callbacks will be dispatched. Default is the main queue. Set it before calling
- * handleLossWithHandler:.
+ * Queue on which callbacks will be dispatched. Default is the main queue. Set
+ * it before calling handleLossWithHandler:.
*/
// TODO(jcanizales): Default to a serial background queue instead.
@property(nonatomic, strong, null_resettable) dispatch_queue_t queue;
/**
- * Calls handler every time the connectivity to this instance's host is lost. If this instance is
- * released before that happens, the handler won't be called.
- * Only one handler is active at a time, so if this method is called again before the previous
- * handler has been called, it might never be called at all (or yes, if it has already been queued).
+ * Calls handler every time the connectivity to this instance's host is lost. If
+ * this instance is released before that happens, the handler won't be called.
+ * Only one handler is active at a time, so if this method is called again
+ * before the previous handler has been called, it might never be called at all
+ * (or it might, if it has already been queued).
*/
-- (void)handleLossWithHandler:(nonnull void (^)())handler;
+- (void)handleLossWithHandler:(nonnull void (^)())handler
+ wifiStatusChangeHandler:(nonnull void (^)())wifiStatusChangeHandler;
@end
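This is a private header, so the sketch below only illustrates how the new two-handler API is meant to be wired up inside the library; the host name is a placeholder, and GRPCHost uses a single block for both handlers later in this diff:

  GRPCConnectivityMonitor *monitor =
      [GRPCConnectivityMonitor monitorWithHost:@"example.com"];
  // The monitor must stay strongly referenced; handlers stop firing once it is
  // released.
  [monitor handleLossWithHandler:^{
            NSLog(@"Connectivity to the host was lost.");
          }
          wifiStatusChangeHandler:^{
            NSLog(@"Connection switched between WWAN and Wi-Fi.");
          }];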
diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
index b4061bd5ef..fec6391d39 100644
--- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
+++ b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m
@@ -58,19 +58,20 @@
}
*/
-#define GRPC_XMACRO_ITEM(methodName, FlagName) \
-- (BOOL)methodName { \
- return !!(_flags & kSCNetworkReachabilityFlags ## FlagName); \
-}
+#define GRPC_XMACRO_ITEM(methodName, FlagName) \
+ -(BOOL)methodName { \
+ return !!(_flags & kSCNetworkReachabilityFlags##FlagName); \
+ }
#include "GRPCReachabilityFlagNames.xmacro.h"
#undef GRPC_XMACRO_ITEM
- (BOOL)isHostReachable {
- // Note: connectionOnDemand means it'll be reachable only if using the CFSocketStream API or APIs
- // on top of it.
- // connectionRequired means we can't tell until a connection is attempted (e.g. for VPN on
- // demand).
- return self.reachable && !self.interventionRequired && !self.connectionOnDemand;
+ // Note: connectionOnDemand means it'll be reachable only if using the
+ // CFSocketStream API or APIs on top of it.
+ // connectionRequired means we can't tell until a connection is attempted
+ // (e.g. for VPN on demand).
+ return self.reachable && !self.interventionRequired &&
+ !self.connectionOnDemand;
}
- (NSString *)description {
@@ -79,24 +80,26 @@
/*
* For each flag, add its name to the array if it's ON. Example:
- if (self.isCell) {
- [activeOptions addObject:@"isCell"];
- }
+ if (self.isCell) {
+ [activeOptions addObject:@"isCell"];
+ }
*/
-#define GRPC_XMACRO_ITEM(methodName, FlagName) \
- if (self.methodName) { \
- [activeOptions addObject:@#methodName]; \
- }
-#include "GRPCReachabilityFlagNames.xmacro.h"
-#undef GRPC_XMACRO_ITEM
+ #define GRPC_XMACRO_ITEM(methodName, FlagName) \
+ if (self.methodName) { \
+ [activeOptions addObject:@ #methodName]; \
+ }
+ #include "GRPCReachabilityFlagNames.xmacro.h"
+ #undef GRPC_XMACRO_ITEM
- return activeOptions.count == 0 ? @"(none)" : [activeOptions componentsJoinedByString:@", "];
+ return activeOptions.count == 0
+ ? @"(none)"
+ : [activeOptions componentsJoinedByString:@", "];
}
- (BOOL)isEqual:(id)object {
return [object isKindOfClass:[GRPCReachabilityFlags class]] &&
- _flags == ((GRPCReachabilityFlags *)object)->_flags;
+ _flags == ((GRPCReachabilityFlags *)object)->_flags;
}
- (NSUInteger)hash {
@@ -106,29 +109,33 @@
#pragma mark Connectivity Monitor
-// Assumes the third argument is a block that accepts a GRPCReachabilityFlags object, and passes the
-// received ones to it.
+// Assumes the third argument is a block that accepts a GRPCReachabilityFlags
+// object, and passes the received ones to it.
static void PassFlagsToContextInfoBlock(SCNetworkReachabilityRef target,
SCNetworkReachabilityFlags flags,
void *info) {
- #pragma unused (target)
- // This can be called many times with the same info. The info is retained by SCNetworkReachability
- // while this function is being executed.
- void (^handler)(GRPCReachabilityFlags *) = (__bridge void (^)(GRPCReachabilityFlags *))info;
+#pragma unused(target)
+ // This can be called many times with the same info. The info is retained by
+ // SCNetworkReachability while this function is being executed.
+ void (^handler)(GRPCReachabilityFlags *) =
+ (__bridge void (^)(GRPCReachabilityFlags *))info;
handler([[GRPCReachabilityFlags alloc] initWithFlags:flags]);
}
@implementation GRPCConnectivityMonitor {
SCNetworkReachabilityRef _reachabilityRef;
+ GRPCReachabilityFlags *_previousReachabilityFlags;
}
-- (nullable instancetype)initWithReachability:(nullable SCNetworkReachabilityRef)reachability {
+- (nullable instancetype)initWithReachability:
+ (nullable SCNetworkReachabilityRef)reachability {
if (!reachability) {
return nil;
}
if ((self = [super init])) {
_reachabilityRef = CFRetain(reachability);
_queue = dispatch_get_main_queue();
+ _previousReachabilityFlags = nil;
}
return self;
}
@@ -142,33 +149,46 @@ static void PassFlagsToContextInfoBlock(SCNetworkReachabilityRef target,
SCNetworkReachabilityRef reachability =
SCNetworkReachabilityCreateWithName(NULL, hostName);
- GRPCConnectivityMonitor *returnValue = [[self alloc] initWithReachability:reachability];
+ GRPCConnectivityMonitor *returnValue =
+ [[self alloc] initWithReachability:reachability];
if (reachability) {
CFRelease(reachability);
}
return returnValue;
}
-- (void)handleLossWithHandler:(void (^)())handler {
+- (void)handleLossWithHandler:(void (^)())handler
+ wifiStatusChangeHandler:(nonnull void (^)())wifiStatusChangeHandler {
+ __weak typeof(self) weakSelf = self;
[self startListeningWithHandler:^(GRPCReachabilityFlags *flags) {
- if (!flags.isHostReachable) {
- handler();
+ typeof(self) strongSelf = weakSelf;
+ if (strongSelf) {
+ if (!flags.reachable) {
+ handler();
+ } else if (strongSelf->_previousReachabilityFlags &&
+ (flags.isWWAN ^
+ strongSelf->_previousReachabilityFlags.isWWAN)) {
+ wifiStatusChangeHandler();
+ }
+ strongSelf->_previousReachabilityFlags = flags;
}
}];
}
- (void)startListeningWithHandler:(void (^)(GRPCReachabilityFlags *))handler {
- // Copy to ensure the handler block is in the heap (and so can't be deallocated when this method
- // returns).
+ // Copy to ensure the handler block is in the heap (and so can't be
+ // deallocated when this method returns).
void (^copiedHandler)(GRPCReachabilityFlags *) = [handler copy];
SCNetworkReachabilityContext context = {
- .version = 0,
- .info = (__bridge void *)copiedHandler,
- .retain = CFRetain,
- .release = CFRelease,
+ .version = 0,
+ .info = (__bridge void *)copiedHandler,
+ .retain = CFRetain,
+ .release = CFRelease,
};
- // The following will retain context.info, and release it when the callback is set to NULL.
- SCNetworkReachabilitySetCallback(_reachabilityRef, PassFlagsToContextInfoBlock, &context);
+ // The following will retain context.info, and release it when the callback is
+ // set to NULL.
+ SCNetworkReachabilitySetCallback(_reachabilityRef,
+ PassFlagsToContextInfoBlock, &context);
SCNetworkReachabilitySetDispatchQueue(_reachabilityRef, _queue);
}
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index f8634b448e..dd0479083d 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -33,9 +33,9 @@
#import "GRPCHost.h"
+#import <GRPCClient/GRPCCall.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
-#import <GRPCClient/GRPCCall.h>
#ifdef GRPC_COMPILE_WITH_CRONET
#import <GRPCClient/GRPCCall+ChannelArg.h>
#import <GRPCClient/GRPCCall+Cronet.h>
@@ -43,18 +43,27 @@
#import "GRPCChannel.h"
#import "GRPCCompletionQueue.h"
+#import "GRPCConnectivityMonitor.h"
#import "NSDictionary+GRPC.h"
NS_ASSUME_NONNULL_BEGIN
-// TODO(jcanizales): Generate the version in a standalone header, from templates. Like
+// TODO(jcanizales): Generate the version in a standalone header, from
+// templates. Like
// templates/src/core/surface/version.c.template .
-#define GRPC_OBJC_VERSION_STRING @"1.0.0"
+#define GRPC_OBJC_VERSION_STRING @"1.0.1"
static NSMutableDictionary *kHostCache;
+// This connectivity monitor flushes the host cache when the connectivity
+// status changes or the connection switches between Wifi and Cellular data, so
+// that a new call will use a new channel. Otherwise, a new call would still
+// use the cached channel, which is no longer available and would cause gRPC to hang.
+static GRPCConnectivityMonitor *connectivityMonitor = nil;
+
@implementation GRPCHost {
- // TODO(mlumish): Investigate whether caching channels with strong links is a good idea.
+ // TODO(mlumish): Investigate whether caching channels with strong links is a
+ // good idea.
GRPCChannel *_channel;
}
@@ -74,11 +83,13 @@ static NSMutableDictionary *kHostCache;
return nil;
}
- // To provide a default port, we try to interpret the address. If it's just a host name without
- // scheme and without port, we'll use port 443. If it has a scheme, we pass it untouched to the C
- // gRPC library.
- // TODO(jcanizales): Add unit tests for the types of addresses we want to let pass untouched.
- NSURL *hostURL = [NSURL URLWithString:[@"https://" stringByAppendingString:address]];
+ // To provide a default port, we try to interpret the address. If it's just a
+ // host name without scheme and without port, we'll use port 443. If it has a
+ // scheme, we pass it untouched to the C gRPC library.
+ // TODO(jcanizales): Add unit tests for the types of addresses we want to let
+ // pass untouched.
+ NSURL *hostURL =
+ [NSURL URLWithString:[@"https://" stringByAppendingString:address]];
if (hostURL.host && !hostURL.port) {
address = [hostURL.host stringByAppendingString:@":443"];
}
@@ -88,6 +99,7 @@ static NSMutableDictionary *kHostCache;
dispatch_once(&cacheInitialization, ^{
kHostCache = [NSMutableDictionary dictionary];
});
+
@synchronized(kHostCache) {
GRPCHost *cachedHost = kHostCache[address];
if (cachedHost) {
@@ -99,6 +111,17 @@ static NSMutableDictionary *kHostCache;
_secure = YES;
kHostCache[address] = self;
}
+ // Keep a single monitor to flush the cache if the connectivity status changes
+ // Thread safety guarded by @synchronized(kHostCache)
+ if (!connectivityMonitor) {
+ connectivityMonitor =
+ [GRPCConnectivityMonitor monitorWithHost:hostURL.host];
+ void (^handler)() = ^{
+ [GRPCHost flushChannelCache];
+ };
+ [connectivityMonitor handleLossWithHandler:handler
+ wifiStatusChangeHandler:handler];
+ }
}
return self;
}
@@ -114,7 +137,7 @@ static NSMutableDictionary *kHostCache;
}
+ (void)resetAllHostSettings {
- @synchronized (kHostCache) {
+ @synchronized(kHostCache) {
kHostCache = [NSMutableDictionary dictionary];
}
}
@@ -140,16 +163,19 @@ static NSMutableDictionary *kHostCache;
static NSError *kDefaultRootsError;
static dispatch_once_t loading;
dispatch_once(&loading, ^{
- NSString *defaultPath = @"gRPCCertificates.bundle/roots"; // .pem
- // Do not use NSBundle.mainBundle, as it's nil for tests of library projects.
+ NSString *defaultPath = @"gRPCCertificates.bundle/roots"; // .pem
+ // Do not use NSBundle.mainBundle, as it's nil for tests of library
+ // projects.
NSBundle *bundle = [NSBundle bundleForClass:self.class];
NSString *path = [bundle pathForResource:defaultPath ofType:@"pem"];
NSError *error;
- // Files in PEM format can have non-ASCII characters in their comments (e.g. for the name of the
- // issuer). Load them as UTF8 and produce an ASCII equivalent.
- NSString *contentInUTF8 = [NSString stringWithContentsOfFile:path
- encoding:NSUTF8StringEncoding
- error:&error];
+ // Files in PEM format can have non-ASCII characters in their comments (e.g.
+ // for the name of the issuer). Load them as UTF8 and produce an ASCII
+ // equivalent.
+ NSString *contentInUTF8 =
+ [NSString stringWithContentsOfFile:path
+ encoding:NSUTF8StringEncoding
+ error:&error];
if (contentInUTF8 == nil) {
kDefaultRootsError = error;
return;
@@ -161,17 +187,21 @@ static NSMutableDictionary *kHostCache;
NSData *rootsASCII;
if (pemRootCerts != nil) {
rootsASCII = [pemRootCerts dataUsingEncoding:NSASCIIStringEncoding
- allowLossyConversion:YES];
+ allowLossyConversion:YES];
} else {
if (kDefaultRootsASCII == nil) {
if (errorPtr) {
*errorPtr = kDefaultRootsError;
}
- NSAssert(kDefaultRootsASCII, @"Could not read gRPCCertificates.bundle/roots.pem. This file, "
- "with the root certificates, is needed to establish secure (TLS) connections. "
- "Because the file is distributed with the gRPC library, this error is usually a sign "
- "that the library wasn't configured correctly for your project. Error: %@",
- kDefaultRootsError);
+ NSAssert(kDefaultRootsASCII,
+ @"Could not read gRPCCertificates.bundle/roots.pem. This file, "
+ "with the root certificates, is needed to establish secure "
+ "(TLS) connections. "
+ "Because the file is distributed with the gRPC library, this "
+ "error is usually a sign "
+ "that the library wasn't configured correctly for your "
+ "project. Error: %@",
+ kDefaultRootsError);
return NO;
}
rootsASCII = kDefaultRootsASCII;
@@ -182,10 +212,12 @@ static NSMutableDictionary *kHostCache;
creds = grpc_ssl_credentials_create(rootsASCII.bytes, NULL, NULL);
} else {
grpc_ssl_pem_key_cert_pair key_cert_pair;
- NSData *privateKeyASCII = [pemPrivateKey dataUsingEncoding:NSASCIIStringEncoding
- allowLossyConversion:YES];
- NSData *certChainASCII = [pemCertChain dataUsingEncoding:NSASCIIStringEncoding
- allowLossyConversion:YES];
+ NSData *privateKeyASCII =
+ [pemPrivateKey dataUsingEncoding:NSASCIIStringEncoding
+ allowLossyConversion:YES];
+ NSData *certChainASCII =
+ [pemCertChain dataUsingEncoding:NSASCIIStringEncoding
+ allowLossyConversion:YES];
key_cert_pair.private_key = privateKeyASCII.bytes;
key_cert_pair.cert_chain = certChainASCII.bytes;
creds = grpc_ssl_credentials_create(rootsASCII.bytes, &key_cert_pair, NULL);
@@ -205,7 +237,8 @@ static NSMutableDictionary *kHostCache;
NSMutableDictionary *args = [NSMutableDictionary dictionary];
// TODO(jcanizales): Add OS and device information (see
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#user-agents ).
+ // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#user-agents
+ // ).
NSString *userAgent = @"grpc-objc/" GRPC_OBJC_VERSION_STRING;
if (_userAgentPrefix) {
userAgent = [_userAgentPrefix stringByAppendingFormat:@" %@", userAgent];
@@ -219,7 +252,7 @@ static NSMutableDictionary *kHostCache;
if (_responseSizeLimitOverride) {
args[@GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH] = _responseSizeLimitOverride;
}
- // Use 10000ms initial backoff time for correct behavior on bad/slow networks
+ // Use 10000ms initial backoff time for correct behavior on bad/slow networks
args[@GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS] = @10000;
return args;
}
@@ -230,31 +263,35 @@ static NSMutableDictionary *kHostCache;
BOOL useCronet = [GRPCCall isUsingCronet];
#endif
if (_secure) {
- GRPCChannel *channel;
- @synchronized(self) {
- if (_channelCreds == nil) {
- [self setTLSPEMRootCerts:nil withPrivateKey:nil withCertChain:nil error:nil];
- }
+ GRPCChannel *channel;
+ @synchronized(self) {
+ if (_channelCreds == nil) {
+ [self setTLSPEMRootCerts:nil
+ withPrivateKey:nil
+ withCertChain:nil
+ error:nil];
+ }
#ifdef GRPC_COMPILE_WITH_CRONET
- if (useCronet) {
- channel = [GRPCChannel secureCronetChannelWithHost:_address
- channelArgs:args];
- } else
+ if (useCronet) {
+ channel =
+ [GRPCChannel secureCronetChannelWithHost:_address channelArgs:args];
+ } else
#endif
- {
- channel = [GRPCChannel secureChannelWithHost:_address
- credentials:_channelCreds
- channelArgs:args];
- }
+ {
+ channel = [GRPCChannel secureChannelWithHost:_address
+ credentials:_channelCreds
+ channelArgs:args];
}
- return channel;
+ }
+ return channel;
} else {
return [GRPCChannel insecureChannelWithHost:_address channelArgs:args];
}
}
- (NSString *)hostName {
- // TODO(jcanizales): Default to nil instead of _address when Issue #2635 is clarified.
+ // TODO(jcanizales): Default to nil instead of _address when Issue #2635 is
+ // clarified.
return _hostNameOverride ?: _address;
}
diff --git a/src/objective-c/GRPCClient/private/GRPCReachabilityFlagNames.xmacro.h b/src/objective-c/GRPCClient/private/GRPCReachabilityFlagNames.xmacro.h
index 4b92504b55..f5b9e7e64c 100644
--- a/src/objective-c/GRPCClient/private/GRPCReachabilityFlagNames.xmacro.h
+++ b/src/objective-c/GRPCClient/private/GRPCReachabilityFlagNames.xmacro.h
@@ -55,7 +55,7 @@
#endif
#if TARGET_OS_IPHONE
-GRPC_XMACRO_ITEM(isCell, IsWWAN)
+GRPC_XMACRO_ITEM(isWWAN, IsWWAN)
#endif
GRPC_XMACRO_ITEM(reachable, Reachable)
GRPC_XMACRO_ITEM(transientConnection, TransientConnection)
diff --git a/src/objective-c/GRPCClient/private/GRPCRequestHeaders.m b/src/objective-c/GRPCClient/private/GRPCRequestHeaders.m
index c6a03c145e..58c6032e80 100644
--- a/src/objective-c/GRPCClient/private/GRPCRequestHeaders.m
+++ b/src/objective-c/GRPCClient/private/GRPCRequestHeaders.m
@@ -38,9 +38,10 @@
#import "NSDictionary+GRPC.h"
// Used by the setter.
-static void CheckIsNonNilASCII(NSString *name, NSString* value) {
+static void CheckIsNonNilASCII(NSString *name, NSString *value) {
if (!value) {
- [NSException raise:NSInvalidArgumentException format:@"%@ cannot be nil", name];
+ [NSException raise:NSInvalidArgumentException
+ format:@"%@ cannot be nil", name];
}
if (![value canBeConvertedToEncoding:NSASCIIStringEncoding]) {
[NSException raise:NSInvalidArgumentException
@@ -52,15 +53,20 @@ static void CheckIsNonNilASCII(NSString *name, NSString* value) {
static void CheckKeyValuePairIsValid(NSString *key, id value) {
if ([key hasSuffix:@"-bin"]) {
if (![value isKindOfClass:NSData.class]) {
- [NSException raise:NSInvalidArgumentException
- format:@"Expected NSData value for header %@ ending in \"-bin\", "
- @"instead got %@", key, value];
+ [NSException
+ raise:NSInvalidArgumentException
+ format:@"Expected NSData value for header %@ ending in \"-bin\", "
+ @"instead got %@",
+ key, value];
}
} else {
if (![value isKindOfClass:NSString.class]) {
- [NSException raise:NSInvalidArgumentException
- format:@"Expected NSString value for header %@ not ending in \"-bin\", "
- @"instead got %@", key, value];
+ [NSException
+ raise:NSInvalidArgumentException
+ format:
+ @"Expected NSString value for header %@ not ending in \"-bin\", "
+ @"instead got %@",
+ key, value];
}
CheckIsNonNilASCII(@"Text header value", (NSString *)value);
}
@@ -68,9 +74,10 @@ static void CheckKeyValuePairIsValid(NSString *key, id value) {
@implementation GRPCRequestHeaders {
__weak GRPCCall *_call;
- // The NSMutableDictionary superclass doesn't hold any storage (so that people can implement their
- // own in subclasses). As that's not the reason we're subclassing, we just delegate storage to the
- // default NSMutableDictionary subclass returned by the cluster (e.g. __NSDictionaryM on iOS 9).
+ // The NSMutableDictionary superclass doesn't hold any storage (so that people
+ // can implement their own in subclasses). As that's not the reason we're
+ // subclassing, we just delegate storage to the default NSMutableDictionary
+ // subclass returned by the cluster (e.g. __NSDictionaryM on iOS 9).
NSMutableDictionary *_delegate;
}
@@ -91,7 +98,8 @@ static void CheckKeyValuePairIsValid(NSString *key, id value) {
}
// Designated initializer
-- (instancetype)initWithCall:(GRPCCall *)call storage:(NSMutableDictionary *)storage {
+- (instancetype)initWithCall:(GRPCCall *)call
+ storage:(NSMutableDictionary *)storage {
// TODO(jcanizales): Throw if call or storage are nil.
if ((self = [super init])) {
_call = call;
@@ -100,9 +108,10 @@ static void CheckKeyValuePairIsValid(NSString *key, id value) {
return self;
}
-- (instancetype)initWithObjects:(const id _Nonnull __unsafe_unretained *)objects
- forKeys:(const id<NSCopying> _Nonnull __unsafe_unretained *)keys
- count:(NSUInteger)cnt {
+- (instancetype)
+initWithObjects:(const id _Nonnull __unsafe_unretained *)objects
+ forKeys:(const id<NSCopying> _Nonnull __unsafe_unretained *)keys
+ count:(NSUInteger)cnt {
return [self init];
}
@@ -134,7 +143,7 @@ static void CheckKeyValuePairIsValid(NSString *key, id value) {
return _delegate.count;
}
-- (NSEnumerator * _Nonnull)keyEnumerator {
+- (NSEnumerator *_Nonnull)keyEnumerator {
return [_delegate keyEnumerator];
}
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index 627b6aa86d..bbda9f2f64 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -34,27 +34,27 @@
#import "GRPCWrappedCall.h"
#import <Foundation/Foundation.h>
-#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
+#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#import "GRPCCompletionQueue.h"
#import "GRPCHost.h"
-#import "NSDictionary+GRPC.h"
#import "NSData+GRPC.h"
+#import "NSDictionary+GRPC.h"
#import "NSError+GRPC.h"
@implementation GRPCOperation {
-@protected
- // Most operation subclasses don't set any flags in the grpc_op, and rely on the flag member being
- // initialized to zero.
+ @protected
+ // Most operation subclasses don't set any flags in the grpc_op, and rely on
+ // the flag member being initialized to zero.
grpc_op _op;
- void(^_handler)();
+ void (^_handler)();
}
- (void)finish {
if (_handler) {
- void(^handler)() = _handler;
+ void (^handler)() = _handler;
_handler = nil;
handler();
}
@@ -101,7 +101,8 @@
- (instancetype)initWithMessage:(NSData *)message handler:(void (^)())handler {
if (!message) {
- [NSException raise:NSInvalidArgumentException format:@"message cannot be nil"];
+ [NSException raise:NSInvalidArgumentException
+ format:@"message cannot be nil"];
}
if (self = [super init]) {
_op.op = GRPC_OP_SEND_MESSAGE;
@@ -137,11 +138,11 @@
grpc_metadata_array _headers;
}
-- (instancetype) init {
+- (instancetype)init {
return [self initWithHandler:nil];
}
-- (instancetype) initWithHandler:(void (^)(NSDictionary *))handler {
+- (instancetype)initWithHandler:(void (^)(NSDictionary *))handler {
if (self = [super init]) {
_op.op = GRPC_OP_RECV_INITIAL_METADATA;
grpc_metadata_array_init(&_headers);
@@ -152,7 +153,7 @@
_handler = ^{
__strong typeof(self) strongSelf = weakSelf;
NSDictionary *metadata = [NSDictionary
- grpc_dictionaryFromMetadataArray:strongSelf->_headers];
+ grpc_dictionaryFromMetadataArray:strongSelf->_headers];
handler(metadata);
};
}
@@ -166,7 +167,7 @@
@end
-@implementation GRPCOpRecvMessage{
+@implementation GRPCOpRecvMessage {
grpc_byte_buffer *_receivedMessage;
}
@@ -192,18 +193,18 @@
@end
-@implementation GRPCOpRecvStatus{
+@implementation GRPCOpRecvStatus {
grpc_status_code _statusCode;
char *_details;
size_t _detailsCapacity;
grpc_metadata_array _trailers;
}
-- (instancetype) init {
+- (instancetype)init {
return [self initWithHandler:nil];
}
-- (instancetype) initWithHandler:(void (^)(NSError *, NSDictionary *))handler {
+- (instancetype)initWithHandler:(void (^)(NSError *, NSDictionary *))handler {
if (self = [super init]) {
_op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
_op.data.recv_status_on_client.status = &_statusCode;
@@ -216,10 +217,11 @@
__weak typeof(self) weakSelf = self;
_handler = ^{
__strong typeof(self) strongSelf = weakSelf;
- NSError *error = [NSError grpc_errorFromStatusCode:strongSelf->_statusCode
- details:strongSelf->_details];
+ NSError *error =
+ [NSError grpc_errorFromStatusCode:strongSelf->_statusCode
+ details:strongSelf->_details];
NSDictionary *trailers = [NSDictionary
- grpc_dictionaryFromMetadataArray:strongSelf->_trailers];
+ grpc_dictionaryFromMetadataArray:strongSelf->_trailers];
handler(error, trailers);
};
}
@@ -245,20 +247,21 @@
return [self initWithHost:nil path:nil];
}
-- (instancetype)initWithHost:(NSString *)host
- path:(NSString *)path {
+- (instancetype)initWithHost:(NSString *)host path:(NSString *)path {
if (!path || !host) {
[NSException raise:NSInvalidArgumentException
format:@"path and host cannot be nil."];
}
if (self = [super init]) {
- // Each completion queue consumes one thread. There's a trade to be made between creating and
- // consuming too many threads and having contention of multiple calls in a single completion
- // queue. Currently we use a singleton queue.
+      // Each completion queue consumes one thread. There's a trade-off to be made
+ // between creating and consuming too many threads and having contention of
+ // multiple calls in a single completion queue. Currently we use a singleton
+ // queue.
_queue = [GRPCCompletionQueue completionQueue];
- _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path completionQueue:_queue];
+ _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path
+ completionQueue:_queue];
if (_call == NULL) {
return nil;
}
@@ -270,32 +273,35 @@
[self startBatchWithOperations:operations errorHandler:nil];
}
-- (void)startBatchWithOperations:(NSArray *)operations errorHandler:(void (^)())errorHandler {
+- (void)startBatchWithOperations:(NSArray *)operations
+ errorHandler:(void (^)())errorHandler {
size_t nops = operations.count;
grpc_op *ops_array = gpr_malloc(nops * sizeof(grpc_op));
size_t i = 0;
for (GRPCOperation *operation in operations) {
ops_array[i++] = operation.op;
}
- grpc_call_error error = grpc_call_start_batch(_call, ops_array, nops,
- (__bridge_retained void *)(^(bool success){
- if (!success) {
- if (errorHandler) {
- errorHandler();
- } else {
- return;
- }
- }
- for (GRPCOperation *operation in operations) {
- [operation finish];
- }
- }), NULL);
+ grpc_call_error error = grpc_call_start_batch(
+ _call, ops_array, nops, (__bridge_retained void *)(^(bool success) {
+ if (!success) {
+ if (errorHandler) {
+ errorHandler();
+ } else {
+ return;
+ }
+ }
+ for (GRPCOperation *operation in operations) {
+ [operation finish];
+ }
+ }),
+ NULL);
gpr_free(ops_array);
if (error != GRPC_CALL_OK) {
[NSException raise:NSInternalInconsistencyException
- format:@"A precondition for calling grpc_call_start_batch wasn't met. Error %i",
- error];
+ format:@"A precondition for calling grpc_call_start_batch "
+ @"wasn't met. Error %i",
+ error];
}
}
diff --git a/src/objective-c/GRPCClient/private/NSError+GRPC.h b/src/objective-c/GRPCClient/private/NSError+GRPC.h
index e0c1efc1f9..a9a321470c 100644
--- a/src/objective-c/GRPCClient/private/NSError+GRPC.h
+++ b/src/objective-c/GRPCClient/private/NSError+GRPC.h
@@ -36,8 +36,9 @@
@interface NSError (GRPC)
/**
- * Returns nil if the status code is OK. Otherwise, a NSError whose code is one of |GRPCErrorCode|
- * and whose domain is |kGRPCErrorDomain|.
+ * Returns nil if the status code is OK. Otherwise, an NSError whose code is one
+ * of |GRPCErrorCode| and whose domain is |kGRPCErrorDomain|.
*/
-+ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode details:(char *)details;
++ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode
+ details:(char *)details;
@end
diff --git a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj
index 2a9466c03f..3f26c98564 100644
--- a/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj
+++ b/src/objective-c/tests/Connectivity/ConnectivityTestingApp.xcodeproj/project.pbxproj
@@ -156,7 +156,7 @@
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [[ $? != 0 ]] ; then\n cat << EOM\nerror: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\nEOM\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
showEnvVarsInLog = 0;
};
5347BF6C41E7888C1C05CD88 /* [CP] Copy Pods Resources */ = {
diff --git a/src/objective-c/tests/Connectivity/Podfile b/src/objective-c/tests/Connectivity/Podfile
index f9224d9e4e..27ff935c54 100644
--- a/src/objective-c/tests/Connectivity/Podfile
+++ b/src/objective-c/tests/Connectivity/Podfile
@@ -1,10 +1,32 @@
install! 'cocoapods', :deterministic_uuids => false
+platform :ios, '8.0'
# Location of gRPC's repo root relative to this file.
GRPC_LOCAL_SRC = '../../../..'
target 'ConnectivityTestingApp' do
pod 'gRPC', :path => GRPC_LOCAL_SRC
+ pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
+ pod 'gRPC-ProtoRPC', :path => GRPC_LOCAL_SRC
+ pod 'gRPC-RxLibrary', :path => GRPC_LOCAL_SRC
pod 'Protobuf', :path => "#{GRPC_LOCAL_SRC}/third_party/protobuf"
pod 'BoringSSL', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c"
end
+
+pre_install do |installer|
+ # This is the gRPC-Core podspec object, as initialized by its podspec file.
+ grpc_core_spec = installer.pod_targets.find{|t| t.name == 'gRPC-Core'}.root_spec
+
+ # Copied from gRPC-Core.podspec, except for the adjusted src_root:
+ src_root = "$(PODS_ROOT)/../#{GRPC_LOCAL_SRC}"
+ grpc_core_spec.pod_target_xcconfig = {
+ 'GRPC_SRC_ROOT' => src_root,
+ 'HEADER_SEARCH_PATHS' => '"$(inherited)" "$(GRPC_SRC_ROOT)/include"',
+ 'USER_HEADER_SEARCH_PATHS' => '"$(GRPC_SRC_ROOT)"',
+ # If we don't set these two settings, `include/grpc/support/time.h` and
+ # `src/core/lib/support/string.h` shadow the system `<time.h>` and `<string.h>`, breaking the
+ # build.
+ 'USE_HEADERMAP' => 'NO',
+ 'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+ }
+end
diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m
index 44f22c9e85..9804734d6a 100644
--- a/src/objective-c/tests/InteropTests.m
+++ b/src/objective-c/tests/InteropTests.m
@@ -92,20 +92,21 @@
return 0;
}
++ (void)setUp {
+#ifdef GRPC_COMPILE_WITH_CRONET
+ // Cronet setup
+ [Cronet setHttp2Enabled:YES];
+ [Cronet start];
+ [GRPCCall useCronetWithEngine:[Cronet getGlobalEngine]];
+#endif
+}
+
- (void)setUp {
self.continueAfterFailure = NO;
[GRPCCall resetHostSettings];
_service = self.class.host ? [RMTTestService serviceWithHost:self.class.host] : nil;
-#ifdef GRPC_COMPILE_WITH_CRONET
- if (cronetEngine == NULL) {
- // Cronet setup
- [Cronet setHttp2Enabled:YES];
- [Cronet start];
- [GRPCCall useCronetWithEngine:[Cronet getGlobalEngine]];
- }
-#endif
}
- (void)testEmptyUnaryRPC {
diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php
index 8a7f6572a6..36d94cae2c 100644
--- a/src/php/lib/Grpc/BaseStub.php
+++ b/src/php/lib/Grpc/BaseStub.php
@@ -116,7 +116,7 @@ class BaseStub
}
/**
- * @param $timeout in microseconds
+ * @param int $timeout in microseconds
*
* @return bool true if channel is ready
* @throw Exception if channel is in FATAL_ERROR state
@@ -189,7 +189,7 @@ class BaseStub
/**
* validate and normalize the metadata array.
*
- * @param $metadata The metadata map
+ * @param array $metadata The metadata map
*
* @return $metadata Validated and key-normalized metadata map
* @throw InvalidArgumentException if key contains invalid characters
@@ -216,8 +216,8 @@ class BaseStub
* Call a remote method that takes a single argument and has a
* single output.
*
- * @param string $method The name of the method to call
- * @param $argument The argument to the method
+ * @param string $method The name of the method to call
+ * @param mixed $argument The argument to the method
* @param callable $deserialize A function that deserializes the response
* @param array $metadata A metadata map to send to the server
*
@@ -250,8 +250,8 @@ class BaseStub
* Call a remote method that takes a stream of arguments and has a single
* output.
*
- * @param string $method The name of the method to call
- * @param $arguments An array or Traversable of arguments to stream to the
+ * @param string $method The name of the method to call
+ * @param array $arguments An array or Traversable of arguments to stream to the
* server
* @param callable $deserialize A function that deserializes the response
* @param array $metadata A metadata map to send to the server
@@ -284,8 +284,8 @@ class BaseStub
* Call a remote method that takes a single argument and returns a stream of
* responses.
*
- * @param string $method The name of the method to call
- * @param $argument The argument to the method
+ * @param string $method The name of the method to call
+ * @param mixed $argument The argument to the method
* @param callable $deserialize A function that deserializes the responses
* @param array $metadata A metadata map to send to the server
*
diff --git a/src/php/tests/interop/interop_client.php b/src/php/tests/interop/interop_client.php
index 124d324913..3d62e86ab0 100755
--- a/src/php/tests/interop/interop_client.php
+++ b/src/php/tests/interop/interop_client.php
@@ -477,9 +477,11 @@ function statusCodeAndMessage($stub)
list($result, $status) = $call->wait();
hardAssert($status->code === 2,
- 'Received unexpected status code');
+ 'Received unexpected UnaryCall status code: ' .
+ $status->code);
hardAssert($status->details === 'test status message',
- 'Received unexpected status details');
+ 'Received unexpected UnaryCall status details: ' .
+ $status->details);
$streaming_call = $stub->FullDuplexCall();
@@ -487,14 +489,27 @@ function statusCodeAndMessage($stub)
$streaming_request->setResponseStatus($echo_status);
$streaming_call->write($streaming_request);
$streaming_call->writesDone();
+ $result = $streaming_call->read();
$status = $streaming_call->getStatus();
hardAssert($status->code === 2,
- 'Received unexpected status code');
+ 'Received unexpected FullDuplexCall status code: ' .
+ $status->code);
hardAssert($status->details === 'test status message',
- 'Received unexpected status details');
+ 'Received unexpected FullDuplexCall status details: ' .
+ $status->details);
+}
+
+# NOTE: the stub input to this function is from UnimplementedService
+function unimplementedService($stub)
+{
+ $call = $stub->UnimplementedCall(new grpc\testing\EmptyMessage());
+ list($result, $status) = $call->wait();
+ hardAssert($status->code === Grpc\STATUS_UNIMPLEMENTED,
+ 'Received unexpected status code');
}
+# NOTE: the stub input to this function is from TestService
function unimplementedMethod($stub)
{
$call = $stub->UnimplementedCall(new grpc\testing\EmptyMessage());
@@ -587,7 +602,7 @@ function _makeStub($args)
$opts['update_metadata'] = $update_metadata;
}
- if ($test_case === 'unimplemented_method') {
+ if ($test_case === 'unimplemented_service') {
$stub = new grpc\testing\UnimplementedServiceClient($server_address,
$opts);
} else {
@@ -640,6 +655,9 @@ function interop_main($args, $stub = false)
case 'status_code_and_message':
statusCodeAndMessage($stub);
break;
+ case 'unimplemented_service':
+ unimplementedService($stub);
+ break;
case 'unimplemented_method':
unimplementedMethod($stub);
break;
diff --git a/src/proto/grpc/testing/control.proto b/src/proto/grpc/testing/control.proto
index ece6910815..918f5fa3e3 100644
--- a/src/proto/grpc/testing/control.proto
+++ b/src/proto/grpc/testing/control.proto
@@ -137,6 +137,11 @@ message ServerConfig {
// If we use an OTHER_SERVER client_type, this string gives more detail
string other_server_api = 11;
+
+ // c++-only options (for now) --------------------------------
+
+ // Buffer pool size (no buffer pool specified if unset)
+ int32 resource_quota_size = 1001;
}
message ServerArgs {
@@ -213,6 +218,10 @@ message ScenarioResultSummary
double latency_95 = 9;
double latency_99 = 10;
double latency_999 = 11;
+
+ // Number of requests that succeeded/failed
+ double successful_requests_per_second = 12;
+ double failed_requests_per_second = 13;
}
// Results of a single benchmark scenario.
@@ -232,4 +241,6 @@ message ScenarioResult {
// Information on success or failure of each worker
repeated bool client_success = 7;
repeated bool server_success = 8;
+ // Number of failed requests (one row per status code seen)
+ repeated RequestResultCount request_results = 9;
}
diff --git a/src/proto/grpc/testing/duplicate/echo_duplicate.proto b/src/proto/grpc/testing/duplicate/echo_duplicate.proto
index 94130ea767..97fdbc4fd3 100644
--- a/src/proto/grpc/testing/duplicate/echo_duplicate.proto
+++ b/src/proto/grpc/testing/duplicate/echo_duplicate.proto
@@ -38,4 +38,5 @@ package grpc.testing.duplicate;
service EchoTestService {
rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse);
+ rpc ResponseStream(EchoRequest) returns (stream EchoResponse);
}
diff --git a/src/proto/grpc/testing/proto2/empty2.proto b/src/proto/grpc/testing/proto2/empty2.proto
new file mode 100644
index 0000000000..51f0fe28b1
--- /dev/null
+++ b/src/proto/grpc/testing/proto2/empty2.proto
@@ -0,0 +1,37 @@
+
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+package grpc.testing.proto2;
+
+message EmptyWithExtensions {
+ extensions 100 to 999;
+}
diff --git a/src/proto/grpc/testing/proto2/empty2_extensions.proto b/src/proto/grpc/testing/proto2/empty2_extensions.proto
new file mode 100644
index 0000000000..0229fe3fbd
--- /dev/null
+++ b/src/proto/grpc/testing/proto2/empty2_extensions.proto
@@ -0,0 +1,43 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+import "src/proto/grpc/testing/proto2/empty2.proto";
+
+package grpc.testing.proto2;
+
+// Fill emptiness with music.
+extend grpc.testing.proto2.EmptyWithExtensions {
+ optional int64 Deadmau5 = 124;
+ optional float Madeon = 125;
+ optional string AboveAndBeyond = 126;
+ optional bool Tycho = 127;
+ optional fixed64 Pendulum = 128;
+}
diff --git a/src/proto/grpc/testing/stats.proto b/src/proto/grpc/testing/stats.proto
index f9d116110b..d40d714801 100644
--- a/src/proto/grpc/testing/stats.proto
+++ b/src/proto/grpc/testing/stats.proto
@@ -59,6 +59,11 @@ message HistogramData {
double count = 6;
}
+message RequestResultCount {
+ int32 status_code = 1;
+ int64 count = 2;
+}
+
message ClientStats {
// Latency histogram. Data points are in nanoseconds.
HistogramData latencies = 1;
@@ -67,4 +72,7 @@ message ClientStats {
double time_elapsed = 2;
double time_user = 3;
double time_system = 4;
+
+ // Number of failed requests (one row per status code seen)
+ repeated RequestResultCount request_results = 5;
}
diff --git a/src/proto/grpc/testing/test.proto b/src/proto/grpc/testing/test.proto
index b52c4cbad6..2b0dcde5a9 100644
--- a/src/proto/grpc/testing/test.proto
+++ b/src/proto/grpc/testing/test.proto
@@ -74,6 +74,10 @@ service TestService {
// first request.
rpc HalfDuplexCall(stream StreamingOutputCallRequest)
returns (stream StreamingOutputCallResponse);
+
+ // The test server will not implement this method. It will be used
+ // to test the behavior when clients call unimplemented methods.
+ rpc UnimplementedCall(grpc.testing.Empty) returns (grpc.testing.Empty);
}
// A simple service NOT implemented at servers so clients can test for
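The new UnimplementedCall RPC exists purely so interop clients can check how a server reacts to a method it never registered. A minimal sketch of the client-side expectation, written in Python and assuming a TestService `stub` has already been built by the surrounding interop driver (the helper name `_expect_unimplemented` is illustrative only, not part of the test suite):

import grpc
from src.proto.grpc.testing import empty_pb2

def _expect_unimplemented(stub):
    # Calling a method the server never registered should surface
    # grpc.StatusCode.UNIMPLEMENTED on the client side.
    try:
        stub.UnimplementedCall(empty_pb2.Empty())
    except grpc.RpcError as rpc_error:
        if rpc_error.code() is not grpc.StatusCode.UNIMPLEMENTED:
            raise
    else:
        raise ValueError('expected UNIMPLEMENTED, but the call succeeded')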
diff --git a/src/python/.gitignore b/src/python/.gitignore
index f158efa4bf..7b520579a0 100644
--- a/src/python/.gitignore
+++ b/src/python/.gitignore
@@ -1 +1,3 @@
gens/
+*_pb2.py
+*_pb2_grpc.py
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index ad20c94de9..9560fad137 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -173,10 +173,14 @@ cdef extern from "grpc/grpc.h":
GRPC_ARG_INTEGER
GRPC_ARG_POINTER
- ctypedef struct grpc_arg_value_pointer:
- void *address "p"
+ ctypedef struct grpc_arg_pointer_vtable:
void *(*copy)(void *)
void (*destroy)(void *)
+ int (*cmp)(void *, void *)
+
+ ctypedef struct grpc_arg_value_pointer:
+ void *address "p"
+ grpc_arg_pointer_vtable *vtable
union grpc_arg_value:
char *string
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 96c5b02bc2..00ec91b131 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -84,6 +84,7 @@ cdef class SslPemKeyCertPair:
cdef class ChannelArg:
cdef grpc_arg c_arg
+ cdef grpc_arg_pointer_vtable ptr_vtable
cdef readonly object key, value
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 5a11a08f54..8a4eef4d2e 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -27,6 +27,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from libc.stdint cimport intptr_t
class ConnectivityState:
idle = GRPC_CHANNEL_IDLE
@@ -304,20 +305,49 @@ cdef class SslPemKeyCertPair:
self.c_pair.certificate_chain = self.certificate_chain
+
+cdef void* copy_ptr(void* ptr):
+ return ptr
+
+
+cdef void destroy_ptr(void* ptr):
+ pass
+
+
+cdef int compare_ptr(void* ptr1, void* ptr2):
+ if ptr1 < ptr2:
+ return -1
+ elif ptr1 > ptr2:
+ return 1
+ else:
+ return 0
+
+
cdef class ChannelArg:
def __cinit__(self, bytes key, value):
self.key = key
+ self.value = value
self.c_arg.key = self.key
if isinstance(value, int):
- self.value = value
self.c_arg.type = GRPC_ARG_INTEGER
self.c_arg.value.integer = self.value
elif isinstance(value, bytes):
- self.value = value
self.c_arg.type = GRPC_ARG_STRING
self.c_arg.value.string = self.value
+ elif hasattr(value, '__int__'):
+ # Pointer objects must override __int__() to return
+ # the underlying C address (Python ints are word size). The
+ # lifecycle of the pointer is fixed to the lifecycle of the
+      # Python object wrapping it.
+ self.ptr_vtable.copy = &copy_ptr
+ self.ptr_vtable.destroy = &destroy_ptr
+ self.ptr_vtable.cmp = &compare_ptr
+ self.c_arg.type = GRPC_ARG_POINTER
+ self.c_arg.value.pointer.vtable = &self.ptr_vtable
+ self.c_arg.value.pointer.address = <void*>(<intptr_t>int(self.value))
else:
+ # TODO Add supported pointer types to this message
raise TypeError('Expected int or bytes, got {}'.format(type(value)))
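The new GRPC_ARG_POINTER branch accepts any Python object whose __int__() yields the raw C address; because the copy/destroy entries in the vtable above are no-ops, the wrapping Python object has to stay alive for as long as the channel argument is in use. A minimal sketch of such a wrapper, with purely illustrative names (nothing here is an existing cygrpc API beyond ChannelArg itself):

class _WrappedAddress(object):
    """Illustrative holder for a C address obtained elsewhere (e.g. from Cython)."""

    def __init__(self, address):
        self._address = address  # plain Python int holding the pointer value

    def __int__(self):
        # ChannelArg reads this to populate c_arg.value.pointer.address.
        return self._address

# Hypothetical usage:
#   cygrpc.ChannelArg(b'grpc.some_pointer_arg', _WrappedAddress(address))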
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 2bebf48c70..1ad423909f 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -98,6 +98,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
+ 'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
@@ -109,36 +110,48 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
+ 'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
+ 'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
+ 'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
+ 'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
+ 'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
+ 'src/core/lib/iomgr/socket_utils_uv.c',
+ 'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/iomgr/tcp_client_posix.c',
+ 'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
+ 'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
+ 'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
- 'src/core/lib/iomgr/timer.c',
+ 'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
+ 'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
+ 'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
- 'src/core/lib/iomgr/workqueue_posix.c',
+ 'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
@@ -164,8 +177,10 @@ CORE_SOURCE_FILES = [
'src/core/lib/surface/version.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
+ 'src/core/lib/transport/mdstr_hash_table.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
+ 'src/core/lib/transport/method_config.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
@@ -199,8 +214,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/security/credentials/credentials.c',
'src/core/lib/security/credentials/credentials_metadata.c',
'src/core/lib/security/credentials/fake/fake_credentials.c',
- 'src/core/lib/security/credentials/google_default/credentials_posix.c',
- 'src/core/lib/security/credentials/google_default/credentials_windows.c',
+ 'src/core/lib/security/credentials/google_default/credentials_generic.c',
'src/core/lib/security/credentials/google_default/google_default_credentials.c',
'src/core/lib/security/credentials/iam/iam_credentials.c',
'src/core/lib/security/credentials/jwt/json_token.c',
@@ -222,25 +236,24 @@ CORE_SOURCE_FILES = [
'src/core/lib/tsi/ssl_transport_security.c',
'src/core/lib/tsi/transport_security.c',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
- 'src/core/ext/client_config/channel_connectivity.c',
- 'src/core/ext/client_config/client_channel.c',
- 'src/core/ext/client_config/client_channel_factory.c',
- 'src/core/ext/client_config/client_config_plugin.c',
- 'src/core/ext/client_config/connector.c',
- 'src/core/ext/client_config/default_initial_connect_string.c',
- 'src/core/ext/client_config/http_connect_handshaker.c',
- 'src/core/ext/client_config/initial_connect_string.c',
- 'src/core/ext/client_config/lb_policy.c',
- 'src/core/ext/client_config/lb_policy_factory.c',
- 'src/core/ext/client_config/lb_policy_registry.c',
- 'src/core/ext/client_config/parse_address.c',
- 'src/core/ext/client_config/resolver.c',
- 'src/core/ext/client_config/resolver_factory.c',
- 'src/core/ext/client_config/resolver_registry.c',
- 'src/core/ext/client_config/resolver_result.c',
- 'src/core/ext/client_config/subchannel.c',
- 'src/core/ext/client_config/subchannel_index.c',
- 'src/core/ext/client_config/uri_parser.c',
+ 'src/core/ext/client_channel/channel_connectivity.c',
+ 'src/core/ext/client_channel/client_channel.c',
+ 'src/core/ext/client_channel/client_channel_factory.c',
+ 'src/core/ext/client_channel/client_channel_plugin.c',
+ 'src/core/ext/client_channel/connector.c',
+ 'src/core/ext/client_channel/default_initial_connect_string.c',
+ 'src/core/ext/client_channel/http_connect_handshaker.c',
+ 'src/core/ext/client_channel/initial_connect_string.c',
+ 'src/core/ext/client_channel/lb_policy.c',
+ 'src/core/ext/client_channel/lb_policy_factory.c',
+ 'src/core/ext/client_channel/lb_policy_registry.c',
+ 'src/core/ext/client_channel/parse_address.c',
+ 'src/core/ext/client_channel/resolver.c',
+ 'src/core/ext/client_channel/resolver_factory.c',
+ 'src/core/ext/client_channel/resolver_registry.c',
+ 'src/core/ext/client_channel/subchannel.c',
+ 'src/core/ext/client_channel/subchannel_index.c',
+ 'src/core/ext/client_channel/uri_parser.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py
index 7730374df0..f363f5fdc5 100644
--- a/src/python/grpcio/support.py
+++ b/src/python/grpcio/support.py
@@ -45,7 +45,10 @@ int main(int argc, char **argv) { return 0; }
"""
C_PYTHON_DEV_ERROR_MESSAGE = """
Could not find <Python.h>. This could mean the following:
- * You're on Ubuntu and haven't `apt-get install`ed `python-dev`.
+ * You're on Ubuntu and haven't run `apt-get install python-dev`.
+ * You're on RHEL/Fedora and haven't run `yum install python-devel` or
+ `dnf install python-devel` (make sure you also have redhat-rpm-config
+ installed)
* You're on Mac OS X and the usual Python framework was somehow corrupted
(check your environment variables or try re-installing?)
* You're on Windows and your Python installation was somehow corrupted
diff --git a/src/python/grpcio_reflection/.gitignore b/src/python/grpcio_reflection/.gitignore
new file mode 100644
index 0000000000..c0befdc8ea
--- /dev/null
+++ b/src/python/grpcio_reflection/.gitignore
@@ -0,0 +1,5 @@
+*.proto
+*_pb2.py
+build/
+grpcio_reflection.egg-info/
+dist/
diff --git a/src/python/grpcio_reflection/grpc/__init__.py b/src/python/grpcio_reflection/grpc/__init__.py
new file mode 100644
index 0000000000..70ac5edd48
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/src/python/grpcio_reflection/grpc/reflection/__init__.py b/src/python/grpcio_reflection/grpc/reflection/__init__.py
new file mode 100644
index 0000000000..d5ad73a74a
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc/reflection/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py b/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py
new file mode 100644
index 0000000000..d5ad73a74a
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py
new file mode 100644
index 0000000000..3c399b0d79
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py
@@ -0,0 +1,143 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Reference implementation for reflection in gRPC Python."""
+
+import threading
+
+import grpc
+from google.protobuf import descriptor_pb2
+from google.protobuf import descriptor_pool
+
+from grpc.reflection.v1alpha import reflection_pb2
+
+_POOL = descriptor_pool.Default()
+
+def _not_found_error():
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )
+ )
+
+def _file_descriptor_response(descriptor):
+ proto = descriptor_pb2.FileDescriptorProto()
+ descriptor.CopyToProto(proto)
+ serialized_proto = proto.SerializeToString()
+ return reflection_pb2.ServerReflectionResponse(
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(serialized_proto,)
+ ),
+ )
+
+
+class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
+ """Servicer handling RPCs for service statuses."""
+
+ def __init__(self, service_names, pool=None):
+ """Constructor.
+
+ Args:
+      service_names: Iterable of fully-qualified service names available.
+      pool: Optional descriptor_pool.DescriptorPool to query; defaults to the
+        generated-code default pool.
+    """
+ self._service_names = list(service_names)
+ self._pool = _POOL if pool is None else pool
+
+ def _file_by_filename(self, filename):
+ try:
+ descriptor = self._pool.FindFileByName(filename)
+ except KeyError:
+ return _not_found_error()
+ else:
+ return _file_descriptor_response(descriptor)
+
+ def _file_containing_symbol(self, fully_qualified_name):
+ try:
+ descriptor = self._pool.FindFileContainingSymbol(fully_qualified_name)
+ except KeyError:
+ return _not_found_error()
+ else:
+ return _file_descriptor_response(descriptor)
+
+  def _file_containing_extension(self, containing_type, extension_number):
+ # TODO(atash) Python protobuf currently doesn't support querying extensions.
+ # https://github.com/google/protobuf/issues/2248
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
+            error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(),
+ )
+ )
+
+  def _all_extension_numbers_of_type(self, fully_qualified_name):
+ # TODO(atash) We're allowed to leave this unsupported according to the
+ # protocol, but we should still eventually implement it. Hits the same issue
+ # as `_file_containing_extension`, however.
+ # https://github.com/google/protobuf/issues/2248
+ return reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
+            error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(),
+ )
+ )
+
+ def _list_services(self):
+ return reflection_pb2.ServerReflectionResponse(
+ list_services_response=reflection_pb2.ListServiceResponse(
+ service=[
+ reflection_pb2.ServiceResponse(name=service_name)
+ for service_name in self._service_names
+ ]
+ )
+ )
+
+ def ServerReflectionInfo(self, request_iterator, context):
+ for request in request_iterator:
+ if request.HasField('file_by_filename'):
+ yield self._file_by_filename(request.file_by_filename)
+ elif request.HasField('file_containing_symbol'):
+ yield self._file_containing_symbol(request.file_containing_symbol)
+ elif request.HasField('file_containing_extension'):
+ yield self._file_containing_extension(
+ request.file_containing_extension.containing_type,
+ request.file_containing_extension.extension_number)
+ elif request.HasField('all_extension_numbers_of_type'):
+        yield self._all_extension_numbers_of_type(
+ request.all_extension_numbers_of_type)
+ elif request.HasField('list_services'):
+ yield self._list_services()
+ else:
+ yield reflection_pb2.ServerReflectionResponse(
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
+ error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1].encode(),
+ )
+        )
+
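A hedged sketch of how this servicer is expected to be wired into a server: the `add_ServerReflectionServicer_to_server` helper is assumed to be what the gRPC protoc plugin generates into reflection_pb2 (the same module the servicer base class comes from), and `my.package.MyService` stands in for a hypothetical application service.

from concurrent import futures

import grpc
from grpc.reflection.v1alpha import reflection
from grpc.reflection.v1alpha import reflection_pb2

# Service names the reflection service will report, including itself.
SERVICE_NAMES = (
    'grpc.reflection.v1alpha.ServerReflection',
    'my.package.MyService',  # hypothetical application service
)

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
reflection_pb2.add_ServerReflectionServicer_to_server(
    reflection.ReflectionServicer(SERVICE_NAMES), server)
server.add_insecure_port('[::]:50051')
server.start()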
diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py
new file mode 100644
index 0000000000..9b3c44c022
--- /dev/null
+++ b/src/python/grpcio_reflection/grpc_version.py
@@ -0,0 +1,32 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
+
+VERSION='1.1.0.dev0'
diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py
new file mode 100644
index 0000000000..d189aee577
--- /dev/null
+++ b/src/python/grpcio_reflection/reflection_commands.py
@@ -0,0 +1,78 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Provides distutils command classes for the GRPC Python setup process."""
+
+import os
+import shutil
+
+import setuptools
+
+ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
+HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/reflection/v1alpha/reflection.proto')
+
+
+class CopyProtoModules(setuptools.Command):
+ """Command to copy proto modules from grpc/src/proto."""
+
+ description = ''
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ if os.path.isfile(HEALTH_PROTO):
+ shutil.copyfile(
+ HEALTH_PROTO,
+ os.path.join(ROOT_DIR, 'grpc/reflection/v1alpha/reflection.proto'))
+
+
+class BuildPackageProtos(setuptools.Command):
+ """Command to generate project *_pb2.py modules from proto files."""
+
+ description = 'build grpc protobuf modules'
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ # due to limitations of the proto generator, we require that only *one*
+ # directory is provided as an 'include' directory. We assume it's the '' key
+ # to `self.distribution.package_dir` (and get a key error if it's not
+ # there).
+ from grpc.tools import command
+ command.build_package_protos(self.distribution.package_dir[''])
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
new file mode 100644
index 0000000000..df95af4de1
--- /dev/null
+++ b/src/python/grpcio_reflection/setup.py
@@ -0,0 +1,73 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Setup module for the GRPC Python package's optional reflection."""
+
+import os
+import sys
+
+import setuptools
+
+# Ensure we're in the proper directory whether or not we're being used by pip.
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+# Break import-style to ensure we can actually find our commands module.
+import reflection_commands
+import grpc_version
+
+PACKAGE_DIRECTORIES = {
+ '': '.',
+}
+
+SETUP_REQUIRES = (
+ 'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
+)
+
+INSTALL_REQUIRES = (
+ 'protobuf>=3.0.0',
+ 'grpcio>={version}'.format(version=grpc_version.VERSION),
+)
+
+COMMAND_CLASS = {
+ # Run preprocess from the repository *before* doing any packaging!
+ 'preprocess': reflection_commands.CopyProtoModules,
+ 'build_package_protos': reflection_commands.BuildPackageProtos,
+}
+
+setuptools.setup(
+ name='grpcio-reflection',
+ version=grpc_version.VERSION,
+ license='3-clause BSD',
+ package_dir=PACKAGE_DIRECTORIES,
+ packages=setuptools.find_packages('.'),
+ namespace_packages=['grpc'],
+ install_requires=INSTALL_REQUIRES,
+ setup_requires=SETUP_REQUIRES,
+ cmdclass=COMMAND_CLASS
+)
diff --git a/src/python/grpcio_tests/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py
index 9d61d18975..4fbf58f7d9 100644
--- a/src/python/grpcio_tests/tests/interop/client.py
+++ b/src/python/grpcio_tests/tests/interop/client.py
@@ -106,7 +106,10 @@ def _stub(args):
(('grpc.ssl_target_name_override', args.server_host_override,),))
else:
channel = grpc.insecure_channel(target)
- return test_pb2.TestServiceStub(channel)
+ if args.test_case == "unimplemented_service":
+ return test_pb2.UnimplementedServiceStub(channel)
+ else:
+ return test_pb2.TestServiceStub(channel)
def _test_case_from_arg(test_case_arg):
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index 7edd75c56c..52e56f3502 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -44,25 +44,43 @@ from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2
+_INITIAL_METADATA_KEY = "x-grpc-test-echo-initial"
+_TRAILING_METADATA_KEY = "x-grpc-test-echo-trailing-bin"
+
+def _maybe_echo_metadata(servicer_context):
+ """Copies metadata from request to response if it is present."""
+ invocation_metadata = dict(servicer_context.invocation_metadata())
+ if _INITIAL_METADATA_KEY in invocation_metadata:
+ initial_metadatum = (
+ _INITIAL_METADATA_KEY, invocation_metadata[_INITIAL_METADATA_KEY])
+ servicer_context.send_initial_metadata((initial_metadatum,))
+ if _TRAILING_METADATA_KEY in invocation_metadata:
+ trailing_metadatum = (
+ _TRAILING_METADATA_KEY, invocation_metadata[_TRAILING_METADATA_KEY])
+ servicer_context.set_trailing_metadata((trailing_metadatum,))
+
+def _maybe_echo_status_and_message(request, servicer_context):
+ """Sets the response context code and details if the request asks for them"""
+ if request.HasField('response_status'):
+ servicer_context.set_code(request.response_status.code)
+ servicer_context.set_details(request.response_status.message)
class TestService(test_pb2.TestServiceServicer):
def EmptyCall(self, request, context):
+ _maybe_echo_metadata(context)
return empty_pb2.Empty()
def UnaryCall(self, request, context):
- if request.HasField('response_status'):
- context.set_code(request.response_status.code)
- context.set_details(request.response_status.message)
+ _maybe_echo_metadata(context)
+ _maybe_echo_status_and_message(request, context)
return messages_pb2.SimpleResponse(
payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
def StreamingOutputCall(self, request, context):
- if request.HasField('response_status'):
- context.set_code(request.response_status.code)
- context.set_details(request.response_status.message)
+ _maybe_echo_status_and_message(request, context)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
@@ -78,10 +96,9 @@ class TestService(test_pb2.TestServiceServicer):
aggregated_payload_size=aggregate_size)
def FullDuplexCall(self, request_iterator, context):
+ _maybe_echo_metadata(context)
for request in request_iterator:
- if request.HasField('response_status'):
- context.set_code(request.response_status.code)
- context.set_details(request.response_status.message)
+ _maybe_echo_status_and_message(request, context)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
@@ -94,23 +111,46 @@ class TestService(test_pb2.TestServiceServicer):
return self.FullDuplexCall(request_iterator, context)
+def _expect_status_code(call, expected_code):
+ if call.code() != expected_code:
+ raise ValueError(
+ 'expected code %s, got %s' % (expected_code, call.code()))
+
+
+def _expect_status_details(call, expected_details):
+ if call.details() != expected_details:
+ raise ValueError(
+ 'expected message %s, got %s' % (expected_details, call.details()))
+
+
+def _validate_status_code_and_details(call, expected_code, expected_details):
+ _expect_status_code(call, expected_code)
+ _expect_status_details(call, expected_details)
+
+
+def _validate_payload_type_and_length(response, expected_type, expected_length):
+ if response.payload.type is not expected_type:
+ raise ValueError(
+ 'expected payload type %s, got %s' %
+ (expected_type, response.payload.type))
+ elif len(response.payload.body) != expected_length:
+ raise ValueError(
+ 'expected payload body size %d, got %d' %
+ (expected_length, len(response.payload.body)))
+
+
def _large_unary_common_behavior(
stub, fill_username, fill_oauth_scope, call_credentials):
+ size = 314159
request = messages_pb2.SimpleRequest(
- response_type=messages_pb2.COMPRESSABLE, response_size=314159,
+ response_type=messages_pb2.COMPRESSABLE, response_size=size,
payload=messages_pb2.Payload(body=b'\x00' * 271828),
fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
response_future = stub.UnaryCall.future(
request, credentials=call_credentials)
response = response_future.result()
- if response.payload.type is not messages_pb2.COMPRESSABLE:
- raise ValueError(
- 'response payload type is "%s"!' % type(response.payload.type))
- elif len(response.payload.body) != 314159:
- raise ValueError(
- 'response body of incorrect size %d!' % len(response.payload.body))
- else:
- return response
+ _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE, size)
+ return response
def _empty_unary(stub):
@@ -152,12 +192,9 @@ def _server_streaming(stub):
)
response_iterator = stub.StreamingOutputCall(request)
for index, response in enumerate(response_iterator):
- if response.payload.type != messages_pb2.COMPRESSABLE:
- raise ValueError(
- 'response body of invalid type %s!' % response.payload.type)
- elif len(response.payload.body) != sizes[index]:
- raise ValueError(
- 'response body of invalid size %d!' % len(response.payload.body))
+ _validate_payload_type_and_length(
+ response, messages_pb2.COMPRESSABLE, sizes[index])
+
def _cancel_after_begin(stub):
sizes = (27182, 8, 1828, 45904,)
@@ -224,12 +261,8 @@ def _ping_pong(stub):
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
- if response.payload.type != messages_pb2.COMPRESSABLE:
- raise ValueError(
- 'response body of invalid type %s!' % response.payload.type)
- if len(response.payload.body) != response_size:
- raise ValueError(
- 'response body of invalid size %d!' % len(response.payload.body))
+ _validate_payload_type_and_length(
+ response, messages_pb2.COMPRESSABLE, response_size)
def _cancel_after_first_response(stub):
@@ -291,36 +324,84 @@ def _empty_stream(stub):
def _status_code_and_message(stub):
- message = 'test status message'
+ details = 'test status message'
code = 2
status = grpc.StatusCode.UNKNOWN # code = 2
+
+ # Test with a UnaryCall
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
response_size=1,
payload=messages_pb2.Payload(body=b'\x00'),
- response_status=messages_pb2.EchoStatus(code=code, message=message)
+ response_status=messages_pb2.EchoStatus(code=code, message=details)
)
response_future = stub.UnaryCall.future(request)
- if response_future.code() != status:
- raise ValueError(
- 'expected code %s, got %s' % (status, response_future.code()))
- elif response_future.details() != message:
- raise ValueError(
- 'expected message %s, got %s' % (message, response_future.details()))
+ _validate_status_code_and_details(response_future, status, details)
- request = messages_pb2.StreamingOutputCallRequest(
+ # Test with a FullDuplexCall
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe)
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=1),),
+ payload=messages_pb2.Payload(body=b'\x00'),
+ response_status=messages_pb2.EchoStatus(code=code, message=details))
+ pipe.add(request) # sends the initial request.
+ # Dropping out of the with block closes the pipe
+ _validate_status_code_and_details(response_iterator, status, details)
+
+
+def _unimplemented_method(test_service_stub):
+ response_future = (
+ test_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+ _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
+
+
+def _unimplemented_service(unimplemented_service_stub):
+ response_future = (
+ unimplemented_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+ _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
+
+
+def _custom_metadata(stub):
+ initial_metadata_value = "test_initial_metadata_value"
+ trailing_metadata_value = "\x0a\x0b\x0a\x0b\x0a\x0b"
+ metadata = (
+ (_INITIAL_METADATA_KEY, initial_metadata_value),
+ (_TRAILING_METADATA_KEY, trailing_metadata_value))
+
+ def _validate_metadata(response):
+ initial_metadata = dict(response.initial_metadata())
+ if initial_metadata[_INITIAL_METADATA_KEY] != initial_metadata_value:
+ raise ValueError(
+ 'expected initial metadata %s, got %s' % (
+ initial_metadata_value, initial_metadata[_INITIAL_METADATA_KEY]))
+ trailing_metadata = dict(response.trailing_metadata())
+ if trailing_metadata[_TRAILING_METADATA_KEY] != trailing_metadata_value:
+ raise ValueError(
+ 'expected trailing metadata %s, got %s' % (
+ trailing_metadata_value, trailing_metadata[_TRAILING_METADATA_KEY]))
+
+ # Testing with UnaryCall
+ request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
- response_parameters=(
- messages_pb2.ResponseParameters(size=1),),
- response_status=messages_pb2.EchoStatus(code=code, message=message))
- response_iterator = stub.StreamingOutputCall(request)
- if response_future.code() != status:
- raise ValueError(
- 'expected code %s, got %s' % (status, response_iterator.code()))
- elif response_future.details() != message:
- raise ValueError(
- 'expected message %s, got %s' % (message, response_iterator.details()))
+ response_size=1,
+ payload=messages_pb2.Payload(body=b'\x00'))
+ response_future = stub.UnaryCall.future(request, metadata=metadata)
+ _validate_metadata(response_future)
+ # Testing with FullDuplexCall
+ with _Pipe() as pipe:
+ response_iterator = stub.FullDuplexCall(pipe, metadata=metadata)
+ request = messages_pb2.StreamingOutputCallRequest(
+ response_type=messages_pb2.COMPRESSABLE,
+ response_parameters=(
+ messages_pb2.ResponseParameters(size=1),))
+ pipe.add(request) # Sends the request
+ next(response_iterator) # Causes server to send trailing metadata
+ # Dropping out of the with block closes the pipe
+ _validate_metadata(response_iterator)
def _compute_engine_creds(stub, args):
response = _large_unary_common_behavior(stub, True, True, None)
@@ -381,6 +462,9 @@ class TestCase(enum.Enum):
CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
EMPTY_STREAM = 'empty_stream'
STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
+ UNIMPLEMENTED_METHOD = 'unimplemented_method'
+ UNIMPLEMENTED_SERVICE = 'unimplemented_service'
+ CUSTOM_METADATA = 'custom_metadata'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
JWT_TOKEN_CREDS = 'jwt_token_creds'
@@ -408,6 +492,12 @@ class TestCase(enum.Enum):
_empty_stream(stub)
elif self is TestCase.STATUS_CODE_AND_MESSAGE:
_status_code_and_message(stub)
+ elif self is TestCase.UNIMPLEMENTED_METHOD:
+ _unimplemented_method(stub)
+ elif self is TestCase.UNIMPLEMENTED_SERVICE:
+ _unimplemented_service(stub)
+ elif self is TestCase.CUSTOM_METADATA:
+ _custom_metadata(stub)
elif self is TestCase.COMPUTE_ENGINE_CREDS:
_compute_engine_creds(stub, args)
elif self is TestCase.OAUTH2_AUTH_TOKEN:
diff --git a/src/python/grpcio_tests/tests/reflection/__init__.py b/src/python/grpcio_tests/tests/reflection/__init__.py
new file mode 100644
index 0000000000..100a624dc9
--- /dev/null
+++ b/src/python/grpcio_tests/tests/reflection/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
new file mode 100644
index 0000000000..87264cf9ba
--- /dev/null
+++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@@ -0,0 +1,185 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests of grpc.reflection.v1alpha.reflection."""
+
+import unittest
+
+import grpc
+from grpc.framework.foundation import logging_pool
+from grpc.reflection.v1alpha import reflection
+from grpc.reflection.v1alpha import reflection_pb2
+
+from google.protobuf import descriptor_pool
+from google.protobuf import descriptor_pb2
+
+from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
+from src.proto.grpc.testing import empty_pb2
+from tests.unit.framework.common import test_constants
+
+_EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto'
+_EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty'
+_SERVICE_NAMES = (
+ 'Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman', 'Galilei')
+
+def _file_descriptor_to_proto(descriptor):
+ proto = descriptor_pb2.FileDescriptorProto()
+ descriptor.CopyToProto(proto)
+ return proto.SerializeToString()
+
+class ReflectionServicerTest(unittest.TestCase):
+
+ def setUp(self):
+ servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
+ server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(server_pool)
+ port = self._server.add_insecure_port('[::]:0')
+ reflection_pb2.add_ServerReflectionServicer_to_server(servicer, self._server)
+ self._server.start()
+
+ channel = grpc.insecure_channel('localhost:%d' % port)
+ self._stub = reflection_pb2.ServerReflectionStub(channel)
+
+ def testFileByName(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_by_filename=_EMPTY_PROTO_FILE_NAME
+ ),
+ reflection_pb2.ServerReflectionRequest(
+ file_by_filename='i-donut-exist'
+ ),
+ )
+ responses = tuple(self._stub.ServerReflectionInfo(requests))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(
+ _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),
+ )
+ )
+ ),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )
+ ),
+ )
+ self.assertEqual(expected_responses, responses)
+
+ def testFileBySymbol(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME
+ ),
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'
+ ),
+ )
+ responses = tuple(self._stub.ServerReflectionInfo(requests))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(
+ _file_descriptor_to_proto(empty_pb2.DESCRIPTOR),
+ )
+ )
+ ),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )
+ ),
+ )
+ self.assertEqual(expected_responses, responses)
+
+ @unittest.skip('TODO(atash): implement file-containing-extension reflection '
+ '(see https://github.com/google/protobuf/issues/2248)')
+ def testFileContainingExtension(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_extension=reflection_pb2.ExtensionRequest(
+ containing_type='grpc.testing.proto2.Empty',
+ extension_number=125,
+ ),
+ ),
+ reflection_pb2.ServerReflectionRequest(
+ file_containing_extension=reflection_pb2.ExtensionRequest(
+ containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
+ extension_number=55,
+ ),
+ ),
+ )
+ responses = tuple(self._stub.ServerReflectionInfo(requests))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ file_descriptor_response=reflection_pb2.FileDescriptorResponse(
+ file_descriptor_proto=(
+ _file_descriptor_to_proto(empty2_extensions_pb2.DESCRIPTOR),
+ )
+ )
+ ),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )
+ ),
+ )
+ self.assertEqual(expected_responses, responses)
+
+ def testListServices(self):
+ requests = (
+ reflection_pb2.ServerReflectionRequest(
+ list_services='',
+ ),
+ )
+ responses = tuple(self._stub.ServerReflectionInfo(requests))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ list_services_response=reflection_pb2.ListServiceResponse(
+ service=tuple(
+ reflection_pb2.ServiceResponse(name=name)
+ for name in _SERVICE_NAMES
+ )
+ )
+ ),
+ )
+ self.assertEqual(expected_responses, responses)
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index 2071a33e13..2e85f1f397 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -4,41 +4,42 @@
"_api_test.ChannelTest",
"_auth_test.AccessTokenCallCredentialsTest",
"_auth_test.GoogleCallCredentialsTest",
- "_beta_features_test.BetaFeaturesTest",
- "_beta_features_test.ContextManagementAndLifecycleTest",
+ "_beta_features_test.BetaFeaturesTest",
+ "_beta_features_test.ContextManagementAndLifecycleTest",
"_cancel_many_calls_test.CancelManyCallsTest",
"_channel_args_test.ChannelArgsTest",
"_channel_connectivity_test.ChannelConnectivityTest",
"_channel_ready_future_test.ChannelReadyFutureTest",
- "_channel_test.ChannelTest",
+ "_channel_test.ChannelTest",
"_compression_test.CompressionTest",
"_connectivity_channel_test.ConnectivityStatesTest",
"_credentials_test.CredentialsTest",
"_empty_message_test.EmptyMessageTest",
"_exit_test.ExitTest",
- "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest",
- "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest",
- "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest",
- "_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest",
- "_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest",
+ "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest",
+ "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest",
+ "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest",
+ "_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest",
+ "_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest",
"_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest",
"_health_servicer_test.HealthServicerTest",
"_implementations_test.CallCredentialsTest",
- "_implementations_test.ChannelCredentialsTest",
- "_insecure_interop_test.InsecureInteropTest",
- "_logging_pool_test.LoggingPoolTest",
+ "_implementations_test.ChannelCredentialsTest",
+ "_insecure_interop_test.InsecureInteropTest",
+ "_logging_pool_test.LoggingPoolTest",
"_metadata_code_details_test.MetadataCodeDetailsTest",
"_metadata_test.MetadataTest",
- "_not_found_test.NotFoundTest",
+ "_not_found_test.NotFoundTest",
"_python_plugin_test.PythonPluginTest",
"_read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest",
+ "_reflection_servicer_test.ReflectionServicerTest",
"_rpc_test.RPCTest",
- "_sanity_test.Sanity",
- "_secure_interop_test.SecureInteropTest",
+ "_sanity_test.Sanity",
+ "_secure_interop_test.SecureInteropTest",
"_thread_cleanup_test.CleanupThreadTest",
- "_utilities_test.ChannelConnectivityTest",
- "beta_python_plugin_test.PythonPluginTest",
- "cygrpc_test.InsecureServerInsecureClient",
- "cygrpc_test.SecureServerSecureClient",
+ "_utilities_test.ChannelConnectivityTest",
+ "beta_python_plugin_test.PythonPluginTest",
+ "cygrpc_test.InsecureServerInsecureClient",
+ "cygrpc_test.SecureServerSecureClient",
"cygrpc_test.TypeSmokeTest"
]
diff --git a/src/python/grpcio_tests/tests/unit/_channel_args_test.py b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
index 6a636d7993..b46497afd6 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_args_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
@@ -33,11 +33,18 @@ import unittest
import grpc
+class TestPointerWrapper(object):
+
+ def __int__(self):
+ return 123456
+
+
TEST_CHANNEL_ARGS = (
('arg1', b'bytes_val'),
('arg2', 'str_val'),
('arg3', 1),
(b'arg4', 'str_val'),
+ ('arg6', TestPointerWrapper()),
)
diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
index 1324ad37b6..3d9dd17ff6 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
@@ -34,8 +34,6 @@ import time
import unittest
import grpc
-from grpc import _channel
-from grpc import _server
from tests.unit.framework.common import test_constants
from tests.unit import _thread_pool
@@ -78,7 +76,7 @@ class ChannelConnectivityTest(unittest.TestCase):
def test_lonely_channel_connectivity(self):
callback = _Callback()
- channel = _channel.Channel('localhost:12345', (), None)
+ channel = grpc.insecure_channel('localhost:12345')
channel.subscribe(callback.update, try_to_connect=False)
first_connectivities = callback.block_until_connectivities_satisfy(bool)
channel.subscribe(callback.update, try_to_connect=True)
@@ -105,13 +103,13 @@ class ChannelConnectivityTest(unittest.TestCase):
def test_immediately_connectable_channel_connectivity(self):
thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = _server.Server(thread_pool, (), ())
+ server = grpc.server(thread_pool)
port = server.add_insecure_port('[::]:0')
server.start()
first_callback = _Callback()
second_callback = _Callback()
- channel = _channel.Channel('localhost:{}'.format(port), (), None)
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
channel.subscribe(first_callback.update, try_to_connect=False)
first_connectivities = first_callback.block_until_connectivities_satisfy(
bool)
@@ -146,12 +144,12 @@ class ChannelConnectivityTest(unittest.TestCase):
def test_reachable_then_unreachable_channel_connectivity(self):
thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = _server.Server(thread_pool, (), ())
+ server = grpc.server(thread_pool)
port = server.add_insecure_port('[::]:0')
server.start()
callback = _Callback()
- channel = _channel.Channel('localhost:{}'.format(port), (), None)
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
channel.subscribe(callback.update, try_to_connect=True)
callback.block_until_connectivities_satisfy(_ready_in_connectivities)
# Now take down the server and confirm that channel readiness is repudiated.
diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
index 60d478bcd9..e0a7d15aa7 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
@@ -33,8 +33,6 @@ import threading
import unittest
import grpc
-from grpc import _channel
-from grpc import _server
from tests.unit.framework.common import test_constants
from tests.unit import _thread_pool
@@ -79,7 +77,7 @@ class ChannelReadyFutureTest(unittest.TestCase):
def test_immediately_connectable_channel_connectivity(self):
thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
- server = _server.Server(thread_pool, (), ())
+ server = grpc.server(thread_pool)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
diff --git a/src/ruby/README.md b/src/ruby/README.md
index f476fe72be..266d6b2c16 100644
--- a/src/ruby/README.md
+++ b/src/ruby/README.md
@@ -59,7 +59,7 @@ Directory structure is the layout for [ruby extensions][]
- bin: example gRPC clients and servers, e.g,
```ruby
- stub = Math::Math::Stub.new('my.test.math.server.com:8080')
+ stub = Math::Math::Stub.new('my.test.math.server.com:8080', :this_channel_is_insecure)
req = Math::DivArgs.new(dividend: 7, divisor: 3)
GRPC.logger.info("div(7/3): req=#{req.inspect}")
resp = stub.div(req)
diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c
index 9b6675da84..280f21c973 100644
--- a/src/ruby/ext/grpc/rb_call_credentials.c
+++ b/src/ruby/ext/grpc/rb_call_credentials.c
@@ -86,19 +86,16 @@ static VALUE grpc_rb_call_credentials_callback_rescue(VALUE args,
rb_funcall(exception_object, rb_intern("backtrace"), 0),
rb_intern("join"),
1, rb_str_new2("\n\tfrom "));
- VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("to_s"), 0);
- const char *exception_classname = rb_obj_classname(exception_object);
+ VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("inspect"), 0);
(void)args;
- gpr_log(GPR_INFO, "Call credentials callback failed: %s: %s\n%s",
- exception_classname, StringValueCStr(rb_exception_info),
+ gpr_log(GPR_INFO, "Call credentials callback failed: %s\n%s",
+ StringValueCStr(rb_exception_info),
StringValueCStr(backtrace));
rb_hash_aset(result, rb_str_new2("metadata"), Qnil);
- /* Currently only gives the exception class name. It should be possible get
- more details */
rb_hash_aset(result, rb_str_new2("status"),
- INT2NUM(GRPC_STATUS_PERMISSION_DENIED));
+ INT2NUM(GRPC_STATUS_UNAUTHENTICATED));
rb_hash_aset(result, rb_str_new2("details"),
- rb_str_new2(exception_classname));
+ rb_exception_info);
return result;
}
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index a6cad0db1a..fd73cc7970 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -132,6 +132,11 @@ grpc_header_key_is_legal_type grpc_header_key_is_legal_import;
grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import;
grpc_is_binary_header_type grpc_is_binary_header_import;
grpc_call_error_to_string_type grpc_call_error_to_string_import;
+grpc_resource_quota_create_type grpc_resource_quota_create_import;
+grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
+grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
+grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import;
grpc_use_signal_type grpc_use_signal_import;
@@ -401,6 +406,11 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal");
grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header");
grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string");
+ grpc_resource_quota_create_import = (grpc_resource_quota_create_type) GetProcAddress(library, "grpc_resource_quota_create");
+ grpc_resource_quota_ref_import = (grpc_resource_quota_ref_type) GetProcAddress(library, "grpc_resource_quota_ref");
+ grpc_resource_quota_unref_import = (grpc_resource_quota_unref_type) GetProcAddress(library, "grpc_resource_quota_unref");
+ grpc_resource_quota_resize_import = (grpc_resource_quota_resize_type) GetProcAddress(library, "grpc_resource_quota_resize");
+ grpc_resource_quota_arg_vtable_import = (grpc_resource_quota_arg_vtable_type) GetProcAddress(library, "grpc_resource_quota_arg_vtable");
grpc_insecure_channel_create_from_fd_import = (grpc_insecure_channel_create_from_fd_type) GetProcAddress(library, "grpc_insecure_channel_create_from_fd");
grpc_server_add_insecure_channel_from_fd_import = (grpc_server_add_insecure_channel_from_fd_type) GetProcAddress(library, "grpc_server_add_insecure_channel_from_fd");
grpc_use_signal_import = (grpc_use_signal_type) GetProcAddress(library, "grpc_use_signal");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index 00a67b0b2c..c2244150f2 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -347,6 +347,21 @@ extern grpc_is_binary_header_type grpc_is_binary_header_import;
typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error);
extern grpc_call_error_to_string_type grpc_call_error_to_string_import;
#define grpc_call_error_to_string grpc_call_error_to_string_import
+typedef grpc_resource_quota *(*grpc_resource_quota_create_type)(const char *trace_name);
+extern grpc_resource_quota_create_type grpc_resource_quota_create_import;
+#define grpc_resource_quota_create grpc_resource_quota_create_import
+typedef void(*grpc_resource_quota_ref_type)(grpc_resource_quota *resource_quota);
+extern grpc_resource_quota_ref_type grpc_resource_quota_ref_import;
+#define grpc_resource_quota_ref grpc_resource_quota_ref_import
+typedef void(*grpc_resource_quota_unref_type)(grpc_resource_quota *resource_quota);
+extern grpc_resource_quota_unref_type grpc_resource_quota_unref_import;
+#define grpc_resource_quota_unref grpc_resource_quota_unref_import
+typedef void(*grpc_resource_quota_resize_type)(grpc_resource_quota *resource_quota, size_t new_size);
+extern grpc_resource_quota_resize_type grpc_resource_quota_resize_import;
+#define grpc_resource_quota_resize grpc_resource_quota_resize_import
+typedef const grpc_arg_pointer_vtable *(*grpc_resource_quota_arg_vtable_type)(void);
+extern grpc_resource_quota_arg_vtable_type grpc_resource_quota_arg_vtable_import;
+#define grpc_resource_quota_arg_vtable grpc_resource_quota_arg_vtable_import
typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args);
extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import;
#define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import
diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb
index dfc2644c46..f5c426ebfc 100644
--- a/src/ruby/lib/grpc/generic/active_call.rb
+++ b/src/ruby/lib/grpc/generic/active_call.rb
@@ -43,7 +43,8 @@ class Struct
GRPC.logger.debug("Failing with status #{status}")
# raise BadStatus, propagating the metadata if present.
md = status.metadata
- fail GRPC::BadStatus.new(status.code, status.details, md)
+ fail GRPC::BadStatus.new(status.code, status.details, md),
+ "status code: #{status.code}, details: #{status.details}"
end
status
end
@@ -156,41 +157,25 @@ module GRPC
Operation.new(self)
end
- # writes_done indicates that all writes are completed.
- #
- # It blocks until the remote endpoint acknowledges with at status unless
- # assert_finished is set to false. Any calls to #remote_send after this
- # call will fail.
- #
- # @param assert_finished [true, false] when true(default), waits for
- # FINISHED.
- def writes_done(assert_finished = true)
- ops = {
- SEND_CLOSE_FROM_CLIENT => nil
- }
- ops[RECV_STATUS_ON_CLIENT] = nil if assert_finished
- batch_result = @call.run_batch(ops)
- return unless assert_finished
- unless batch_result.status.nil?
- @call.trailing_metadata = batch_result.status.metadata
- end
- @call.status = batch_result.status
- op_is_done
- batch_result.check_status
- end
-
# finished waits until a client call is completed.
#
# It blocks until the remote endpoint acknowledges by sending a status.
def finished
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
- unless batch_result.status.nil?
- @call.trailing_metadata = batch_result.status.metadata
+ attach_status_results_and_complete_call(batch_result)
+ end
+
+ def attach_status_results_and_complete_call(recv_status_batch_result)
+ unless recv_status_batch_result.status.nil?
+ @call.trailing_metadata = recv_status_batch_result.status.metadata
end
- @call.status = batch_result.status
- op_is_done
- batch_result.check_status
+ @call.status = recv_status_batch_result.status
@call.close
+ op_is_done
+
+ # The RECV_STATUS op in run_batch always succeeds;
+ # check the received status itself for a bad RPC status or a failed batch
+ recv_status_batch_result.check_status
end
# remote_send sends a request to the remote endpoint.
@@ -226,6 +211,23 @@ module GRPC
nil
end
+ def server_unary_response(req, trailing_metadata: {},
+ code: Core::StatusCodes::OK, details: 'OK')
+ ops = {}
+ @send_initial_md_mutex.synchronize do
+ ops[SEND_INITIAL_METADATA] = @metadata_to_send unless @metadata_sent
+ @metadata_sent = true
+ end
+
+ payload = @marshal.call(req)
+ ops[SEND_MESSAGE] = payload
+ ops[SEND_STATUS_FROM_SERVER] = Struct::Status.new(
+ code, details, trailing_metadata)
+ ops[RECV_CLOSE_ON_SERVER] = nil
+
+ @call.run_batch(ops)
+ end
+
# remote_read reads a response from the remote endpoint.
#
# It blocks until the remote endpoint replies with a message or status.
@@ -240,9 +242,13 @@ module GRPC
@call.metadata = batch_result.metadata
@metadata_received = true
end
- unless batch_result.nil? || batch_result.message.nil?
- res = @unmarshal.call(batch_result.message)
- return res
+ get_message_from_batch_result(batch_result)
+ end
+
+ def get_message_from_batch_result(recv_message_batch_result)
+ unless recv_message_batch_result.nil? ||
+ recv_message_batch_result.message.nil?
+ return @unmarshal.call(recv_message_batch_result.message)
end
GRPC.logger.debug('found nil; the final response has been sent')
nil
@@ -298,7 +304,6 @@ module GRPC
return enum_for(:each_remote_read_then_finish) unless block_given?
loop do
resp = remote_read
- break if resp.is_a? Struct::Status # is an OK status
if resp.nil? # the last response was received, but not finished yet
finished
break
@@ -315,15 +320,25 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def request_response(req, metadata: {})
- merge_metadata_to_send(metadata) && send_initial_metadata
- remote_send(req)
- writes_done(false)
- response = remote_read
- finished unless response.is_a? Struct::Status
- response
- rescue GRPC::Core::CallError => e
- finished # checks for Cancelled
- raise e
+ ops = {
+ SEND_MESSAGE => @marshal.call(req),
+ SEND_CLOSE_FROM_CLIENT => nil,
+ RECV_INITIAL_METADATA => nil,
+ RECV_MESSAGE => nil,
+ RECV_STATUS_ON_CLIENT => nil
+ }
+ @send_initial_md_mutex.synchronize do
+ # Metadata might have already been sent if this is an operation view
+ unless @metadata_sent
+ ops[SEND_INITIAL_METADATA] = @metadata_to_send.merge!(metadata)
+ end
+ @metadata_sent = true
+ end
+ batch_result = @call.run_batch(ops)
+
+ @call.metadata = batch_result.metadata
+ attach_status_results_and_complete_call(batch_result)
+ get_message_from_batch_result(batch_result)
end
# client_streamer sends a stream of requests to a GRPC server, and
@@ -339,12 +354,20 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def client_streamer(requests, metadata: {})
- merge_metadata_to_send(metadata) && send_initial_metadata
- requests.each { |r| remote_send(r) }
- writes_done(false)
- response = remote_read
- finished unless response.is_a? Struct::Status
- response
+ # Metadata might have already been sent if this is an operation view
+ merge_metadata_and_send_if_not_already_sent(metadata)
+
+ requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) }
+ batch_result = @call.run_batch(
+ SEND_CLOSE_FROM_CLIENT => nil,
+ RECV_INITIAL_METADATA => nil,
+ RECV_MESSAGE => nil,
+ RECV_STATUS_ON_CLIENT => nil
+ )
+
+ @call.metadata = batch_result.metadata
+ attach_status_results_and_complete_call(batch_result)
+ get_message_from_batch_result(batch_result)
rescue GRPC::Core::CallError => e
finished # checks for Cancelled
raise e
@@ -365,9 +388,18 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Enumerator|nil] a response Enumerator
def server_streamer(req, metadata: {})
- merge_metadata_to_send(metadata) && send_initial_metadata
- remote_send(req)
- writes_done(false)
+ ops = {
+ SEND_MESSAGE => @marshal.call(req),
+ SEND_CLOSE_FROM_CLIENT => nil
+ }
+ @send_initial_md_mutex.synchronize do
+ # Metadata might have already been sent if this is an operation view
+ unless @metadata_sent
+ ops[SEND_INITIAL_METADATA] = @metadata_to_send.merge!(metadata)
+ end
+ @metadata_sent = true
+ end
+ @call.run_batch(ops)
replies = enum_for(:each_remote_read_then_finish)
return replies unless block_given?
replies.each { |r| yield r }
@@ -404,7 +436,8 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, metadata: {}, &blk)
- merge_metadata_to_send(metadata) && send_initial_metadata
+ # Metadata might have already been sent if this is an operation view
+ merge_metadata_and_send_if_not_already_sent(metadata)
bd = BidiCall.new(@call,
@marshal,
@unmarshal,
@@ -457,6 +490,15 @@ module GRPC
end
end
+ def merge_metadata_and_send_if_not_already_sent(new_metadata = {})
+ @send_initial_md_mutex.synchronize do
+ return if @metadata_sent
+ @metadata_to_send.merge!(new_metadata)
+ @call.run_batch(SEND_INITIAL_METADATA => @metadata_to_send)
+ @metadata_sent = true
+ end
+ end
+
private
# Starts the call if not already started
diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb
index 0d7c1f7805..6934257cbc 100644
--- a/src/ruby/lib/grpc/generic/client_stub.rb
+++ b/src/ruby/lib/grpc/generic/client_stub.rb
@@ -168,6 +168,7 @@ module GRPC
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #request_response.
+ c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
c.request_response(req, metadata: metadata)
@@ -231,9 +232,10 @@ module GRPC
# return the operation view of the active_call; define #execute as a
# new method for this instance that invokes #client_streamer.
+ c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
- c.client_streamer(requests, metadata: metadata)
+ c.client_streamer(requests)
end
op
end
@@ -309,9 +311,10 @@ module GRPC
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #server_streamer
+ c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
- c.server_streamer(req, metadata: metadata, &blk)
+ c.server_streamer(req, &blk)
end
op
end
@@ -417,15 +420,15 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
-
return c.bidi_streamer(requests, metadata: metadata,
&blk) unless return_op
# return the operation view of the active_call; define #execute
# as a new method for this instance that invokes #bidi_streamer
+ c.merge_metadata_to_send(metadata)
op = c.operation
op.define_singleton_method(:execute) do
- c.bidi_streamer(requests, metadata: metadata, &blk)
+ c.bidi_streamer(requests, &blk)
end
op
end
@@ -445,7 +448,6 @@ module GRPC
deadline: nil,
parent: nil,
credentials: nil)
-
deadline = from_relative_time(@timeout) if deadline.nil?
# Provide each new client call with its own completion queue
call = @ch.create_call(parent, # parent call
diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb
index 584fe78169..cd17aed8e7 100644
--- a/src/ruby/lib/grpc/generic/rpc_desc.rb
+++ b/src/ruby/lib/grpc/generic/rpc_desc.rb
@@ -62,25 +62,44 @@ module GRPC
proc { |o| unmarshal_class.method(unmarshal_method).call(o) }
end
+ def handle_request_response(active_call, mth)
+ req = active_call.remote_read
+ resp = mth.call(req, active_call.single_req_view)
+ active_call.server_unary_response(
+ resp, trailing_metadata: active_call.output_metadata)
+ end
+
+ def handle_client_streamer(active_call, mth)
+ resp = mth.call(active_call.multi_req_view)
+ active_call.server_unary_response(
+ resp, trailing_metadata: active_call.output_metadata)
+ end
+
+ def handle_server_streamer(active_call, mth)
+ req = active_call.remote_read
+ replys = mth.call(req, active_call.single_req_view)
+ replys.each { |r| active_call.remote_send(r) }
+ send_status(active_call, OK, 'OK', active_call.output_metadata)
+ end
+
+ def handle_bidi_streamer(active_call, mth)
+ active_call.run_server_bidi(mth)
+ send_status(active_call, OK, 'OK', active_call.output_metadata)
+ end
+
def run_server_method(active_call, mth)
# While a server method is running, it might be cancelled, its deadline
# might be reached, the handler could throw an unknown error, or a
# well-behaved handler could throw a StatusError.
if request_response?
- req = active_call.remote_read
- resp = mth.call(req, active_call.single_req_view)
- active_call.remote_send(resp)
+ handle_request_response(active_call, mth)
elsif client_streamer?
- resp = mth.call(active_call.multi_req_view)
- active_call.remote_send(resp)
+ handle_client_streamer(active_call, mth)
elsif server_streamer?
- req = active_call.remote_read
- replys = mth.call(req, active_call.single_req_view)
- replys.each { |r| active_call.remote_send(r) }
+ handle_server_streamer(active_call, mth)
else # is a bidi_stream
- active_call.run_server_bidi(mth)
+ handle_bidi_streamer(active_call, mth)
end
- send_status(active_call, OK, 'OK', active_call.output_metadata)
rescue BadStatus => e
# this is raised by handlers that want GRPC to send an application error
# code and detail message and some additional app-specific metadata.
@@ -91,7 +110,7 @@ module GRPC
# Log it, but don't notify the other endpoint..
GRPC.logger.warn("failed call: #{active_call}\n#{e}")
rescue Core::OutOfTime
- # This is raised when active_call#method.call exceeeds the deadline
+ # This is raised when active_call#method.call exceeds the deadline
# event. Send a status of deadline exceeded
GRPC.logger.warn("late call: #{active_call}")
send_status(active_call, DEADLINE_EXCEEDED, 'late')
@@ -100,7 +119,7 @@ module GRPC
# Send back a UNKNOWN status to the client
GRPC.logger.warn("failed handler: #{active_call}; sending status:UNKNOWN")
GRPC.logger.warn(e)
- send_status(active_call, UNKNOWN, 'no reason given')
+ send_status(active_call, UNKNOWN, 'unknown error handling call on server')
end
def assert_arity_matches(mth)
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 7dbcb7d479..57f99c8ce6 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -54,6 +54,7 @@ module GRPC
DEFAULT_MAX_WAITING_REQUESTS = 60
# Default poll period is 1s
+ # Used for grpc server shutdown and thread pool shutdown timeouts
DEFAULT_POLL_PERIOD = 1
# Signal check period is 0.25s
@@ -127,7 +128,7 @@ module GRPC
deadline = from_relative_time(@poll_period)
@server.close(deadline)
@pool.shutdown
- @pool.wait_for_termination
+ @pool.wait_for_termination(@poll_period)
end
def running_state
diff --git a/src/ruby/qps/server.rb b/src/ruby/qps/server.rb
index d0c2073dd1..6175855cd9 100644
--- a/src/ruby/qps/server.rb
+++ b/src/ruby/qps/server.rb
@@ -63,7 +63,9 @@ class BenchmarkServer
cred = :this_port_is_insecure
end
# Make sure server can handle the large number of calls in benchmarks
- @server = GRPC::RpcServer.new(pool_size: 100, max_waiting_requests: 100)
+ # TODO: @apolcyn, if scenario config increases total outstanding
+ # calls, then the pool size will need to be increased too
+ @server = GRPC::RpcServer.new(pool_size: 1024, max_waiting_requests: 1024)
@port = @server.add_http2_port("0.0.0.0:" + port.to_s, cred)
@server.handle(BenchmarkServiceImpl.new)
@start_time = Time.now
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index 48bc61e494..aa51d9d7b1 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -137,6 +137,8 @@ describe GRPC::ActiveCall do
msg = 'message is a string'
client_call.write_flag = f
client_call.remote_send(msg)
+ # flush the message in case writes are set to buffered
+ call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) if f == 1
# confirm that the message was marshalled
recvd_rpc = @server.request_call
@@ -400,7 +402,7 @@ describe GRPC::ActiveCall do
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
- client_call.writes_done(false)
+ call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
server_call.send_status(OK, 'OK')
@@ -458,7 +460,7 @@ describe GRPC::ActiveCall do
msg = 'message is a string'
reply = 'server_response'
client_call.remote_send(msg)
- client_call.writes_done(false)
+ call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
server_call = expect_server_to_receive(msg)
e = client_call.each_remote_read
n = 3 # arbitrary value > 1
@@ -471,7 +473,7 @@ describe GRPC::ActiveCall do
end
end
- describe '#writes_done' do
+ describe '#closing the call from the client' do
it 'finishes ok if the server sends a status response' do
call = make_test_call
ActiveCall.client_invoke(call)
@@ -479,7 +481,9 @@ describe GRPC::ActiveCall do
@pass_through, deadline)
msg = 'message is a string'
client_call.remote_send(msg)
- expect { client_call.writes_done(false) }.to_not raise_error
+ expect do
+ call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
+ end.to_not raise_error
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
@@ -498,11 +502,13 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
- expect { client_call.writes_done(false) }.to_not raise_error
+ expect do
+ call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
+ end.to_not raise_error
expect { client_call.finished }.to_not raise_error
end
- it 'finishes ok if writes_done is true' do
+ it 'finishes ok if SEND_CLOSE and RECV_STATUS have been sent' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
@@ -513,7 +519,11 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
server_call.send_status(OK, 'status code is OK')
expect(client_call.remote_read).to eq('server_response')
- expect { client_call.writes_done(true) }.to_not raise_error
+ expect do
+ call.run_batch(
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ end.to_not raise_error
end
end
diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb
index 6034b5419c..607a4a3c5d 100644
--- a/src/ruby/spec/generic/client_stub_spec.rb
+++ b/src/ruby/spec/generic/client_stub_spec.rb
@@ -168,42 +168,94 @@ describe 'ClientStub' do
expect(&blk).to raise_error(GRPC::BadStatus)
th.join
end
+
+ it 'should receive UNAUTHENTICATED if call credentials plugin fails' do
+ server_port = create_secure_test_server
+ th = run_request_response(@sent_msg, @resp, @pass)
+
+ certs = load_test_certs
+ secure_channel_creds = GRPC::Core::ChannelCredentials.new(
+ certs[0], nil, nil)
+ secure_stub_opts = {
+ channel_args: {
+ GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr'
+ }
+ }
+ stub = GRPC::ClientStub.new("localhost:#{server_port}",
+ secure_channel_creds, **secure_stub_opts)
+
+ error_message = 'Failing call credentials callback'
+ failing_auth = proc do
+ fail error_message
+ end
+ creds = GRPC::Core::CallCredentials.new(failing_auth)
+
+ error_occurred = false
+ begin
+ get_response(stub, credentials: creds)
+ rescue GRPC::BadStatus => e
+ error_occurred = true
+ expect(e.code).to eq(GRPC::Core::StatusCodes::UNAUTHENTICATED)
+ expect(e.details.include?(error_message)).to be true
+ end
+ expect(error_occurred).to eq(true)
+
+ # Kill the server thread so tests can complete
+ th.kill
+ end
end
describe 'without a call operation' do
- def get_response(stub)
+ def get_response(stub, credentials: nil)
+ puts credentials.inspect
stub.request_response(@method, @sent_msg, noop, noop,
- metadata: { k1: 'v1', k2: 'v2' })
+ metadata: { k1: 'v1', k2: 'v2' },
+ credentials: credentials)
end
it_behaves_like 'request response'
end
describe 'via a call operation' do
- def get_response(stub)
+ def get_response(stub, run_start_call_first: false, credentials: nil)
op = stub.request_response(@method, @sent_msg, noop, noop,
return_op: true,
metadata: { k1: 'v1', k2: 'v2' },
- deadline: from_relative_time(2))
+ deadline: from_relative_time(2),
+ credentials: credentials)
expect(op).to be_a(GRPC::ActiveCall::Operation)
- op.execute
+ op.start_call if run_start_call_first
+ result = op.execute
+ op.wait # make sure wait doesn't hang
+ result
end
it_behaves_like 'request response'
- end
- end
- describe '#client_streamer' do
- shared_examples 'client streaming' do
- before(:each) do
+ it 'sends metadata to the server ok when running start_call first' do
server_port = create_test_server
host = "localhost:#{server_port}"
- @stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
- @metadata = { k1: 'v1', k2: 'v2' }
- @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
- @resp = 'a_reply'
+ th = run_request_response(@sent_msg, @resp, @pass,
+ k1: 'v1', k2: 'v2')
+ stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
+ expect(get_response(stub, run_start_call_first: true)).to eq(@resp)
+ th.join
end
+ end
+ end
+
+ describe '#client_streamer' do
+ before(:each) do
+ Thread.abort_on_exception = true
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ @stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
+ @metadata = { k1: 'v1', k2: 'v2' }
+ @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
+ @resp = 'a_reply'
+ end
+ shared_examples 'client streaming' do
it 'should send requests to/receive a reply from a server' do
th = run_client_streamer(@sent_msgs, @resp, @pass)
expect(get_response(@stub)).to eq(@resp)
@@ -242,24 +294,33 @@ describe 'ClientStub' do
end
describe 'via a call operation' do
- def get_response(stub)
+ def get_response(stub, run_start_call_first: false)
op = stub.client_streamer(@method, @sent_msgs, noop, noop,
return_op: true, metadata: @metadata)
expect(op).to be_a(GRPC::ActiveCall::Operation)
- op.execute
+ op.start_call if run_start_call_first
+ result = op.execute
+ op.wait # make sure wait doesn't hang
+ result
end
it_behaves_like 'client streaming'
+
+ it 'sends metadata to the server ok when running start_call first' do
+ th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata)
+ expect(get_response(@stub, run_start_call_first: true)).to eq(@resp)
+ th.join
+ end
end
end
describe '#server_streamer' do
- shared_examples 'server streaming' do
- before(:each) do
- @sent_msg = 'a_msg'
- @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
- end
+ before(:each) do
+ @sent_msg = 'a_msg'
+ @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
+ end
+ shared_examples 'server streaming' do
it 'should send a request to/receive replies from a server' do
server_port = create_test_server
host = "localhost:#{server_port}"
@@ -303,29 +364,44 @@ describe 'ClientStub' do
end
describe 'via a call operation' do
- def get_responses(stub)
- op = stub.server_streamer(@method, @sent_msg, noop, noop,
- return_op: true,
- metadata: { k1: 'v1', k2: 'v2' })
- expect(op).to be_a(GRPC::ActiveCall::Operation)
- e = op.execute
+ after(:each) do
+ @op.wait # make sure wait doesn't hang
+ end
+ def get_responses(stub, run_start_call_first: false)
+ @op = stub.server_streamer(@method, @sent_msg, noop, noop,
+ return_op: true,
+ metadata: { k1: 'v1', k2: 'v2' })
+ expect(@op).to be_a(GRPC::ActiveCall::Operation)
+ @op.start_call if run_start_call_first
+ e = @op.execute
expect(e).to be_a(Enumerator)
e
end
it_behaves_like 'server streaming'
+
+ it 'should send metadata to the server ok when start_call is run first' do
+ server_port = create_test_server
+ host = "localhost:#{server_port}"
+ th = run_server_streamer(@sent_msg, @replys, @fail,
+ k1: 'v1', k2: 'v2')
+ stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
+ e = get_responses(stub, run_start_call_first: true)
+ expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
+ th.join
+ end
end
end
describe '#bidi_streamer' do
- shared_examples 'bidi streaming' do
- before(:each) do
- @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
- @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
- server_port = create_test_server
- @host = "localhost:#{server_port}"
- end
+ before(:each) do
+ @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
+ @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
+ server_port = create_test_server
+ @host = "localhost:#{server_port}"
+ end
+ shared_examples 'bidi streaming' do
it 'supports sending all the requests first', bidi: true do
th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
@pass)
@@ -363,16 +439,29 @@ describe 'ClientStub' do
end
describe 'via a call operation' do
- def get_responses(stub)
- op = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
- return_op: true)
- expect(op).to be_a(GRPC::ActiveCall::Operation)
- e = op.execute
+ after(:each) do
+ @op.wait # make sure wait doesn't hang
+ end
+ def get_responses(stub, run_start_call_first: false)
+ @op = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
+ return_op: true)
+ expect(@op).to be_a(GRPC::ActiveCall::Operation)
+ @op.start_call if run_start_call_first
+ e = @op.execute
expect(e).to be_a(Enumerator)
e
end
it_behaves_like 'bidi streaming'
+
+ it 'can run start_call before executing the call' do
+ th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
+ @pass)
+ stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
+ e = get_responses(stub, run_start_call_first: true)
+ expect(e.collect { |r| r }).to eq(@replys)
+ th.join
+ end
end
end
@@ -441,6 +530,15 @@ describe 'ClientStub' do
end
end
+ def create_secure_test_server
+ certs = load_test_certs
+ secure_credentials = GRPC::Core::ServerCredentials.new(
+ nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
+
+ @server = GRPC::Core::Server.new(nil)
+ @server.add_http2_port('0.0.0.0:0', secure_credentials)
+ end
+
def create_test_server
@server = GRPC::Core::Server.new(nil)
@server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
diff --git a/src/ruby/spec/generic/rpc_desc_spec.rb b/src/ruby/spec/generic/rpc_desc_spec.rb
index 1a895005bc..a3f0efa603 100644
--- a/src/ruby/spec/generic/rpc_desc_spec.rb
+++ b/src/ruby/spec/generic/rpc_desc_spec.rb
@@ -48,7 +48,7 @@ describe GRPC::RpcDesc do
@bidi_streamer = RpcDesc.new('ss', Stream.new(Object.new),
Stream.new(Object.new), 'encode', 'decode')
@bs_code = INTERNAL
- @no_reason = 'no reason given'
+ @no_reason = 'unknown error handling call on server'
@ok_response = Object.new
end
@@ -83,6 +83,7 @@ describe GRPC::RpcDesc do
before(:each) do
@call = double('active_call')
allow(@call).to receive(:single_req_view).and_return(@call)
+ allow(@call).to receive(:output_metadata).and_return(@call)
end
it_behaves_like 'it handles errors'
@@ -90,10 +91,10 @@ describe GRPC::RpcDesc do
it 'sends a response and closes the stream if there no errors' do
req = Object.new
expect(@call).to receive(:remote_read).once.and_return(req)
- expect(@call).to receive(:remote_send).once.with(@ok_response)
- expect(@call).to receive(:output_metadata).and_return(fake_md)
- expect(@call).to receive(:send_status).once.with(OK, 'OK', true,
- metadata: fake_md)
+ expect(@call).to receive(:output_metadata).once.and_return(fake_md)
+ expect(@call).to receive(:server_unary_response).once
+ .with(@ok_response, trailing_metadata: fake_md)
+
this_desc.run_server_method(@call, method(:fake_reqresp))
end
end
@@ -117,7 +118,9 @@ describe GRPC::RpcDesc do
end
it 'absorbs CallError with no further action' do
- expect(@call).to receive(:remote_send).once.and_raise(CallError)
+ expect(@call).to receive(:server_unary_response).once.and_raise(
+ CallError)
+ allow(@call).to receive(:output_metadata).and_return({})
blk = proc do
@client_streamer.run_server_method(@call, method(:fake_clstream))
end
@@ -125,10 +128,11 @@ describe GRPC::RpcDesc do
end
it 'sends a response and closes the stream if there are no errors' do
- expect(@call).to receive(:remote_send).once.with(@ok_response)
- expect(@call).to receive(:output_metadata).and_return(fake_md)
- expect(@call).to receive(:send_status).once.with(OK, 'OK', true,
- metadata: fake_md)
+ expect(@call).to receive(:output_metadata).and_return(
+ fake_md)
+ expect(@call).to receive(:server_unary_response).once
+ .with(@ok_response, trailing_metadata: fake_md)
+
@client_streamer.run_server_method(@call, method(:fake_clstream))
end
end
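The rewritten expectations above reflect a consolidation on the server side: rather than a remote_send followed by a separate send_status, the unary and client-streaming paths now hand the handler's result and the trailing metadata to a single server_unary_response call. Under that reading, the unary path reduces to roughly this sketch (names other than remote_read, output_metadata and server_unary_response are placeholders):

def run_unary_handler(active_call, handler)
  req  = active_call.remote_read               # read the single request
  resp = handler.call(req)                     # invoke the user-supplied handler
  active_call.server_unary_response(
    resp, trailing_metadata: active_call.output_metadata)
end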
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index d362e48dee..c5694790fd 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -462,6 +462,7 @@ describe GRPC::RpcServer do
'connect_k1' => 'connect_v1'
}
wanted_md.each do |key, value|
+ puts "key: #{key}"
expect(op.metadata[key]).to eq(value)
end
@srv.stop
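The loop being instrumented above compares the expected connect-string and per-call entries against what the client-side operation observed; stripped of the spec plumbing it amounts to:

# op is assumed to be a completed GRPC::ActiveCall::Operation.
def check_metadata(op, wanted_md)
  wanted_md.each do |key, value|
    raise "unexpected value for #{key}" unless op.metadata[key] == value
  end
end
# e.g. check_metadata(op, 'k1' => 'v1', 'connect_k1' => 'connect_v1')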
diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb
index 1b2fa96827..4711e09e88 100644
--- a/src/ruby/spec/pb/health/checker_spec.rb
+++ b/src/ruby/spec/pb/health/checker_spec.rb
@@ -97,15 +97,17 @@ describe Grpc::Health::Checker do
context 'initialization' do
it 'can be constructed with no args' do
- expect(subject).to_not be(nil)
+ checker = Grpc::Health::Checker.new
+ expect(checker).to_not be(nil)
end
end
context 'method `add_status` and `check`' do
success_tests.each do |t|
it "should succeed when #{t[:desc]}" do
- subject.add_status(t[:service], ServingStatus::NOT_SERVING)
- got = subject.check(HCReq.new(service: t[:service]), nil)
+ checker = Grpc::Health::Checker.new
+ checker.add_status(t[:service], ServingStatus::NOT_SERVING)
+ got = checker.check(HCReq.new(service: t[:service]), nil)
want = HCResp.new(status: ServingStatus::NOT_SERVING)
expect(got).to eq(want)
end
@@ -115,8 +117,9 @@ describe Grpc::Health::Checker do
context 'method `check`' do
success_tests.each do |t|
it "should fail with NOT_FOUND when #{t[:desc]}" do
+ checker = Grpc::Health::Checker.new
blk = proc do
- subject.check(HCReq.new(service: t[:service]), nil)
+ checker.check(HCReq.new(service: t[:service]), nil)
end
expected_msg = /#{StatusCodes::NOT_FOUND}/
expect(&blk).to raise_error GRPC::BadStatus, expected_msg
@@ -127,14 +130,15 @@ describe Grpc::Health::Checker do
context 'method `clear_status`' do
success_tests.each do |t|
it "should fail after clearing status when #{t[:desc]}" do
- subject.add_status(t[:service], ServingStatus::NOT_SERVING)
- got = subject.check(HCReq.new(service: t[:service]), nil)
+ checker = Grpc::Health::Checker.new
+ checker.add_status(t[:service], ServingStatus::NOT_SERVING)
+ got = checker.check(HCReq.new(service: t[:service]), nil)
want = HCResp.new(status: ServingStatus::NOT_SERVING)
expect(got).to eq(want)
- subject.clear_status(t[:service])
+ checker.clear_status(t[:service])
blk = proc do
- subject.check(HCReq.new(service: t[:service]), nil)
+ checker.check(HCReq.new(service: t[:service]), nil)
end
expected_msg = /#{StatusCodes::NOT_FOUND}/
expect(&blk).to raise_error GRPC::BadStatus, expected_msg
@@ -144,18 +148,19 @@ describe Grpc::Health::Checker do
context 'method `clear_all`' do
it 'should return NOT_FOUND after being invoked' do
+ checker = Grpc::Health::Checker.new
success_tests.each do |t|
- subject.add_status(t[:service], ServingStatus::NOT_SERVING)
- got = subject.check(HCReq.new(service: t[:service]), nil)
+ checker.add_status(t[:service], ServingStatus::NOT_SERVING)
+ got = checker.check(HCReq.new(service: t[:service]), nil)
want = HCResp.new(status: ServingStatus::NOT_SERVING)
expect(got).to eq(want)
end
- subject.clear_all
+ checker.clear_all
success_tests.each do |t|
blk = proc do
- subject.check(HCReq.new(service: t[:service]), nil)
+ checker.check(HCReq.new(service: t[:service]), nil)
end
expected_msg = /#{StatusCodes::NOT_FOUND}/
expect(&blk).to raise_error GRPC::BadStatus, expected_msg
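Taken together, the rewritten contexts above exercise the whole Checker surface on fresh per-example instances instead of a shared implicit subject; in outline (HCReq/HCResp are the spec's local shorthand for the health-check request/response messages):

checker = Grpc::Health::Checker.new
checker.add_status('some.Service', ServingStatus::NOT_SERVING)
got = checker.check(HCReq.new(service: 'some.Service'), nil)
got == HCResp.new(status: ServingStatus::NOT_SERVING)   # => true
checker.clear_status('some.Service')  # later checks for this service raise GRPC::BadStatus (NOT_FOUND)
checker.clear_all                     # ditto for every previously registered service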
@@ -184,8 +189,10 @@ describe Grpc::Health::Checker do
end
it 'should receive the correct status', server: true do
- @srv.handle(subject)
- subject.add_status('', ServingStatus::NOT_SERVING)
+ Thread.abort_on_exception = true
+ checker = Grpc::Health::Checker.new
+ @srv.handle(checker)
+ checker.add_status('', ServingStatus::NOT_SERVING)
t = Thread.new { @srv.run }
@srv.wait_till_running
@@ -198,7 +205,8 @@ describe Grpc::Health::Checker do
end
it 'should fail on unknown services', server: true do
- @srv.handle(subject)
+ checker = Grpc::Health::Checker.new
+ @srv.handle(checker)
t = Thread.new { @srv.run }
@srv.wait_till_running
blk = proc do