Diffstat (limited to 'src')
-rw-r--r--src/c-ares/CMakeLists.txt34
-rwxr-xr-xsrc/c-ares/gen_build_yaml.py12
-rw-r--r--src/compiler/cpp_generator.cc675
-rw-r--r--src/compiler/objective_c_generator.cc21
-rw-r--r--src/compiler/objective_c_generator.h4
-rw-r--r--src/compiler/objective_c_plugin.cc40
-rw-r--r--src/compiler/php_generator.cc32
-rw-r--r--src/compiler/python_generator.cc4
-rw-r--r--src/core/ext/census/README.md61
-rw-r--r--src/core/ext/census/aggregation.h51
-rw-r--r--src/core/ext/census/base_resources.c56
-rw-r--r--src/core/ext/census/base_resources.h24
-rw-r--r--src/core/ext/census/census_interface.h61
-rw-r--r--src/core/ext/census/census_log.c588
-rw-r--r--src/core/ext/census/census_log.h76
-rw-r--r--src/core/ext/census/census_rpc_stats.c238
-rw-r--r--src/core/ext/census/census_rpc_stats.h86
-rw-r--r--src/core/ext/census/census_tracing.c226
-rw-r--r--src/core/ext/census/census_tracing.h81
-rw-r--r--src/core/ext/census/context.c494
-rw-r--r--src/core/ext/census/gen/README.md10
-rw-r--r--src/core/ext/census/gen/census.pb.c161
-rw-r--r--src/core/ext/census/gen/census.pb.h280
-rw-r--r--src/core/ext/census/gen/trace_context.pb.c39
-rw-r--r--src/core/ext/census/gen/trace_context.pb.h78
-rw-r--r--src/core/ext/census/grpc_context.cc (renamed from src/core/ext/census/grpc_context.c)3
-rw-r--r--src/core/ext/census/grpc_filter.c198
-rw-r--r--src/core/ext/census/grpc_plugin.c70
-rw-r--r--src/core/ext/census/hash_table.c288
-rw-r--r--src/core/ext/census/hash_table.h116
-rw-r--r--src/core/ext/census/initialize.c51
-rw-r--r--src/core/ext/census/intrusive_hash_map.c305
-rw-r--r--src/core/ext/census/intrusive_hash_map.h152
-rw-r--r--src/core/ext/census/intrusive_hash_map_internal.h48
-rw-r--r--src/core/ext/census/mlog.c585
-rw-r--r--src/core/ext/census/mlog.h80
-rw-r--r--src/core/ext/census/operation.c48
-rw-r--r--src/core/ext/census/placeholders.c49
-rw-r--r--src/core/ext/census/resource.c299
-rw-r--r--src/core/ext/census/resource.h48
-rw-r--r--src/core/ext/census/rpc_metric_id.h36
-rw-r--r--src/core/ext/census/trace_context.c71
-rw-r--r--src/core/ext/census/trace_context.h56
-rw-r--r--src/core/ext/census/trace_label.h46
-rw-r--r--src/core/ext/census/trace_propagation.h48
-rw-r--r--src/core/ext/census/trace_string.h35
-rw-r--r--src/core/ext/census/tracing.c55
-rw-r--r--src/core/ext/census/tracing.h109
-rw-r--r--src/core/ext/census/window_stats.c301
-rw-r--r--src/core/ext/census/window_stats.h158
-rw-r--r--src/core/ext/filters/client_channel/backup_poller.cc158
-rw-r--r--src/core/ext/filters/client_channel/backup_poller.h (renamed from src/core/ext/census/grpc_filter.h)19
-rw-r--r--src/core/ext/filters/client_channel/channel_connectivity.cc (renamed from src/core/ext/filters/client_channel/channel_connectivity.c)29
-rw-r--r--src/core/ext/filters/client_channel/client_channel.cc (renamed from src/core/ext/filters/client_channel/client_channel.c)943
-rw-r--r--src/core/ext/filters/client_channel/client_channel.h8
-rw-r--r--src/core/ext/filters/client_channel/client_channel_factory.cc (renamed from src/core/ext/filters/client_channel/client_channel_factory.c)9
-rw-r--r--src/core/ext/filters/client_channel/client_channel_factory.h8
-rw-r--r--src/core/ext/filters/client_channel/client_channel_plugin.cc (renamed from src/core/ext/filters/client_channel/client_channel_plugin.c)8
-rw-r--r--src/core/ext/filters/client_channel/connector.cc (renamed from src/core/ext/filters/client_channel/connector.c)0
-rw-r--r--src/core/ext/filters/client_channel/connector.h10
-rw-r--r--src/core/ext/filters/client_channel/http_connect_handshaker.cc (renamed from src/core/ext/filters/client_channel/http_connect_handshaker.c)12
-rw-r--r--src/core/ext/filters/client_channel/http_connect_handshaker.h8
-rw-r--r--src/core/ext/filters/client_channel/http_proxy.cc (renamed from src/core/ext/filters/client_channel/http_proxy.c)38
-rw-r--r--src/core/ext/filters/client_channel/http_proxy.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.cc (renamed from src/core/ext/filters/client_channel/lb_policy.c)2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c)14
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c)707
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c)0
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c)0
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c)12
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h8
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (renamed from src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c)50
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h3
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c710
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc617
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (renamed from src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c)481
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc265
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/subchannel_list.h153
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.cc (renamed from src/core/ext/filters/client_channel/lb_policy_factory.c)18
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.h10
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.cc (renamed from src/core/ext/filters/client_channel/lb_policy_registry.c)0
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.h8
-rw-r--r--src/core/ext/filters/client_channel/parse_address.cc (renamed from src/core/ext/filters/client_channel/parse_address.c)0
-rw-r--r--src/core/ext/filters/client_channel/parse_address.h8
-rw-r--r--src/core/ext/filters/client_channel/proxy_mapper.cc (renamed from src/core/ext/filters/client_channel/proxy_mapper.c)0
-rw-r--r--src/core/ext/filters/client_channel/proxy_mapper.h8
-rw-r--r--src/core/ext/filters/client_channel/proxy_mapper_registry.cc (renamed from src/core/ext/filters/client_channel/proxy_mapper_registry.c)2
-rw-r--r--src/core/ext/filters/client_channel/proxy_mapper_registry.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver.cc (renamed from src/core/ext/filters/client_channel/resolver.c)0
-rw-r--r--src/core/ext/filters/client_channel/resolver.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c)51
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c)90
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c)52
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c)0
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c)41
-rw-r--r--src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c)11
-rw-r--r--src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc (renamed from src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c)4
-rw-r--r--src/core/ext/filters/client_channel/resolver_factory.cc (renamed from src/core/ext/filters/client_channel/resolver_factory.c)0
-rw-r--r--src/core/ext/filters/client_channel/resolver_factory.h8
-rw-r--r--src/core/ext/filters/client_channel/resolver_registry.cc (renamed from src/core/ext/filters/client_channel/resolver_registry.c)0
-rw-r--r--src/core/ext/filters/client_channel/resolver_registry.h8
-rw-r--r--src/core/ext/filters/client_channel/retry_throttle.cc (renamed from src/core/ext/filters/client_channel/retry_throttle.c)15
-rw-r--r--src/core/ext/filters/client_channel/retry_throttle.h8
-rw-r--r--src/core/ext/filters/client_channel/subchannel.cc (renamed from src/core/ext/filters/client_channel/subchannel.c)101
-rw-r--r--src/core/ext/filters/client_channel/subchannel.h19
-rw-r--r--src/core/ext/filters/client_channel/subchannel_index.cc (renamed from src/core/ext/filters/client_channel/subchannel_index.c)57
-rw-r--r--src/core/ext/filters/client_channel/subchannel_index.h15
-rw-r--r--src/core/ext/filters/client_channel/uri_parser.cc (renamed from src/core/ext/filters/client_channel/uri_parser.c)7
-rw-r--r--src/core/ext/filters/client_channel/uri_parser.h8
-rw-r--r--src/core/ext/filters/deadline/deadline_filter.cc (renamed from src/core/ext/filters/deadline/deadline_filter.c)138
-rw-r--r--src/core/ext/filters/deadline/deadline_filter.h21
-rw-r--r--src/core/ext/filters/http/client/http_client_filter.cc (renamed from src/core/ext/filters/http/client/http_client_filter.c)28
-rw-r--r--src/core/ext/filters/http/client/http_client_filter.h8
-rw-r--r--src/core/ext/filters/http/http_filters_plugin.cc (renamed from src/core/ext/filters/http/http_filters_plugin.c)6
-rw-r--r--src/core/ext/filters/http/message_compress/message_compress_filter.cc (renamed from src/core/ext/filters/http/message_compress/message_compress_filter.c)294
-rw-r--r--src/core/ext/filters/http/message_compress/message_compress_filter.h8
-rw-r--r--src/core/ext/filters/http/server/http_server_filter.cc (renamed from src/core/ext/filters/http/server/http_server_filter.c)84
-rw-r--r--src/core/ext/filters/http/server/http_server_filter.h8
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_filter.cc (renamed from src/core/ext/filters/load_reporting/load_reporting_filter.c)23
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_filter.h (renamed from src/core/ext/filters/load_reporting/load_reporting_filter.h)19
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc (renamed from src/core/ext/filters/load_reporting/load_reporting.c)32
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_plugin.h (renamed from src/core/ext/filters/load_reporting/load_reporting.h)15
-rw-r--r--src/core/ext/filters/max_age/max_age_filter.cc (renamed from src/core/ext/filters/max_age/max_age_filter.c)91
-rw-r--r--src/core/ext/filters/max_age/max_age_filter.h8
-rw-r--r--src/core/ext/filters/message_size/message_size_filter.cc (renamed from src/core/ext/filters/message_size/message_size_filter.c)10
-rw-r--r--src/core/ext/filters/message_size/message_size_filter.h8
-rw-r--r--src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc (renamed from src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c)5
-rw-r--r--src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h8
-rw-r--r--src/core/ext/filters/workarounds/workaround_utils.cc (renamed from src/core/ext/filters/workarounds/workaround_utils.c)0
-rw-r--r--src/core/ext/filters/workarounds/workaround_utils.h8
-rw-r--r--src/core/ext/transport/chttp2/alpn/alpn.cc (renamed from src/core/ext/transport/chttp2/alpn/alpn.c)0
-rw-r--r--src/core/ext/transport/chttp2/alpn/alpn.h8
-rw-r--r--src/core/ext/transport/chttp2/client/chttp2_connector.cc (renamed from src/core/ext/transport/chttp2/client/chttp2_connector.c)14
-rw-r--r--src/core/ext/transport/chttp2/client/chttp2_connector.h8
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create.cc (renamed from src/core/ext/transport/chttp2/client/insecure/channel_create.c)2
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc (renamed from src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c)2
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (renamed from src/core/ext/transport/chttp2/client/secure/secure_channel_create.c)8
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.cc (renamed from src/core/ext/transport/chttp2/server/chttp2_server.c)43
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.h8
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc (renamed from src/core/ext/transport/chttp2/server/insecure/server_chttp2.c)0
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc (renamed from src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c)0
-rw-r--r--src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc (renamed from src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c)6
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.cc (renamed from src/core/ext/transport/chttp2/transport/bin_decoder.c)0
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_encoder.cc (renamed from src/core/ext/transport/chttp2/transport/bin_encoder.c)0
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_encoder.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_plugin.cc (renamed from src/core/ext/transport/chttp2/transport/chttp2_plugin.c)5
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.cc (renamed from src/core/ext/transport/chttp2/transport/chttp2_transport.c)1087
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.h9
-rw-r--r--src/core/ext/transport/chttp2/transport/flow_control.c500
-rw-r--r--src/core/ext/transport/chttp2/transport/flow_control.cc381
-rw-r--r--src/core/ext/transport/chttp2/transport/flow_control.h336
-rw-r--r--src/core/ext/transport/chttp2/transport/frame.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.cc (renamed from src/core/ext/transport/chttp2/transport/frame_data.c)4
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.cc (renamed from src/core/ext/transport/chttp2/transport/frame_goaway.c)4
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.cc (renamed from src/core/ext/transport/chttp2/transport/frame_ping.c)20
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (renamed from src/core/ext/transport/chttp2/transport/frame_rst_stream.c)2
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.cc (renamed from src/core/ext/transport/chttp2/transport/frame_settings.c)23
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.cc (renamed from src/core/ext/transport/chttp2/transport/frame_window_update.c)24
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.cc (renamed from src/core/ext/transport/chttp2/transport/hpack_encoder.c)345
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.cc (renamed from src/core/ext/transport/chttp2/transport/hpack_parser.c)66
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.cc (renamed from src/core/ext/transport/chttp2/transport/hpack_table.c)6
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/http2_settings.cc (renamed from src/core/ext/transport/chttp2/transport/http2_settings.c)0
-rw-r--r--src/core/ext/transport/chttp2/transport/http2_settings.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/huffsyms.cc (renamed from src/core/ext/transport/chttp2/transport/huffsyms.c)0
-rw-r--r--src/core/ext/transport/chttp2/transport/huffsyms.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.cc (renamed from src/core/ext/transport/chttp2/transport/incoming_metadata.c)9
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.h10
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h288
-rw-r--r--src/core/ext/transport/chttp2/transport/parsing.cc (renamed from src/core/ext/transport/chttp2/transport/parsing.c)64
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_lists.cc (renamed from src/core/ext/transport/chttp2/transport/stream_lists.c)53
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.cc (renamed from src/core/ext/transport/chttp2/transport/stream_map.c)10
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/varint.cc (renamed from src/core/ext/transport/chttp2/transport/varint.c)0
-rw-r--r--src/core/ext/transport/chttp2/transport/varint.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.c497
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.cc639
-rw-r--r--src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc (renamed from src/core/ext/transport/cronet/client/secure/cronet_channel_create.c)2
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_api_dummy.cc (renamed from src/core/ext/transport/cronet/transport/cronet_api_dummy.c)0
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.cc (renamed from src/core/ext/transport/cronet/transport/cronet_transport.c)24
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.h8
-rw-r--r--src/core/ext/transport/inproc/inproc_plugin.cc (renamed from src/core/ext/transport/inproc/inproc_plugin.c)6
-rw-r--r--src/core/ext/transport/inproc/inproc_transport.cc (renamed from src/core/ext/transport/inproc/inproc_transport.c)811
-rw-r--r--src/core/lib/backoff/backoff.cc (renamed from src/core/lib/support/backoff.c)33
-rw-r--r--src/core/lib/backoff/backoff.h (renamed from src/core/lib/support/backoff.h)42
-rw-r--r--src/core/lib/channel/channel_args.cc (renamed from src/core/lib/channel/channel_args.c)34
-rw-r--r--src/core/lib/channel/channel_args.h8
-rw-r--r--src/core/lib/channel/channel_stack.cc (renamed from src/core/lib/channel/channel_stack.c)16
-rw-r--r--src/core/lib/channel/channel_stack.h15
-rw-r--r--src/core/lib/channel/channel_stack_builder.cc (renamed from src/core/lib/channel/channel_stack_builder.c)51
-rw-r--r--src/core/lib/channel/channel_stack_builder.h10
-rw-r--r--src/core/lib/channel/connected_channel.cc (renamed from src/core/lib/channel/connected_channel.c)117
-rw-r--r--src/core/lib/channel/connected_channel.h8
-rw-r--r--src/core/lib/channel/handshaker.cc (renamed from src/core/lib/channel/handshaker.c)20
-rw-r--r--src/core/lib/channel/handshaker.h10
-rw-r--r--src/core/lib/channel/handshaker_factory.cc (renamed from src/core/lib/channel/handshaker_factory.c)0
-rw-r--r--src/core/lib/channel/handshaker_factory.h8
-rw-r--r--src/core/lib/channel/handshaker_registry.cc (renamed from src/core/lib/channel/handshaker_registry.c)2
-rw-r--r--src/core/lib/channel/handshaker_registry.h8
-rw-r--r--src/core/lib/compression/algorithm_metadata.h8
-rw-r--r--src/core/lib/compression/compression.cc (renamed from src/core/lib/compression/compression.c)4
-rw-r--r--src/core/lib/compression/message_compress.cc (renamed from src/core/lib/compression/message_compress.c)0
-rw-r--r--src/core/lib/compression/message_compress.h8
-rw-r--r--src/core/lib/compression/stream_compression.cc77
-rw-r--r--src/core/lib/compression/stream_compression.h40
-rw-r--r--src/core/lib/compression/stream_compression_gzip.cc (renamed from src/core/lib/compression/stream_compression.c)96
-rw-r--r--src/core/lib/compression/stream_compression_gzip.h (renamed from src/core/lib/iomgr/ev_epoll_thread_pool_linux.h)20
-rw-r--r--src/core/lib/compression/stream_compression_identity.cc93
-rw-r--r--src/core/lib/compression/stream_compression_identity.h35
-rw-r--r--src/core/lib/debug/stats.c67
-rw-r--r--src/core/lib/debug/stats.cc174
-rw-r--r--src/core/lib/debug/stats.h25
-rw-r--r--src/core/lib/debug/stats_data.cc707
-rw-r--r--src/core/lib/debug/stats_data.h476
-rw-r--r--src/core/lib/debug/stats_data.yaml291
-rw-r--r--src/core/lib/debug/stats_data_bq_schema.sql96
-rw-r--r--src/core/lib/debug/trace.cc (renamed from src/core/lib/debug/trace.c)7
-rw-r--r--src/core/lib/debug/trace.h10
-rw-r--r--src/core/lib/http/format_request.cc (renamed from src/core/lib/http/format_request.c)2
-rw-r--r--src/core/lib/http/format_request.h8
-rw-r--r--src/core/lib/http/httpcli.cc (renamed from src/core/lib/http/httpcli.c)28
-rw-r--r--src/core/lib/http/httpcli.h20
-rw-r--r--src/core/lib/http/httpcli_security_connector.cc (renamed from src/core/lib/http/httpcli_security_connector.c)29
-rw-r--r--src/core/lib/http/parser.cc (renamed from src/core/lib/http/parser.c)7
-rw-r--r--src/core/lib/http/parser.h8
-rw-r--r--src/core/lib/iomgr/block_annotate.h (renamed from src/core/lib/support/block_annotate.h)25
-rw-r--r--src/core/lib/iomgr/call_combiner.cc215
-rw-r--r--src/core/lib/iomgr/call_combiner.h129
-rw-r--r--src/core/lib/iomgr/closure.cc (renamed from src/core/lib/iomgr/closure.c)22
-rw-r--r--src/core/lib/iomgr/combiner.cc (renamed from src/core/lib/iomgr/combiner.c)17
-rw-r--r--src/core/lib/iomgr/combiner.h8
-rw-r--r--src/core/lib/iomgr/endpoint.cc (renamed from src/core/lib/iomgr/endpoint.c)6
-rw-r--r--src/core/lib/iomgr/endpoint.h19
-rw-r--r--src/core/lib/iomgr/endpoint_pair.h8
-rw-r--r--src/core/lib/iomgr/endpoint_pair_posix.cc (renamed from src/core/lib/iomgr/endpoint_pair_posix.c)0
-rw-r--r--src/core/lib/iomgr/endpoint_pair_uv.cc (renamed from src/core/lib/iomgr/endpoint_pair_uv.c)0
-rw-r--r--src/core/lib/iomgr/endpoint_pair_windows.cc (renamed from src/core/lib/iomgr/endpoint_pair_windows.c)0
-rw-r--r--src/core/lib/iomgr/error.cc (renamed from src/core/lib/iomgr/error.c)52
-rw-r--r--src/core/lib/iomgr/error.h2
-rw-r--r--src/core/lib/iomgr/error_internal.h8
-rw-r--r--src/core/lib/iomgr/ev_epoll1_linux.cc (renamed from src/core/lib/iomgr/ev_epoll1_linux.c)375
-rw-r--r--src/core/lib/iomgr/ev_epoll1_linux.h8
-rw-r--r--src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c1961
-rw-r--r--src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h28
-rw-r--r--src/core/lib/iomgr/ev_epoll_thread_pool_linux.c1184
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.c1450
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.cc1483
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.h8
-rw-r--r--src/core/lib/iomgr/ev_epollsig_linux.cc (renamed from src/core/lib/iomgr/ev_epollsig_linux.c)124
-rw-r--r--src/core/lib/iomgr/ev_epollsig_linux.h8
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.cc (renamed from src/core/lib/iomgr/ev_poll_posix.c)215
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.h8
-rw-r--r--src/core/lib/iomgr/ev_posix.cc (renamed from src/core/lib/iomgr/ev_posix.c)56
-rw-r--r--src/core/lib/iomgr/ev_posix.h14
-rw-r--r--src/core/lib/iomgr/ev_windows.cc (renamed from src/core/lib/iomgr/ev_windows.c)0
-rw-r--r--src/core/lib/iomgr/exec_ctx.cc (renamed from src/core/lib/iomgr/exec_ctx.c)71
-rw-r--r--src/core/lib/iomgr/exec_ctx.h26
-rw-r--r--src/core/lib/iomgr/executor.c195
-rw-r--r--src/core/lib/iomgr/executor.cc302
-rw-r--r--src/core/lib/iomgr/executor.h15
-rw-r--r--src/core/lib/iomgr/gethostname.h8
-rw-r--r--src/core/lib/iomgr/gethostname_fallback.cc (renamed from src/core/lib/iomgr/gethostname_fallback.c)1
-rw-r--r--src/core/lib/iomgr/gethostname_host_name_max.cc (renamed from src/core/lib/iomgr/gethostname_host_name_max.c)1
-rw-r--r--src/core/lib/iomgr/gethostname_sysconf.cc (renamed from src/core/lib/iomgr/gethostname_sysconf.c)1
-rw-r--r--src/core/lib/iomgr/iocp_windows.cc (renamed from src/core/lib/iomgr/iocp_windows.c)36
-rw-r--r--src/core/lib/iomgr/iocp_windows.h10
-rw-r--r--src/core/lib/iomgr/iomgr.cc (renamed from src/core/lib/iomgr/iomgr.c)20
-rw-r--r--src/core/lib/iomgr/iomgr.h8
-rw-r--r--src/core/lib/iomgr/iomgr_internal.h8
-rw-r--r--src/core/lib/iomgr/iomgr_posix.cc (renamed from src/core/lib/iomgr/iomgr_posix.c)0
-rw-r--r--src/core/lib/iomgr/iomgr_uv.cc (renamed from src/core/lib/iomgr/iomgr_uv.c)0
-rw-r--r--src/core/lib/iomgr/iomgr_uv.h8
-rw-r--r--src/core/lib/iomgr/iomgr_windows.cc (renamed from src/core/lib/iomgr/iomgr_windows.c)0
-rw-r--r--src/core/lib/iomgr/is_epollexclusive_available.cc (renamed from src/core/lib/iomgr/is_epollexclusive_available.c)12
-rw-r--r--src/core/lib/iomgr/is_epollexclusive_available.h8
-rw-r--r--src/core/lib/iomgr/load_file.cc (renamed from src/core/lib/iomgr/load_file.c)7
-rw-r--r--src/core/lib/iomgr/lockfree_event.cc (renamed from src/core/lib/iomgr/lockfree_event.c)0
-rw-r--r--src/core/lib/iomgr/lockfree_event.h8
-rw-r--r--src/core/lib/iomgr/network_status_tracker.cc (renamed from src/core/lib/iomgr/network_status_tracker.c)1
-rw-r--r--src/core/lib/iomgr/network_status_tracker.h8
-rw-r--r--src/core/lib/iomgr/polling_entity.cc (renamed from src/core/lib/iomgr/polling_entity.c)18
-rw-r--r--src/core/lib/iomgr/polling_entity.h16
-rw-r--r--src/core/lib/iomgr/pollset.h14
-rw-r--r--src/core/lib/iomgr/pollset_set.h8
-rw-r--r--src/core/lib/iomgr/pollset_set_uv.cc (renamed from src/core/lib/iomgr/pollset_set_uv.c)0
-rw-r--r--src/core/lib/iomgr/pollset_set_windows.cc (renamed from src/core/lib/iomgr/pollset_set_windows.c)0
-rw-r--r--src/core/lib/iomgr/pollset_uv.cc (renamed from src/core/lib/iomgr/pollset_uv.c)11
-rw-r--r--src/core/lib/iomgr/pollset_uv.h8
-rw-r--r--src/core/lib/iomgr/pollset_windows.cc (renamed from src/core/lib/iomgr/pollset_windows.c)13
-rw-r--r--src/core/lib/iomgr/pollset_windows.h8
-rw-r--r--src/core/lib/iomgr/port.h10
-rw-r--r--src/core/lib/iomgr/resolve_address.h8
-rw-r--r--src/core/lib/iomgr/resolve_address_posix.cc (renamed from src/core/lib/iomgr/resolve_address_posix.c)21
-rw-r--r--src/core/lib/iomgr/resolve_address_uv.cc (renamed from src/core/lib/iomgr/resolve_address_uv.c)25
-rw-r--r--src/core/lib/iomgr/resolve_address_windows.cc (renamed from src/core/lib/iomgr/resolve_address_windows.c)18
-rw-r--r--src/core/lib/iomgr/resource_quota.cc (renamed from src/core/lib/iomgr/resource_quota.c)78
-rw-r--r--src/core/lib/iomgr/resource_quota.h8
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.cc (renamed from src/core/lib/iomgr/sockaddr_utils.c)1
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.h8
-rw-r--r--src/core/lib/iomgr/socket_factory_posix.cc (renamed from src/core/lib/iomgr/socket_factory_posix.c)8
-rw-r--r--src/core/lib/iomgr/socket_mutator.cc (renamed from src/core/lib/iomgr/socket_mutator.c)8
-rw-r--r--src/core/lib/iomgr/socket_utils.h8
-rw-r--r--src/core/lib/iomgr/socket_utils_common_posix.cc (renamed from src/core/lib/iomgr/socket_utils_common_posix.c)0
-rw-r--r--src/core/lib/iomgr/socket_utils_linux.cc (renamed from src/core/lib/iomgr/socket_utils_linux.c)0
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.cc (renamed from src/core/lib/iomgr/socket_utils_posix.c)0
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.h8
-rw-r--r--src/core/lib/iomgr/socket_utils_uv.cc (renamed from src/core/lib/iomgr/socket_utils_uv.c)0
-rw-r--r--src/core/lib/iomgr/socket_utils_windows.cc (renamed from src/core/lib/iomgr/socket_utils_windows.c)4
-rw-r--r--src/core/lib/iomgr/socket_windows.cc (renamed from src/core/lib/iomgr/socket_windows.c)2
-rw-r--r--src/core/lib/iomgr/socket_windows.h8
-rw-r--r--src/core/lib/iomgr/tcp_client.h10
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.cc (renamed from src/core/lib/iomgr/tcp_client_posix.c)22
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.h8
-rw-r--r--src/core/lib/iomgr/tcp_client_uv.cc (renamed from src/core/lib/iomgr/tcp_client_uv.c)24
-rw-r--r--src/core/lib/iomgr/tcp_client_windows.cc (renamed from src/core/lib/iomgr/tcp_client_windows.c)20
-rw-r--r--src/core/lib/iomgr/tcp_posix.cc (renamed from src/core/lib/iomgr/tcp_posix.c)210
-rw-r--r--src/core/lib/iomgr/tcp_posix.h8
-rw-r--r--src/core/lib/iomgr/tcp_server.h8
-rw-r--r--src/core/lib/iomgr/tcp_server_posix.cc (renamed from src/core/lib/iomgr/tcp_server_posix.c)15
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix.h8
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_common.cc (renamed from src/core/lib/iomgr/tcp_server_utils_posix_common.c)2
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc (renamed from src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c)0
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc (renamed from src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c)0
-rw-r--r--src/core/lib/iomgr/tcp_server_uv.cc (renamed from src/core/lib/iomgr/tcp_server_uv.c)20
-rw-r--r--src/core/lib/iomgr/tcp_server_windows.cc (renamed from src/core/lib/iomgr/tcp_server_windows.c)18
-rw-r--r--src/core/lib/iomgr/tcp_uv.cc (renamed from src/core/lib/iomgr/tcp_uv.c)33
-rw-r--r--src/core/lib/iomgr/tcp_uv.h8
-rw-r--r--src/core/lib/iomgr/tcp_windows.cc (renamed from src/core/lib/iomgr/tcp_windows.c)26
-rw-r--r--src/core/lib/iomgr/tcp_windows.h10
-rw-r--r--src/core/lib/iomgr/time_averaged_stats.cc (renamed from src/core/lib/iomgr/time_averaged_stats.c)0
-rw-r--r--src/core/lib/iomgr/time_averaged_stats.h8
-rw-r--r--src/core/lib/iomgr/timer.h19
-rw-r--r--src/core/lib/iomgr/timer_generic.cc (renamed from src/core/lib/iomgr/timer_generic.c)269
-rw-r--r--src/core/lib/iomgr/timer_generic.h3
-rw-r--r--src/core/lib/iomgr/timer_heap.cc (renamed from src/core/lib/iomgr/timer_heap.c)8
-rw-r--r--src/core/lib/iomgr/timer_heap.h8
-rw-r--r--src/core/lib/iomgr/timer_manager.cc (renamed from src/core/lib/iomgr/timer_manager.c)58
-rw-r--r--src/core/lib/iomgr/timer_manager.h8
-rw-r--r--src/core/lib/iomgr/timer_uv.cc (renamed from src/core/lib/iomgr/timer_uv.c)17
-rw-r--r--src/core/lib/iomgr/udp_server.cc (renamed from src/core/lib/iomgr/udp_server.c)18
-rw-r--r--src/core/lib/iomgr/udp_server.h8
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.cc (renamed from src/core/lib/iomgr/unix_sockets_posix.c)6
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.h8
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix_noop.cc (renamed from src/core/lib/iomgr/unix_sockets_posix_noop.c)0
-rw-r--r--src/core/lib/iomgr/wakeup_fd_cv.cc (renamed from src/core/lib/iomgr/wakeup_fd_cv.c)17
-rw-r--r--src/core/lib/iomgr/wakeup_fd_cv.h14
-rw-r--r--src/core/lib/iomgr/wakeup_fd_eventfd.cc (renamed from src/core/lib/iomgr/wakeup_fd_eventfd.c)0
-rw-r--r--src/core/lib/iomgr/wakeup_fd_nospecial.cc (renamed from src/core/lib/iomgr/wakeup_fd_nospecial.c)0
-rw-r--r--src/core/lib/iomgr/wakeup_fd_pipe.cc (renamed from src/core/lib/iomgr/wakeup_fd_pipe.c)1
-rw-r--r--src/core/lib/iomgr/wakeup_fd_pipe.h10
-rw-r--r--src/core/lib/iomgr/wakeup_fd_posix.cc (renamed from src/core/lib/iomgr/wakeup_fd_posix.c)1
-rw-r--r--src/core/lib/iomgr/wakeup_fd_posix.h8
-rw-r--r--src/core/lib/json/json.cc (renamed from src/core/lib/json/json.c)2
-rw-r--r--src/core/lib/json/json.h8
-rw-r--r--src/core/lib/json/json_reader.cc (renamed from src/core/lib/json/json_reader.c)0
-rw-r--r--src/core/lib/json/json_reader.h8
-rw-r--r--src/core/lib/json/json_string.cc (renamed from src/core/lib/json/json_string.c)26
-rw-r--r--src/core/lib/json/json_writer.cc (renamed from src/core/lib/json/json_writer.c)0
-rw-r--r--src/core/lib/json/json_writer.h8
-rw-r--r--src/core/lib/profiling/basic_timers.cc (renamed from src/core/lib/profiling/basic_timers.c)12
-rw-r--r--src/core/lib/profiling/stap_timers.cc (renamed from src/core/lib/profiling/stap_timers.c)0
-rw-r--r--src/core/lib/security/context/security_context.cc (renamed from src/core/lib/security/context/security_context.c)26
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.cc (renamed from src/core/lib/security/credentials/composite/composite_credentials.c)26
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.h8
-rw-r--r--src/core/lib/security/credentials/credentials.cc (renamed from src/core/lib/security/credentials/credentials.c)22
-rw-r--r--src/core/lib/security/credentials/credentials.h8
-rw-r--r--src/core/lib/security/credentials/credentials_metadata.cc (renamed from src/core/lib/security/credentials/credentials_metadata.c)3
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.cc (renamed from src/core/lib/security/credentials/fake/fake_credentials.c)18
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.h8
-rw-r--r--src/core/lib/security/credentials/google_default/credentials_generic.cc (renamed from src/core/lib/security/credentials/google_default/credentials_generic.c)0
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.cc (renamed from src/core/lib/security/credentials/google_default/google_default_credentials.c)19
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.h8
-rw-r--r--src/core/lib/security/credentials/iam/iam_credentials.cc (renamed from src/core/lib/security/credentials/iam/iam_credentials.c)3
-rw-r--r--src/core/lib/security/credentials/jwt/json_token.cc (renamed from src/core/lib/security/credentials/jwt/json_token.c)9
-rw-r--r--src/core/lib/security/credentials/jwt/json_token.h8
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.cc (renamed from src/core/lib/security/credentials/jwt/jwt_credentials.c)6
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.h8
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.cc (renamed from src/core/lib/security/credentials/jwt/jwt_verifier.c)43
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.h10
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (renamed from src/core/lib/security/credentials/oauth2/oauth2_credentials.c)62
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.h14
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.cc (renamed from src/core/lib/security/credentials/plugin/plugin_credentials.c)185
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.h2
-rw-r--r--src/core/lib/security/credentials/ssl/ssl_credentials.c194
-rw-r--r--src/core/lib/security/credentials/ssl/ssl_credentials.cc343
-rw-r--r--src/core/lib/security/credentials/ssl/ssl_credentials.h27
-rw-r--r--src/core/lib/security/transport/auth_filters.h8
-rw-r--r--src/core/lib/security/transport/client_auth_filter.cc (renamed from src/core/lib/security/transport/client_auth_filter.c)235
-rw-r--r--src/core/lib/security/transport/lb_targets_info.cc (renamed from src/core/lib/security/transport/lb_targets_info.c)13
-rw-r--r--src/core/lib/security/transport/lb_targets_info.h8
-rw-r--r--src/core/lib/security/transport/secure_endpoint.cc (renamed from src/core/lib/security/transport/secure_endpoint.c)208
-rw-r--r--src/core/lib/security/transport/secure_endpoint.h19
-rw-r--r--src/core/lib/security/transport/security_connector.cc (renamed from src/core/lib/security/transport/security_connector.c)415
-rw-r--r--src/core/lib/security/transport/security_connector.h42
-rw-r--r--src/core/lib/security/transport/security_handshaker.cc (renamed from src/core/lib/security/transport/security_handshaker.c)69
-rw-r--r--src/core/lib/security/transport/security_handshaker.h8
-rw-r--r--src/core/lib/security/transport/server_auth_filter.cc (renamed from src/core/lib/security/transport/server_auth_filter.c)115
-rw-r--r--src/core/lib/security/transport/tsi_error.cc (renamed from src/core/lib/security/transport/tsi_error.c)0
-rw-r--r--src/core/lib/security/transport/tsi_error.h8
-rw-r--r--src/core/lib/security/util/json_util.cc (renamed from src/core/lib/security/util/json_util.c)0
-rw-r--r--src/core/lib/security/util/json_util.h8
-rw-r--r--src/core/lib/slice/b64.cc (renamed from src/core/lib/slice/b64.c)4
-rw-r--r--src/core/lib/slice/b64.h8
-rw-r--r--src/core/lib/slice/percent_encoding.cc (renamed from src/core/lib/slice/percent_encoding.c)0
-rw-r--r--src/core/lib/slice/percent_encoding.h8
-rw-r--r--src/core/lib/slice/slice.cc (renamed from src/core/lib/slice/slice.c)30
-rw-r--r--src/core/lib/slice/slice_buffer.cc (renamed from src/core/lib/slice/slice_buffer.c)9
-rw-r--r--src/core/lib/slice/slice_hash_table.cc (renamed from src/core/lib/slice/slice_hash_table.c)5
-rw-r--r--src/core/lib/slice/slice_hash_table.h8
-rw-r--r--src/core/lib/slice/slice_intern.cc (renamed from src/core/lib/slice/slice_intern.c)14
-rw-r--r--src/core/lib/slice/slice_internal.h8
-rw-r--r--src/core/lib/slice/slice_string_helpers.cc (renamed from src/core/lib/slice/slice_string_helpers.c)0
-rw-r--r--src/core/lib/slice/slice_traits.h8
-rw-r--r--src/core/lib/support/alloc.cc (renamed from src/core/lib/support/alloc.c)0
-rw-r--r--src/core/lib/support/arena.cc (renamed from src/core/lib/support/arena.c)0
-rw-r--r--src/core/lib/support/arena.h8
-rw-r--r--src/core/lib/support/atm.cc (renamed from src/core/lib/support/atm.c)0
-rw-r--r--src/core/lib/support/avl.cc (renamed from src/core/lib/support/avl.c)0
-rw-r--r--src/core/lib/support/cmdline.cc (renamed from src/core/lib/support/cmdline.c)0
-rw-r--r--src/core/lib/support/cpu_iphone.cc (renamed from src/core/lib/support/cpu_iphone.c)2
-rw-r--r--src/core/lib/support/cpu_linux.cc (renamed from src/core/lib/support/cpu_linux.c)0
-rw-r--r--src/core/lib/support/cpu_posix.cc (renamed from src/core/lib/support/cpu_posix.c)1
-rw-r--r--src/core/lib/support/cpu_windows.cc (renamed from src/core/lib/support/cpu_windows.c)1
-rw-r--r--src/core/lib/support/env_linux.cc (renamed from src/core/lib/support/env_linux.c)0
-rw-r--r--src/core/lib/support/env_posix.cc (renamed from src/core/lib/support/env_posix.c)0
-rw-r--r--src/core/lib/support/env_windows.cc (renamed from src/core/lib/support/env_windows.c)7
-rw-r--r--src/core/lib/support/histogram.cc (renamed from src/core/lib/support/histogram.c)0
-rw-r--r--src/core/lib/support/host_port.cc (renamed from src/core/lib/support/host_port.c)0
-rw-r--r--src/core/lib/support/log.cc (renamed from src/core/lib/support/log.c)2
-rw-r--r--src/core/lib/support/log_android.cc (renamed from src/core/lib/support/log_android.c)8
-rw-r--r--src/core/lib/support/log_linux.cc (renamed from src/core/lib/support/log_linux.c)4
-rw-r--r--src/core/lib/support/log_posix.cc (renamed from src/core/lib/support/log_posix.c)6
-rw-r--r--src/core/lib/support/log_windows.cc (renamed from src/core/lib/support/log_windows.c)6
-rw-r--r--src/core/lib/support/manual_constructor.h76
-rw-r--r--src/core/lib/support/memory.h41
-rw-r--r--src/core/lib/support/mpscq.cc (renamed from src/core/lib/support/mpscq.c)0
-rw-r--r--src/core/lib/support/mpscq.h8
-rw-r--r--src/core/lib/support/murmur_hash.cc (renamed from src/core/lib/support/murmur_hash.c)0
-rw-r--r--src/core/lib/support/murmur_hash.h8
-rw-r--r--src/core/lib/support/spinlock.h5
-rw-r--r--src/core/lib/support/stack_lockfree.cc (renamed from src/core/lib/support/stack_lockfree.c)0
-rw-r--r--src/core/lib/support/stack_lockfree.h8
-rw-r--r--src/core/lib/support/string.cc (renamed from src/core/lib/support/string.c)17
-rw-r--r--src/core/lib/support/string.h3
-rw-r--r--src/core/lib/support/string_posix.cc (renamed from src/core/lib/support/string_posix.c)1
-rw-r--r--src/core/lib/support/string_util_windows.cc (renamed from src/core/lib/support/string_util_windows.c)11
-rw-r--r--src/core/lib/support/string_windows.cc (renamed from src/core/lib/support/string_windows.c)3
-rw-r--r--src/core/lib/support/string_windows.h8
-rw-r--r--src/core/lib/support/subprocess_posix.cc (renamed from src/core/lib/support/subprocess_posix.c)0
-rw-r--r--src/core/lib/support/subprocess_windows.cc (renamed from src/core/lib/support/subprocess_windows.c)2
-rw-r--r--src/core/lib/support/sync.cc (renamed from src/core/lib/support/sync.c)0
-rw-r--r--src/core/lib/support/sync_posix.cc (renamed from src/core/lib/support/sync_posix.c)0
-rw-r--r--src/core/lib/support/sync_windows.cc (renamed from src/core/lib/support/sync_windows.c)2
-rw-r--r--src/core/lib/support/thd.cc (renamed from src/core/lib/support/thd.c)0
-rw-r--r--src/core/lib/support/thd_posix.cc (renamed from src/core/lib/support/thd_posix.c)0
-rw-r--r--src/core/lib/support/thd_windows.cc (renamed from src/core/lib/support/thd_windows.c)2
-rw-r--r--src/core/lib/support/time.cc (renamed from src/core/lib/support/time.c)0
-rw-r--r--src/core/lib/support/time_posix.cc (renamed from src/core/lib/support/time_posix.c)7
-rw-r--r--src/core/lib/support/time_precise.cc (renamed from src/core/lib/support/time_precise.c)2
-rw-r--r--src/core/lib/support/time_precise.h8
-rw-r--r--src/core/lib/support/time_windows.cc (renamed from src/core/lib/support/time_windows.c)5
-rw-r--r--src/core/lib/support/tls_pthread.cc (renamed from src/core/lib/support/tls_pthread.c)0
-rw-r--r--src/core/lib/support/tmpfile_msys.cc (renamed from src/core/lib/support/tmpfile_msys.c)0
-rw-r--r--src/core/lib/support/tmpfile_posix.cc (renamed from src/core/lib/support/tmpfile_posix.c)0
-rw-r--r--src/core/lib/support/tmpfile_windows.cc (renamed from src/core/lib/support/tmpfile_windows.c)0
-rw-r--r--src/core/lib/support/vector.h (renamed from src/core/lib/debug/stats_data.c)23
-rw-r--r--src/core/lib/support/wrap_memcpy.cc (renamed from src/core/lib/support/wrap_memcpy.c)2
-rw-r--r--src/core/lib/surface/alarm.cc (renamed from src/core/lib/surface/alarm.c)44
-rw-r--r--src/core/lib/surface/alarm_internal.h8
-rw-r--r--src/core/lib/surface/api_trace.cc (renamed from src/core/lib/surface/api_trace.c)0
-rw-r--r--src/core/lib/surface/api_trace.h8
-rw-r--r--src/core/lib/surface/byte_buffer.cc (renamed from src/core/lib/surface/byte_buffer.c)6
-rw-r--r--src/core/lib/surface/byte_buffer_reader.cc (renamed from src/core/lib/surface/byte_buffer_reader.c)0
-rw-r--r--src/core/lib/surface/call.cc (renamed from src/core/lib/surface/call.c)373
-rw-r--r--src/core/lib/surface/call.h4
-rw-r--r--src/core/lib/surface/call_details.cc (renamed from src/core/lib/surface/call_details.c)0
-rw-r--r--src/core/lib/surface/call_log_batch.cc (renamed from src/core/lib/surface/call_log_batch.c)2
-rw-r--r--src/core/lib/surface/channel.cc (renamed from src/core/lib/surface/channel.c)44
-rw-r--r--src/core/lib/surface/channel.h10
-rw-r--r--src/core/lib/surface/channel_init.cc (renamed from src/core/lib/surface/channel_init.c)12
-rw-r--r--src/core/lib/surface/channel_ping.cc (renamed from src/core/lib/surface/channel_ping.c)4
-rw-r--r--src/core/lib/surface/channel_stack_type.cc (renamed from src/core/lib/surface/channel_stack_type.c)0
-rw-r--r--src/core/lib/surface/channel_stack_type.h8
-rw-r--r--src/core/lib/surface/completion_queue.cc (renamed from src/core/lib/surface/completion_queue.c)333
-rw-r--r--src/core/lib/surface/completion_queue.h3
-rw-r--r--src/core/lib/surface/completion_queue_factory.cc (renamed from src/core/lib/surface/completion_queue_factory.c)0
-rw-r--r--src/core/lib/surface/completion_queue_factory.h8
-rw-r--r--src/core/lib/surface/event_string.cc (renamed from src/core/lib/surface/event_string.c)0
-rw-r--r--src/core/lib/surface/event_string.h8
-rw-r--r--src/core/lib/surface/init.cc (renamed from src/core/lib/surface/init.c)12
-rw-r--r--src/core/lib/surface/init.h8
-rw-r--r--src/core/lib/surface/init_secure.cc (renamed from src/core/lib/surface/init_secure.c)6
-rw-r--r--src/core/lib/surface/init_unsecure.cc (renamed from src/core/lib/surface/init_unsecure.c)0
-rw-r--r--src/core/lib/surface/lame_client.cc21
-rw-r--r--src/core/lib/surface/lame_client.h8
-rw-r--r--src/core/lib/surface/metadata_array.cc (renamed from src/core/lib/surface/metadata_array.c)0
-rw-r--r--src/core/lib/surface/server.cc (renamed from src/core/lib/surface/server.c)169
-rw-r--r--src/core/lib/surface/server.h8
-rw-r--r--src/core/lib/surface/validate_metadata.cc (renamed from src/core/lib/surface/validate_metadata.c)1
-rw-r--r--src/core/lib/surface/validate_metadata.h8
-rw-r--r--src/core/lib/surface/version.cc (renamed from src/core/lib/surface/version.c)4
-rw-r--r--src/core/lib/transport/bdp_estimator.c110
-rw-r--r--src/core/lib/transport/bdp_estimator.cc84
-rw-r--r--src/core/lib/transport/bdp_estimator.h105
-rw-r--r--src/core/lib/transport/byte_stream.cc (renamed from src/core/lib/transport/byte_stream.c)0
-rw-r--r--src/core/lib/transport/byte_stream.h8
-rw-r--r--src/core/lib/transport/connectivity_state.cc (renamed from src/core/lib/transport/connectivity_state.c)6
-rw-r--r--src/core/lib/transport/connectivity_state.h8
-rw-r--r--src/core/lib/transport/error_utils.cc (renamed from src/core/lib/transport/error_utils.c)9
-rw-r--r--src/core/lib/transport/error_utils.h14
-rw-r--r--src/core/lib/transport/metadata.cc (renamed from src/core/lib/transport/metadata.c)19
-rw-r--r--src/core/lib/transport/metadata.h8
-rw-r--r--src/core/lib/transport/metadata_batch.cc (renamed from src/core/lib/transport/metadata_batch.c)38
-rw-r--r--src/core/lib/transport/metadata_batch.h11
-rw-r--r--src/core/lib/transport/pid_controller.c63
-rw-r--r--src/core/lib/transport/pid_controller.cc48
-rw-r--r--src/core/lib/transport/pid_controller.h116
-rw-r--r--src/core/lib/transport/service_config.cc (renamed from src/core/lib/transport/service_config.c)8
-rw-r--r--src/core/lib/transport/service_config.h8
-rw-r--r--src/core/lib/transport/static_metadata.c854
-rw-r--r--src/core/lib/transport/static_metadata.cc582
-rw-r--r--src/core/lib/transport/static_metadata.h7
-rw-r--r--src/core/lib/transport/status_conversion.cc (renamed from src/core/lib/transport/status_conversion.c)9
-rw-r--r--src/core/lib/transport/status_conversion.h14
-rw-r--r--src/core/lib/transport/timeout_encoding.cc (renamed from src/core/lib/transport/timeout_encoding.c)75
-rw-r--r--src/core/lib/transport/timeout_encoding.h13
-rw-r--r--src/core/lib/transport/transport.cc (renamed from src/core/lib/transport/transport.c)49
-rw-r--r--src/core/lib/transport/transport.h13
-rw-r--r--src/core/lib/transport/transport_impl.h11
-rw-r--r--src/core/lib/transport/transport_op_string.cc (renamed from src/core/lib/transport/transport_op_string.c)17
-rw-r--r--src/core/plugin_registry/grpc_cronet_plugin_registry.cc (renamed from src/core/plugin_registry/grpc_cronet_plugin_registry.c)28
-rw-r--r--src/core/plugin_registry/grpc_plugin_registry.cc (renamed from src/core/plugin_registry/grpc_plugin_registry.c)76
-rw-r--r--src/core/plugin_registry/grpc_unsecure_plugin_registry.cc (renamed from src/core/plugin_registry/grpc_unsecure_plugin_registry.c)72
-rw-r--r--src/core/tsi/fake_transport_security.cc (renamed from src/core/tsi/fake_transport_security.c)170
-rw-r--r--src/core/tsi/fake_transport_security.h5
-rw-r--r--src/core/tsi/gts_transport_security.cc (renamed from src/core/tsi/gts_transport_security.c)4
-rw-r--r--src/core/tsi/gts_transport_security.h8
-rw-r--r--src/core/tsi/ssl_transport_security.cc (renamed from src/core/tsi/ssl_transport_security.c)153
-rw-r--r--src/core/tsi/ssl_transport_security.h37
-rw-r--r--src/core/tsi/ssl_types.h8
-rw-r--r--src/core/tsi/transport_security.cc (renamed from src/core/tsi/transport_security.c)5
-rw-r--r--src/core/tsi/transport_security.h10
-rw-r--r--src/core/tsi/transport_security_adapter.cc (renamed from src/core/tsi/transport_security_adapter.c)14
-rw-r--r--src/core/tsi/transport_security_grpc.cc (renamed from src/core/tsi/transport_security_grpc.c)33
-rw-r--r--src/core/tsi/transport_security_grpc.h22
-rw-r--r--src/cpp/client/channel_cc.cc22
-rw-r--r--src/cpp/client/client_context.cc2
-rw-r--r--src/cpp/client/create_channel.cc3
-rw-r--r--src/cpp/client/generic_stub.cc33
-rw-r--r--src/cpp/client/secure_credentials.cc80
-rw-r--r--src/cpp/client/secure_credentials.h17
-rw-r--r--src/cpp/common/channel_arguments.cc4
-rw-r--r--src/cpp/common/channel_filter.cc4
-rw-r--r--src/cpp/common/channel_filter.h11
-rw-r--r--src/cpp/common/completion_queue_cc.cc27
-rw-r--r--src/cpp/common/version_cc.cc2
-rw-r--r--src/cpp/server/health/default_health_check_service.cc10
-rw-r--r--src/cpp/server/health/default_health_check_service.h2
-rw-r--r--src/cpp/server/server_cc.cc74
-rw-r--r--src/cpp/server/server_context.cc10
-rw-r--r--src/cpp/util/byte_buffer_cc.cc31
-rw-r--r--src/cpp/util/core_stats.cc90
-rw-r--r--src/cpp/util/core_stats.h (renamed from src/core/ext/census/trace_status.h)23
-rw-r--r--src/cpp/util/error_details.cc3
-rw-r--r--src/cpp/util/slice_cc.cc2
-rwxr-xr-xsrc/csharp/Grpc.Auth/Grpc.Auth.csproj1
-rwxr-xr-xsrc/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj1
-rwxr-xr-xsrc/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj3
-rw-r--r--src/csharp/Grpc.Core/AsyncClientStreamingCall.cs16
-rw-r--r--src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs16
-rw-r--r--src/csharp/Grpc.Core/AsyncServerStreamingCall.cs14
-rw-r--r--src/csharp/Grpc.Core/AsyncUnaryCall.cs15
-rwxr-xr-xsrc/csharp/Grpc.Core/Grpc.Core.csproj3
-rw-r--r--src/csharp/Grpc.Core/Internal/AsyncCall.cs6
-rw-r--r--src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs9
-rwxr-xr-xsrc/csharp/Grpc.Core/Version.csproj.include2
-rw-r--r--src/csharp/Grpc.Core/VersionInfo.cs4
-rwxr-xr-xsrc/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj1
-rwxr-xr-xsrc/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj1
-rwxr-xr-xsrc/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Examples/Grpc.Examples.csproj1
-rwxr-xr-xsrc/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj2
-rwxr-xr-xsrc/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj1
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj2
-rwxr-xr-xsrc/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj6
-rw-r--r--src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs59
-rw-r--r--src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj2
-rwxr-xr-xsrc/csharp/Grpc.Reflection/Grpc.Reflection.csproj1
-rwxr-xr-xsrc/csharp/build_packages_dotnetcli.bat2
-rwxr-xr-xsrc/csharp/build_packages_dotnetcli.sh4
-rw-r--r--src/csharp/doc/.gitignore2
-rw-r--r--src/csharp/doc/README.md9
-rw-r--r--src/csharp/doc/docfx.json37
-rw-r--r--src/csharp/doc/grpc_csharp_public.shfbproj83
-rw-r--r--src/csharp/doc/toc.yml3
-rw-r--r--src/csharp/ext/grpc_csharp_ext.c8
-rw-r--r--src/node/ext/call_credentials.cc10
-rw-r--r--src/node/ext/call_credentials.h8
-rw-r--r--src/node/health_check/package.json4
-rw-r--r--src/node/tools/package.json2
-rw-r--r--src/objective-c/!ProtoCompiler-gRPCPlugin.podspec2
-rw-r--r--src/objective-c/BoringSSL.podspec6
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.h7
-rw-r--r--src/objective-c/GRPCClient/GRPCCall.m3
-rw-r--r--src/objective-c/GRPCClient/private/GRPCChannel.h1
-rw-r--r--src/objective-c/GRPCClient/private/GRPCChannel.m12
-rw-r--r--src/objective-c/GRPCClient/private/GRPCHost.h1
-rw-r--r--src/objective-c/GRPCClient/private/GRPCHost.m6
-rw-r--r--src/objective-c/GRPCClient/private/GRPCWrappedCall.h3
-rw-r--r--src/objective-c/GRPCClient/private/GRPCWrappedCall.m10
-rw-r--r--src/objective-c/GRPCClient/private/version.h2
-rw-r--r--src/objective-c/tests/GRPCClientTests.m68
-rw-r--r--src/objective-c/tests/RemoteTestClient/RemoteTest.podspec2
-rw-r--r--src/objective-c/tests/Tests.xcodeproj/project.pbxproj83
-rwxr-xr-xsrc/objective-c/tests/run_tests.sh14
-rw-r--r--src/objective-c/tests/version.h (renamed from src/core/ext/census/census_init.c)20
-rw-r--r--src/php/composer.json2
-rw-r--r--src/php/ext/grpc/call_credentials.c46
-rwxr-xr-xsrc/php/ext/grpc/call_credentials.h9
-rw-r--r--src/php/ext/grpc/server.c4
-rw-r--r--src/php/ext/grpc/version.h2
-rw-r--r--src/php/lib/Grpc/BaseStub.php8
-rw-r--r--src/php/tests/qps/client.php56
-rw-r--r--src/php/tests/qps/composer.json3
-rw-r--r--src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Core/Stats.php33
-rw-r--r--src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Control.php215
-rw-r--r--src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/ProxyService.php21
-rw-r--r--src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Services.php53
-rw-r--r--src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Stats.php47
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Core/Bucket.php75
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Core/Histogram.php49
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Core/Metric.php102
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Core/Stats.php49
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/BenchmarkServiceClient.php68
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/BoolValue.php21
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ByteBufferParams.php24
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ChannelArg.php34
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClientArgs.php27
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClientConfig.php288
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClientStats.php147
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClientStatus.php15
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClientType.php12
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ClosedLoopParams.php4
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ComplexProtoParams.php4
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/CoreRequest.php2
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/CoreResponse.php19
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/EchoStatus.php26
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/HistogramData.php76
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/HistogramParams.php38
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/LoadParams.php27
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/Mark.php21
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/Payload.php38
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/PayloadConfig.php38
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/PayloadType.php8
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/PoissonParams.php21
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ProxyClientServiceClient.php34
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ProxyStat.php13
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ReconnectInfo.php32
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ReconnectParams.php15
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ReportQpsScenarioServiceClient.php50
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/RequestResultCount.php24
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ResponseParameters.php57
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/RpcType.php18
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/Scenario.php144
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResult.php193
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResultSummary.php352
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/Scenarios.php21
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/SecurityParams.php52
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ServerArgs.php27
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ServerConfig.php211
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ServerStats.php151
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ServerStatus.php49
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/ServerType.php12
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/SimpleProtoParams.php24
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/SimpleRequest.php148
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/SimpleResponse.php57
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallRequest.php42
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallResponse.php21
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallRequest.php82
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallResponse.php23
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/Void.php2
-rw-r--r--src/php/tests/qps/generated_code/Grpc/Testing/WorkerServiceClient.php40
-rw-r--r--src/php/tests/qps/histogram.php93
-rw-r--r--src/proto/grpc/core/BUILD (renamed from src/python/grpcio_tests/tests/unit/_sanity/__init__.py)13
-rw-r--r--src/proto/grpc/core/stats.proto38
-rw-r--r--src/proto/grpc/testing/BUILD3
-rw-r--r--src/proto/grpc/testing/echo_messages.proto1
-rw-r--r--src/proto/grpc/testing/proxy-service.proto2
-rw-r--r--src/proto/grpc/testing/stats.proto8
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi11
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi10
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi50
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi14
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi13
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi134
-rw-r--r--src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi2
-rw-r--r--src/python/grpcio/grpc/_grpcio_metadata.py2
-rw-r--r--src/python/grpcio/grpc_core_dependencies.py591
-rw-r--r--src/python/grpcio/grpc_version.py2
-rw-r--r--src/python/grpcio/support.py10
-rw-r--r--src/python/grpcio_health_checking/grpc_version.py2
-rw-r--r--src/python/grpcio_health_checking/setup.py2
-rw-r--r--src/python/grpcio_reflection/grpc_version.py2
-rw-r--r--src/python/grpcio_reflection/setup.py2
-rw-r--r--src/python/grpcio_testing/grpc_testing/__init__.py2
-rw-r--r--src/python/grpcio_testing/grpc_version.py4
-rw-r--r--src/python/grpcio_tests/commands.py49
-rw-r--r--src/python/grpcio_tests/grpc_version.py2
-rw-r--r--src/python/grpcio_tests/tests/_sanity/__init__.py (renamed from src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py)0
-rw-r--r--src/python/grpcio_tests/tests/_sanity/_sanity_test.py (renamed from src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py)17
-rw-r--r--src/python/grpcio_tests/tests/http2/negative_http2_client.py4
-rw-r--r--src/python/grpcio_tests/tests/interop/client.py6
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py23
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py519
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py341
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/sub/messages.proto (renamed from src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto)0
-rw-r--r--src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py13
-rw-r--r--src/python/grpcio_tests/tests/qps/benchmark_client.py4
-rw-r--r--src/python/grpcio_tests/tests/qps/benchmark_server.py6
-rw-r--r--src/python/grpcio_tests/tests/stress/client.py4
-rw-r--r--src/python/grpcio_tests/tests/stress/metrics_server.py3
-rw-r--r--src/python/grpcio_tests/tests/tests.json16
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_common.py118
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py131
-rw-r--r--src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py126
-rw-r--r--src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py2
-rwxr-xr-xsrc/ruby/end2end/killed_client_thread_client.rb2
-rwxr-xr-xsrc/ruby/end2end/killed_client_thread_driver.rb8
-rwxr-xr-xsrc/ruby/end2end/load_grpc_with_gc_stress_driver.rb32
-rw-r--r--src/ruby/ext/grpc/extconf.rb4
-rw-r--r--src/ruby/ext/grpc/rb_call.c2
-rw-r--r--src/ruby/ext/grpc/rb_call_credentials.c8
-rw-r--r--src/ruby/ext/grpc/rb_grpc.c2
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.c116
-rw-r--r--src/ruby/ext/grpc/rb_grpc_imports.generated.h187
-rw-r--r--src/ruby/lib/grpc.rb1
-rw-r--r--src/ruby/lib/grpc/generic/active_call.rb43
-rw-r--r--src/ruby/lib/grpc/generic/bidi_call.rb29
-rw-r--r--src/ruby/lib/grpc/generic/client_stub.rb133
-rw-r--r--src/ruby/lib/grpc/generic/interceptor_registry.rb53
-rw-r--r--src/ruby/lib/grpc/generic/interceptors.rb186
-rw-r--r--src/ruby/lib/grpc/generic/rpc_desc.rb80
-rw-r--r--src/ruby/lib/grpc/generic/rpc_server.rb18
-rw-r--r--src/ruby/lib/grpc/google_rpc_status_utils.rb35
-rw-r--r--src/ruby/lib/grpc/version.rb2
-rw-r--r--src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb1
-rw-r--r--src/ruby/qps/histogram.rb15
-rwxr-xr-xsrc/ruby/qps/proxy-worker.rb58
-rw-r--r--src/ruby/qps/src/proto/grpc/testing/proxy-service_pb.rb1
-rw-r--r--src/ruby/qps/src/proto/grpc/testing/proxy-service_services_pb.rb1
-rw-r--r--src/ruby/spec/channel_connection_spec.rb35
-rw-r--r--src/ruby/spec/client_server_spec.rb270
-rw-r--r--src/ruby/spec/generic/active_call_spec.rb76
-rw-r--r--src/ruby/spec/generic/client_interceptors_spec.rb153
-rw-r--r--src/ruby/spec/generic/interceptor_registry_spec.rb65
-rw-r--r--src/ruby/spec/generic/rpc_server_spec.rb35
-rw-r--r--src/ruby/spec/generic/server_interceptors_spec.rb218
-rw-r--r--src/ruby/spec/google_rpc_status_utils_spec.rb292
-rw-r--r--src/ruby/spec/spec_helper.rb4
-rw-r--r--src/ruby/spec/support/helpers.rb73
-rw-r--r--src/ruby/spec/support/services.rb147
-rw-r--r--src/ruby/tools/version.rb2
782 files changed, 23702 insertions, 22518 deletions
diff --git a/src/c-ares/CMakeLists.txt b/src/c-ares/CMakeLists.txt
deleted file mode 100644
index 15bd5a2add..0000000000
--- a/src/c-ares/CMakeLists.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-# c-ares cmake file for gRPC
-#
-# This is currently very experimental, and unsupported.
-#
-# Copyright 2016 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-string(TOLOWER ${CMAKE_SYSTEM_NAME} cares_system_name)
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/cares)
-
-if(${cares_system_name} MATCHES windows)
- add_definitions(-DCARES_STATICLIB=1)
- add_definitions(-DWIN32_LEAN_AND_MEAN=1)
-else()
- include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/config_${cares_system_name})
- add_definitions(-DHAVE_CONFIG_H=1)
- add_definitions(-D_GNU_SOURCE=1)
-endif()
-
-file(GLOB lib_sources ../../third_party/cares/cares/*.c)
-add_library(cares ${lib_sources})
diff --git a/src/c-ares/gen_build_yaml.py b/src/c-ares/gen_build_yaml.py
index 6f8b7485ac..4600d8d224 100755
--- a/src/c-ares/gen_build_yaml.py
+++ b/src/c-ares/gen_build_yaml.py
@@ -29,10 +29,14 @@ try:
subprocess.call("third_party/cares/cares/configure", shell=True)
def config_platform(x):
- if 'linux' in sys.platform:
- return 'src/cares/cares/config_linux/ares_config.h'
if 'darwin' in sys.platform:
return 'src/cares/cares/config_darwin/ares_config.h'
+ if 'freebsd' in sys.platform:
+ return 'src/cares/cares/config_freebsd/ares_config.h'
+ if 'linux' in sys.platform:
+ return 'src/cares/cares/config_linux/ares_config.h'
+ if 'openbsd' in sys.platform:
+ return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
@@ -124,8 +128,10 @@ try:
"third_party/cares/cares/config-win32.h",
"third_party/cares/cares/setup_once.h",
"third_party/cares/ares_build.h",
+ "third_party/cares/config_darwin/ares_config.h",
+ "third_party/cares/config_freebsd/ares_config.h",
"third_party/cares/config_linux/ares_config.h",
- "third_party/cares/config_darwin/ares_config.h"
+ "third_party/cares/config_openbsd/ares_config.h"
],
}]
except:
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index b09bf99677..253280bd24 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -140,7 +140,6 @@ grpc::string GetHeaderIncludes(grpc_generator::File *file,
printer->Print(vars, "namespace grpc {\n");
printer->Print(vars, "class CompletionQueue;\n");
printer->Print(vars, "class Channel;\n");
- printer->Print(vars, "class RpcService;\n");
printer->Print(vars, "class ServerCompletionQueue;\n");
printer->Print(vars, "class ServerContext;\n");
printer->Print(vars, "} // namespace grpc\n\n");
@@ -165,25 +164,37 @@ void PrintHeaderClientMethodInterfaces(
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
+ struct {
+ grpc::string prefix;
+ grpc::string method_params; // extra arguments to method
+ grpc::string raw_args; // extra arguments to raw version of method
+ } async_prefixes[] = {{"Async", ", void* tag", ", tag"},
+ {"PrepareAsync", "", ""}};
+
if (is_public) {
if (method->NoStreaming()) {
printer->Print(
*vars,
"virtual ::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) = 0;\n");
- printer->Print(*vars,
- "std::unique_ptr< "
- "::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
- "Async$Method$(::grpc::ClientContext* context, "
- "const $Request$& request, "
- "::grpc::CompletionQueue* cq) {\n");
- printer->Indent();
- printer->Print(*vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
- "Async$Method$Raw(context, request, cq));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< "
+ "::grpc::ClientAsyncResponseReaderInterface< $Response$>> "
+ "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "const $Request$& request, "
+ "::grpc::CompletionQueue* cq) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncResponseReaderInterface< $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, request, cq));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -197,19 +208,26 @@ void PrintHeaderClientMethodInterfaces(
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(
- *vars,
- "std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
- " Async$Method$(::grpc::ClientContext* context, $Response$* "
- "response, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(*vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncWriterInterface< $Request$>>("
- "Async$Method$Raw(context, response, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< ::grpc::ClientAsyncWriterInterface< $Request$>>"
+ " $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "$Response$* "
+ "response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(*vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncWriterInterface< $Request$>>("
+ "$AsyncPrefix$$Method$Raw(context, response, "
+ "cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -223,19 +241,25 @@ void PrintHeaderClientMethodInterfaces(
"($Method$Raw(context, request));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(
- *vars,
- "std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
- "Async$Method$("
- "::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(*vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncReaderInterface< $Response$>>("
- "Async$Method$Raw(context, request, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< ::grpc::ClientAsyncReaderInterface< $Response$>> "
+ "$AsyncPrefix$$Method$("
+ "::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncReaderInterface< $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"std::unique_ptr< ::grpc::ClientReaderWriterInterface< "
@@ -249,61 +273,84 @@ void PrintHeaderClientMethodInterfaces(
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(
- *vars,
- "std::unique_ptr< "
- "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
- "Async$Method$(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(
- *vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
- "Async$Method$Raw(context, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< "
+ "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>> "
+ "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncReaderWriterInterface< $Request$, $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
}
} else {
if (method->NoStreaming()) {
- printer->Print(
- *vars,
- "virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
- "Async$Method$Raw(::grpc::ClientContext* context, "
- "const $Request$& request, "
- "::grpc::CompletionQueue* cq) = 0;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ printer->Print(
+ *vars,
+ "virtual ::grpc::ClientAsyncResponseReaderInterface< $Response$>* "
+ "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+ "const $Request$& request, "
+ "::grpc::CompletionQueue* cq) = 0;\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
"virtual ::grpc::ClientWriterInterface< $Request$>*"
" $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) = 0;\n");
- printer->Print(*vars,
- "virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
- " Async$Method$Raw(::grpc::ClientContext* context, "
- "$Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ printer->Print(
+ *vars,
+ "virtual ::grpc::ClientAsyncWriterInterface< $Request$>*"
+ " $AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+ "$Response$* response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
- "virtual ::grpc::ClientReaderInterface< $Response$>* $Method$Raw("
+ "virtual ::grpc::ClientReaderInterface< $Response$>* "
+ "$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) = 0;\n");
- printer->Print(
- *vars,
- "virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
- "Async$Method$Raw("
- "::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ printer->Print(
+ *vars,
+ "virtual ::grpc::ClientAsyncReaderInterface< $Response$>* "
+ "$AsyncPrefix$$Method$Raw("
+ "::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"virtual ::grpc::ClientReaderWriterInterface< $Request$, "
"$Response$>* "
"$Method$Raw(::grpc::ClientContext* context) = 0;\n");
- printer->Print(*vars,
- "virtual ::grpc::ClientAsyncReaderWriterInterface< "
- "$Request$, $Response$>* "
- "Async$Method$Raw(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) = 0;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ printer->Print(
+ *vars,
+ "virtual ::grpc::ClientAsyncReaderWriterInterface< "
+ "$Request$, $Response$>* "
+ "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) = 0;\n");
+ }
}
}
}
@@ -315,25 +362,35 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
+ struct {
+ grpc::string prefix;
+ grpc::string method_params; // extra arguments to method
+ grpc::string raw_args; // extra arguments to raw version of method
+ } async_prefixes[] = {{"Async", ", void* tag", ", tag"},
+ {"PrepareAsync", "", ""}};
+
if (is_public) {
if (method->NoStreaming()) {
printer->Print(
*vars,
"::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) override;\n");
- printer->Print(
- *vars,
- "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
- "Async$Method$(::grpc::ClientContext* context, "
- "const $Request$& request, "
- "::grpc::CompletionQueue* cq) {\n");
- printer->Indent();
- printer->Print(*vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncResponseReader< $Response$>>("
- "Async$Method$Raw(context, request, cq));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
+ "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "const $Request$& request, "
+ "::grpc::CompletionQueue* cq) {\n");
+ printer->Indent();
+ printer->Print(*vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncResponseReader< $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, request, cq));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -346,18 +403,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"($Method$Raw(context, response));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(*vars,
- "std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
- " Async$Method$(::grpc::ClientContext* context, "
- "$Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(
- *vars,
- "return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
- "Async$Method$Raw(context, response, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(*vars,
+ "std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>"
+ " $AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "$Response$* response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< ::grpc::ClientAsyncWriter< $Request$>>("
+ "$AsyncPrefix$$Method$Raw(context, response, "
+ "cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
@@ -371,19 +434,24 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"($Method$Raw(context, request));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(
- *vars,
- "std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
- "Async$Method$("
- "::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(
- *vars,
- "return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
- "Async$Method$Raw(context, request, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>> "
+ "$AsyncPrefix$$Method$("
+ "::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< ::grpc::ClientAsyncReader< $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, request, cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
@@ -396,53 +464,80 @@ void PrintHeaderClientMethod(grpc_generator::Printer *printer,
"$Method$Raw(context));\n");
printer->Outdent();
printer->Print("}\n");
- printer->Print(*vars,
- "std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
- "$Request$, $Response$>> "
- "Async$Method$(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Indent();
- printer->Print(*vars,
- "return std::unique_ptr< "
- "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
- "Async$Method$Raw(context, cq, tag));\n");
- printer->Outdent();
- printer->Print("}\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(*vars,
+ "std::unique_ptr< ::grpc::ClientAsyncReaderWriter< "
+ "$Request$, $Response$>> "
+ "$AsyncPrefix$$Method$(::grpc::ClientContext* context, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Indent();
+ printer->Print(
+ *vars,
+ "return std::unique_ptr< "
+ "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>>("
+ "$AsyncPrefix$$Method$Raw(context, cq$AsyncRawArgs$));\n");
+ printer->Outdent();
+ printer->Print("}\n");
+ }
}
} else {
if (method->NoStreaming()) {
- printer->Print(*vars,
- "::grpc::ClientAsyncResponseReader< $Response$>* "
- "Async$Method$Raw(::grpc::ClientContext* context, "
- "const $Request$& request, "
- "::grpc::CompletionQueue* cq) override;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ printer->Print(
+ *vars,
+ "::grpc::ClientAsyncResponseReader< $Response$>* "
+ "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+ "const $Request$& request, "
+ "::grpc::CompletionQueue* cq) override;\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) "
"override;\n");
- printer->Print(*vars,
- "::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
- "::grpc::ClientContext* context, $Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag) override;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "::grpc::ClientAsyncWriter< $Request$>* $AsyncPrefix$$Method$Raw("
+ "::grpc::ClientContext* context, $Response$* response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientReader< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request)"
" override;\n");
- printer->Print(
- *vars,
- "::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
- "::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) override;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "::grpc::ClientAsyncReader< $Response$>* $AsyncPrefix$$Method$Raw("
+ "::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(*vars,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$Method$Raw(::grpc::ClientContext* context) override;\n");
- printer->Print(*vars,
- "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
- "Async$Method$Raw(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) override;\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncRawArgs"] = async_prefix.raw_args;
+ printer->Print(
+ *vars,
+ "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
+ "$AsyncPrefix$$Method$Raw(::grpc::ClientContext* context, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) override;\n");
+ }
}
}
}
@@ -451,7 +546,8 @@ void PrintHeaderClientMethodData(grpc_generator::Printer *printer,
const grpc_generator::Method *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["Method"] = method->name();
- printer->Print(*vars, "const ::grpc::RpcMethod rpcmethod_$Method$_;\n");
+ printer->Print(*vars,
+ "const ::grpc::internal::RpcMethod rpcmethod_$Method$_;\n");
}
void PrintHeaderServerMethodSync(grpc_generator::Printer *printer,
@@ -623,7 +719,7 @@ void PrintHeaderServerMethodStreamedUnary(
printer->Print(*vars,
"WithStreamedUnaryMethod_$Method$() {\n"
" ::grpc::Service::MarkMethodStreamed($Idx$,\n"
- " new ::grpc::StreamedUnaryHandler< $Request$, "
+ " new ::grpc::internal::StreamedUnaryHandler< $Request$, "
"$Response$>(std::bind"
"(&WithStreamedUnaryMethod_$Method$<BaseClass>::"
"Streamed$Method$, this, std::placeholders::_1, "
@@ -671,15 +767,16 @@ void PrintHeaderServerMethodSplitStreaming(
"{}\n");
printer->Print(" public:\n");
printer->Indent();
- printer->Print(*vars,
- "WithSplitStreamingMethod_$Method$() {\n"
- " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
- " new ::grpc::SplitServerStreamingHandler< $Request$, "
- "$Response$>(std::bind"
- "(&WithSplitStreamingMethod_$Method$<BaseClass>::"
- "Streamed$Method$, this, std::placeholders::_1, "
- "std::placeholders::_2)));\n"
- "}\n");
+ printer->Print(
+ *vars,
+ "WithSplitStreamingMethod_$Method$() {\n"
+ " ::grpc::Service::MarkMethodStreamed($Idx$,\n"
+ " new ::grpc::internal::SplitServerStreamingHandler< $Request$, "
+ "$Response$>(std::bind"
+ "(&WithSplitStreamingMethod_$Method$<BaseClass>::"
+ "Streamed$Method$, this, std::placeholders::_1, "
+ "std::placeholders::_2)));\n"
+ "}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
@@ -819,7 +916,8 @@ void PrintHeaderService(grpc_generator::Printer *printer,
" {\n public:\n");
printer->Indent();
printer->Print(
- "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
+ "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& "
+ "channel);\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderClientMethod(printer, service->method(i).get(), vars, true);
}
@@ -1077,99 +1175,133 @@ void PrintSourceClientMethod(grpc_generator::Printer *printer,
(*vars)["Method"] = method->name();
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
+ struct {
+ grpc::string prefix;
+ grpc::string start; // bool literal expressed as string
+ grpc::string method_params; // extra arguments to method
+ grpc::string create_args; // extra arguments to creator
+ } async_prefixes[] = {{"Async", "true", ", void* tag", ", tag"},
+ {"PrepareAsync", "false", "", ", nullptr"}};
if (method->NoStreaming()) {
printer->Print(*vars,
"::grpc::Status $ns$$Service$::Stub::$Method$("
"::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) {\n");
printer->Print(*vars,
- " return ::grpc::BlockingUnaryCall(channel_.get(), "
- "rpcmethod_$Method$_, "
- "context, request, response);\n"
- "}\n\n");
- printer->Print(
- *vars,
- "::grpc::ClientAsyncResponseReader< $Response$>* "
- "$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
- "const $Request$& request, "
- "::grpc::CompletionQueue* cq) {\n");
- printer->Print(*vars,
- " return "
- "::grpc::ClientAsyncResponseReader< $Response$>::Create("
- "channel_.get(), cq, "
- "rpcmethod_$Method$_, "
- "context, request);\n"
- "}\n\n");
+ " return ::grpc::internal::BlockingUnaryCall"
+ "(channel_.get(), rpcmethod_$Method$_, "
+ "context, request, response);\n}\n\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncStart"] = async_prefix.start;
+ printer->Print(*vars,
+ "::grpc::ClientAsyncResponseReader< $Response$>* "
+ "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
+ "ClientContext* context, "
+ "const $Request$& request, "
+ "::grpc::CompletionQueue* cq) {\n");
+ printer->Print(
+ *vars,
+ " return "
+ "::grpc::internal::ClientAsyncResponseReaderFactory< $Response$>"
+ "::Create(channel_.get(), cq, "
+ "rpcmethod_$Method$_, "
+ "context, request, $AsyncStart$);\n"
+ "}\n\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, $Response$* response) {\n");
- printer->Print(*vars,
- " return new ::grpc::ClientWriter< $Request$>("
- "channel_.get(), "
- "rpcmethod_$Method$_, "
- "context, response);\n"
- "}\n\n");
- printer->Print(*vars,
- "::grpc::ClientAsyncWriter< $Request$>* "
- "$ns$$Service$::Stub::Async$Method$Raw("
- "::grpc::ClientContext* context, $Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Print(*vars,
- " return ::grpc::ClientAsyncWriter< $Request$>::Create("
- "channel_.get(), cq, "
- "rpcmethod_$Method$_, "
- "context, response, tag);\n"
- "}\n\n");
+ printer->Print(
+ *vars,
+ " return ::grpc::internal::ClientWriterFactory< $Request$>::Create("
+ "channel_.get(), "
+ "rpcmethod_$Method$_, "
+ "context, response);\n"
+ "}\n\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncStart"] = async_prefix.start;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+ printer->Print(*vars,
+ "::grpc::ClientAsyncWriter< $Request$>* "
+ "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
+ "::grpc::ClientContext* context, $Response$* response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Print(
+ *vars,
+ " return ::grpc::internal::ClientAsyncWriterFactory< $Request$>"
+ "::Create(channel_.get(), cq, "
+ "rpcmethod_$Method$_, "
+ "context, response, $AsyncStart$$AsyncCreateArgs$);\n"
+ "}\n\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"::grpc::ClientReader< $Response$>* "
"$ns$$Service$::Stub::$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request) {\n");
- printer->Print(*vars,
- " return new ::grpc::ClientReader< $Response$>("
- "channel_.get(), "
- "rpcmethod_$Method$_, "
- "context, request);\n"
- "}\n\n");
- printer->Print(*vars,
- "::grpc::ClientAsyncReader< $Response$>* "
- "$ns$$Service$::Stub::Async$Method$Raw("
- "::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Print(*vars,
- " return ::grpc::ClientAsyncReader< $Response$>::Create("
- "channel_.get(), cq, "
- "rpcmethod_$Method$_, "
- "context, request, tag);\n"
- "}\n\n");
+ printer->Print(
+ *vars,
+ " return ::grpc::internal::ClientReaderFactory< $Response$>::Create("
+ "channel_.get(), "
+ "rpcmethod_$Method$_, "
+ "context, request);\n"
+ "}\n\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncStart"] = async_prefix.start;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+ printer->Print(
+ *vars,
+ "::grpc::ClientAsyncReader< $Response$>* "
+ "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw("
+ "::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Print(
+ *vars,
+ " return ::grpc::internal::ClientAsyncReaderFactory< $Response$>"
+ "::Create(channel_.get(), cq, "
+ "rpcmethod_$Method$_, "
+ "context, request, $AsyncStart$$AsyncCreateArgs$);\n"
+ "}\n\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$ns$$Service$::Stub::$Method$Raw(::grpc::ClientContext* context) {\n");
printer->Print(*vars,
- " return new ::grpc::ClientReaderWriter< "
- "$Request$, $Response$>("
+ " return ::grpc::internal::ClientReaderWriterFactory< "
+ "$Request$, $Response$>::Create("
"channel_.get(), "
"rpcmethod_$Method$_, "
"context);\n"
"}\n\n");
- printer->Print(
- *vars,
- "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
- "$ns$$Service$::Stub::Async$Method$Raw(::grpc::ClientContext* context, "
- "::grpc::CompletionQueue* cq, void* tag) {\n");
- printer->Print(
- *vars,
- " return "
- "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>::Create("
- "channel_.get(), cq, "
- "rpcmethod_$Method$_, "
- "context, tag);\n"
- "}\n\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncStart"] = async_prefix.start;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["AsyncCreateArgs"] = async_prefix.create_args;
+ printer->Print(*vars,
+ "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
+ "$ns$$Service$::Stub::$AsyncPrefix$$Method$Raw(::grpc::"
+ "ClientContext* context, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$) {\n");
+ printer->Print(*vars,
+ " return "
+ "::grpc::internal::ClientAsyncReaderWriterFactory< "
+ "$Request$, $Response$>::Create("
+ "channel_.get(), cq, "
+ "rpcmethod_$Method$_, "
+ "context, $AsyncStart$$AsyncCreateArgs$);\n"
+ "}\n\n");
+ }
}
}
@@ -1279,7 +1411,7 @@ void PrintSourceService(grpc_generator::Printer *printer,
printer->Print(*vars,
", rpcmethod_$Method$_("
"$prefix$$Service$_method_names[$Idx$], "
- "::grpc::RpcMethod::$StreamingType$, "
+ "::grpc::internal::RpcMethod::$StreamingType$, "
"channel"
")\n");
}
@@ -1302,38 +1434,38 @@ void PrintSourceService(grpc_generator::Printer *printer,
if (method->NoStreaming()) {
printer->Print(
*vars,
- "AddMethod(new ::grpc::RpcServiceMethod(\n"
+ "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
- " ::grpc::RpcMethod::NORMAL_RPC,\n"
- " new ::grpc::RpcMethodHandler< $ns$$Service$::Service, "
+ " ::grpc::internal::RpcMethod::NORMAL_RPC,\n"
+ " new ::grpc::internal::RpcMethodHandler< $ns$$Service$::Service, "
"$Request$, "
"$Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ClientOnlyStreaming(method.get())) {
printer->Print(
*vars,
- "AddMethod(new ::grpc::RpcServiceMethod(\n"
+ "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
- " ::grpc::RpcMethod::CLIENT_STREAMING,\n"
- " new ::grpc::ClientStreamingHandler< "
+ " ::grpc::internal::RpcMethod::CLIENT_STREAMING,\n"
+ " new ::grpc::internal::ClientStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print(
*vars,
- "AddMethod(new ::grpc::RpcServiceMethod(\n"
+ "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
- " ::grpc::RpcMethod::SERVER_STREAMING,\n"
- " new ::grpc::ServerStreamingHandler< "
+ " ::grpc::internal::RpcMethod::SERVER_STREAMING,\n"
+ " new ::grpc::internal::ServerStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
- "AddMethod(new ::grpc::RpcServiceMethod(\n"
+ "AddMethod(new ::grpc::internal::RpcServiceMethod(\n"
" $prefix$$Service$_method_names[$Idx$],\n"
- " ::grpc::RpcMethod::BIDI_STREAMING,\n"
- " new ::grpc::BidiStreamingHandler< "
+ " ::grpc::internal::RpcMethod::BIDI_STREAMING,\n"
+ " new ::grpc::internal::BidiStreamingHandler< "
"$ns$$Service$::Service, $Request$, $Response$>(\n"
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
}
@@ -1460,50 +1592,79 @@ void PrintMockClientMethods(grpc_generator::Printer *printer,
(*vars)["Request"] = method->input_type_name();
(*vars)["Response"] = method->output_type_name();
+ struct {
+ grpc::string prefix;
+ grpc::string method_params; // extra arguments to method
+ int extra_method_param_count;
+ } async_prefixes[] = {{"Async", ", void* tag", 1}, {"PrepareAsync", "", 0}};
+
if (method->NoStreaming()) {
printer->Print(
*vars,
"MOCK_METHOD3($Method$, ::grpc::Status(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response));\n");
- printer->Print(*vars,
- "MOCK_METHOD3(Async$Method$Raw, "
- "::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
- "(::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq));\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ printer->Print(
+ *vars,
+ "MOCK_METHOD3($AsyncPrefix$$Method$Raw, "
+ "::grpc::ClientAsyncResponseReaderInterface< $Response$>*"
+ "(::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq));\n");
+ }
} else if (ClientOnlyStreaming(method)) {
printer->Print(
*vars,
"MOCK_METHOD2($Method$Raw, "
"::grpc::ClientWriterInterface< $Request$>*"
"(::grpc::ClientContext* context, $Response$* response));\n");
- printer->Print(*vars,
- "MOCK_METHOD4(Async$Method$Raw, "
- "::grpc::ClientAsyncWriterInterface< $Request$>*"
- "(::grpc::ClientContext* context, $Response$* response, "
- "::grpc::CompletionQueue* cq, void* tag));\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["MockArgs"] =
+ std::to_string(3 + async_prefix.extra_method_param_count);
+ printer->Print(*vars,
+ "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+ "::grpc::ClientAsyncWriterInterface< $Request$>*"
+ "(::grpc::ClientContext* context, $Response$* response, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
+ }
} else if (ServerOnlyStreaming(method)) {
printer->Print(
*vars,
"MOCK_METHOD2($Method$Raw, "
"::grpc::ClientReaderInterface< $Response$>*"
"(::grpc::ClientContext* context, const $Request$& request));\n");
- printer->Print(*vars,
- "MOCK_METHOD4(Async$Method$Raw, "
- "::grpc::ClientAsyncReaderInterface< $Response$>*"
- "(::grpc::ClientContext* context, const $Request$& request, "
- "::grpc::CompletionQueue* cq, void* tag));\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["MockArgs"] =
+ std::to_string(3 + async_prefix.extra_method_param_count);
+ printer->Print(
+ *vars,
+ "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+ "::grpc::ClientAsyncReaderInterface< $Response$>*"
+ "(::grpc::ClientContext* context, const $Request$& request, "
+ "::grpc::CompletionQueue* cq$AsyncMethodParams$));\n");
+ }
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
"MOCK_METHOD1($Method$Raw, "
"::grpc::ClientReaderWriterInterface< $Request$, $Response$>*"
"(::grpc::ClientContext* context));\n");
- printer->Print(
- *vars,
- "MOCK_METHOD3(Async$Method$Raw, "
- "::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
- "(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, "
- "void* tag));\n");
+ for (auto async_prefix : async_prefixes) {
+ (*vars)["AsyncPrefix"] = async_prefix.prefix;
+ (*vars)["AsyncMethodParams"] = async_prefix.method_params;
+ (*vars)["MockArgs"] =
+ std::to_string(2 + async_prefix.extra_method_param_count);
+ printer->Print(
+ *vars,
+ "MOCK_METHOD$MockArgs$($AsyncPrefix$$Method$Raw, "
+ "::grpc::ClientAsyncReaderWriterInterface<$Request$, $Response$>*"
+ "(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq"
+ "$AsyncMethodParams$));\n");
+ }
}
}
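
The net effect of the cpp_generator.cc changes above is that every generated client stub now exposes a PrepareAsync* companion for each Async* method, and both are routed through the new ::grpc::internal factory classes: the Async* form passes start == true so the RPC is issued as soon as the reader/writer object is created, while PrepareAsync* passes start == false and leaves the call unstarted. A minimal caller-side sketch of the difference, using a hypothetical unary Echo method on a hypothetical Echo service (names invented for illustration; the method shapes follow the templates in this diff):

// Sketch only: Echo, EchoRequest and EchoResponse are hypothetical generated
// types; error handling and completion-queue draining are omitted.
#include <grpc++/grpc++.h>

void IssueCalls(Echo::Stub* stub, grpc::CompletionQueue* cq) {
  EchoRequest req;
  EchoResponse resp1, resp2;
  grpc::Status status1, status2;

  // Async*: created via ClientAsyncResponseReaderFactory with start == true,
  // so the request goes out as soon as the reader exists.
  grpc::ClientContext ctx1;
  auto started = stub->AsyncEcho(&ctx1, req, cq);
  started->Finish(&resp1, &status1, (void*)1);

  // PrepareAsync*: created with start == false; nothing is sent until the
  // caller explicitly invokes StartCall(), which lets the call object be
  // allocated ahead of time and fired later.
  grpc::ClientContext ctx2;
  auto prepared = stub->PrepareAsyncEcho(&ctx2, req, cq);
  prepared->StartCall();
  prepared->Finish(&resp2, &status2, (void*)2);

  // Both completions are then reaped from *cq with cq->Next()/AsyncNext().
}

The synchronous $Method$ entry points and the server-side handlers are unchanged apart from moving to the ::grpc::internal handler and factory names.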
diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc
index c05d15375b..33b5fedd5c 100644
--- a/src/compiler/objective_c_generator.cc
+++ b/src/compiler/objective_c_generator.cc
@@ -17,6 +17,7 @@
*/
#include <map>
+#include <set>
#include <sstream>
#include "src/compiler/config.h"
@@ -29,7 +30,9 @@ using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
+using ::grpc::protobuf::FileDescriptor;
using ::std::map;
+using ::std::set;
namespace grpc_objective_c_generator {
namespace {
@@ -190,6 +193,24 @@ void PrintMethodImplementations(Printer *printer,
} // namespace
+::grpc::string GetAllMessageClasses(const FileDescriptor *file) {
+ ::grpc::string output;
+ set< ::grpc::string> classes;
+ for (int i = 0; i < file->service_count(); i++) {
+ const auto service = file->service(i);
+ for (int i = 0; i < service->method_count(); i++) {
+ const auto method = service->method(i);
+ classes.insert(ClassName(method->input_type()));
+ classes.insert(ClassName(method->output_type()));
+ }
+ }
+ for (auto one_class : classes) {
+ output += " @class " + one_class + ";\n";
+ }
+
+ return output;
+}
+
::grpc::string GetHeader(const ServiceDescriptor *service) {
::grpc::string output;
{
diff --git a/src/compiler/objective_c_generator.h b/src/compiler/objective_c_generator.h
index edbee7ff52..e912a52415 100644
--- a/src/compiler/objective_c_generator.h
+++ b/src/compiler/objective_c_generator.h
@@ -24,8 +24,12 @@
namespace grpc_objective_c_generator {
using ::grpc::protobuf::ServiceDescriptor;
+using ::grpc::protobuf::FileDescriptor;
using ::grpc::string;
+// Returns forward declaration of classes in the generated header file.
+string GetAllMessageClasses(const FileDescriptor *file);
+
// Returns the content to be included in the "global_scope" insertion point of
// the generated header file.
string GetHeader(const ServiceDescriptor *service);
diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc
index 96a3375e96..e751d0562e 100644
--- a/src/compiler/objective_c_plugin.cc
+++ b/src/compiler/objective_c_plugin.cc
@@ -58,9 +58,10 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
"#import <RxLibrary/GRXWriteable.h>\n"
"#import <RxLibrary/GRXWriter.h>\n";
- // TODO(jcanizales): Instead forward-declare the input and output types
- // and import the files in the .pbrpc.m
::grpc::string proto_imports;
+ proto_imports += "#if GPB_GRPC_FORWARD_DECLARE_MESSAGE_PROTO\n" +
+ grpc_objective_c_generator::GetAllMessageClasses(file) +
+ "#else\n";
for (int i = 0; i < file->dependency_count(); i++) {
::grpc::string header =
grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
@@ -70,19 +71,20 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
grpc_generator::StripPrefix(&base_name, "google/protobuf/");
// create the import code snippet
proto_imports +=
- "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
- " #import <" +
+ " #if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
+ " #import <" +
::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
">\n"
- "#else\n"
- " #import \"" +
+ " #else\n"
+ " #import \"" +
header +
"\"\n"
- "#endif\n";
+ " #endif\n";
} else {
- proto_imports += ::grpc::string("#import \"") + header + "\"\n";
+ proto_imports += ::grpc::string(" #import \"") + header + "\"\n";
}
}
+ proto_imports += "#endif\n";
::grpc::string declarations;
for (int i = 0; i < file->service_count(); i++) {
@@ -106,6 +108,28 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
".pbrpc.h\"\n\n"
"#import <ProtoRPC/ProtoRPC.h>\n"
"#import <RxLibrary/GRXWriter+Immediate.h>\n";
+ for (int i = 0; i < file->dependency_count(); i++) {
+ ::grpc::string header =
+ grpc_objective_c_generator::MessageHeaderName(file->dependency(i));
+ const grpc::protobuf::FileDescriptor *dependency = file->dependency(i);
+ if (IsProtobufLibraryBundledProtoFile(dependency)) {
+ ::grpc::string base_name = header;
+ grpc_generator::StripPrefix(&base_name, "google/protobuf/");
+ // create the import code snippet
+ imports +=
+ "#if GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS\n"
+ " #import <" +
+ ::grpc::string(ProtobufLibraryFrameworkName) + "/" + base_name +
+ ">\n"
+ "#else\n"
+ " #import \"" +
+ header +
+ "\"\n"
+ "#endif\n";
+ } else {
+ imports += ::grpc::string("#import \"") + header + "\"\n";
+ }
+ }
::grpc::string definitions;
for (int i = 0; i < file->service_count(); i++) {
diff --git a/src/compiler/php_generator.cc b/src/compiler/php_generator.cc
index 67967d0bd7..6d9ff3a29c 100644
--- a/src/compiler/php_generator.cc
+++ b/src/compiler/php_generator.cc
@@ -33,7 +33,7 @@ using std::map;
namespace grpc_php_generator {
namespace {
-grpc::string MessageIdentifierName(const grpc::string &name) {
+grpc::string ConvertToPhpNamespace(const grpc::string &name) {
std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
std::ostringstream oss;
for (unsigned int i = 0; i < tokens.size(); i++) {
@@ -43,14 +43,33 @@ grpc::string MessageIdentifierName(const grpc::string &name) {
return oss.str();
}
+grpc::string PackageName(const FileDescriptor *file) {
+ if (file->options().has_php_namespace()) {
+ return file->options().php_namespace();
+ } else {
+ return ConvertToPhpNamespace(file->package());
+ }
+}
+
+grpc::string MessageIdentifierName(const grpc::string &name,
+ const FileDescriptor *file) {
+ std::vector<grpc::string> tokens = grpc_generator::tokenize(name, ".");
+ std::ostringstream oss;
+ oss << PackageName(file) << "\\"
+ << grpc_generator::CapitalizeFirstLetter(tokens[tokens.size() - 1]);
+ return oss.str();
+}
+
void PrintMethod(const MethodDescriptor *method, Printer *out) {
const Descriptor *input_type = method->input_type();
const Descriptor *output_type = method->output_type();
map<grpc::string, grpc::string> vars;
vars["service_name"] = method->service()->full_name();
vars["name"] = method->name();
- vars["input_type_id"] = MessageIdentifierName(input_type->full_name());
- vars["output_type_id"] = MessageIdentifierName(output_type->full_name());
+ vars["input_type_id"] =
+ MessageIdentifierName(input_type->full_name(), input_type->file());
+ vars["output_type_id"] =
+ MessageIdentifierName(output_type->full_name(), output_type->file());
out->Print("/**\n");
out->Print(GetPHPComments(method, " *").c_str());
@@ -149,12 +168,7 @@ grpc::string GenerateFile(const FileDescriptor *file,
}
map<grpc::string, grpc::string> vars;
- grpc::string php_namespace;
- if (file->options().has_php_namespace()) {
- php_namespace = file->options().php_namespace();
- } else {
- php_namespace = MessageIdentifierName(file->package());
- }
+ grpc::string php_namespace = PackageName(file);
vars["package"] = php_namespace;
out.Print(vars, "namespace $package$;\n\n");
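
For reference, the mapping produced by the new PackageName()/MessageIdentifierName() pair above: the namespace comes from the php_namespace file option when it is set, otherwise from the proto package run through ConvertToPhpNamespace(), and only the final component of the message's fully qualified name is appended to it. A rough standalone sketch of that mapping (illustration only; the real generator code uses grpc_generator::tokenize() and CapitalizeFirstLetter() as shown above):

// Sketch of the package -> PHP namespace conversion; not the generator code.
#include <cctype>
#include <sstream>
#include <string>
#include <vector>

std::string ToPhpNamespace(const std::string& package) {
  std::vector<std::string> tokens;
  std::istringstream in(package);
  std::string tok;
  while (std::getline(in, tok, '.')) tokens.push_back(tok);
  std::ostringstream out;
  for (size_t i = 0; i < tokens.size(); ++i) {
    if (!tokens[i].empty()) {
      tokens[i][0] = static_cast<char>(std::toupper(tokens[i][0]));
    }
    if (i > 0) out << "\\";
    out << tokens[i];
  }
  return out.str();
}

// ToPhpNamespace("grpc.testing") yields "Grpc\Testing", so a message named
// grpc.testing.EchoRequest would be referenced as Grpc\Testing\EchoRequest in
// the generated client, unless the .proto sets option php_namespace, in which
// case that namespace is used verbatim as the prefix.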
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 31b177c28e..ef2d90de9e 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -767,9 +767,9 @@ bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
ProtoBufFile pbfile(file);
PrivateGenerator generator(config_, &pbfile);
- if (parameter == "grpc_2_0") {
+ if (parameter == "" || parameter == "grpc_2_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true);
- } else if (parameter == "") {
+ } else if (parameter == "grpc_1_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true) &&
GenerateGrpc(context, generator, pb2_file_name, false);
} else {
diff --git a/src/core/ext/census/README.md b/src/core/ext/census/README.md
deleted file mode 100644
index a9826fe889..0000000000
--- a/src/core/ext/census/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-<!---
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
--->
-
-# Census - a resource measurement and tracing system
-
-This directory contains code for Census, which will ultimately provide the
-following features for any gRPC-using system:
-* A [dapper](http://research.google.com/pubs/pub36356.html)-like tracing
- system, enabling tracing across a distributed infrastructure.
-* RPC statistics and measurements for key metrics, such as latency, bytes
- transferred, number of errors etc.
-* Resource measurement framework which can be used for measuring custom
- metrics. Through the use of [tags](#Tags), these can be broken down across
- the entire distributed stack.
-* Easy integration of the above with
- [Google Cloud Trace](https://cloud.google.com/tools/cloud-trace) and
- [Google Cloud Monitoring](https://cloud.google.com/monitoring/).
-
-## Concepts
-
-### Context
-
-### Operations
-
-### Tags
-
-### Metrics
-
-## API
-
-### Internal/RPC API
-
-### External/Client API
-
-### RPC API
-
-## Files in this directory
-
-Note that files and functions in this directory can be split into two
-categories:
-* Files that define core census library functions. Functions etc. in these
- files are named census\_\*, and constitute the core census library
- functionality. At some time in the future, these will become a standalone
- library.
-* Files that define functions etc. that provide a convenient interface between
- grpc and the core census functionality. These files are all named
- grpc\_\*.{c,h}, and define function names beginning with grpc\_census\_\*.
-
diff --git a/src/core/ext/census/aggregation.h b/src/core/ext/census/aggregation.h
deleted file mode 100644
index 1ba7953ecc..0000000000
--- a/src/core/ext/census/aggregation.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <stddef.h>
-
-#ifndef GRPC_CORE_EXT_CENSUS_AGGREGATION_H
-#define GRPC_CORE_EXT_CENSUS_AGGREGATION_H
-
-/** Structure used to describe an aggregation type. */
-struct census_aggregation_ops {
- /* Create a new aggregation. The pointer returned can be used in future calls
- to clone(), free(), record(), data() and reset(). */
- void *(*create)(const void *create_arg);
- /* Make a copy of an aggregation created by create() */
- void *(*clone)(const void *aggregation);
- /* Destroy an aggregation created by create() */
- void (*free)(void *aggregation);
- /* Record a new value against aggregation. */
- void (*record)(void *aggregation, double value);
- /* Return current aggregation data. The caller must cast this object into
- the correct type for the aggregation result. The object returned can be
- freed by using free_data(). */
- void *(*data)(const void *aggregation);
- /* free data returned by data() */
- void (*free_data)(void *data);
- /* Reset an aggregation to default (zero) values. */
- void (*reset)(void *aggregation);
- /* Merge 'from' aggregation into 'to'. Both aggregations must be compatible */
- void (*merge)(void *to, const void *from);
- /* Fill buffer with printable string version of aggregation contents. For
- debugging only. Returns the number of bytes added to buffer (a value == n
- implies the buffer was of insufficient size). */
- size_t (*print)(const void *aggregation, char *buffer, size_t n);
-};
-
-#endif /* GRPC_CORE_EXT_CENSUS_AGGREGATION_H */
diff --git a/src/core/ext/census/base_resources.c b/src/core/ext/census/base_resources.c
deleted file mode 100644
index 2114bf04cd..0000000000
--- a/src/core/ext/census/base_resources.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/base_resources.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/census.h>
-#include <grpc/support/log.h>
-
-#include "src/core/ext/census/resource.h"
-
-// Add base RPC resource definitions for use by RPC runtime.
-//
-// TODO(aveitch): All of these are currently hardwired definitions encoded in
-// the code in this file. These should be converted to use an external
-// configuration mechanism, in which these resources are defined in a text
-// file, which is compiled to .pb format and read by still-to-be-written
-// configuration functions.
-
-// Define all base resources. This should be called by census initialization.
-void define_base_resources() {
- google_census_Resource_BasicUnit numerator =
- google_census_Resource_BasicUnit_SECS;
- resource r = {"client_rpc_latency", // name
- "Client RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
- define_resource(&r);
- r = (resource){"server_rpc_latency", // name
- "Server RPC latency in seconds", // description
- 0, // prefix
- 1, // n_numerators
- &numerator, // numerators
- 0, // n_denominators
- NULL}; // denominators
- define_resource(&r);
-}
diff --git a/src/core/ext/census/base_resources.h b/src/core/ext/census/base_resources.h
deleted file mode 100644
index 78a4d1fae5..0000000000
--- a/src/core/ext/census/base_resources.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
-#define GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H
-
-/* Define all base resources. This should be called by census initialization. */
-void define_base_resources();
-
-#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */
diff --git a/src/core/ext/census/census_interface.h b/src/core/ext/census/census_interface.h
deleted file mode 100644
index a42b68ad65..0000000000
--- a/src/core/ext/census/census_interface.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H
-
-#include <grpc/support/port_platform.h>
-
-/* Maximum length of an individual census trace annotation. */
-#define CENSUS_MAX_ANNOTATION_LENGTH 200
-
-/* Structure of a census op id. Defined as a structure because a 64-bit integer
-   is not available on every platform for C89. */
-typedef struct census_op_id {
- uint32_t upper;
- uint32_t lower;
-} census_op_id;
-
-typedef struct census_rpc_stats census_rpc_stats;
-
-/* Initializes the Census library. No-op if Census is already initialized. */
-void census_init(void);
-
-/* Shuts down the Census library. */
-void census_shutdown(void);
-
-/* Annotates grpc method name on a census_op_id. The method name has the format
-   of <fully qualified rpc service name>/<rpc function name>. Returns 0 iff
-   op_id and method_name are both valid. op_id is valid after its creation and
- before calling census_tracing_end_op().
-
-   TODO(hongyu): Figure out the valid character set for service name and command
- name and document requirements here.*/
-int census_add_method_tag(census_op_id op_id, const char *method_name);
-
-/* Attaches a tracing annotation to a specific op_id.
- Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */
-void census_tracing_print(census_op_id op_id, const char *annotation);
-
-/* Starts tracing for an RPC. Returns a locally unique census_op_id */
-census_op_id census_tracing_start_op(void);
-
-/* Ends tracing. Calling this function will invalidate the input op_id. */
-void census_tracing_end_op(census_op_id op_id);
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */
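A sketch of the intended call sequence for the op-id tracing API above, assuming the removed header is still available. The method name and annotation strings are illustrative only.

/* Illustrative only: typical lifecycle of a traced RPC using the removed API. */
static void traced_rpc_example(void) {
  census_op_id id = census_tracing_start_op();
  if (census_add_method_tag(id, "/example.Service/ExampleMethod") != 0) {
    /* A non-zero return means the op_id/method_name pair was rejected. */
    return;
  }
  census_tracing_print(id, "request received");
  /* ... perform the actual RPC work here ... */
  census_tracing_print(id, "response sent");
  census_tracing_end_op(id); /* id is invalid after this call */
}
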
diff --git a/src/core/ext/census/census_log.c b/src/core/ext/census/census_log.c
deleted file mode 100644
index 100047f12b..0000000000
--- a/src/core/ext/census/census_log.c
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Available log space is divided up in blocks of
- CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the
- following three data structures:
- - Free blocks (free_block_list)
- - Blocks with unread data (dirty_block_list)
- - Blocks currently attached to cores (core_local_blocks[])
-
- census_log_start_write() moves a block from core_local_blocks[] to the
- end of dirty_block_list when block:
- - is out-of-space OR
- - has an incomplete record (an incomplete record occurs when a thread calls
- census_log_start_write() and is context-switched before calling
-     census_log_end_write()).
- So, blocks in dirty_block_list are ordered, from oldest to newest, by time
- when block is detached from the core.
-
- census_log_read_next() first iterates over dirty_block_list and then
- core_local_blocks[]. It moves completely read blocks from dirty_block_list
- to free_block_list. Blocks in core_local_blocks[] are not freed, even when
- completely read.
-
- If log is configured to discard old records and free_block_list is empty,
- census_log_start_write() iterates over dirty_block_list to allocate a
- new block. It moves the oldest available block (no pending read/write) to
- core_local_blocks[].
-
- core_local_block_struct is used to implement a map from core id to the block
- associated with that core. This mapping is advisory. It is possible that the
- block returned by this mapping is no longer associated with that core. This
- mapping is updated, lazily, by census_log_start_write().
-
- Locking in block struct:
-
-  Exclusive g_log.lock must be held before calling any functions operating on
- block structs except census_log_start_write() and
- census_log_end_write().
-
- Writes to a block are serialized via writer_lock.
- census_log_start_write() acquires this lock and
- census_log_end_write() releases it. On failure to acquire the lock,
- writer allocates a new block for the current core and updates
- core_local_block accordingly.
-
- Simultaneous read and write access is allowed. Reader can safely read up to
- committed bytes (bytes_committed).
-
- reader_lock protects the block, currently being read, from getting recycled.
- start_read() acquires reader_lock and end_read() releases the lock.
-
- Read/write access to a block is disabled via try_disable_access(). It returns
- with both writer_lock and reader_lock held. These locks are subsequently
- released by enable_access() to enable access to the block.
-
-   A note on naming: most function/struct names are prefixed with cl_
- (shorthand for census_log). Further, functions that manipulate structures
- include the name of the structure, which will be passed as the first
- argument. E.g. cl_block_initialize() will initialize a cl_block.
-*/
-#include "src/core/ext/census/census_log.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
-#include <string.h>
-
-/* End of platform specific code */
-
-typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct *next;
- struct census_log_block_list_struct *prev;
- struct census_log_block *block;
-} cl_block_list_struct;
-
-typedef struct census_log_block {
- /* Pointer to underlying buffer */
- char *buffer;
- gpr_atm writer_lock;
- gpr_atm reader_lock;
- /* Keeps completely written bytes. Declared atomic because accessed
- simultaneously by reader and writer. */
- gpr_atm bytes_committed;
- /* Bytes already read */
- int32_t bytes_read;
- /* Links for list */
- cl_block_list_struct link;
-/* We want this structure to be cacheline aligned. We assume the following
- sizes for the various parts on 32/64bit systems:
- type 32b size 64b size
- char* 4 8
- 3x gpr_atm 12 24
- int32_t 4 8 (assumes padding)
- cl_block_list_struct 12 24
- TOTAL 32 64
-
- Depending on the size of our cacheline and the architecture, we
- selectively add char buffering to this structure. The size is checked
- via assert in census_log_initialize(). */
-#if defined(GPR_ARCH_64)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_BLOCK_PAD_SIZE > 0
- char padding[CL_BLOCK_PAD_SIZE];
-#endif
-} cl_block;
-
-/* A list of cl_blocks, doubly-linked through cl_block::link. */
-typedef struct census_log_block_list {
- int32_t count; /* Number of items in list. */
- cl_block_list_struct ht; /* head/tail of linked list. */
-} cl_block_list;
-
-/* Cacheline-aligned block pointers to avoid false sharing. The block pointer
-   must be initialized via set_block() before calling other functions. */
-typedef struct census_log_core_local_block {
- gpr_atm block;
-/* Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8 */
-#if defined(GPR_ARCH_64)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
- char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
-#endif
-} cl_core_local_block;
-
-struct census_log {
- int discard_old_records;
- /* Number of cores (aka hardware-contexts) */
- unsigned num_cores;
- /* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
- int32_t num_blocks;
- cl_block *blocks; /* Block metadata. */
- cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
- gpr_mu lock;
- int initialized; /* has log been initialized? */
- /* Keeps the state of the reader iterator. A value of 0 indicates that
-     the iterator has reached the end. census_log_init_reader() resets the
-     value to num_cores to restart iteration. */
- uint32_t read_iterator_state;
- /* Points to the block being read. If non-NULL, the block is locked for
- reading (block_being_read_->reader_lock is held). */
- cl_block *block_being_read;
- /* A non-zero value indicates that log is full. */
- gpr_atm is_full;
- char *buffer;
- cl_block_list free_block_list;
- cl_block_list dirty_block_list;
- gpr_atm out_of_space_count;
-};
-
-/* Single internal log */
-static struct census_log g_log;
-
-/* Functions that operate on an atomic memory location used as a lock */
-
-/* Returns non-zero if lock is acquired */
-static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-
-static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
-
-/* Functions that operate on cl_core_local_block's */
-
-static void cl_core_local_block_set_block(cl_core_local_block *clb,
- cl_block *block) {
- gpr_atm_rel_store(&clb->block, (gpr_atm)block);
-}
-
-static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
- return (cl_block *)gpr_atm_acq_load(&clb->block);
-}
-
-/* Functions that operate on cl_block_list_struct's */
-
-static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
- cl_block *block) {
- bls->next = bls->prev = bls;
- bls->block = block;
-}
-
-/* Functions that operate on cl_block_list's */
-
-static void cl_block_list_initialize(cl_block_list *list) {
- list->count = 0;
- cl_block_list_struct_initialize(&list->ht, NULL);
-}
-
-/* Returns head of *this, or NULL if empty. */
-static cl_block *cl_block_list_head(cl_block_list *list) {
- return list->ht.next->block;
-}
-
-/* Insert element *e after *pos. */
-static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
- cl_block_list_struct *e) {
- list->count++;
- e->next = pos->next;
- e->prev = pos;
- e->next->prev = e;
- e->prev->next = e;
-}
-
-/* Insert block at the head of the list */
-static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
- cl_block_list_insert(list, &list->ht, &block->link);
-}
-
-/* Insert block at the tail of the list */
-static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
- cl_block_list_insert(list, list->ht.prev, &block->link);
-}
-
-/* Removes block *b. Requires *b be in the list. */
-static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
- list->count--;
- b->link.next->prev = b->link.prev;
- b->link.prev->next = b->link.next;
-}
-
-/* Functions that operate on cl_block's */
-
-static void cl_block_initialize(cl_block *block, char *buffer) {
- block->buffer = buffer;
- gpr_atm_rel_store(&block->writer_lock, 0);
- gpr_atm_rel_store(&block->reader_lock, 0);
- gpr_atm_rel_store(&block->bytes_committed, 0);
- block->bytes_read = 0;
- cl_block_list_struct_initialize(&block->link, block);
-}
-
-/* Guards against exposing partially written buffer to the reader. */
-static void cl_block_set_bytes_committed(cl_block *block,
- int32_t bytes_committed) {
- gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
-}
-
-static int32_t cl_block_get_bytes_committed(cl_block *block) {
- return gpr_atm_acq_load(&block->bytes_committed);
-}
-
-/* Tries to disable future read/write access to this block. Succeeds if:
- - no in-progress write AND
- - no in-progress read AND
- - 'discard_data' set to true OR no unread data
-   On success, clears the block state and returns with writer_lock and
-   reader_lock held. These locks are released by a subsequent
-   cl_block_enable_access() call. */
-static int cl_block_try_disable_access(cl_block *block, int discard_data) {
- if (!cl_try_lock(&block->writer_lock)) {
- return 0;
- }
- if (!cl_try_lock(&block->reader_lock)) {
- cl_unlock(&block->writer_lock);
- return 0;
- }
- if (!discard_data &&
- (block->bytes_read != cl_block_get_bytes_committed(block))) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
- return 0;
- }
- cl_block_set_bytes_committed(block, 0);
- block->bytes_read = 0;
- return 1;
-}
-
-static void cl_block_enable_access(cl_block *block) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
-}
-
-/* Returns with writer_lock held. */
-static void *cl_block_start_write(cl_block *block, size_t size) {
- int32_t bytes_committed;
- if (!cl_try_lock(&block->writer_lock)) {
- return NULL;
- }
- bytes_committed = cl_block_get_bytes_committed(block);
- if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
- cl_unlock(&block->writer_lock);
- return NULL;
- }
- return block->buffer + bytes_committed;
-}
-
-/* Releases writer_lock and increments committed bytes by 'bytes_written'.
- 'bytes_written' must be <= 'size' specified in the corresponding
-   cl_block_start_write() call. This function is thread-safe. */
-static void cl_block_end_write(cl_block *block, size_t bytes_written) {
- cl_block_set_bytes_committed(
- block, cl_block_get_bytes_committed(block) + bytes_written);
- cl_unlock(&block->writer_lock);
-}
-
-/* Returns a pointer to the first unread byte in buffer. The number of bytes
-   available is returned in 'bytes_available'. Acquires the reader lock, which is
- released by a subsequent cl_block_end_read() call. Returns NULL if:
- - read in progress
- - no data available */
-static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
- void *record;
- if (!cl_try_lock(&block->reader_lock)) {
- return NULL;
- }
- /* bytes_committed may change from under us. Use bytes_available to update
- bytes_read below. */
- *bytes_available = cl_block_get_bytes_committed(block) - block->bytes_read;
- if (*bytes_available == 0) {
- cl_unlock(&block->reader_lock);
- return NULL;
- }
- record = block->buffer + block->bytes_read;
- block->bytes_read += *bytes_available;
- return record;
-}
-
-static void cl_block_end_read(cl_block *block) {
- cl_unlock(&block->reader_lock);
-}
-
-/* Internal functions operating on g_log */
-
-/* Allocates a new free block (or recycles an available dirty block if log is
- configured to discard old records). Returns NULL if out-of-space. */
-static cl_block *cl_allocate_block(void) {
- cl_block *block = cl_block_list_head(&g_log.free_block_list);
- if (block != NULL) {
- cl_block_list_remove(&g_log.free_block_list, block);
- return block;
- }
- if (!g_log.discard_old_records) {
- /* No free block and log is configured to keep old records. */
- return NULL;
- }
- /* Recycle dirty block. Start from the oldest. */
- for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
- block = block->link.next->block) {
- if (cl_block_try_disable_access(block, 1 /* discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, block);
- return block;
- }
- }
- return NULL;
-}
-
-/* Allocates a new block and updates core id => block mapping. 'old_block'
- points to the block that the caller thinks is attached to
- 'core_id'. 'old_block' may be NULL. Returns non-zero if:
- - allocated a new block OR
- - 'core_id' => 'old_block' mapping changed (another thread allocated a
- block before lock was acquired). */
-static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
- /* Now that we have the lock, check if core-local mapping has changed. */
- cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
- cl_block *block = cl_core_local_block_get_block(core_local_block);
- if ((block != NULL) && (block != old_block)) {
- return 1;
- }
- if (block != NULL) {
- cl_core_local_block_set_block(core_local_block, NULL);
- cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
- }
- block = cl_allocate_block();
- if (block == NULL) {
- gpr_atm_rel_store(&g_log.is_full, 1);
- return 0;
- }
- cl_core_local_block_set_block(core_local_block, block);
- cl_block_enable_access(block);
- return 1;
-}
-
-static cl_block *cl_get_block(void *record) {
- uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
- uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
- return &g_log.blocks[index];
-}
-
-/* Gets the next block to read and tries to free 'prev' block (if not NULL).
- Returns NULL if reached the end. */
-static cl_block *cl_next_block_to_read(cl_block *prev) {
- cl_block *block = NULL;
- if (g_log.read_iterator_state == g_log.num_cores) {
- /* We are traversing dirty list; find the next dirty block. */
- if (prev != NULL) {
- /* Try to free the previous block if there is no unread data. This block
-         may have unread data if a previously incomplete record was completed between
- read_next() calls. */
- block = prev->link.next->block;
- if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, prev);
- cl_block_list_insert_at_head(&g_log.free_block_list, prev);
- gpr_atm_rel_store(&g_log.is_full, 0);
- }
- } else {
- block = cl_block_list_head(&g_log.dirty_block_list);
- }
- if (block != NULL) {
- return block;
- }
- /* We are done with the dirty list; moving on to core-local blocks. */
- }
- while (g_log.read_iterator_state > 0) {
- g_log.read_iterator_state--;
- block = cl_core_local_block_get_block(
- &g_log.core_local_blocks[g_log.read_iterator_state]);
- if (block != NULL) {
- return block;
- }
- }
- return NULL;
-}
-
-/* External functions: primary stats_log interface */
-void census_log_initialize(size_t size_in_mb, int discard_old_records) {
- int32_t ix;
- /* Check cacheline alignment. */
- GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(!g_log.initialized);
- g_log.discard_old_records = discard_old_records;
- g_log.num_cores = gpr_cpu_num_cores();
- /* Ensure at least as many blocks as there are cores. */
- g_log.num_blocks = GPR_MAX(
- g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
- gpr_mu_init(&g_log.lock);
- g_log.read_iterator_state = 0;
- g_log.block_being_read = NULL;
- gpr_atm_rel_store(&g_log.is_full, 0);
- g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
- g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.core_local_blocks, 0,
- g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block *)gpr_malloc_aligned(
- g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- cl_block_list_initialize(&g_log.free_block_list);
- cl_block_list_initialize(&g_log.dirty_block_list);
- for (ix = 0; ix < g_log.num_blocks; ++ix) {
- cl_block *block = g_log.blocks + ix;
- cl_block_initialize(block,
- g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
- cl_block_try_disable_access(block, 1 /* discard data */);
- cl_block_list_insert_at_tail(&g_log.free_block_list, block);
- }
- gpr_atm_rel_store(&g_log.out_of_space_count, 0);
- g_log.initialized = 1;
-}
-
-void census_log_shutdown(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_destroy(&g_log.lock);
- gpr_free_aligned(g_log.core_local_blocks);
- g_log.core_local_blocks = NULL;
- gpr_free_aligned(g_log.blocks);
- g_log.blocks = NULL;
- gpr_free(g_log.buffer);
- g_log.buffer = NULL;
- g_log.initialized = 0;
-}
-
-void *census_log_start_write(size_t size) {
- /* Used to bound number of times block allocation is attempted. */
- int32_t attempts_remaining = g_log.num_blocks;
- /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
- int32_t core_id = gpr_cpu_current_cpu();
- GPR_ASSERT(g_log.initialized);
- if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
- return NULL;
- }
- do {
- int allocated;
- void *record = NULL;
- cl_block *block =
- cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
- if (block && (record = cl_block_start_write(block, size))) {
- return record;
- }
- /* Need to allocate a new block. We are here if:
- - No block associated with the core OR
- - Write in-progress on the block OR
- - block is out of space */
- if (gpr_atm_acq_load(&g_log.is_full)) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- gpr_mu_lock(&g_log.lock);
- allocated = cl_allocate_core_local_block(core_id, block);
- gpr_mu_unlock(&g_log.lock);
- if (!allocated) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- } while (attempts_remaining--);
- /* Give up. */
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
-}
-
-void census_log_end_write(void *record, size_t bytes_written) {
- GPR_ASSERT(g_log.initialized);
- cl_block_end_write(cl_get_block(record), bytes_written);
-}
-
-void census_log_init_reader(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- /* If a block is locked for reading unlock it. */
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- g_log.block_being_read = NULL;
- }
- g_log.read_iterator_state = g_log.num_cores;
- gpr_mu_unlock(&g_log.lock);
-}
-
-const void *census_log_read_next(size_t *bytes_available) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- }
- do {
- g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
- if (g_log.block_being_read != NULL) {
- void *record =
- cl_block_start_read(g_log.block_being_read, bytes_available);
- if (record != NULL) {
- gpr_mu_unlock(&g_log.lock);
- return record;
- }
- }
- } while (g_log.block_being_read != NULL);
- gpr_mu_unlock(&g_log.lock);
- return NULL;
-}
-
-size_t census_log_remaining_space(void) {
- size_t space;
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.discard_old_records) {
- /* Remaining space is not meaningful; just return the entire log space. */
- space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
- } else {
- space = g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
- }
- gpr_mu_unlock(&g_log.lock);
- return space;
-}
-
-int census_log_out_of_space_count(void) {
- GPR_ASSERT(g_log.initialized);
- return gpr_atm_acq_load(&g_log.out_of_space_count);
-}
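Writer-side usage of the log above, as a minimal sketch against the removed API. The record payload is illustrative; real callers write census records, not strings.

/* Sketch only: reserve space in the log, fill it, then commit it. */
#include <string.h>

static void log_one_record(const char *payload, size_t len) {
  void *slot = census_log_start_write(len);
  if (slot == NULL) {
    /* Log is full, or len > CENSUS_LOG_MAX_RECORD_SIZE. */
    return;
  }
  memcpy(slot, payload, len);
  /* bytes_written must be <= the size passed to census_log_start_write(). */
  census_log_end_write(slot, len);
}
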
diff --git a/src/core/ext/census/census_log.h b/src/core/ext/census/census_log.h
deleted file mode 100644
index 6b68b6bd37..0000000000
--- a/src/core/ext/census/census_log.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H
-
-#include <stddef.h>
-
-/* Maximum record size, in bytes. */
-#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
-#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
-
-/* Initialize the statistics logging subsystem with the given log size. A log
- size of 0 will result in the smallest possible log for the platform
- (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
- discard_old_records is non-zero, then new records will displace older ones
- when the log is full. This function must be called before any other
- census_log functions.
-*/
-void census_log_initialize(size_t size_in_mb, int discard_old_records);
-
-/* Shuts down the logging subsystem. The caller must ensure that there are:
-   - no in-progress or future calls to any census_log functions
- - no incomplete records
-*/
-void census_log_shutdown(void);
-
-/* Allocates and returns a 'size'-byte record and marks it in use. A
- subsequent census_log_end_write() marks the record complete. The
- 'bytes_written' census_log_end_write() argument must be <=
- 'size'. Returns NULL if out-of-space AND:
- - log is configured to keep old records OR
- - all blocks are pinned by incomplete records.
-*/
-void *census_log_start_write(size_t size);
-
-void census_log_end_write(void *record, size_t bytes_written);
-
-/* census_log_read_next() iterates over blocks with data and for each block
-   returns a pointer to the first unread byte. The number of bytes that can be
-   read is returned in 'bytes_available'. The reader is expected to read all
-   available data. Reading the data consumes it, i.e. it cannot be read again.
-   census_log_read_next() returns NULL if the end is reached, i.e. the last block
- is read. census_log_init_reader() starts the iteration or aborts the
- current iteration.
-*/
-void census_log_init_reader(void);
-const void *census_log_read_next(size_t *bytes_available);
-
-/* Returns estimated remaining space across all blocks, in bytes. If log is
- configured to discard old records, returns total log space. Otherwise,
- returns space available in empty blocks (partially filled blocks are
- treated as full).
-*/
-size_t census_log_remaining_space(void);
-
-/* Returns the number of times census_log_start_write() failed due to
- out-of-space. */
-int census_log_out_of_space_count(void);
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_LOG_H */
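Reader-side counterpart, sketched against the API above; the consume callback is a placeholder supplied by the caller.

/* Sketch only: restart iteration, then drain all committed data.
   Reading consumes the data; it cannot be read a second time. */
static void drain_census_log(void (*consume)(const void *data, size_t n)) {
  size_t available = 0;
  const void *data;
  census_log_init_reader();
  while ((data = census_log_read_next(&available)) != NULL) {
    consume(data, available);
  }
}
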
diff --git a/src/core/ext/census/census_rpc_stats.c b/src/core/ext/census/census_rpc_stats.c
deleted file mode 100644
index 0aca1f109e..0000000000
--- a/src/core/ext/census/census_rpc_stats.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include "src/core/ext/census/census_interface.h"
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/ext/census/census_tracing.h"
-#include "src/core/ext/census/hash_table.h"
-#include "src/core/ext/census/window_stats.h"
-#include "src/core/lib/support/murmur_hash.h"
-#include "src/core/lib/support/string.h"
-
-#define NUM_INTERVALS 3
-#define MINUTE_INTERVAL 0
-#define HOUR_INTERVAL 1
-#define TOTAL_INTERVAL 2
-
-/* for easier typing */
-typedef census_per_method_rpc_stats per_method_stats;
-
-/* Ensure mu is only initialized once. */
-static gpr_once g_stats_store_mu_init = GPR_ONCE_INIT;
-/* Guards two stats stores. */
-static gpr_mu g_mu;
-static census_ht *g_client_stats_store = NULL;
-static census_ht *g_server_stats_store = NULL;
-
-static void init_mutex(void) { gpr_mu_init(&g_mu); }
-
-static void init_mutex_once(void) {
- gpr_once_init(&g_stats_store_mu_init, init_mutex);
-}
-
-static int cmp_str_keys(const void *k1, const void *k2) {
- return strcmp((const char *)k1, (const char *)k2);
-}
-
-/* TODO(hongyu): replace it with cityhash64 */
-static uint64_t simple_hash(const void *k) {
- size_t len = strlen(k);
- uint64_t higher = gpr_murmur_hash3((const char *)k, len / 2, 0);
- return higher << 32 |
- gpr_murmur_hash3((const char *)k + len / 2, len - len / 2, 0);
-}
-
-static void delete_stats(void *stats) {
- census_window_stats_destroy((struct census_window_stats *)stats);
-}
-
-static void delete_key(void *key) { gpr_free(key); }
-
-static const census_ht_option ht_opt = {
- CENSUS_HT_POINTER /* key type */, 1999 /* n_of_buckets */,
- simple_hash /* hash function */, cmp_str_keys /* key comparator */,
- delete_stats /* data deleter */, delete_key /* key deleter */
-};
-
-static void init_rpc_stats(void *stats) {
- memset(stats, 0, sizeof(census_rpc_stats));
-}
-
-static void stat_add_proportion(double p, void *base, const void *addme) {
- census_rpc_stats *b = (census_rpc_stats *)base;
- census_rpc_stats *a = (census_rpc_stats *)addme;
- b->cnt += p * a->cnt;
- b->rpc_error_cnt += p * a->rpc_error_cnt;
- b->app_error_cnt += p * a->app_error_cnt;
- b->elapsed_time_ms += p * a->elapsed_time_ms;
- b->api_request_bytes += p * a->api_request_bytes;
- b->wire_request_bytes += p * a->wire_request_bytes;
- b->api_response_bytes += p * a->api_response_bytes;
- b->wire_response_bytes += p * a->wire_response_bytes;
-}
-
-static void stat_add(void *base, const void *addme) {
- stat_add_proportion(1.0, base, addme);
-}
-
-static gpr_timespec min_hour_total_intervals[3] = {
- {60, 0}, {3600, 0}, {36000000, 0}};
-
-static const census_window_stats_stat_info window_stats_settings = {
- sizeof(census_rpc_stats), init_rpc_stats, stat_add, stat_add_proportion};
-
-census_rpc_stats *census_rpc_stats_create_empty(void) {
- census_rpc_stats *ret =
- (census_rpc_stats *)gpr_malloc(sizeof(census_rpc_stats));
- memset(ret, 0, sizeof(census_rpc_stats));
- return ret;
-}
-
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data) {
- int i = 0;
- for (i = 0; i < data->num_entries; i++) {
- if (data->stats[i].method != NULL) {
- gpr_free((void *)data->stats[i].method);
- }
- }
- if (data->stats != NULL) {
- gpr_free(data->stats);
- }
- data->num_entries = 0;
- data->stats = NULL;
-}
-
-static void record_stats(census_ht *store, census_op_id op_id,
- const census_rpc_stats *stats) {
- gpr_mu_lock(&g_mu);
- if (store != NULL) {
- census_trace_obj *trace = NULL;
- census_internal_lock_trace_store();
- trace = census_get_trace_obj_locked(op_id);
- if (trace != NULL) {
- const char *method_name = census_get_trace_method_name(trace);
- struct census_window_stats *window_stats = NULL;
- census_ht_key key;
- key.ptr = (void *)method_name;
- window_stats = census_ht_find(store, key);
- census_internal_unlock_trace_store();
- if (window_stats == NULL) {
- window_stats = census_window_stats_create(3, min_hour_total_intervals,
- 30, &window_stats_settings);
- key.ptr = gpr_strdup(key.ptr);
- census_ht_insert(store, key, (void *)window_stats);
- }
- census_window_stats_add(window_stats, gpr_now(GPR_CLOCK_REALTIME), stats);
- } else {
- census_internal_unlock_trace_store();
- }
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats *stats) {
- record_stats(g_client_stats_store, op_id, stats);
-}
-
-void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats *stats) {
- record_stats(g_server_stats_store, op_id, stats);
-}
-
-/* Get stats from input stats store */
-static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
- GPR_ASSERT(data != NULL);
- if (data->num_entries != 0) {
- census_aggregated_rpc_stats_set_empty(data);
- }
- gpr_mu_lock(&g_mu);
- if (store != NULL) {
- size_t n;
- unsigned i, j;
- gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_kv *kv = census_ht_get_all_elements(store, &n);
- if (kv != NULL) {
- data->num_entries = n;
- data->stats =
- (per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
- for (i = 0; i < n; i++) {
- census_window_stats_sums sums[NUM_INTERVALS];
- for (j = 0; j < NUM_INTERVALS; j++) {
- sums[j].statistic = (void *)census_rpc_stats_create_empty();
- }
- data->stats[i].method = gpr_strdup(kv[i].k.ptr);
- census_window_stats_get_sums(kv[i].v, now, sums);
- data->stats[i].minute_stats =
- *(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
- data->stats[i].hour_stats =
- *(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
- data->stats[i].total_stats =
- *(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
- for (j = 0; j < NUM_INTERVALS; j++) {
- gpr_free(sums[j].statistic);
- }
- }
- gpr_free(kv);
- }
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_get_client_stats(census_aggregated_rpc_stats *data) {
- get_stats(g_client_stats_store, data);
-}
-
-void census_get_server_stats(census_aggregated_rpc_stats *data) {
- get_stats(g_server_stats_store, data);
-}
-
-void census_stats_store_init(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_client_stats_store == NULL && g_server_stats_store == NULL) {
- g_client_stats_store = census_ht_create(&ht_opt);
- g_server_stats_store = census_ht_create(&ht_opt);
- } else {
- gpr_log(GPR_ERROR, "Census stats store already initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_stats_store_shutdown(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_client_stats_store != NULL) {
- census_ht_destroy(g_client_stats_store);
- g_client_stats_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census server stats store not initialized.");
- }
- if (g_server_stats_store != NULL) {
- census_ht_destroy(g_server_stats_store);
- g_server_stats_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census client stats store not initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
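A sketch of how a caller would feed the per-method window stats above for one finished client RPC, assuming the removed headers are available; the field values are illustrative.

/* Illustrative only: build a one-call census_rpc_stats and record it against
   the client stats store keyed by the op_id's method name. */
#include <string.h>

static void record_example_client_rpc(census_op_id op_id, double latency_ms,
                                      double wire_request_bytes,
                                      double wire_response_bytes) {
  census_rpc_stats stats;
  memset(&stats, 0, sizeof(stats));
  stats.cnt = 1;
  stats.elapsed_time_ms = latency_ms;
  stats.wire_request_bytes = wire_request_bytes;
  stats.wire_response_bytes = wire_response_bytes;
  census_record_rpc_client_stats(op_id, &stats);
}
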
diff --git a/src/core/ext/census/census_rpc_stats.h b/src/core/ext/census/census_rpc_stats.h
deleted file mode 100644
index 8004ade37d..0000000000
--- a/src/core/ext/census/census_rpc_stats.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H
-
-#include <grpc/support/port_platform.h>
-#include "src/core/ext/census/census_interface.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct census_rpc_stats {
- uint64_t cnt;
- uint64_t rpc_error_cnt;
- uint64_t app_error_cnt;
- double elapsed_time_ms;
- double api_request_bytes;
- double wire_request_bytes;
- double api_response_bytes;
- double wire_response_bytes;
-};
-
-/* Creates an empty rpc stats object on heap. */
-census_rpc_stats *census_rpc_stats_create_empty(void);
-
-typedef struct census_per_method_rpc_stats {
- const char *method;
- census_rpc_stats minute_stats; /* cumulative stats in the past minute */
- census_rpc_stats hour_stats; /* cumulative stats in the past hour */
- census_rpc_stats total_stats; /* cumulative stats from last gc */
-} census_per_method_rpc_stats;
-
-typedef struct census_aggregated_rpc_stats {
- int num_entries;
- census_per_method_rpc_stats *stats;
-} census_aggregated_rpc_stats;
-
-/* Initializes an aggregated rpc stats object to an empty state. */
-void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data);
-
-/* Records client-side stats of an RPC. */
-void census_record_rpc_client_stats(census_op_id op_id,
- const census_rpc_stats *stats);
-
-/* Records server-side stats of an RPC. */
-void census_record_rpc_server_stats(census_op_id op_id,
- const census_rpc_stats *stats);
-
-/* The following two functions are intended for in-process queries of
-   per-service per-method stats from grpc implementations. */
-
-/* Populates *data_map with server side aggregated per-service per-method
- stats.
- DO NOT CALL from outside of grpc code. */
-void census_get_server_stats(census_aggregated_rpc_stats *data_map);
-
-/* Populates *data_map with client side aggregated per-service per-method
- stats.
- DO NOT CALL from outside of grpc code. */
-void census_get_client_stats(census_aggregated_rpc_stats *data_map);
-
-void census_stats_store_init(void);
-void census_stats_store_shutdown(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H */
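An in-process query of the aggregated stats declared above might look like the following sketch; logging only the one-minute call count is illustrative.

/* Sketch only: fetch server-side per-method stats, log them, then release
   the storage via census_aggregated_rpc_stats_set_empty(). */
static void log_server_rpc_counts(void) {
  census_aggregated_rpc_stats data = {0, NULL};
  census_get_server_stats(&data);
  for (int i = 0; i < data.num_entries; i++) {
    gpr_log(GPR_DEBUG, "%s: %llu calls in the last minute",
            data.stats[i].method,
            (unsigned long long)data.stats[i].minute_stats.cnt);
  }
  census_aggregated_rpc_stats_set_empty(&data); /* frees methods and array */
}
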
diff --git a/src/core/ext/census/census_tracing.c b/src/core/ext/census/census_tracing.c
deleted file mode 100644
index 199b260dd1..0000000000
--- a/src/core/ext/census/census_tracing.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/census_tracing.h"
-#include "src/core/ext/census/census_interface.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/sync.h>
-#include "src/core/ext/census/hash_table.h"
-#include "src/core/lib/support/string.h"
-
-void census_trace_obj_destroy(census_trace_obj *obj) {
- census_trace_annotation *p = obj->annotations;
- while (p != NULL) {
- census_trace_annotation *next = p->next;
- gpr_free(p);
- p = next;
- }
- gpr_free(obj->method);
- gpr_free(obj);
-}
-
-static void delete_trace_obj(void *obj) {
- census_trace_obj_destroy((census_trace_obj *)obj);
-}
-
-static const census_ht_option ht_opt = {
- CENSUS_HT_UINT64 /* key type */,
- 571 /* n_of_buckets */,
- NULL /* hash */,
- NULL /* compare_keys */,
- delete_trace_obj /* delete data */,
- NULL /* delete key */
-};
-
-static gpr_once g_init_mutex_once = GPR_ONCE_INIT;
-static gpr_mu g_mu; /* Guards following two static variables. */
-static census_ht *g_trace_store = NULL;
-static uint64_t g_id = 0;
-
-static census_ht_key op_id_as_key(census_op_id *id) {
- return *(census_ht_key *)id;
-}
-
-static uint64_t op_id_2_uint64(census_op_id *id) {
- uint64_t ret;
- memcpy(&ret, id, sizeof(census_op_id));
- return ret;
-}
-
-static void init_mutex(void) { gpr_mu_init(&g_mu); }
-
-static void init_mutex_once(void) {
- gpr_once_init(&g_init_mutex_once, init_mutex);
-}
-
-census_op_id census_tracing_start_op(void) {
- gpr_mu_lock(&g_mu);
- {
- census_trace_obj *ret = gpr_malloc(sizeof(census_trace_obj));
- memset(ret, 0, sizeof(census_trace_obj));
- g_id++;
- memcpy(&ret->id, &g_id, sizeof(census_op_id));
- ret->rpc_stats.cnt = 1;
- ret->ts = gpr_now(GPR_CLOCK_REALTIME);
- census_ht_insert(g_trace_store, op_id_as_key(&ret->id), (void *)ret);
- gpr_log(GPR_DEBUG, "Start tracing for id %lu", g_id);
- gpr_mu_unlock(&g_mu);
- return ret->id;
- }
-}
-
-int census_add_method_tag(census_op_id op_id, const char *method) {
- int ret = 0;
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace == NULL) {
- ret = 1;
- } else {
- trace->method = gpr_strdup(method);
- }
- gpr_mu_unlock(&g_mu);
- return ret;
-}
-
-void census_tracing_print(census_op_id op_id, const char *anno_txt) {
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace != NULL) {
- census_trace_annotation *anno = gpr_malloc(sizeof(census_trace_annotation));
- anno->ts = gpr_now(GPR_CLOCK_REALTIME);
- {
- char *d = anno->txt;
- const char *s = anno_txt;
- int n = 0;
- for (; n < CENSUS_MAX_ANNOTATION_LENGTH && *s != '\0'; ++n) {
- *d++ = *s++;
- }
- *d = '\0';
- }
- anno->next = trace->annotations;
- trace->annotations = anno;
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_end_op(census_op_id op_id) {
- census_trace_obj *trace = NULL;
- gpr_mu_lock(&g_mu);
- trace = census_ht_find(g_trace_store, op_id_as_key(&op_id));
- if (trace != NULL) {
- trace->rpc_stats.elapsed_time_ms = gpr_timespec_to_micros(
- gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), trace->ts));
- gpr_log(GPR_DEBUG, "End tracing for id %lu, method %s, latency %f us",
- op_id_2_uint64(&op_id), trace->method,
- trace->rpc_stats.elapsed_time_ms);
- census_ht_erase(g_trace_store, op_id_as_key(&op_id));
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_init(void) {
- init_mutex_once();
- gpr_mu_lock(&g_mu);
- if (g_trace_store == NULL) {
- g_id = 1;
- g_trace_store = census_ht_create(&ht_opt);
- } else {
- gpr_log(GPR_ERROR, "Census trace store already initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_tracing_shutdown(void) {
- gpr_mu_lock(&g_mu);
- if (g_trace_store != NULL) {
- census_ht_destroy(g_trace_store);
- g_trace_store = NULL;
- } else {
- gpr_log(GPR_ERROR, "Census trace store is not initialized.");
- }
- gpr_mu_unlock(&g_mu);
-}
-
-void census_internal_lock_trace_store(void) { gpr_mu_lock(&g_mu); }
-
-void census_internal_unlock_trace_store(void) { gpr_mu_unlock(&g_mu); }
-
-census_trace_obj *census_get_trace_obj_locked(census_op_id op_id) {
- if (g_trace_store == NULL) {
- gpr_log(GPR_ERROR, "Census trace store is not initialized.");
- return NULL;
- }
- return (census_trace_obj *)census_ht_find(g_trace_store,
- op_id_as_key(&op_id));
-}
-
-const char *census_get_trace_method_name(const census_trace_obj *trace) {
- return trace->method;
-}
-
-static census_trace_annotation *dup_annotation_chain(
- census_trace_annotation *from) {
- census_trace_annotation *ret = NULL;
- census_trace_annotation **to = &ret;
- for (; from != NULL; from = from->next) {
- *to = gpr_malloc(sizeof(census_trace_annotation));
- memcpy(*to, from, sizeof(census_trace_annotation));
- to = &(*to)->next;
- }
- return ret;
-}
-
-static census_trace_obj *trace_obj_dup(census_trace_obj *from) {
- census_trace_obj *to = NULL;
- GPR_ASSERT(from != NULL);
- to = gpr_malloc(sizeof(census_trace_obj));
- to->id = from->id;
- to->ts = from->ts;
- to->rpc_stats = from->rpc_stats;
- to->method = gpr_strdup(from->method);
- to->annotations = dup_annotation_chain(from->annotations);
- return to;
-}
-
-census_trace_obj **census_get_active_ops(int *num_active_ops) {
- census_trace_obj **ret = NULL;
- gpr_mu_lock(&g_mu);
- if (g_trace_store != NULL) {
- size_t n = 0;
- census_ht_kv *all_kvs = census_ht_get_all_elements(g_trace_store, &n);
- *num_active_ops = (int)n;
- if (n != 0) {
- size_t i = 0;
- ret = gpr_malloc(sizeof(census_trace_obj *) * n);
- for (i = 0; i < n; i++) {
- ret[i] = trace_obj_dup((census_trace_obj *)all_kvs[i].v);
- }
- }
- gpr_free(all_kvs);
- }
- gpr_mu_unlock(&g_mu);
- return ret;
-}
diff --git a/src/core/ext/census/census_tracing.h b/src/core/ext/census/census_tracing.h
deleted file mode 100644
index ccb767fcd0..0000000000
--- a/src/core/ext/census/census_tracing.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
-#define GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H
-
-#include <grpc/support/time.h>
-#include "src/core/ext/census/census_rpc_stats.h"
-
-/* WARNING: The data structures and APIs provided by this file are for the gRPC
-   library's internal use ONLY. They might be changed in backward-incompatible
- ways and are not subject to any deprecation policy.
- They are not recommended for external use.
- */
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Struct for a trace annotation. */
-typedef struct census_trace_annotation {
- gpr_timespec ts; /* timestamp of the annotation */
- char txt[CENSUS_MAX_ANNOTATION_LENGTH + 1]; /* actual txt annotation */
- struct census_trace_annotation *next;
-} census_trace_annotation;
-
-typedef struct census_trace_obj {
- census_op_id id;
- gpr_timespec ts;
- census_rpc_stats rpc_stats;
- char *method;
- census_trace_annotation *annotations;
-} census_trace_obj;
-
-/* Deletes trace object. */
-void census_trace_obj_destroy(census_trace_obj *obj);
-
-/* Initializes trace store. This function is thread safe. */
-void census_tracing_init(void);
-
-/* Shuts down the trace store. This function is thread safe. */
-void census_tracing_shutdown(void);
-
-/* Gets trace obj corresponding to the input op_id. Returns NULL if trace store
-   is not initialized or the trace obj is not found. Requires the trace store
-   to be locked before calling this function. */
-census_trace_obj *census_get_trace_obj_locked(census_op_id op_id);
-
-/* The following two functions acquire and release the trace store global lock.
- They are for census internal use only. */
-void census_internal_lock_trace_store(void);
-void census_internal_unlock_trace_store(void);
-
-/* Gets method name associated with the input trace object. */
-const char *census_get_trace_method_name(const census_trace_obj *trace);
-
-/* Returns an array of pointers to trace objects of currently active operations
-   and fills in the number of active operations. Returns NULL if there are no active
- operations.
- Caller owns the returned objects. */
-census_trace_obj **census_get_active_ops(int *num_active_ops);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_TRACING_H */
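A sketch of consuming census_get_active_ops() under the ownership rule documented above. Freeing the returned array itself with gpr_free is an assumption based on the removed implementation.

/* Illustrative only: snapshot active traced operations and release the copies. */
static void dump_active_ops(void) {
  int n = 0;
  census_trace_obj **ops = census_get_active_ops(&n);
  if (ops == NULL) return; /* no active operations */
  for (int i = 0; i < n; i++) {
    gpr_log(GPR_DEBUG, "active op method=%s", ops[i]->method);
    census_trace_obj_destroy(ops[i]); /* caller owns each returned object */
  }
  gpr_free(ops); /* assumption: caller also releases the returned array */
}
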
diff --git a/src/core/ext/census/context.c b/src/core/ext/census/context.c
deleted file mode 100644
index 1019b287d7..0000000000
--- a/src/core/ext/census/context.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-#include <string.h>
-#include "src/core/lib/support/string.h"
-
-// Functions in this file support the public context API, including
-// encoding/decoding as part of context propagation across RPC's. The overall
-// requirements (in approximate priority order) for the
-// context representation:
-// 1. Efficient conversion to/from wire format
-// 2. Minimal bytes used on-wire
-// 3. Efficient context creation
-// 4. Efficient lookup of tag value for a key
-// 5. Efficient iteration over tags
-// 6. Minimal memory footprint
-//
-// Notes on tradeoffs/decisions:
-// * tag includes 1 byte length of key, as well as nil-terminating byte. These
-// are to aid in efficient parsing and the ability to directly return key
-// strings. This is more important than saving a single byte/tag on the wire.
-// * The wire encoding uses only single byte values. This eliminates the need
-// to handle endian-ness conversions. It also means there is a hard upper
-// limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
-// * Keep all tag information (keys/values/flags) in a single memory buffer,
-// that can be directly copied to the wire.
-
-// min and max valid chars in tag keys and values. All printable ASCII is OK.
-#define MIN_VALID_TAG_CHAR 32 // ' '
-#define MAX_VALID_TAG_CHAR 126 // '~'
-
-// Structure representing a set of tags. Essentially a count of number of tags
-// present, and pointer to a chunk of memory that contains the per-tag details.
-struct tag_set {
- int ntags; // number of tags.
- int ntags_alloc; // ntags + number of deleted tags (total number of tags
- // in all of kvm). This will always be == ntags, except during the process
- // of building a new tag set.
- size_t kvm_size; // number of bytes allocated for key/value storage.
- size_t kvm_used; // number of bytes of used key/value memory
- char *kvm; // key/value memory. Consists of repeated entries of:
- // Offset Size Description
- // 0 1 Key length, including trailing 0. (K)
- // 1 1 Value length, including trailing 0 (V)
- // 2 1 Flags
- // 3 K Key bytes
- // 3 + K V Value bytes
- //
- // We refer to the first 3 entries as the 'tag header'. If extra values are
- // introduced in the header, you will need to modify the TAG_HEADER_SIZE
- // constant, the raw_tag structure (and everything that uses it) and the
- // encode/decode functions appropriately.
-};
-
-// Number of bytes in tag header.
-#define TAG_HEADER_SIZE 3 // key length (1) + value length (1) + flags (1)
-// Offsets to tag header entries.
-#define KEY_LEN_OFFSET 0
-#define VALUE_LEN_OFFSET 1
-#define FLAG_OFFSET 2
-
-// raw_tag represents the raw-storage form of a tag in the kvm of a tag_set.
-struct raw_tag {
- uint8_t key_len;
- uint8_t value_len;
- uint8_t flags;
- char *key;
- char *value;
-};
-
-// Use a reserved flag bit for indication of deleted tag.
-#define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
-#define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
-
-// Primary representation of a context. Composed of 2 underlying tag_set
-// structs, one each for propagated and local (non-propagated) tags. This is
-// to efficiently support tag encoding/decoding.
-// TODO(aveitch): need to add tracing id's/structure.
-struct census_context {
- struct tag_set tags[2];
- census_context_status status;
-};
-
-// Indices into the tags member of census_context
-#define PROPAGATED_TAGS 0
-#define LOCAL_TAGS 1
-
-// Validate (check all characters are in range and size is less than limit) a
-// key or value string. Returns 0 if the string is invalid, or the length
-// (including terminator) if valid.
-static size_t validate_tag(const char *kv) {
- size_t len = 1;
- char ch;
- while ((ch = *kv++) != 0) {
- if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
- return 0;
- }
- len++;
- }
- if (len > CENSUS_MAX_TAG_KV_LEN) {
- return 0;
- }
- return len;
-}
-
-// Extract a raw tag given a pointer (raw) to the tag header. Allow for some
-// extra bytes in the tag header (see encode/decode functions for usage: this
-// allows for future expansion of the tag header).
-static char *decode_tag(struct raw_tag *tag, char *header, int offset) {
- tag->key_len = (uint8_t)(*header++);
- tag->value_len = (uint8_t)(*header++);
- tag->flags = (uint8_t)(*header++);
- header += offset;
- tag->key = header;
- header += tag->key_len;
- tag->value = header;
- return header + tag->value_len;
-}
-
-// Make a copy (in 'to') of an existing tag_set.
-static void tag_set_copy(struct tag_set *to, const struct tag_set *from) {
- memcpy(to, from, sizeof(struct tag_set));
- to->kvm = gpr_malloc(to->kvm_size);
- memcpy(to->kvm, from->kvm, from->kvm_used);
-}
-
-// Delete a tag from a tag_set, if it exists (returns true if it did).
-static bool tag_set_delete_tag(struct tag_set *tags, const char *key,
- size_t key_len) {
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags_alloc; i++) {
- uint8_t *flags = (uint8_t *)(kvp + FLAG_OFFSET);
- struct raw_tag tag;
- kvp = decode_tag(&tag, kvp, 0);
- if (CENSUS_TAG_IS_DELETED(tag.flags)) continue;
- if ((key_len == tag.key_len) && (memcmp(key, tag.key, key_len) == 0)) {
- *flags |= CENSUS_TAG_DELETED;
- tags->ntags--;
- return true;
- }
- }
- return false;
-}
-
-// Delete a tag from a context, return true if it existed.
-static bool context_delete_tag(census_context *context, const census_tag *tag,
- size_t key_len) {
- return (
- tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
- tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
-}
-
-// Add a tag to a tag_set. Return true on success, false if the tag could
-// not be added because of constraints on tag set size. This function should
-// not be called if the tag may already exist (in a non-deleted state) in
-// the tag_set, as that would result in two tags with the same key.
-static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
- size_t key_len, size_t value_len) {
- if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
- return false;
- }
- const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
- if (tags->kvm_used + tag_size > tags->kvm_size) {
- // allocate new memory if needed
- tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
- char *new_kvm = gpr_malloc(tags->kvm_size);
- if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used);
- gpr_free(tags->kvm);
- tags->kvm = new_kvm;
- }
- char *kvp = tags->kvm + tags->kvm_used;
- *kvp++ = (char)key_len;
- *kvp++ = (char)value_len;
- // ensure reserved flags are not used.
- *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
- memcpy(kvp, tag->key, key_len);
- kvp += key_len;
- memcpy(kvp, tag->value, value_len);
- tags->kvm_used += tag_size;
- tags->ntags++;
- tags->ntags_alloc++;
- return true;
-}
-
-// Add/modify/delete a tag to/in a context. Caller must validate that tag key
-// etc. are valid.
-static void context_modify_tag(census_context *context, const census_tag *tag,
- size_t key_len, size_t value_len) {
- // First delete the tag if it is already present.
- bool deleted = context_delete_tag(context, tag, key_len);
- bool added = false;
- if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
- added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
- value_len);
- } else {
- added =
- tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
- }
-
- if (deleted) {
- context->status.n_modified_tags++;
- } else {
- if (added) {
- context->status.n_added_tags++;
- } else {
- context->status.n_ignored_tags++;
- }
- }
-}
-
-// Remove memory used for deleted tags from a tag set. Basic algorithm:
-// 1) Walk through tag set to find first deleted tag. Record where it is.
-// 2) Find the next not-deleted tag. Copy all of kvm from there to the end
-// "over" the deleted tags
-// 3) repeat #1 and #2 until we have seen all tags
-// 4) if we are still looking for a not-deleted tag, then all the end portion
-// of the kvm is deleted. Just reduce the used amount of memory by the
-// appropriate amount.
-static void tag_set_flatten(struct tag_set *tags) {
- if (tags->ntags == tags->ntags_alloc) return;
- bool found_deleted = false; // found a deleted tag.
- char *kvp = tags->kvm;
- char *dbase = NULL; // record location of deleted tag
- for (int i = 0; i < tags->ntags_alloc; i++) {
- struct raw_tag tag;
- char *next_kvp = decode_tag(&tag, kvp, 0);
- if (found_deleted) {
- if (!CENSUS_TAG_IS_DELETED(tag.flags)) {
- ptrdiff_t reduce = kvp - dbase; // #bytes in deleted tags
- GPR_ASSERT(reduce > 0);
- ptrdiff_t copy_size = tags->kvm + tags->kvm_used - kvp;
- GPR_ASSERT(copy_size > 0);
- memmove(dbase, kvp, (size_t)copy_size);
- tags->kvm_used -= (size_t)reduce;
- next_kvp -= reduce;
- found_deleted = false;
- }
- } else {
- if (CENSUS_TAG_IS_DELETED(tag.flags)) {
- dbase = kvp;
- found_deleted = true;
- }
- }
- kvp = next_kvp;
- }
- if (found_deleted) {
- GPR_ASSERT(dbase > tags->kvm);
- tags->kvm_used = (size_t)(dbase - tags->kvm);
- }
- tags->ntags_alloc = tags->ntags;
-}
-
-census_context *census_context_create(const census_context *base,
- const census_tag *tags, int ntags,
- census_context_status const **status) {
- census_context *context = gpr_malloc(sizeof(census_context));
-  // If we are given a base, copy it into our new tag set. Otherwise, zero/NULL
-  // everything.
- if (base == NULL) {
- memset(context, 0, sizeof(census_context));
- } else {
- tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
- tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
- memset(&context->status, 0, sizeof(context->status));
- }
- // Walk over the additional tags and, for those that aren't invalid, modify
- // the context to add/replace/delete as required.
- for (int i = 0; i < ntags; i++) {
- const census_tag *tag = &tags[i];
- size_t key_len = validate_tag(tag->key);
- // ignore the tag if it is invalid or too short.
- if (key_len <= 1) {
- context->status.n_invalid_tags++;
- } else {
- if (tag->value != NULL) {
- size_t value_len = validate_tag(tag->value);
- if (value_len != 0) {
- context_modify_tag(context, tag, key_len, value_len);
- } else {
- context->status.n_invalid_tags++;
- }
- } else {
- if (context_delete_tag(context, tag, key_len)) {
- context->status.n_deleted_tags++;
- }
- }
- }
- }
- // Remove any deleted tags, update status if needed, and return.
- tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
- tag_set_flatten(&context->tags[LOCAL_TAGS]);
- context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
- context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
- if (status) {
- *status = &context->status;
- }
- return context;
-}
-
-const census_context_status *census_context_get_status(
- const census_context *context) {
- return &context->status;
-}
-
-void census_context_destroy(census_context *context) {
- gpr_free(context->tags[PROPAGATED_TAGS].kvm);
- gpr_free(context->tags[LOCAL_TAGS].kvm);
- gpr_free(context);
-}
-
-void census_context_initialize_iterator(const census_context *context,
- census_context_iterator *iterator) {
- iterator->context = context;
- iterator->index = 0;
- if (context->tags[PROPAGATED_TAGS].ntags != 0) {
- iterator->base = PROPAGATED_TAGS;
- iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
- } else if (context->tags[LOCAL_TAGS].ntags != 0) {
- iterator->base = LOCAL_TAGS;
- iterator->kvm = context->tags[LOCAL_TAGS].kvm;
- } else {
- iterator->base = -1;
- }
-}
-
-int census_context_next_tag(census_context_iterator *iterator,
- census_tag *tag) {
- if (iterator->base < 0) {
- return 0;
- }
- struct raw_tag raw;
- iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
- tag->key = raw.key;
- tag->value = raw.value;
- tag->flags = raw.flags;
- if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
- do {
- if (iterator->base == LOCAL_TAGS) {
- iterator->base = -1;
- return 1;
- }
- } while (iterator->context->tags[++iterator->base].ntags == 0);
- iterator->index = 0;
- iterator->kvm = iterator->context->tags[iterator->base].kvm;
- }
- return 1;
-}
-
-// Find a tag in a tag_set by key. Return true if found, false otherwise.
-static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
- size_t key_len, census_tag *tag) {
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags; i++) {
- struct raw_tag raw;
- kvp = decode_tag(&raw, kvp, 0);
- if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
- tag->key = raw.key;
- tag->value = raw.value;
- tag->flags = raw.flags;
- return true;
- }
- }
- return false;
-}
-
-int census_context_get_tag(const census_context *context, const char *key,
- census_tag *tag) {
- size_t key_len = strlen(key) + 1;
- if (key_len == 1) {
- return 0;
- }
- if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
- tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
- return 1;
- }
- return 0;
-}
-
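A minimal sketch of how the lookup and iterator API above is consumed by a
caller (the helper name print_all_tags is hypothetical; only the
census_context_* calls and the census_tag fields come from the code above):

#include <stdio.h>
#include <grpc/census.h>

/* Hypothetical helper: print every tag in a context, propagated tags first,
   then local-only tags, in the order the iterator yields them. */
static void print_all_tags(const census_context *context) {
  census_context_iterator it;
  census_tag tag;
  census_context_initialize_iterator(context, &it);
  /* census_context_next_tag() returns 0 once every tag has been visited. */
  while (census_context_next_tag(&it, &tag)) {
    printf("tag %s=%s (flags=0x%x)\n", tag.key, tag.value, (unsigned)tag.flags);
  }
}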
-// Context encoding and decoding functions.
-//
-// Wire format for tag_set's on the wire:
-//
-// First, a tag set header:
-//
-// offset bytes description
-// 0 1 version number
-// 1 1 number of bytes in this header. This allows for future
-// expansion.
-// 2 1 number of bytes in each tag header.
-// 3 1 ntags value from tag set.
-//
-// This is followed by the key/value memory from struct tag_set.
-
-#define ENCODED_VERSION 0 // Version number
-#define ENCODED_HEADER_SIZE 4 // size of tag set header
-
-// Encode a tag set. Returns 0 if buffer is too small.
-static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
- size_t buf_size) {
- if (buf_size < ENCODED_HEADER_SIZE + tags->kvm_used) {
- return 0;
- }
- buf_size -= ENCODED_HEADER_SIZE;
- *buffer++ = (char)ENCODED_VERSION;
- *buffer++ = (char)ENCODED_HEADER_SIZE;
- *buffer++ = (char)TAG_HEADER_SIZE;
- *buffer++ = (char)tags->ntags;
- if (tags->ntags == 0) {
- return ENCODED_HEADER_SIZE;
- }
- memcpy(buffer, tags->kvm, tags->kvm_used);
- return ENCODED_HEADER_SIZE + tags->kvm_used;
-}
-
-size_t census_context_encode(const census_context *context, char *buffer,
- size_t buf_size) {
- return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
-}
-
-// Decode a tag set.
-static void tag_set_decode(struct tag_set *tags, const char *buffer,
- size_t size) {
- uint8_t version = (uint8_t)(*buffer++);
- uint8_t header_size = (uint8_t)(*buffer++);
- uint8_t tag_header_size = (uint8_t)(*buffer++);
- tags->ntags = tags->ntags_alloc = (int)(*buffer++);
- if (tags->ntags == 0) {
- tags->ntags_alloc = 0;
- tags->kvm_size = 0;
- tags->kvm_used = 0;
- tags->kvm = NULL;
- return;
- }
- if (header_size != ENCODED_HEADER_SIZE) {
- GPR_ASSERT(version != ENCODED_VERSION);
- GPR_ASSERT(ENCODED_HEADER_SIZE < header_size);
- buffer += (header_size - ENCODED_HEADER_SIZE);
- }
- tags->kvm_used = size - header_size;
- tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN;
- tags->kvm = gpr_malloc(tags->kvm_size);
- if (tag_header_size != TAG_HEADER_SIZE) {
- // something new in the tag information. I don't understand it, so
- // don't copy it over.
- GPR_ASSERT(version != ENCODED_VERSION);
- GPR_ASSERT(tag_header_size > TAG_HEADER_SIZE);
- char *kvp = tags->kvm;
- for (int i = 0; i < tags->ntags; i++) {
- memcpy(kvp, buffer, TAG_HEADER_SIZE);
- kvp += header_size;
- struct raw_tag raw;
- buffer =
- decode_tag(&raw, (char *)buffer, tag_header_size - TAG_HEADER_SIZE);
- memcpy(kvp, raw.key, (size_t)raw.key_len + raw.value_len);
- kvp += raw.key_len + raw.value_len;
- }
- } else {
- memcpy(tags->kvm, buffer, tags->kvm_used);
- }
-}
-
-census_context *census_context_decode(const char *buffer, size_t size) {
- census_context *context = gpr_malloc(sizeof(census_context));
- memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
- if (buffer == NULL) {
- memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set));
- } else {
- tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
- }
- memset(&context->status, 0, sizeof(context->status));
- context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
- return context;
-}
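The encode/decode pair above round-trips only the propagated tag set. A hedged
sketch of that round trip (the helper name and buffer size are illustrative,
not part of this change; the caller owns the decoded context and must
census_context_destroy() it):

#include <grpc/census.h>

/* Illustrative round trip: serialize the propagated tags of `src`, then
   rebuild a context from the encoded bytes as a receiving peer would. */
static census_context *roundtrip_propagated_tags(const census_context *src) {
  char buffer[1024]; /* must hold the 4-byte header plus the kvm bytes */
  size_t encoded = census_context_encode(src, buffer, sizeof(buffer));
  if (encoded == 0) return NULL; /* buffer too small */
  /* Decoding reconstructs only the propagated tag set; local tags and the
     status counters start out empty. */
  return census_context_decode(buffer, encoded);
}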
diff --git a/src/core/ext/census/gen/README.md b/src/core/ext/census/gen/README.md
deleted file mode 100644
index d4612bc7c8..0000000000
--- a/src/core/ext/census/gen/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Files generated for use by the Census stats and trace recording subsystem.
-
-# Files
-* census.pb.{h,c} - Generated from src/core/ext/census/census.proto, using the
- script `tools/codegen/core/gen_nano_proto.sh src/proto/census/census.proto
- $PWD/src/core/ext/census/gen src/core/ext/census/gen`
-* trace_context.pb.{h,c} - Generated from
- src/core/ext/census/trace_context.proto, using the script
- `tools/codegen/core/gen_nano_proto.sh src/proto/census/trace_context.proto
- $PWD/src/core/ext/census/gen src/core/ext/census/gen`
diff --git a/src/core/ext/census/gen/census.pb.c b/src/core/ext/census/gen/census.pb.c
deleted file mode 100644
index 88efa73661..0000000000
--- a/src/core/ext/census/gen/census.pb.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.3.5-dev */
-
-#include "src/core/ext/census/gen/census.pb.h"
-
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-
-
-const pb_field_t google_census_Duration_fields[3] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Duration, seconds, seconds, 0),
- PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Duration, nanos, seconds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Timestamp_fields[3] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Timestamp, seconds, seconds, 0),
- PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Timestamp, nanos, seconds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Resource_fields[4] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Resource, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Resource, description, name, 0),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Resource, unit, description, &google_census_Resource_MeasurementUnit_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Resource_MeasurementUnit_fields[4] = {
- PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, google_census_Resource_MeasurementUnit, prefix, prefix, 0),
- PB_FIELD( 2, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, numerator, prefix, 0),
- PB_FIELD( 3, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, denominator, numerator, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_fields[4] = {
- PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, google_census_AggregationDescriptor, type, type, 0),
- PB_ONEOF_FIELD(options, 2, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, bucket_boundaries, type, &google_census_AggregationDescriptor_BucketBoundaries_fields),
- PB_ONEOF_FIELD(options, 3, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, interval_boundaries, type, &google_census_AggregationDescriptor_IntervalBoundaries_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2] = {
- PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_BucketBoundaries, bounds, bounds, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2] = {
- PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_IntervalBoundaries, window_size, window_size, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Distribution_fields[5] = {
- PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Distribution, count, count, 0),
- PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution, mean, count, 0),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Distribution, range, mean, &google_census_Distribution_Range_fields),
- PB_FIELD( 4, INT64 , REPEATED, CALLBACK, OTHER, google_census_Distribution, bucket_count, range, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Distribution_Range_fields[3] = {
- PB_FIELD( 1, DOUBLE , OPTIONAL, STATIC , FIRST, google_census_Distribution_Range, min, min, 0),
- PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution_Range, max, min, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_IntervalStats_fields[2] = {
- PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_IntervalStats, window, window, &google_census_IntervalStats_Window_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_IntervalStats_Window_fields[4] = {
- PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_census_IntervalStats_Window, window_size, window_size, &google_census_Duration_fields),
- PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, count, window_size, 0),
- PB_FIELD( 3, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, mean, count, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Tag_fields[3] = {
- PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, google_census_Tag, key, key, 0),
- PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, google_census_Tag, value, key, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_View_fields[6] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_View, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, description, name, 0),
- PB_FIELD( 3, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, resource_name, description, 0),
- PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_View, aggregation, resource_name, &google_census_AggregationDescriptor_fields),
- PB_FIELD( 5, STRING , REPEATED, CALLBACK, OTHER, google_census_View, tag_key, aggregation, 0),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Aggregation_fields[7] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Aggregation, name, name, 0),
- PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Aggregation, description, name, 0),
- PB_ONEOF_FIELD(data, 3, UINT64 , ONEOF, STATIC , OTHER, google_census_Aggregation, count, description, 0),
- PB_ONEOF_FIELD(data, 4, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, distribution, description, &google_census_Distribution_fields),
- PB_ONEOF_FIELD(data, 5, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, interval_stats, description, &google_census_IntervalStats_fields),
- PB_FIELD( 6, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Aggregation, tag, data.interval_stats, &google_census_Tag_fields),
- PB_LAST_FIELD
-};
-
-const pb_field_t google_census_Metric_fields[5] = {
- PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Metric, view_name, view_name, 0),
- PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric, aggregation, view_name, &google_census_Aggregation_fields),
- PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, start, aggregation, &google_census_Timestamp_fields),
- PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, end, start, &google_census_Timestamp_fields),
- PB_LAST_FIELD
-};
-
-
-/* Check that field information fits in pb_field_t */
-#if !defined(PB_FIELD_32BIT)
-/* If you get an error here, it means that you need to define PB_FIELD_32BIT
- * compile-time option. You can do that in pb.h or on compiler command line.
- *
- * The reason you need to do this is that some of your messages contain tag
- * numbers or field sizes that are larger than what can fit in 8 or 16 bit
- * field descriptors.
- */
-PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Aggregation, tag) < 65536 && pb_membersize(google_census_Metric, aggregation) < 65536 && pb_membersize(google_census_Metric, start) < 65536 && pb_membersize(google_census_Metric, end) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
-#endif
-
-#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
-/* If you get an error here, it means that you need to define PB_FIELD_16BIT
- * compile-time option. You can do that in pb.h or on compiler command line.
- *
- * The reason you need to do this is that some of your messages contain tag
- * numbers or field sizes that are larger than what can fit in the default
- * 8 bit descriptors.
- */
-PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Aggregation, tag) < 256 && pb_membersize(google_census_Metric, aggregation) < 256 && pb_membersize(google_census_Metric, start) < 256 && pb_membersize(google_census_Metric, end) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric)
-#endif
-
-
-/* On some platforms (such as AVR), double is really float.
- * These are not directly supported by nanopb, but see example_avr_double.
- * To get rid of this error, remove any double fields from your .proto.
- */
-PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
-
diff --git a/src/core/ext/census/gen/census.pb.h b/src/core/ext/census/gen/census.pb.h
deleted file mode 100644
index 5f28335664..0000000000
--- a/src/core/ext/census/gen/census.pb.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb header */
-/* Generated by nanopb-0.3.5-dev */
-
-#ifndef GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
-#define GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H
-#include "third_party/nanopb/pb.h"
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Enum definitions */
-typedef enum _google_census_Resource_BasicUnit {
- google_census_Resource_BasicUnit_UNKNOWN = 0,
- google_census_Resource_BasicUnit_BITS = 1,
- google_census_Resource_BasicUnit_BYTES = 2,
- google_census_Resource_BasicUnit_SECS = 3,
- google_census_Resource_BasicUnit_CORES = 4,
- google_census_Resource_BasicUnit_MAX_UNITS = 5
-} google_census_Resource_BasicUnit;
-
-typedef enum _google_census_AggregationDescriptor_AggregationType {
- google_census_AggregationDescriptor_AggregationType_UNKNOWN = 0,
- google_census_AggregationDescriptor_AggregationType_COUNT = 1,
- google_census_AggregationDescriptor_AggregationType_DISTRIBUTION = 2,
- google_census_AggregationDescriptor_AggregationType_INTERVAL = 3
-} google_census_AggregationDescriptor_AggregationType;
-
-/* Struct definitions */
-typedef struct _google_census_AggregationDescriptor_BucketBoundaries {
- pb_callback_t bounds;
-} google_census_AggregationDescriptor_BucketBoundaries;
-
-typedef struct _google_census_AggregationDescriptor_IntervalBoundaries {
- pb_callback_t window_size;
-} google_census_AggregationDescriptor_IntervalBoundaries;
-
-typedef struct _google_census_IntervalStats {
- pb_callback_t window;
-} google_census_IntervalStats;
-
-typedef struct _google_census_AggregationDescriptor {
- bool has_type;
- google_census_AggregationDescriptor_AggregationType type;
- pb_size_t which_options;
- union {
- google_census_AggregationDescriptor_BucketBoundaries bucket_boundaries;
- google_census_AggregationDescriptor_IntervalBoundaries interval_boundaries;
- } options;
-} google_census_AggregationDescriptor;
-
-typedef struct _google_census_Distribution_Range {
- bool has_min;
- double min;
- bool has_max;
- double max;
-} google_census_Distribution_Range;
-
-typedef struct _google_census_Duration {
- bool has_seconds;
- int64_t seconds;
- bool has_nanos;
- int32_t nanos;
-} google_census_Duration;
-
-typedef struct _google_census_Resource_MeasurementUnit {
- bool has_prefix;
- int32_t prefix;
- pb_callback_t numerator;
- pb_callback_t denominator;
-} google_census_Resource_MeasurementUnit;
-
-typedef struct _google_census_Tag {
- bool has_key;
- char key[255];
- bool has_value;
- char value[255];
-} google_census_Tag;
-
-typedef struct _google_census_Timestamp {
- bool has_seconds;
- int64_t seconds;
- bool has_nanos;
- int32_t nanos;
-} google_census_Timestamp;
-
-typedef struct _google_census_Distribution {
- bool has_count;
- int64_t count;
- bool has_mean;
- double mean;
- bool has_range;
- google_census_Distribution_Range range;
- pb_callback_t bucket_count;
-} google_census_Distribution;
-
-typedef struct _google_census_IntervalStats_Window {
- bool has_window_size;
- google_census_Duration window_size;
- bool has_count;
- int64_t count;
- bool has_mean;
- double mean;
-} google_census_IntervalStats_Window;
-
-typedef struct _google_census_Metric {
- pb_callback_t view_name;
- pb_callback_t aggregation;
- bool has_start;
- google_census_Timestamp start;
- bool has_end;
- google_census_Timestamp end;
-} google_census_Metric;
-
-typedef struct _google_census_Resource {
- pb_callback_t name;
- pb_callback_t description;
- bool has_unit;
- google_census_Resource_MeasurementUnit unit;
-} google_census_Resource;
-
-typedef struct _google_census_View {
- pb_callback_t name;
- pb_callback_t description;
- pb_callback_t resource_name;
- bool has_aggregation;
- google_census_AggregationDescriptor aggregation;
- pb_callback_t tag_key;
-} google_census_View;
-
-typedef struct _google_census_Aggregation {
- pb_callback_t name;
- pb_callback_t description;
- pb_size_t which_data;
- union {
- uint64_t count;
- google_census_Distribution distribution;
- google_census_IntervalStats interval_stats;
- } data;
- pb_callback_t tag;
-} google_census_Aggregation;
-
-/* Default values for struct fields */
-
-/* Initializer values for message structs */
-#define google_census_Duration_init_default {false, 0, false, 0}
-#define google_census_Timestamp_init_default {false, 0, false, 0}
-#define google_census_Resource_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_default}
-#define google_census_Resource_MeasurementUnit_init_default {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
-#define google_census_AggregationDescriptor_init_default {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_default}}
-#define google_census_AggregationDescriptor_BucketBoundaries_init_default {{{NULL}, NULL}}
-#define google_census_AggregationDescriptor_IntervalBoundaries_init_default {{{NULL}, NULL}}
-#define google_census_Distribution_init_default {false, 0, false, 0, false, google_census_Distribution_Range_init_default, {{NULL}, NULL}}
-#define google_census_Distribution_Range_init_default {false, 0, false, 0}
-#define google_census_IntervalStats_init_default {{{NULL}, NULL}}
-#define google_census_IntervalStats_Window_init_default {false, google_census_Duration_init_default, false, 0, false, 0}
-#define google_census_Tag_init_default {false, "", false, ""}
-#define google_census_View_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_default, {{NULL}, NULL}}
-#define google_census_Aggregation_init_default {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
-#define google_census_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_default, false, google_census_Timestamp_init_default}
-#define google_census_Duration_init_zero {false, 0, false, 0}
-#define google_census_Timestamp_init_zero {false, 0, false, 0}
-#define google_census_Resource_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_zero}
-#define google_census_Resource_MeasurementUnit_init_zero {false, 0, {{NULL}, NULL}, {{NULL}, NULL}}
-#define google_census_AggregationDescriptor_init_zero {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_zero}}
-#define google_census_AggregationDescriptor_BucketBoundaries_init_zero {{{NULL}, NULL}}
-#define google_census_AggregationDescriptor_IntervalBoundaries_init_zero {{{NULL}, NULL}}
-#define google_census_Distribution_init_zero {false, 0, false, 0, false, google_census_Distribution_Range_init_zero, {{NULL}, NULL}}
-#define google_census_Distribution_Range_init_zero {false, 0, false, 0}
-#define google_census_IntervalStats_init_zero {{{NULL}, NULL}}
-#define google_census_IntervalStats_Window_init_zero {false, google_census_Duration_init_zero, false, 0, false, 0}
-#define google_census_Tag_init_zero {false, "", false, ""}
-#define google_census_View_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_zero, {{NULL}, NULL}}
-#define google_census_Aggregation_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}}
-#define google_census_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_zero, false, google_census_Timestamp_init_zero}
-
-/* Field tags (for use in manual encoding/decoding) */
-#define google_census_AggregationDescriptor_BucketBoundaries_bounds_tag 1
-#define google_census_AggregationDescriptor_IntervalBoundaries_window_size_tag 1
-#define google_census_IntervalStats_window_tag 1
-#define google_census_AggregationDescriptor_bucket_boundaries_tag 2
-
-#define google_census_AggregationDescriptor_interval_boundaries_tag 3
-#define google_census_AggregationDescriptor_type_tag 1
-#define google_census_Distribution_Range_min_tag 1
-#define google_census_Distribution_Range_max_tag 2
-#define google_census_Duration_seconds_tag 1
-#define google_census_Duration_nanos_tag 2
-#define google_census_Resource_MeasurementUnit_prefix_tag 1
-#define google_census_Resource_MeasurementUnit_numerator_tag 2
-#define google_census_Resource_MeasurementUnit_denominator_tag 3
-#define google_census_Tag_key_tag 1
-#define google_census_Tag_value_tag 2
-#define google_census_Timestamp_seconds_tag 1
-#define google_census_Timestamp_nanos_tag 2
-#define google_census_Distribution_count_tag 1
-#define google_census_Distribution_mean_tag 2
-#define google_census_Distribution_range_tag 3
-#define google_census_Distribution_bucket_count_tag 4
-#define google_census_IntervalStats_Window_window_size_tag 1
-#define google_census_IntervalStats_Window_count_tag 2
-#define google_census_IntervalStats_Window_mean_tag 3
-#define google_census_Metric_view_name_tag 1
-#define google_census_Metric_aggregation_tag 2
-#define google_census_Metric_start_tag 3
-#define google_census_Metric_end_tag 4
-#define google_census_Resource_name_tag 1
-#define google_census_Resource_description_tag 2
-#define google_census_Resource_unit_tag 3
-#define google_census_View_name_tag 1
-#define google_census_View_description_tag 2
-#define google_census_View_resource_name_tag 3
-#define google_census_View_aggregation_tag 4
-#define google_census_View_tag_key_tag 5
-#define google_census_Aggregation_count_tag 3
-
-#define google_census_Aggregation_distribution_tag 4
-
-#define google_census_Aggregation_interval_stats_tag 5
-#define google_census_Aggregation_name_tag 1
-#define google_census_Aggregation_description_tag 2
-#define google_census_Aggregation_tag_tag 6
-
-/* Struct field encoding specification for nanopb */
-extern const pb_field_t google_census_Duration_fields[3];
-extern const pb_field_t google_census_Timestamp_fields[3];
-extern const pb_field_t google_census_Resource_fields[4];
-extern const pb_field_t google_census_Resource_MeasurementUnit_fields[4];
-extern const pb_field_t google_census_AggregationDescriptor_fields[4];
-extern const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2];
-extern const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2];
-extern const pb_field_t google_census_Distribution_fields[5];
-extern const pb_field_t google_census_Distribution_Range_fields[3];
-extern const pb_field_t google_census_IntervalStats_fields[2];
-extern const pb_field_t google_census_IntervalStats_Window_fields[4];
-extern const pb_field_t google_census_Tag_fields[3];
-extern const pb_field_t google_census_View_fields[6];
-extern const pb_field_t google_census_Aggregation_fields[7];
-extern const pb_field_t google_census_Metric_fields[5];
-
-/* Maximum encoded size of messages (where known) */
-#define google_census_Duration_size 22
-#define google_census_Timestamp_size 22
-#define google_census_Distribution_Range_size 18
-#define google_census_IntervalStats_Window_size 44
-#define google_census_Tag_size 516
-
-/* Message IDs (where set with "msgid" option) */
-#ifdef PB_MSGID
-
-#define CENSUS_MESSAGES \
-
-
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H */
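These generated descriptors were consumed through the ordinary nanopb runtime.
As a hedged illustration (the helper name and field values are assumptions;
only google_census_Duration, its _init_zero macro, _fields array and _size
constant come from the header above), encoding one of the fixed-size messages
looks roughly like this:

#include "third_party/nanopb/pb_encode.h"
#include "src/core/ext/census/gen/census.pb.h"

/* Encode a 1.5 second Duration into buf; returns bytes written, 0 on error.
   A buf_len of google_census_Duration_size (22) is enough for any Duration. */
static size_t encode_duration_example(uint8_t *buf, size_t buf_len) {
  google_census_Duration duration = google_census_Duration_init_zero;
  duration.has_seconds = true;
  duration.seconds = 1;
  duration.has_nanos = true;
  duration.nanos = 500000000;
  pb_ostream_t stream = pb_ostream_from_buffer(buf, buf_len);
  if (!pb_encode(&stream, google_census_Duration_fields, &duration)) {
    return 0; /* PB_GET_ERROR(&stream) describes the failure */
  }
  return stream.bytes_written;
}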
diff --git a/src/core/ext/census/gen/trace_context.pb.c b/src/core/ext/census/gen/trace_context.pb.c
deleted file mode 100644
index b5c3d52a71..0000000000
--- a/src/core/ext/census/gen/trace_context.pb.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb constant definitions */
-/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
-
-#include "src/core/ext/census/gen/trace_context.pb.h"
-
-/* @@protoc_insertion_point(includes) */
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-
-
-const pb_field_t google_trace_TraceContext_fields[5] = {
- PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id_hi, trace_id_hi, 0),
- PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, trace_id_lo, trace_id_hi, 0),
- PB_FIELD( 3, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id_lo, 0),
- PB_FIELD( 4, FIXED32 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_options, span_id, 0),
- PB_LAST_FIELD
-};
-
-
-/* @@protoc_insertion_point(eof) */
diff --git a/src/core/ext/census/gen/trace_context.pb.h b/src/core/ext/census/gen/trace_context.pb.h
deleted file mode 100644
index 181925dc9a..0000000000
--- a/src/core/ext/census/gen/trace_context.pb.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/* Automatically generated nanopb header */
-/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */
-
-#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
-#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H
-#include "third_party/nanopb/pb.h"
-
-/* @@protoc_insertion_point(includes) */
-#if PB_PROTO_HEADER_VERSION != 30
-#error Regenerate this file with the current version of nanopb generator.
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Struct definitions */
-typedef struct _google_trace_TraceContext {
- bool has_trace_id_hi;
- uint64_t trace_id_hi;
- bool has_trace_id_lo;
- uint64_t trace_id_lo;
- bool has_span_id;
- uint64_t span_id;
- bool has_span_options;
- uint32_t span_options;
-/* @@protoc_insertion_point(struct:google_trace_TraceContext) */
-} google_trace_TraceContext;
-
-/* Default values for struct fields */
-
-/* Initializer values for message structs */
-#define google_trace_TraceContext_init_default {false, 0, false, 0, false, 0, false, 0}
-#define google_trace_TraceContext_init_zero {false, 0, false, 0, false, 0, false, 0}
-
-/* Field tags (for use in manual encoding/decoding) */
-#define google_trace_TraceContext_trace_id_hi_tag 1
-#define google_trace_TraceContext_trace_id_lo_tag 2
-#define google_trace_TraceContext_span_id_tag 3
-#define google_trace_TraceContext_span_options_tag 4
-
-/* Struct field encoding specification for nanopb */
-extern const pb_field_t google_trace_TraceContext_fields[5];
-
-/* Maximum encoded size of messages (where known) */
-#define google_trace_TraceContext_size 32
-
-/* Message IDs (where set with "msgid" option) */
-#ifdef PB_MSGID
-
-#define TRACE_CONTEXT_MESSAGES \
-
-
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-/* @@protoc_insertion_point(eof) */
-
-#endif /* GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H */
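Decoding is symmetric. A hedged sketch of parsing a received TraceContext with
nanopb's pb_decode (the helper name and error handling are illustrative; the
message type, _init_zero macro and _fields array come from the header above):

#include "third_party/nanopb/pb_decode.h"
#include "src/core/ext/census/gen/trace_context.pb.h"

/* Parse an encoded TraceContext (at most google_trace_TraceContext_size = 32
   bytes); returns true and fills *ctx on success. */
static bool decode_trace_context_example(const uint8_t *buf, size_t len,
                                         google_trace_TraceContext *ctx) {
  google_trace_TraceContext parsed = google_trace_TraceContext_init_zero;
  pb_istream_t stream = pb_istream_from_buffer(buf, len);
  if (!pb_decode(&stream, google_trace_TraceContext_fields, &parsed)) {
    return false; /* malformed input; *ctx is left untouched */
  }
  *ctx = parsed;
  return true;
}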
diff --git a/src/core/ext/census/grpc_context.c b/src/core/ext/census/grpc_context.cc
index 0bfba63a5e..34eafcab8e 100644
--- a/src/core/ext/census/grpc_context.c
+++ b/src/core/ext/census/grpc_context.cc
@@ -24,9 +24,6 @@
void grpc_census_call_set_context(grpc_call *call, census_context *context) {
GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
(call, context));
- if (census_enabled() == CENSUS_FEATURE_NONE) {
- return;
- }
if (context != NULL) {
grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, NULL);
}
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
deleted file mode 100644
index 13fe2e6b1c..0000000000
--- a/src/core/ext/census/grpc_filter.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/grpc_filter.h"
-
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/census.h>
-#include <grpc/slice.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-
-#include "src/core/ext/census/census_interface.h"
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/lib/channel/channel_stack.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/transport/static_metadata.h"
-
-typedef struct call_data {
- census_op_id op_id;
- census_context *ctxt;
- gpr_timespec start_ts;
- int error;
-
- /* recv callback */
- grpc_metadata_batch *recv_initial_metadata;
- grpc_closure *on_done_recv;
- grpc_closure finish_recv;
-} call_data;
-
-typedef struct channel_data { uint8_t unused; } channel_data;
-
-static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
- call_data *calld,
- channel_data *chand) {
- grpc_linked_mdelem *m;
- for (m = md->list.head; m != NULL; m = m->next) {
- if (grpc_slice_eq(GRPC_MDKEY(m->md), GRPC_MDSTR_PATH)) {
- /* Add method tag here */
- }
- }
-}
-
-static void client_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (op->send_initial_metadata) {
- extract_and_annotate_method_tag(
- op->payload->send_initial_metadata.send_initial_metadata, calld, chand);
- }
-}
-
-static void client_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- client_mutate_op(elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
-}
-
-static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr,
- grpc_error *error) {
- GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0);
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (error == GRPC_ERROR_NONE) {
- extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand);
- }
- calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error);
- GPR_TIMER_END("census-server:server_on_done_recv", 0);
-}
-
-static void server_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- if (op->recv_initial_metadata) {
- /* substitute our callback for the op callback */
- calld->recv_initial_metadata =
- op->payload->recv_initial_metadata.recv_initial_metadata;
- calld->on_done_recv =
- op->payload->recv_initial_metadata.recv_initial_metadata_ready;
- op->payload->recv_initial_metadata.recv_initial_metadata_ready =
- &calld->finish_recv;
- }
-}
-
-static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- /* TODO(ctiller): this code fails. I don't know why. I expect it's
- incomplete, and someone should look at it soon.
-
- call_data *calld = elem->call_data;
- GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
- server_mutate_op(elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
-}
-
-static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
- GPR_ASSERT(d != NULL);
- memset(d, 0, sizeof(*d));
- d->start_ts = args->start_time;
- return GRPC_ERROR_NONE;
-}
-
-static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *d = elem->call_data;
- GPR_ASSERT(d != NULL);
- /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
-}
-
-static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *d = elem->call_data;
- GPR_ASSERT(d != NULL);
- memset(d, 0, sizeof(*d));
- d->start_ts = args->start_time;
- /* TODO(hongyu): call census_tracing_start_op here. */
- GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
- grpc_schedule_on_exec_ctx);
- return GRPC_ERROR_NONE;
-}
-
-static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *d = elem->call_data;
- GPR_ASSERT(d != NULL);
- /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
-}
-
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
- GPR_ASSERT(chand != NULL);
- return GRPC_ERROR_NONE;
-}
-
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
- GPR_ASSERT(chand != NULL);
-}
-
-const grpc_channel_filter grpc_client_census_filter = {
- client_start_transport_op,
- grpc_channel_next_op,
- sizeof(call_data),
- client_init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- client_destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_call_next_get_peer,
- grpc_channel_next_get_info,
- "census-client"};
-
-const grpc_channel_filter grpc_server_census_filter = {
- server_start_transport_op,
- grpc_channel_next_op,
- sizeof(call_data),
- server_init_call_elem,
- grpc_call_stack_ignore_set_pollset_or_pollset_set,
- server_destroy_call_elem,
- sizeof(channel_data),
- init_channel_elem,
- destroy_channel_elem,
- grpc_call_next_get_peer,
- grpc_channel_next_get_info,
- "census-server"};
diff --git a/src/core/ext/census/grpc_plugin.c b/src/core/ext/census/grpc_plugin.c
deleted file mode 100644
index c0efe5afb8..0000000000
--- a/src/core/ext/census/grpc_plugin.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <limits.h>
-#include <string.h>
-
-#include <grpc/census.h>
-
-#include "src/core/ext/census/grpc_filter.h"
-#include "src/core/lib/channel/channel_stack_builder.h"
-#include "src/core/lib/surface/channel_init.h"
-
-static bool is_census_enabled(const grpc_channel_args *a) {
- size_t i;
- if (a == NULL) return 0;
- for (i = 0; i < a->num_args; i++) {
- if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) {
- return a->args[i].value.integer != 0 && census_enabled();
- }
- }
- return census_enabled() && !grpc_channel_args_want_minimal_stack(a);
-}
-
-static bool maybe_add_census_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
- const grpc_channel_args *args =
- grpc_channel_stack_builder_get_channel_arguments(builder);
- if (is_census_enabled(args)) {
- return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
- }
- return true;
-}
-
-void census_grpc_plugin_init(void) {
- /* Only initialize census if no one else has and some features are
- * available. */
- if (census_enabled() == CENSUS_FEATURE_NONE &&
- census_supported() != CENSUS_FEATURE_NONE) {
- if (census_initialize(census_supported())) { /* enable all features. */
- gpr_log(GPR_ERROR, "Could not initialize census.");
- }
- }
- grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
- maybe_add_census_filter,
- (void *)&grpc_client_census_filter);
- grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
- maybe_add_census_filter,
- (void *)&grpc_server_census_filter);
-}
-
-void census_grpc_plugin_shutdown(void) { census_shutdown(); }
diff --git a/src/core/ext/census/hash_table.c b/src/core/ext/census/hash_table.c
deleted file mode 100644
index 545b0857c7..0000000000
--- a/src/core/ext/census/hash_table.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/hash_table.h"
-
-#include <stddef.h>
-#include <stdio.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-
-#define CENSUS_HT_NUM_BUCKETS 1999
-
-/* A single hash table data entry */
-typedef struct ht_entry {
- census_ht_key key;
- void *data;
- struct ht_entry *next;
-} ht_entry;
-
-/* hash table bucket */
-typedef struct bucket {
- /* NULL if bucket is empty */
- ht_entry *next;
-  /* Index of the previous non-empty bucket; -1 if there is none. */
-  int32_t prev_non_empty_bucket;
-  /* Index of the next non-empty bucket; -1 if there is none. */
-  int32_t next_non_empty_bucket;
-} bucket;
-
-struct unresizable_hash_table {
- /* Number of entries in the table */
- size_t size;
- /* Number of buckets */
- uint32_t num_buckets;
- /* Array of buckets initialized at creation time. Memory consumption is
- 16 bytes per bucket on a 64-bit platform. */
- bucket *buckets;
- /* Index of the first non-empty bucket. -1 iff size == 0. */
- int32_t first_non_empty_bucket;
- /* Index of the last non_empty bucket. -1 iff size == 0. */
- int32_t last_non_empty_bucket;
- /* Immutable options of this hash table, initialized at creation time. */
- census_ht_option options;
-};
-
-typedef struct entry_locator {
- int32_t bucket_idx;
- int is_first_in_chain;
- int found;
- ht_entry *prev_entry;
-} entry_locator;
-
-/* Asserts if option is not valid. */
-void check_options(const census_ht_option *option) {
- GPR_ASSERT(option != NULL);
- GPR_ASSERT(option->num_buckets > 0);
- GPR_ASSERT(option->key_type == CENSUS_HT_UINT64 ||
- option->key_type == CENSUS_HT_POINTER);
- if (option->key_type == CENSUS_HT_UINT64) {
- GPR_ASSERT(option->hash == NULL);
- } else if (option->key_type == CENSUS_HT_POINTER) {
- GPR_ASSERT(option->hash != NULL);
- GPR_ASSERT(option->compare_keys != NULL);
- }
-}
-
-#define REMOVE_NEXT(options, ptr) \
- do { \
- ht_entry *tmp = (ptr)->next; \
- (ptr)->next = tmp->next; \
- delete_entry(options, tmp); \
- } while (0)
-
-static void delete_entry(const census_ht_option *opt, ht_entry *p) {
- if (opt->delete_data != NULL) {
- opt->delete_data(p->data);
- }
- if (opt->delete_key != NULL) {
- opt->delete_key(p->key.ptr);
- }
- gpr_free(p);
-}
-
-static uint64_t hash(const census_ht_option *opt, census_ht_key key) {
- return opt->key_type == CENSUS_HT_UINT64 ? key.val : opt->hash(key.ptr);
-}
-
-census_ht *census_ht_create(const census_ht_option *option) {
- int i;
- census_ht *ret = NULL;
- check_options(option);
- ret = (census_ht *)gpr_malloc(sizeof(census_ht));
- ret->size = 0;
- ret->num_buckets = option->num_buckets;
- ret->buckets = (bucket *)gpr_malloc(sizeof(bucket) * ret->num_buckets);
- ret->options = *option;
- /* initialize each bucket */
- for (i = 0; i < ret->options.num_buckets; i++) {
- ret->buckets[i].prev_non_empty_bucket = -1;
- ret->buckets[i].next_non_empty_bucket = -1;
- ret->buckets[i].next = NULL;
- }
- return ret;
-}
-
-static int32_t find_bucket_idx(const census_ht *ht, census_ht_key key) {
- return hash(&ht->options, key) % ht->num_buckets;
-}
-
-static int keys_match(const census_ht_option *opt, const ht_entry *p,
- const census_ht_key key) {
- GPR_ASSERT(opt->key_type == CENSUS_HT_UINT64 ||
- opt->key_type == CENSUS_HT_POINTER);
- if (opt->key_type == CENSUS_HT_UINT64) return p->key.val == key.val;
- return !opt->compare_keys((p->key).ptr, key.ptr);
-}
-
-static entry_locator ht_find(const census_ht *ht, census_ht_key key) {
- entry_locator loc = {0, 0, 0, NULL};
- int32_t idx = 0;
- ht_entry *ptr = NULL;
- GPR_ASSERT(ht != NULL);
- idx = find_bucket_idx(ht, key);
- ptr = ht->buckets[idx].next;
- if (ptr == NULL) {
- /* bucket is empty */
- return loc;
- }
- if (keys_match(&ht->options, ptr, key)) {
- loc.bucket_idx = idx;
- loc.is_first_in_chain = 1;
- loc.found = 1;
- return loc;
- } else {
- for (; ptr->next != NULL; ptr = ptr->next) {
- if (keys_match(&ht->options, ptr->next, key)) {
- loc.bucket_idx = idx;
- loc.is_first_in_chain = 0;
- loc.found = 1;
- loc.prev_entry = ptr;
- return loc;
- }
- }
- }
- /* Could not find the key */
- return loc;
-}
-
-void *census_ht_find(const census_ht *ht, census_ht_key key) {
- entry_locator loc = ht_find(ht, key);
- if (loc.found == 0) {
- return NULL;
- }
- return loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next->data
- : loc.prev_entry->next->data;
-}
-
-void census_ht_insert(census_ht *ht, census_ht_key key, void *data) {
- int32_t idx = find_bucket_idx(ht, key);
- ht_entry *ptr = NULL;
- entry_locator loc = ht_find(ht, key);
- if (loc.found) {
- /* Replace old value with new value. */
- ptr = loc.is_first_in_chain ? ht->buckets[loc.bucket_idx].next
- : loc.prev_entry->next;
- if (ht->options.delete_data != NULL) {
- ht->options.delete_data(ptr->data);
- }
- ptr->data = data;
- return;
- }
-
- /* first entry in the table. */
- if (ht->size == 0) {
- ht->buckets[idx].next_non_empty_bucket = -1;
- ht->buckets[idx].prev_non_empty_bucket = -1;
- ht->first_non_empty_bucket = idx;
- ht->last_non_empty_bucket = idx;
- } else if (ht->buckets[idx].next == NULL) {
- /* first entry in the bucket. */
- ht->buckets[ht->last_non_empty_bucket].next_non_empty_bucket = idx;
- ht->buckets[idx].prev_non_empty_bucket = ht->last_non_empty_bucket;
- ht->buckets[idx].next_non_empty_bucket = -1;
- ht->last_non_empty_bucket = idx;
- }
- ptr = (ht_entry *)gpr_malloc(sizeof(ht_entry));
- ptr->key = key;
- ptr->data = data;
- ptr->next = ht->buckets[idx].next;
- ht->buckets[idx].next = ptr;
- ht->size++;
-}
-
-void census_ht_erase(census_ht *ht, census_ht_key key) {
- entry_locator loc = ht_find(ht, key);
- if (loc.found == 0) {
- /* noop if not found */
- return;
- }
- ht->size--;
- if (loc.is_first_in_chain) {
- bucket *b = &ht->buckets[loc.bucket_idx];
- GPR_ASSERT(b->next != NULL);
- /* The only entry in the bucket */
- if (b->next->next == NULL) {
- int prev = b->prev_non_empty_bucket;
- int next = b->next_non_empty_bucket;
- if (prev != -1) {
- ht->buckets[prev].next_non_empty_bucket = next;
- } else {
- ht->first_non_empty_bucket = next;
- }
- if (next != -1) {
- ht->buckets[next].prev_non_empty_bucket = prev;
- } else {
- ht->last_non_empty_bucket = prev;
- }
- }
- REMOVE_NEXT(&ht->options, b);
- } else {
- GPR_ASSERT(loc.prev_entry->next != NULL);
- REMOVE_NEXT(&ht->options, loc.prev_entry);
- }
-}
-
-/* Returns NULL if input table is empty. */
-census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num) {
- census_ht_kv *ret = NULL;
- int i = 0;
- int32_t idx = -1;
- GPR_ASSERT(ht != NULL && num != NULL);
- *num = ht->size;
- if (*num == 0) {
- return NULL;
- }
-
- ret = (census_ht_kv *)gpr_malloc(sizeof(census_ht_kv) * ht->size);
- idx = ht->first_non_empty_bucket;
- while (idx >= 0) {
- ht_entry *ptr = ht->buckets[idx].next;
- for (; ptr != NULL; ptr = ptr->next) {
- ret[i].k = ptr->key;
- ret[i].v = ptr->data;
- i++;
- }
- idx = ht->buckets[idx].next_non_empty_bucket;
- }
- return ret;
-}
-
-static void ht_delete_entry_chain(const census_ht_option *options,
- ht_entry *first) {
- if (first == NULL) {
- return;
- }
- if (first->next != NULL) {
- ht_delete_entry_chain(options, first->next);
- }
- delete_entry(options, first);
-}
-
-void census_ht_destroy(census_ht *ht) {
- unsigned i;
- for (i = 0; i < ht->num_buckets; ++i) {
- ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
- }
- gpr_free(ht->buckets);
- gpr_free(ht);
-}
-
-size_t census_ht_get_size(const census_ht *ht) { return ht->size; }
diff --git a/src/core/ext/census/hash_table.h b/src/core/ext/census/hash_table.h
deleted file mode 100644
index 75770641c5..0000000000
--- a/src/core/ext/census/hash_table.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
-#define GRPC_CORE_EXT_CENSUS_HASH_TABLE_H
-
-#include <stddef.h>
-
-#include <grpc/support/port_platform.h>
-
-/* A chain based hash table with fixed number of buckets.
- You probably shouldn't use this code directly. It is implemented for the
- use case in the census trace store and stats store, where the number of
- entries in the table is on the order of several thousand, entries are added
- and removed from the table very frequently (~100k/s), and find() is called
- several times as often as insert() and erase(). The get_all_entries()
- operation is much less frequent (<1/s).
-
- Per bucket memory overhead is about (8 + sizeof(intptr_t)) bytes.
- Per entry memory overhead is about (8 + 2 * sizeof(intptr_t)) bytes.
-
- None of these functions are thread-safe. Synchronization will be provided by
- the upper layer (in the trace store and stats store).
-*/
-
-/* Opaque hash table struct */
-typedef struct unresizable_hash_table census_ht;
-
-/* Currently, the hash_table can take two types of keys. (uint64 for trace
- store and const char* for stats store). */
-typedef union {
- uint64_t val;
- void *ptr;
-} census_ht_key;
-
-typedef enum census_ht_key_type {
- CENSUS_HT_UINT64 = 0,
- CENSUS_HT_POINTER = 1
-} census_ht_key_type;
-
-typedef struct census_ht_option {
- /* Type of hash key */
- census_ht_key_type key_type;
- /* Desired number of buckets, preferably a prime number */
- int32_t num_buckets;
- /* Function to calculate a uint64 hash value of the key. Only takes effect if
- key_type is POINTER. */
- uint64_t (*hash)(const void *);
- /* Function to compare two keys, returns 0 iff equal. Only takes effect if
- key_type is POINTER */
- int (*compare_keys)(const void *k1, const void *k2);
- /* Value deleter. NULL if no specialized delete function is needed. */
- void (*delete_data)(void *);
- /* Key deleter. NULL if table does not own the key. (e.g. key is part of the
- value or key is not owned by the table.) */
- void (*delete_key)(void *);
-} census_ht_option;
-
-/* Creates a hash table with a fixed number of buckets according to the settings
- specified in the 'options' arg. Function pointers "hash" and "compare_keys"
- must be provided if key_type is POINTER. Asserts if creation fails. */
-census_ht *census_ht_create(const census_ht_option *options);
-
-/* Deletes hash table instance. Frees all dynamic memory owned by ht.*/
-void census_ht_destroy(census_ht *ht);
-
-/* Inserts the input key-val pair into hash_table. If an entry with the same key
- exists in the table, the corresponding value will be overwritten by the input
- val. */
-void census_ht_insert(census_ht *ht, census_ht_key key, void *val);
-
-/* Returns pointer to data, returns NULL if not found. */
-void *census_ht_find(const census_ht *ht, census_ht_key key);
-
-/* Erase hash table entry with input key. Noop if key is not found. */
-void census_ht_erase(census_ht *ht, census_ht_key key);
-
-typedef struct census_ht_kv {
- census_ht_key k;
- void *v;
-} census_ht_kv;
-
-/* Returns an array of all key-value pairs in the hash table. The order of the
- elements can be arbitrary. Sets 'num' to the size of the returned array.
- The caller owns the returned array. */
-census_ht_kv *census_ht_get_all_elements(const census_ht *ht, size_t *num);
-
-/* Returns number of elements kept. */
-size_t census_ht_get_size(const census_ht *ht);
-
-/* Functor applied on each key-value pair while iterating through entries in the
- table. The functor should not mutate data. */
-typedef void (*census_ht_itr_cb)(census_ht_key key, const void *val_ptr,
- void *state);
-
-/* Iterates through all key-value pairs in the hash_table. The callback function
- should not invalidate data entries. */
-uint64_t census_ht_for_all(const census_ht *ht, census_ht_itr_cb);
-
-#endif /* GRPC_CORE_EXT_CENSUS_HASH_TABLE_H */
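For reference, the census_ht API removed above was driven roughly as follows. This is a minimal sketch assuming a uint64-keyed table with no key/value deleters; the option initializer, bucket count, and the stored `value` variable are illustrative, not taken from the original sources.

    #include "src/core/ext/census/hash_table.h"
    #include <grpc/support/log.h>

    /* uint64 keys: no hash/compare callbacks needed, and the table owns
       neither keys nor values (both deleters are NULL). */
    static const census_ht_option opt = {CENSUS_HT_UINT64, 1999, NULL, NULL,
                                         NULL, NULL};

    static void census_ht_usage_sketch(void) {
      census_ht *ht = census_ht_create(&opt);
      static int value = 42; /* the table stores only the pointer */
      census_ht_key key;
      key.val = 10;
      census_ht_insert(ht, key, &value);
      GPR_ASSERT(census_ht_find(ht, key) == &value);
      census_ht_erase(ht, key); /* no-op if the key is absent */
      census_ht_destroy(ht);
    }

A pointer-keyed table would additionally supply the "hash" and "compare_keys" callbacks required for CENSUS_HT_POINTER.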
diff --git a/src/core/ext/census/initialize.c b/src/core/ext/census/initialize.c
deleted file mode 100644
index 165a1221d4..0000000000
--- a/src/core/ext/census/initialize.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-#include "src/core/ext/census/base_resources.h"
-#include "src/core/ext/census/resource.h"
-
-static int features_enabled = CENSUS_FEATURE_NONE;
-
-int census_initialize(int features) {
- if (features_enabled != CENSUS_FEATURE_NONE) {
- // Must have been a previous call to census_initialize; return error
- return -1;
- }
- features_enabled = features & CENSUS_FEATURE_ALL;
- if (features & CENSUS_FEATURE_STATS) {
- initialize_resources();
- define_base_resources();
- }
-
- return features_enabled;
-}
-
-void census_shutdown(void) {
- if (features_enabled & CENSUS_FEATURE_STATS) {
- shutdown_resources();
- }
- features_enabled = CENSUS_FEATURE_NONE;
-}
-
-int census_supported(void) {
- /* TODO(aveitch): improve this as we implement features... */
- return CENSUS_FEATURE_NONE;
-}
-
-int census_enabled(void) { return features_enabled; }
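The initialization entry points removed above were intended to be paired at process scope. A minimal lifecycle sketch, assuming the CENSUS_FEATURE_* constants from <grpc/census.h>:

    #include <grpc/census.h>

    void census_lifecycle_sketch(void) {
      /* Returns the mask of features actually enabled, or -1 if
         census_initialize() was already called. */
      int enabled = census_initialize(CENSUS_FEATURE_STATS);
      if (enabled < 0) {
        return; /* already initialized elsewhere */
      }
      /* ... record stats / traces ... */
      census_shutdown();
    }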
diff --git a/src/core/ext/census/intrusive_hash_map.c b/src/core/ext/census/intrusive_hash_map.c
deleted file mode 100644
index 7930486963..0000000000
--- a/src/core/ext/census/intrusive_hash_map.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/intrusive_hash_map.h"
-#include <string.h>
-
-extern bool hm_index_compare(const hm_index *A, const hm_index *B);
-
-/* Simple hashing function that takes lower 32 bits. */
-static __inline uint32_t chunked_vector_hasher(uint64_t key) {
- return (uint32_t)key;
-}
-
-/* Vector chunks are 1MiB divided by pointer size. */
-static const size_t VECTOR_CHUNK_SIZE = (1 << 20) / sizeof(void *);
-
-/* Helper functions which return buckets from the chunked vector. */
-static __inline void **get_mutable_bucket(const chunked_vector *buckets,
- uint32_t index) {
- if (index < VECTOR_CHUNK_SIZE) {
- return &buckets->first_[index];
- }
- size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
- return &buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
-}
-
-static __inline void *get_bucket(const chunked_vector *buckets,
- uint32_t index) {
- if (index < VECTOR_CHUNK_SIZE) {
- return buckets->first_[index];
- }
- size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE;
- return buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE];
-}
-
-/* Helper function. */
-static __inline size_t RestSize(const chunked_vector *vec) {
- return (vec->size_ <= VECTOR_CHUNK_SIZE)
- ? 0
- : (vec->size_ - VECTOR_CHUNK_SIZE - 1) / VECTOR_CHUNK_SIZE + 1;
-}
-
-/* Initialize chunked vector to size of 0. */
-static void chunked_vector_init(chunked_vector *vec) {
- vec->size_ = 0;
- vec->first_ = NULL;
- vec->rest_ = NULL;
-}
-
-/* Clear chunked vector and free all memory that has been allocated, then
- re-initialize the chunked vector. */
-static void chunked_vector_clear(chunked_vector *vec) {
- if (vec->first_ != NULL) {
- gpr_free(vec->first_);
- }
- if (vec->rest_ != NULL) {
- size_t rest_size = RestSize(vec);
- for (size_t i = 0; i < rest_size; ++i) {
- if (vec->rest_[i] != NULL) {
- gpr_free(vec->rest_[i]);
- }
- }
- gpr_free(vec->rest_);
- }
- chunked_vector_init(vec);
-}
-
-/* Clear chunked vector and then resize it to n entries. Allow the first 1MB to
- be read w/o an extra cache miss. The rest of the elements are stored in an
- array of arrays to avoid large mallocs. */
-static void chunked_vector_reset(chunked_vector *vec, size_t n) {
- chunked_vector_clear(vec);
- vec->size_ = n;
- if (n <= VECTOR_CHUNK_SIZE) {
- vec->first_ = (void **)gpr_malloc(sizeof(void *) * n);
- memset(vec->first_, 0, sizeof(void *) * n);
- } else {
- vec->first_ = (void **)gpr_malloc(sizeof(void *) * VECTOR_CHUNK_SIZE);
- memset(vec->first_, 0, sizeof(void *) * VECTOR_CHUNK_SIZE);
- size_t rest_size = RestSize(vec);
- vec->rest_ = (void ***)gpr_malloc(sizeof(void **) * rest_size);
- memset(vec->rest_, 0, sizeof(void **) * rest_size);
- int i = 0;
- n -= VECTOR_CHUNK_SIZE;
- while (n > 0) {
- size_t this_size = GPR_MIN(n, VECTOR_CHUNK_SIZE);
- vec->rest_[i] = (void **)gpr_malloc(sizeof(void *) * this_size);
- memset(vec->rest_[i], 0, sizeof(void *) * this_size);
- n -= this_size;
- ++i;
- }
- }
-}
-
-void intrusive_hash_map_init(intrusive_hash_map *hash_map,
- uint32_t initial_log2_table_size) {
- hash_map->log2_num_buckets = initial_log2_table_size;
- hash_map->num_items = 0;
- uint32_t num_buckets = (uint32_t)1 << hash_map->log2_num_buckets;
- hash_map->extend_threshold = num_buckets >> 1;
- chunked_vector_init(&hash_map->buckets);
- chunked_vector_reset(&hash_map->buckets, num_buckets);
- hash_map->hash_mask = num_buckets - 1;
-}
-
-bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map) {
- return hash_map->num_items == 0;
-}
-
-size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map) {
- return hash_map->num_items;
-}
-
-void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx) {
- idx->bucket_index = (uint32_t)hash_map->buckets.size_;
- GPR_ASSERT(idx->bucket_index <= UINT32_MAX);
- idx->item = NULL;
-}
-
-void intrusive_hash_map_next(const intrusive_hash_map *hash_map,
- hm_index *idx) {
- idx->item = idx->item->hash_link;
- while (idx->item == NULL) {
- idx->bucket_index++;
- if (idx->bucket_index >= hash_map->buckets.size_) {
- /* Reached end of table. */
- idx->item = NULL;
- return;
- }
- idx->item = (hm_item *)get_bucket(&hash_map->buckets, idx->bucket_index);
- }
-}
-
-void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
- hm_index *idx) {
- for (uint32_t i = 0; i < hash_map->buckets.size_; ++i) {
- if (get_bucket(&hash_map->buckets, i) != NULL) {
- idx->bucket_index = i;
- idx->item = (hm_item *)get_bucket(&hash_map->buckets, i);
- return;
- }
- }
- intrusive_hash_map_end(hash_map, idx);
-}
-
-hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
- uint64_t key) {
- uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
-
- hm_item *p = (hm_item *)get_bucket(&hash_map->buckets, index);
- while (p != NULL) {
- if (key == p->key) {
- return p;
- }
- p = p->hash_link;
- }
- return NULL;
-}
-
-hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key) {
- uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask;
-
- hm_item **slot = (hm_item **)get_mutable_bucket(&hash_map->buckets, index);
- hm_item *p = *slot;
- if (p == NULL) {
- return NULL;
- }
-
- if (key == p->key) {
- *slot = p->hash_link;
- p->hash_link = NULL;
- hash_map->num_items--;
- return p;
- }
-
- hm_item *prev = p;
- p = p->hash_link;
-
- while (p) {
- if (key == p->key) {
- prev->hash_link = p->hash_link;
- p->hash_link = NULL;
- hash_map->num_items--;
- return p;
- }
- prev = p;
- p = p->hash_link;
- }
- return NULL;
-}
-
-/* Insert an hm_item* into the underlying chunked vector. hash_mask is
- * array_size-1. Returns true if it is a new hm_item and false if the hm_item
- * already existed.
- */
-static __inline bool intrusive_hash_map_internal_insert(chunked_vector *buckets,
- uint32_t hash_mask,
- hm_item *item) {
- const uint64_t key = item->key;
- uint32_t index = chunked_vector_hasher(key) & hash_mask;
- hm_item **slot = (hm_item **)get_mutable_bucket(buckets, index);
- hm_item *p = *slot;
- item->hash_link = p;
-
- /* Check to see if key already exists. */
- while (p) {
- if (p->key == key) {
- return false;
- }
- p = p->hash_link;
- }
-
- /* Otherwise add new entry. */
- *slot = item;
- return true;
-}
-
-/* Extend the allocated number of elements in the hash map by a factor of 2. */
-void intrusive_hash_map_extend(intrusive_hash_map *hash_map) {
- uint32_t new_log2_num_buckets = 1 + hash_map->log2_num_buckets;
- uint32_t new_num_buckets = (uint32_t)1 << new_log2_num_buckets;
- GPR_ASSERT(new_num_buckets <= UINT32_MAX && new_num_buckets > 0);
- chunked_vector new_buckets;
- chunked_vector_init(&new_buckets);
- chunked_vector_reset(&new_buckets, new_num_buckets);
- uint32_t new_hash_mask = new_num_buckets - 1;
-
- hm_index cur_idx;
- hm_index end_idx;
- intrusive_hash_map_end(hash_map, &end_idx);
- intrusive_hash_map_begin(hash_map, &cur_idx);
- while (!hm_index_compare(&cur_idx, &end_idx)) {
- hm_item *new_item = cur_idx.item;
- intrusive_hash_map_next(hash_map, &cur_idx);
- intrusive_hash_map_internal_insert(&new_buckets, new_hash_mask, new_item);
- }
-
- /* Set values for new chunked_vector. extend_threshold is set to half of
- * new_num_buckets. */
- hash_map->log2_num_buckets = new_log2_num_buckets;
- chunked_vector_clear(&hash_map->buckets);
- hash_map->buckets = new_buckets;
- hash_map->hash_mask = new_hash_mask;
- hash_map->extend_threshold = new_num_buckets >> 1;
-}
-
-/* Insert an hm_item. The hm_item must remain live until it is removed from the
- table. This object does not take ownership of the hm_item. The caller must
- remove this hm_item from the table and delete it before this table is
- deleted. If the hm_item already exists, num_items is not changed. */
-bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item) {
- if (hash_map->num_items >= hash_map->extend_threshold) {
- intrusive_hash_map_extend(hash_map);
- }
- if (intrusive_hash_map_internal_insert(&hash_map->buckets,
- hash_map->hash_mask, item)) {
- hash_map->num_items++;
- return true;
- }
- return false;
-}
-
-void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
- void (*free_object)(void *)) {
- hm_index cur;
- hm_index end;
- intrusive_hash_map_end(hash_map, &end);
- intrusive_hash_map_begin(hash_map, &cur);
-
- while (!hm_index_compare(&cur, &end)) {
- hm_index next = cur;
- intrusive_hash_map_next(hash_map, &next);
- if (cur.item != NULL) {
- hm_item *item = intrusive_hash_map_erase(hash_map, cur.item->key);
- (*free_object)((void *)item);
- gpr_free(item);
- }
- cur = next;
- }
-}
-
-void intrusive_hash_map_free(intrusive_hash_map *hash_map,
- void (*free_object)(void *)) {
- intrusive_hash_map_clear(hash_map, (*free_object));
- hash_map->num_items = 0;
- hash_map->extend_threshold = 0;
- hash_map->log2_num_buckets = 0;
- hash_map->hash_mask = 0;
- chunked_vector_clear(&hash_map->buckets);
-}
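The chunked_vector used above keeps the first VECTOR_CHUNK_SIZE bucket pointers in one flat array and the remainder in fixed-size chunks. The index arithmetic in get_bucket()/get_mutable_bucket() boils down to the following standalone illustration, with CHUNK standing in for VECTOR_CHUNK_SIZE and slot_location being a hypothetical helper type:

    /* Map a flat bucket index onto the two-level chunked_vector layout:
       indices [0, CHUNK) live in first_; larger indices live in
       rest_[(index - CHUNK) / CHUNK][index % CHUNK]. */
    #include <stddef.h>
    #include <stdint.h>

    #define CHUNK ((1u << 20) / sizeof(void *)) /* pointers per 1MiB chunk */

    typedef struct slot_location {
      int in_first;  /* nonzero: use first_[offset] */
      size_t chunk;  /* otherwise: rest_[chunk][offset] */
      size_t offset;
    } slot_location;

    static slot_location locate(uint32_t index) {
      slot_location loc = {0, 0, 0};
      if (index < CHUNK) {
        loc.in_first = 1;
        loc.offset = index;
      } else {
        loc.chunk = (index - CHUNK) / CHUNK;
        loc.offset = index % CHUNK;
      }
      return loc;
    }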
diff --git a/src/core/ext/census/intrusive_hash_map.h b/src/core/ext/census/intrusive_hash_map.h
deleted file mode 100644
index f50de4fab4..0000000000
--- a/src/core/ext/census/intrusive_hash_map.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
-#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H
-
-#include "src/core/ext/census/intrusive_hash_map_internal.h"
-
-/* intrusive_hash_map is a fast chained hash table. This hash map is faster than
- * a dense hash map when the application calls insert and erase more often than
- * find. When the workload is dominated by find() a dense hash map may be
- * faster.
- *
- * intrusive_hash_map uses an intrusive header placed within a user defined
- * struct. The header field IHM_key MUST be set to a valid value before
- * insertion into the hash map or undefined behavior may occur. The header field
- * IHM_hash_link MUST be set to NULL initially.
- *
- * EXAMPLE USAGE:
- *
- * typedef struct string_item {
- * INTRUSIVE_HASH_MAP_HEADER;
- * // User data.
- * char *str_buf;
- * uint16_t len;
- * } string_item;
- *
- * static string_item *make_string_item(uint64_t key, const char *buf,
- * uint16_t len) {
- * string_item *item = (string_item *)gpr_malloc(sizeof(string_item));
- * item->IHM_key = key;
- * item->IHM_hash_link = NULL;
- * item->len = len;
- * item->str_buf = (char *)malloc(len);
- * memcpy(item->str_buf, buf, len);
- * return item;
- * }
- *
- * intrusive_hash_map hash_map;
- * intrusive_hash_map_init(&hash_map, 4);
- * string_item *new_item1 = make_string_item(10, "test1", 5);
- * bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1);
- *
- * string_item *item1 =
- * (string_item *)intrusive_hash_map_find(&hash_map, 10);
- */
-
-/* Hash map item. Stores key and a pointer to the actual object. A user defined
- * version of this can be passed in provided the first 2 entries (key and
- * hash_link) are the same. These entries must be first in the user defined
- * struct. Pointer to struct will need to be cast as (hm_item *) when passed to
- * hash map. This allows it to be intrusive. */
-typedef struct hm_item {
- uint64_t key;
- struct hm_item *hash_link;
- /* Optional user defined data after this. */
-} hm_item;
-
-/* Macro provided for ease of use. This must be first in the user defined
- * struct (i.e. uint64_t key and hm_item * must be the first two elements in
- * that order). */
-#define INTRUSIVE_HASH_MAP_HEADER \
- uint64_t IHM_key; \
- struct hm_item *IHM_hash_link
-
-/* Index struct which acts as a pseudo-iterator within the hash map. */
-typedef struct hm_index {
- uint32_t bucket_index; // hash map bucket index.
- hm_item *item; // Pointer to hm_item within the hash map.
-} hm_index;
-
-/* Returns true if two hm_indices point to the same object within the hash map
- * and false otherwise. */
-__inline bool hm_index_compare(const hm_index *A, const hm_index *B) {
- return (A->item == B->item && A->bucket_index == B->bucket_index);
-}
-
-/*
- * Helper functions for iterating over the hash map.
- */
-
-/* On return idx will contain an invalid index which is always equal to
- * hash_map->buckets.size_ */
-void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx);
-
-/* Iterates index to the next valid entry in the hash map and stores the
- * index within idx. If end of table is reached, idx will contain the same
- * values as if intrusive_hash_map_end() was called. */
-void intrusive_hash_map_next(const intrusive_hash_map *hash_map, hm_index *idx);
-
-/* On return, idx will contain the index of the first non-null entry in the hash
- * map. If the hash map is empty, idx will contain the same values as if
- * intrusive_hash_map_end() was called. */
-void intrusive_hash_map_begin(const intrusive_hash_map *hash_map,
- hm_index *idx);
-
-/* Initialize intrusive hash map data structure. This must be called before
- * the hash map can be used. The initial size of an intrusive hash map will be
- * 2^initial_log2_map_size (valid range is [0, 31]). */
-void intrusive_hash_map_init(intrusive_hash_map *hash_map,
- uint32_t initial_log2_map_size);
-
-/* Returns true if the hash map is empty and false otherwise. */
-bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map);
-
-/* Returns the number of elements currently in the hash map. */
-size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map);
-
-/* Find a hm_item within the hash map by key. Returns NULL if item was not
- * found. */
-hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map,
- uint64_t key);
-
-/* Erase the hm_item that corresponds with key. If the hm_item is found, return
- * the pointer to the hm_item. Else returns NULL. */
-hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key);
-
-/* Attempts to insert a new hm_item into the hash map. If an element with the
- * same key already exists, it will not insert the new item and return false.
- * Otherwise, it will insert the new item and return true. */
-bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item);
-
-/* Clears entire contents of the hash map, but leaves internal data structure
- * untouched. Second argument takes a function pointer to a function that will
- * free the object designated by the user and pointed to by hash_map->value. */
-void intrusive_hash_map_clear(intrusive_hash_map *hash_map,
- void (*free_object)(void *));
-
-/* Erase all contents of hash map and free the memory. Hash map is invalid
- * after calling this function and cannot be used until it has been
- * reinitialized (intrusive_hash_map_init()). This function takes a function
- * pointer to a function that will free the object designated by the user and
- * pointed to by hash_map->value. */
-void intrusive_hash_map_free(intrusive_hash_map *hash_map,
- void (*free_object)(void *));
-
-#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H */
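The header's own example covers insertion; iterating over the removed map followed the begin/next/end pseudo-iterator pattern, as in this sketch (it reuses the hypothetical string_item type and hash_map variable from the example in the header comment above):

    /* Walk every item currently stored in the map. */
    hm_index it;
    hm_index end;
    intrusive_hash_map_end(&hash_map, &end);
    intrusive_hash_map_begin(&hash_map, &it);
    while (!hm_index_compare(&it, &end)) {
      string_item *item = (string_item *)it.item;
      /* ... read item->str_buf / item->len here ... */
      intrusive_hash_map_next(&hash_map, &it);
    }

This is the same traversal shape used internally by intrusive_hash_map_extend() and intrusive_hash_map_clear().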
diff --git a/src/core/ext/census/intrusive_hash_map_internal.h b/src/core/ext/census/intrusive_hash_map_internal.h
deleted file mode 100644
index e9c81fc85c..0000000000
--- a/src/core/ext/census/intrusive_hash_map_internal.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
-#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-
-/* The chunked vector is a data structure that allocates buckets for use in the
- * hash map. ChunkedVector is logically equivalent to T*[N] (cast void* as
- * T*). It's internally implemented as an array of 1MB arrays to avoid
- * allocating large consecutive memory chunks. This is an internal data
- * structure that should never be accessed directly. */
-typedef struct chunked_vector {
- size_t size_;
- void **first_;
- void ***rest_;
-} chunked_vector;
-
-/* Core intrusive hash map data structure. All internal elements are managed by
- * functions and should not be altered manually. */
-typedef struct intrusive_hash_map {
- uint32_t num_items;
- uint32_t extend_threshold;
- uint32_t log2_num_buckets;
- uint32_t hash_mask;
- chunked_vector buckets;
-} intrusive_hash_map;
-
-#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H */
diff --git a/src/core/ext/census/mlog.c b/src/core/ext/census/mlog.c
deleted file mode 100644
index 937ceb101b..0000000000
--- a/src/core/ext/census/mlog.c
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Implements an efficient in-memory log, optimized for multiple writers and
-// a single reader. Available log space is divided up in blocks of
-// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following
-// three data structures:
-// - Free blocks (free_block_list)
-// - Blocks with unread data (dirty_block_list)
-// - Blocks currently attached to cores (core_local_blocks[])
-//
-// census_log_start_write() moves a block from core_local_blocks[] to the end of
-// dirty_block_list when block:
-// - is out-of-space OR
-// - has an incomplete record (an incomplete record occurs when a thread calls
-// census_log_start_write() and is context-switched before calling
-// census_log_end_write()).
-// So, blocks in dirty_block_list are ordered, from oldest to newest, by the
-// time when block is detached from the core.
-//
-// census_log_read_next() first iterates over dirty_block_list and then
-// core_local_blocks[]. It moves completely read blocks from dirty_block_list
-// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
-// completely read.
-//
-// If the log is configured to discard old records and free_block_list is empty,
-// census_log_start_write() iterates over dirty_block_list to allocate a
-// new block. It moves the oldest available block (no pending read/write) to
-// core_local_blocks[].
-//
-// core_local_block_struct is used to implement a map from core id to the block
-// associated with that core. This mapping is advisory. It is possible that the
-// block returned by this mapping is no longer associated with that core. This
-// mapping is updated, lazily, by census_log_start_write().
-//
-// Locking in block struct:
-//
-// Exclusive g_log.lock must be held before calling any functions operating on
-// block structs except census_log_start_write() and census_log_end_write().
-//
-// Writes to a block are serialized via writer_lock. census_log_start_write()
-// acquires this lock and census_log_end_write() releases it. On failure to
-// acquire the lock, writer allocates a new block for the current core and
-// updates core_local_block accordingly.
-//
-// Simultaneous read and write access is allowed. Readers can safely read up to
-// committed bytes (bytes_committed).
-//
-// reader_lock protects the block, currently being read, from getting recycled.
-// start_read() acquires reader_lock and end_read() releases the lock.
-//
-// Read/write access to a block is disabled via try_disable_access(). It returns
-// with both writer_lock and reader_lock held. These locks are subsequently
-// released by enable_access() to enable access to the block.
-//
-// A note on naming: Most function/struct names are prepended by cl_
-// (shorthand for census_log). Further, functions that manipulate structures
-// include the name of the structure, which will be passed as the first
-// argument. E.g. cl_block_initialize() will initialize a cl_block.
-
-#include "src/core/ext/census/mlog.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/useful.h>
-#include <stdbool.h>
-#include <string.h>
-
-// End of platform specific code
-
-typedef struct census_log_block_list_struct {
- struct census_log_block_list_struct* next;
- struct census_log_block_list_struct* prev;
- struct census_log_block* block;
-} cl_block_list_struct;
-
-typedef struct census_log_block {
- // Pointer to underlying buffer.
- char* buffer;
- gpr_atm writer_lock;
- gpr_atm reader_lock;
- // Keeps completely written bytes. Declared atomic because accessed
- // simultaneously by reader and writer.
- gpr_atm bytes_committed;
- // Bytes already read.
- size_t bytes_read;
- // Links for list.
- cl_block_list_struct link;
-// We want this structure to be cacheline aligned. We assume the following
-// sizes for the various parts on 32/64bit systems:
-// type 32b size 64b size
-// char* 4 8
-// 3x gpr_atm 12 24
-// size_t 4 8
-// cl_block_list_struct 12 24
-// TOTAL 32 64
-//
-// Depending on the size of our cacheline and the architecture, we
-// selectively add char buffering to this structure. The size is checked
-// via assert in census_log_initialize().
-#if defined(GPR_ARCH_64)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_BLOCK_PAD_SIZE > 0
- char padding[CL_BLOCK_PAD_SIZE];
-#endif
-} cl_block;
-
-// A list of cl_blocks, doubly-linked through cl_block::link.
-typedef struct census_log_block_list {
- int32_t count; // Number of items in list.
- cl_block_list_struct ht; // head/tail of linked list.
-} cl_block_list;
-
-// Cacheline aligned block pointers to avoid false sharing. Block pointer must
-// be initialized via set_block(), before calling other functions
-typedef struct census_log_core_local_block {
- gpr_atm block;
-// Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8
-#if defined(GPR_ARCH_64)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
-#else
-#if defined(GPR_ARCH_32)
-#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
-#else
-#error "Unknown architecture"
-#endif
-#endif
-#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
- char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
-#endif
-} cl_core_local_block;
-
-struct census_log {
- int discard_old_records;
- // Number of cores (aka hardware-contexts)
- unsigned num_cores;
- // number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log
- uint32_t num_blocks;
- cl_block* blocks; // Block metadata.
- cl_core_local_block* core_local_blocks; // Keeps core to block mappings.
- gpr_mu lock;
- int initialized; // has log been initialized?
- // Keeps the state of the reader iterator. A value of 0 indicates that
- // the iterator has reached the end. census_log_init_reader() resets the value
- // to num_cores to restart iteration.
- uint32_t read_iterator_state;
- // Points to the block being read. If non-NULL, the block is locked for
- // reading (block_being_read->reader_lock is held).
- cl_block* block_being_read;
- char* buffer;
- cl_block_list free_block_list;
- cl_block_list dirty_block_list;
- gpr_atm out_of_space_count;
-};
-
-// Single internal log.
-static struct census_log g_log;
-
-// Functions that operate on an atomic memory location used as a lock.
-
-// Returns non-zero if lock is acquired.
-static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
-
-static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
-
-// Functions that operate on cl_core_local_block's.
-
-static void cl_core_local_block_set_block(cl_core_local_block* clb,
- cl_block* block) {
- gpr_atm_rel_store(&clb->block, (gpr_atm)block);
-}
-
-static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
- return (cl_block*)gpr_atm_acq_load(&clb->block);
-}
-
-// Functions that operate on cl_block_list_struct's.
-
-static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
- cl_block* block) {
- bls->next = bls->prev = bls;
- bls->block = block;
-}
-
-// Functions that operate on cl_block_list's.
-
-static void cl_block_list_initialize(cl_block_list* list) {
- list->count = 0;
- cl_block_list_struct_initialize(&list->ht, NULL);
-}
-
-// Returns head of *this, or NULL if empty.
-static cl_block* cl_block_list_head(cl_block_list* list) {
- return list->ht.next->block;
-}
-
-// Insert element *e after *pos.
-static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
- cl_block_list_struct* e) {
- list->count++;
- e->next = pos->next;
- e->prev = pos;
- e->next->prev = e;
- e->prev->next = e;
-}
-
-// Insert block at the head of the list
-static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
- cl_block_list_insert(list, &list->ht, &block->link);
-}
-
-// Insert block at the tail of the list.
-static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
- cl_block_list_insert(list, list->ht.prev, &block->link);
-}
-
-// Removes block *b. Requires *b be in the list.
-static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
- list->count--;
- b->link.next->prev = b->link.prev;
- b->link.prev->next = b->link.next;
-}
-
-// Functions that operate on cl_block's
-
-static void cl_block_initialize(cl_block* block, char* buffer) {
- block->buffer = buffer;
- gpr_atm_rel_store(&block->writer_lock, 0);
- gpr_atm_rel_store(&block->reader_lock, 0);
- gpr_atm_rel_store(&block->bytes_committed, 0);
- block->bytes_read = 0;
- cl_block_list_struct_initialize(&block->link, block);
-}
-
-// Guards against exposing partially written buffer to the reader.
-static void cl_block_set_bytes_committed(cl_block* block,
- size_t bytes_committed) {
- gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
-}
-
-static size_t cl_block_get_bytes_committed(cl_block* block) {
- return (size_t)gpr_atm_acq_load(&block->bytes_committed);
-}
-
-// Tries to disable future read/write access to this block. Succeeds if:
-// - no in-progress write AND
-// - no in-progress read AND
-// - 'discard_data' set to true OR no unread data
-// On success, clears the block state and returns with writer_lock_ and
-// reader_lock_ held. These locks are released by a subsequent
-// cl_block_access_enable() call.
-static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
- if (!cl_try_lock(&block->writer_lock)) {
- return false;
- }
- if (!cl_try_lock(&block->reader_lock)) {
- cl_unlock(&block->writer_lock);
- return false;
- }
- if (!discard_data &&
- (block->bytes_read != cl_block_get_bytes_committed(block))) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
- return false;
- }
- cl_block_set_bytes_committed(block, 0);
- block->bytes_read = 0;
- return true;
-}
-
-static void cl_block_enable_access(cl_block* block) {
- cl_unlock(&block->reader_lock);
- cl_unlock(&block->writer_lock);
-}
-
-// Returns with writer_lock held.
-static void* cl_block_start_write(cl_block* block, size_t size) {
- if (!cl_try_lock(&block->writer_lock)) {
- return NULL;
- }
- size_t bytes_committed = cl_block_get_bytes_committed(block);
- if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
- cl_unlock(&block->writer_lock);
- return NULL;
- }
- return block->buffer + bytes_committed;
-}
-
-// Releases writer_lock and increments committed bytes by 'bytes_written'.
-// 'bytes_written' must be <= 'size' specified in the corresponding
-// StartWrite() call. This function is thread-safe.
-static void cl_block_end_write(cl_block* block, size_t bytes_written) {
- cl_block_set_bytes_committed(
- block, cl_block_get_bytes_committed(block) + bytes_written);
- cl_unlock(&block->writer_lock);
-}
-
-// Returns a pointer to the first unread byte in buffer. The number of bytes
-// available are returned in 'bytes_available'. Acquires reader lock that is
-// released by a subsequent cl_block_end_read() call. Returns NULL if:
-// - read in progress
-// - no data available
-static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
- if (!cl_try_lock(&block->reader_lock)) {
- return NULL;
- }
- // bytes_committed may change from under us. Use bytes_available to update
- // bytes_read below.
- size_t bytes_committed = cl_block_get_bytes_committed(block);
- GPR_ASSERT(bytes_committed >= block->bytes_read);
- *bytes_available = bytes_committed - block->bytes_read;
- if (*bytes_available == 0) {
- cl_unlock(&block->reader_lock);
- return NULL;
- }
- void* record = block->buffer + block->bytes_read;
- block->bytes_read += *bytes_available;
- return record;
-}
-
-static void cl_block_end_read(cl_block* block) {
- cl_unlock(&block->reader_lock);
-}
-
-// Internal functions operating on g_log
-
-// Allocates a new free block (or recycles an available dirty block if log is
-// configured to discard old records). Returns NULL if out-of-space.
-static cl_block* cl_allocate_block(void) {
- cl_block* block = cl_block_list_head(&g_log.free_block_list);
- if (block != NULL) {
- cl_block_list_remove(&g_log.free_block_list, block);
- return block;
- }
- if (!g_log.discard_old_records) {
- // No free block and log is configured to keep old records.
- return NULL;
- }
- // Recycle dirty block. Start from the oldest.
- for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
- block = block->link.next->block) {
- if (cl_block_try_disable_access(block, 1 /* discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, block);
- return block;
- }
- }
- return NULL;
-}
-
-// Allocates a new block and updates core id => block mapping. 'old_block'
-// points to the block that the caller thinks is attached to
-// 'core_id'. 'old_block' may be NULL. Returns true if:
-// - allocated a new block OR
-// - 'core_id' => 'old_block' mapping changed (another thread allocated a
-// block before lock was acquired).
-static bool cl_allocate_core_local_block(uint32_t core_id,
- cl_block* old_block) {
- // Now that we have the lock, check if core-local mapping has changed.
- cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
- cl_block* block = cl_core_local_block_get_block(core_local_block);
- if ((block != NULL) && (block != old_block)) {
- return true;
- }
- if (block != NULL) {
- cl_core_local_block_set_block(core_local_block, NULL);
- cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
- }
- block = cl_allocate_block();
- if (block == NULL) {
- return false;
- }
- cl_core_local_block_set_block(core_local_block, block);
- cl_block_enable_access(block);
- return true;
-}
-
-static cl_block* cl_get_block(void* record) {
- uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
- uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
- return &g_log.blocks[index];
-}
-
-// Gets the next block to read and tries to free 'prev' block (if not NULL).
-// Returns NULL if reached the end.
-static cl_block* cl_next_block_to_read(cl_block* prev) {
- cl_block* block = NULL;
- if (g_log.read_iterator_state == g_log.num_cores) {
- // We are traversing dirty list; find the next dirty block.
- if (prev != NULL) {
- // Try to free the previous block if there is no unread data. This block
- // may have unread data if a previously incomplete record completed between
- // read_next() calls.
- block = prev->link.next->block;
- if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
- cl_block_list_remove(&g_log.dirty_block_list, prev);
- cl_block_list_insert_at_head(&g_log.free_block_list, prev);
- }
- } else {
- block = cl_block_list_head(&g_log.dirty_block_list);
- }
- if (block != NULL) {
- return block;
- }
- // We are done with the dirty list; moving on to core-local blocks.
- }
- while (g_log.read_iterator_state > 0) {
- g_log.read_iterator_state--;
- block = cl_core_local_block_get_block(
- &g_log.core_local_blocks[g_log.read_iterator_state]);
- if (block != NULL) {
- return block;
- }
- }
- return NULL;
-}
-
-#define CL_LOG_2_MB 20 // 2^20 = 1MB
-
-// External functions: primary stats_log interface
-void census_log_initialize(size_t size_in_mb, int discard_old_records) {
- // Check cacheline alignment.
- GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
- GPR_ASSERT(!g_log.initialized);
- g_log.discard_old_records = discard_old_records;
- g_log.num_cores = gpr_cpu_num_cores();
- // Ensure that we will not get any overflow in calculating num_blocks.
- GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
- GPR_ASSERT(size_in_mb < 1000);
- // Ensure at least 2x as many blocks as there are cores.
- g_log.num_blocks =
- (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
- CENSUS_LOG_2_MAX_RECORD_SIZE);
- gpr_mu_init(&g_log.lock);
- g_log.read_iterator_state = 0;
- g_log.block_being_read = NULL;
- g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
- g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.core_local_blocks, 0,
- g_log.num_cores * sizeof(cl_core_local_block));
- g_log.blocks = (cl_block*)gpr_malloc_aligned(
- g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
- memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
- g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
- cl_block_list_initialize(&g_log.free_block_list);
- cl_block_list_initialize(&g_log.dirty_block_list);
- for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
- cl_block* block = g_log.blocks + i;
- cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
- cl_block_try_disable_access(block, 1 /* discard data */);
- cl_block_list_insert_at_tail(&g_log.free_block_list, block);
- }
- gpr_atm_rel_store(&g_log.out_of_space_count, 0);
- g_log.initialized = 1;
-}
-
-void census_log_shutdown(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_destroy(&g_log.lock);
- gpr_free_aligned(g_log.core_local_blocks);
- g_log.core_local_blocks = NULL;
- gpr_free_aligned(g_log.blocks);
- g_log.blocks = NULL;
- gpr_free(g_log.buffer);
- g_log.buffer = NULL;
- g_log.initialized = 0;
-}
-
-void* census_log_start_write(size_t size) {
- // Used to bound number of times block allocation is attempted.
- GPR_ASSERT(size > 0);
- GPR_ASSERT(g_log.initialized);
- if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
- return NULL;
- }
- uint32_t attempts_remaining = g_log.num_blocks;
- uint32_t core_id = gpr_cpu_current_cpu();
- do {
- void* record = NULL;
- cl_block* block =
- cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
- if (block && (record = cl_block_start_write(block, size))) {
- return record;
- }
- // Need to allocate a new block. We are here if:
- // - No block associated with the core OR
- // - Write in-progress on the block OR
- // - block is out of space
- gpr_mu_lock(&g_log.lock);
- bool allocated = cl_allocate_core_local_block(core_id, block);
- gpr_mu_unlock(&g_log.lock);
- if (!allocated) {
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
- }
- } while (attempts_remaining--);
- // Give up.
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
- return NULL;
-}
-
-void census_log_end_write(void* record, size_t bytes_written) {
- GPR_ASSERT(g_log.initialized);
- cl_block_end_write(cl_get_block(record), bytes_written);
-}
-
-void census_log_init_reader(void) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- // If a block is locked for reading unlock it.
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- g_log.block_being_read = NULL;
- }
- g_log.read_iterator_state = g_log.num_cores;
- gpr_mu_unlock(&g_log.lock);
-}
-
-const void* census_log_read_next(size_t* bytes_available) {
- GPR_ASSERT(g_log.initialized);
- gpr_mu_lock(&g_log.lock);
- if (g_log.block_being_read != NULL) {
- cl_block_end_read(g_log.block_being_read);
- }
- do {
- g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
- if (g_log.block_being_read != NULL) {
- void* record =
- cl_block_start_read(g_log.block_being_read, bytes_available);
- if (record != NULL) {
- gpr_mu_unlock(&g_log.lock);
- return record;
- }
- }
- } while (g_log.block_being_read != NULL);
- gpr_mu_unlock(&g_log.lock);
- return NULL;
-}
-
-size_t census_log_remaining_space(void) {
- GPR_ASSERT(g_log.initialized);
- size_t space = 0;
- gpr_mu_lock(&g_log.lock);
- if (g_log.discard_old_records) {
- // Remaining space is not meaningful; just return the entire log space.
- space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
- } else {
- GPR_ASSERT(g_log.free_block_list.count >= 0);
- space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
- }
- gpr_mu_unlock(&g_log.lock);
- return space;
-}
-
-int64_t census_log_out_of_space_count(void) {
- GPR_ASSERT(g_log.initialized);
- return gpr_atm_acq_load(&g_log.out_of_space_count);
-}
diff --git a/src/core/ext/census/mlog.h b/src/core/ext/census/mlog.h
deleted file mode 100644
index 6f3125944f..0000000000
--- a/src/core/ext/census/mlog.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* A very fast in-memory log, optimized for multiple writers. */
-
-#ifndef GRPC_CORE_EXT_CENSUS_MLOG_H
-#define GRPC_CORE_EXT_CENSUS_MLOG_H
-
-#include <grpc/support/port_platform.h>
-#include <stddef.h>
-
-/* Maximum record size, in bytes. */
-#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */
-#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE)
-
-/* Initialize the statistics logging subsystem with the given log size. A log
- size of 0 will result in the smallest possible log for the platform
- (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If
- discard_old_records is non-zero, then new records will displace older ones
- when the log is full. This function must be called before any other
- census_log functions.
-*/
-void census_log_initialize(size_t size_in_mb, int discard_old_records);
-
-/* Shutdown the logging subsystem. Caller must ensure that:
- - no in progress or future call to any census_log functions
- - no incomplete records
-*/
-void census_log_shutdown(void);
-
-/* Allocates and returns a 'size' bytes record and marks it in use. A
- subsequent census_log_end_write() marks the record complete. The
- 'bytes_written' census_log_end_write() argument must be <=
- 'size'. Returns NULL if out-of-space AND:
- - log is configured to keep old records OR
- - all blocks are pinned by incomplete records.
-*/
-void* census_log_start_write(size_t size);
-
-void census_log_end_write(void* record, size_t bytes_written);
-
-void census_log_init_reader(void);
-
-/* census_log_read_next() iterates over blocks with data and for each block
- returns a pointer to the first unread byte. The number of bytes that can be
- read are returned in 'bytes_available'. Reader is expected to read all
- available data. Reading the data consumes it, i.e. it cannot be read again.
- census_log_read_next() returns NULL if the end is reached, i.e. the last
- block is read. census_log_init_reader() starts the iteration or aborts the
- current iteration.
-*/
-const void* census_log_read_next(size_t* bytes_available);
-
-/* Returns estimated remaining space across all blocks, in bytes. If log is
- configured to discard old records, returns total log space. Otherwise,
- returns space available in empty blocks (partially filled blocks are
- treated as full).
-*/
-size_t census_log_remaining_space(void);
-
-/* Returns the number of times census_log_start_write() failed due to
- out-of-space. */
-int64_t census_log_out_of_space_count(void);
-
-#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */
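Putting the removed mlog API together, a typical writer/reader round trip looked roughly like the sketch below; the record payload, its size, and the function name are illustrative only.

    #include <string.h>
    #include "src/core/ext/census/mlog.h"

    void mlog_round_trip_sketch(void) {
      census_log_initialize(1 /* MB */, 1 /* discard old records */);

      /* Writer: reserve space, fill it, then commit the bytes written. */
      const char payload[] = "example record";
      void *slot = census_log_start_write(sizeof(payload));
      if (slot != NULL) {
        memcpy(slot, payload, sizeof(payload));
        census_log_end_write(slot, sizeof(payload));
      }

      /* Reader: iterate over blocks with unread data. */
      census_log_init_reader();
      size_t available;
      const void *data;
      while ((data = census_log_read_next(&available)) != NULL) {
        /* consume 'available' bytes starting at 'data' */
      }

      census_log_shutdown();
    }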
diff --git a/src/core/ext/census/operation.c b/src/core/ext/census/operation.c
deleted file mode 100644
index be88ac74e6..0000000000
--- a/src/core/ext/census/operation.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-
-/* TODO(aveitch): These are all placeholder implementations. */
-
-census_timestamp census_start_rpc_op_timestamp(void) {
- census_timestamp ct;
- /* TODO(aveitch): assumes gpr_timespec implementation of census_timestamp. */
- ct.ts = gpr_now(GPR_CLOCK_MONOTONIC);
- return ct;
-}
-
-census_context *census_start_client_rpc_op(
- const census_context *context, int64_t rpc_name_id,
- const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
- const census_timestamp *start_time) {
- return NULL;
-}
-
-census_context *census_start_server_rpc_op(
- const char *buffer, int64_t rpc_name_id,
- const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask,
- census_timestamp *start_time) {
- return NULL;
-}
-
-census_context *census_start_op(census_context *context, const char *family,
- const char *name, int trace_mask) {
- return NULL;
-}
-
-void census_end_op(census_context *context, int status) {}
diff --git a/src/core/ext/census/placeholders.c b/src/core/ext/census/placeholders.c
deleted file mode 100644
index bed9837ee3..0000000000
--- a/src/core/ext/census/placeholders.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/census.h>
-
-#include <grpc/support/log.h>
-
-/* Placeholders for the pending APIs */
-
-int census_get_trace_record(census_trace_record *trace_record) {
- (void)trace_record;
- abort();
-}
-
-void census_record_values(census_context *context, census_value *values,
- size_t nvalues) {
- (void)context;
- (void)values;
- (void)nvalues;
- abort();
-}
-
-void census_set_rpc_client_peer(census_context *context, const char *peer) {
- (void)context;
- (void)peer;
- abort();
-}
-
-void census_trace_scan_end() { abort(); }
-
-int census_trace_scan_start(int consume) {
- (void)consume;
- abort();
-}
diff --git a/src/core/ext/census/resource.c b/src/core/ext/census/resource.c
deleted file mode 100644
index 1a676f0e1e..0000000000
--- a/src/core/ext/census/resource.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/resource.h"
-#include "third_party/nanopb/pb_decode.h"
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-
-#include <stdbool.h>
-#include <string.h>
-
-// Protect local resource data structures.
-static gpr_mu resource_lock;
-
-// Deleting and creating resources are relatively rare events, and should not
-// be done in the critical path of performance-sensitive code. We record the
-// resource ids currently in use in a simple array, and just search it each
-// time we need to assign a new id or look up a resource.
-static resource **resources = NULL;
-
-// Number of entries in *resources
-static size_t n_resources = 0;
-
-// Number of defined resources
-static size_t n_defined_resources = 0;
-
-void initialize_resources(void) {
- gpr_mu_init(&resource_lock);
- gpr_mu_lock(&resource_lock);
- GPR_ASSERT(resources == NULL && n_resources == 0 && n_defined_resources == 0);
- gpr_mu_unlock(&resource_lock);
-}
-
-// Delete a resource given its ID. The ID must be a valid resource ID. Must be
-// called with resource_lock held.
-static void delete_resource_locked(size_t rid) {
- GPR_ASSERT(resources[rid] != NULL);
- gpr_free(resources[rid]->name);
- gpr_free(resources[rid]->description);
- gpr_free(resources[rid]->numerators);
- gpr_free(resources[rid]->denominators);
- gpr_free(resources[rid]);
- resources[rid] = NULL;
- n_defined_resources--;
-}
-
-void shutdown_resources(void) {
- gpr_mu_lock(&resource_lock);
- for (size_t i = 0; i < n_resources; i++) {
- if (resources[i] != NULL) {
- delete_resource_locked(i);
- }
- }
- GPR_ASSERT(n_defined_resources == 0);
- gpr_free(resources);
- resources = NULL;
- n_resources = 0;
- gpr_mu_unlock(&resource_lock);
-}
-
-// Check the contents of string fields in a resource proto.
-static bool validate_string(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- resource *vresource = (resource *)*arg;
- switch (field->tag) {
- case google_census_Resource_name_tag:
- // Name must have at least one character
- if (stream->bytes_left == 0) {
- gpr_log(GPR_INFO, "Zero-length Resource name.");
- return false;
- }
- vresource->name = gpr_malloc(stream->bytes_left + 1);
- vresource->name[stream->bytes_left] = '\0';
- if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) {
- return false;
- }
- // Can't have same name as an existing resource.
- for (size_t i = 0; i < n_resources; i++) {
- resource *compare = resources[i];
- if (compare == vresource || compare == NULL) continue;
- if (strcmp(compare->name, vresource->name) == 0) {
- gpr_log(GPR_INFO, "Duplicate Resource name %s.", vresource->name);
- return false;
- }
- }
- break;
- case google_census_Resource_description_tag:
- if (stream->bytes_left == 0) {
- return true;
- }
- vresource->description = gpr_malloc(stream->bytes_left + 1);
- vresource->description[stream->bytes_left] = '\0';
- if (!pb_read(stream, (uint8_t *)vresource->description,
- stream->bytes_left)) {
- return false;
- }
- break;
- default:
- // No other string fields in Resource. Print warning and skip.
- gpr_log(GPR_INFO, "Unknown string field type in Resource protobuf.");
- if (!pb_read(stream, NULL, stream->bytes_left)) {
- return false;
- }
- break;
- }
- return true;
-}
-
-// Decode numerators/denominators in a stream. The `count` and `bup`
-// (BasicUnit pointer) are pointers to the appropriate fields in a resource
-// struct.
-static bool validate_units_helper(pb_istream_t *stream, int *count,
- google_census_Resource_BasicUnit **bup) {
- while (stream->bytes_left) {
- (*count)++;
- // Have to allocate a new array of values. Normal case is 0 or 1, so
- // this should normally not be an issue.
- google_census_Resource_BasicUnit *new_bup =
- gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit));
- if (*count != 1) {
- memcpy(new_bup, *bup,
- (size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit));
- gpr_free(*bup);
- }
- *bup = new_bup;
- uint64_t value;
- if (!pb_decode_varint(stream, &value)) {
- return false;
- }
- *(*bup + *count - 1) = (google_census_Resource_BasicUnit)value;
- }
- return true;
-}
-
-// Validate units field of a Resource proto.
-static bool validate_units(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- resource *vresource = (resource *)(*arg);
- switch (field->tag) {
- case google_census_Resource_MeasurementUnit_numerator_tag:
- return validate_units_helper(stream, &vresource->n_numerators,
- &vresource->numerators);
- break;
- case google_census_Resource_MeasurementUnit_denominator_tag:
- return validate_units_helper(stream, &vresource->n_denominators,
- &vresource->denominators);
- break;
- default:
- gpr_log(GPR_ERROR, "Unknown field type.");
- return false;
- break;
- }
- return true;
-}
-
-// Validate the contents of a Resource proto. `id` is the intended resource id.
-static bool validate_resource_pb(const uint8_t *resource_pb,
- size_t resource_pb_size, size_t id) {
- GPR_ASSERT(id < n_resources);
- if (resource_pb == NULL) {
- return false;
- }
- google_census_Resource vresource;
- vresource.name.funcs.decode = &validate_string;
- vresource.name.arg = resources[id];
- vresource.description.funcs.decode = &validate_string;
- vresource.description.arg = resources[id];
- vresource.unit.numerator.funcs.decode = &validate_units;
- vresource.unit.numerator.arg = resources[id];
- vresource.unit.denominator.funcs.decode = &validate_units;
- vresource.unit.denominator.arg = resources[id];
-
- pb_istream_t stream =
- pb_istream_from_buffer((uint8_t *)resource_pb, resource_pb_size);
- if (!pb_decode(&stream, google_census_Resource_fields, &vresource)) {
- return false;
- }
- // A Resource must have a name and a unit with at least one numerator.
- return (resources[id]->name != NULL && vresource.has_unit &&
- resources[id]->n_numerators > 0);
-}
-
-// Allocate a blank resource, and return associated ID. Must be called with
-// resource_lock held.
-size_t allocate_resource(void) {
- // use next_id to optimize expected placement of next new resource.
- static size_t next_id = 0;
- size_t id = n_resources; // resource ID - initialize to invalid value.
- // Expand resources if needed.
- if (n_resources == n_defined_resources) {
- size_t new_n_resources = n_resources ? n_resources * 2 : 2;
- resource **new_resources = gpr_malloc(new_n_resources * sizeof(resource *));
- if (n_resources != 0) {
- memcpy(new_resources, resources, n_resources * sizeof(resource *));
- }
- memset(new_resources + n_resources, 0,
- (new_n_resources - n_resources) * sizeof(resource *));
- gpr_free(resources);
- resources = new_resources;
- n_resources = new_n_resources;
- id = n_defined_resources;
- } else {
- GPR_ASSERT(n_defined_resources < n_resources);
- // Find a free id.
- for (size_t base = 0; base < n_resources; base++) {
- id = (next_id + base) % n_resources;
- if (resources[id] == NULL) break;
- }
- }
- GPR_ASSERT(id < n_resources && resources[id] == NULL);
- resources[id] = gpr_malloc(sizeof(resource));
- memset(resources[id], 0, sizeof(resource));
- n_defined_resources++;
- next_id = (id + 1) % n_resources;
- return id;
-}
-
-int32_t census_define_resource(const uint8_t *resource_pb,
- size_t resource_pb_size) {
- if (resource_pb == NULL) {
- return -1;
- }
- gpr_mu_lock(&resource_lock);
- size_t id = allocate_resource();
- // Validate pb, extract name.
- if (!validate_resource_pb(resource_pb, resource_pb_size, id)) {
- delete_resource_locked(id);
- gpr_mu_unlock(&resource_lock);
- return -1;
- }
- gpr_mu_unlock(&resource_lock);
- return (int32_t)id;
-}
-
-void census_delete_resource(int32_t rid) {
- gpr_mu_lock(&resource_lock);
- if (rid >= 0 && (size_t)rid < n_resources && resources[rid] != NULL) {
- delete_resource_locked((size_t)rid);
- }
- gpr_mu_unlock(&resource_lock);
-}
-
-int32_t census_resource_id(const char *name) {
- gpr_mu_lock(&resource_lock);
- for (int32_t id = 0; (size_t)id < n_resources; id++) {
- if (resources[id] != NULL && strcmp(resources[id]->name, name) == 0) {
- gpr_mu_unlock(&resource_lock);
- return id;
- }
- }
- gpr_mu_unlock(&resource_lock);
- return -1;
-}
-
-int32_t define_resource(const resource *base) {
- GPR_ASSERT(base != NULL && base->name != NULL && base->n_numerators > 0 &&
- base->numerators != NULL);
- gpr_mu_lock(&resource_lock);
- size_t id = allocate_resource();
- size_t len = strlen(base->name) + 1;
- resources[id]->name = gpr_malloc(len);
- memcpy(resources[id]->name, base->name, len);
- if (base->description) {
- len = strlen(base->description) + 1;
- resources[id]->description = gpr_malloc(len);
- memcpy(resources[id]->description, base->description, len);
- }
- resources[id]->prefix = base->prefix;
- resources[id]->n_numerators = base->n_numerators;
- len = (size_t)base->n_numerators * sizeof(*base->numerators);
- resources[id]->numerators = gpr_malloc(len);
- memcpy(resources[id]->numerators, base->numerators, len);
- resources[id]->n_denominators = base->n_denominators;
- if (base->n_denominators != 0) {
- len = (size_t)base->n_denominators * sizeof(*base->denominators);
- resources[id]->denominators = gpr_malloc(len);
- memcpy(resources[id]->denominators, base->denominators, len);
- }
- gpr_mu_unlock(&resource_lock);
- return (int32_t)id;
-}
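For illustration, a minimal sketch of how the define_resource() entry point removed above was meant to be driven. This is a hypothetical example, not part of this change: the google_census_Resource_BasicUnit_SECS enum member is assumed from the generated census.pb.h, the resource name is invented, and the struct fields and preconditions follow the assertions in define_resource() itself.

    /* Hypothetical usage sketch (assumptions noted above). */
    #include <string.h>
    #include "src/core/ext/census/resource.h"

    static int32_t register_example_latency_resource(void) {
      /* Assumed enum member from the generated census.pb.h. */
      static google_census_Resource_BasicUnit numerators[] = {
          google_census_Resource_BasicUnit_SECS};
      resource r;
      memset(&r, 0, sizeof(r));
      r.name = (char *)"example/rpc_latency";     /* required: non-NULL name */
      r.description = (char *)"Example latency in seconds";
      r.n_numerators = 1;                         /* required: >= 1 numerator */
      r.numerators = numerators;
      /* Per resource.h, returns the new resource id, or negative on failure. */
      return define_resource(&r);
    }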
diff --git a/src/core/ext/census/resource.h b/src/core/ext/census/resource.h
deleted file mode 100644
index b8bda2c72e..0000000000
--- a/src/core/ext/census/resource.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Census-internal resource definition and manipulation functions. */
-#ifndef GRPC_CORE_EXT_CENSUS_RESOURCE_H
-#define GRPC_CORE_EXT_CENSUS_RESOURCE_H
-
-#include <grpc/grpc.h>
-#include "src/core/ext/census/gen/census.pb.h"
-
-/* Internal representation of a resource. */
-typedef struct {
- char *name;
- char *description;
- int32_t prefix;
- int n_numerators;
- google_census_Resource_BasicUnit *numerators;
- int n_denominators;
- google_census_Resource_BasicUnit *denominators;
-} resource;
-
-/* Initialize and shutdown the resources subsystem. */
-void initialize_resources(void);
-void shutdown_resources(void);
-
-/* Add a new resource, given a proposed resource structure. Returns the
- resource ID, or -ve on failure.
- TODO(aveitch): this function exists to support addition of the base
- resources. It should be removed when we have the ability to add resources
- from configuration files. */
-int32_t define_resource(const resource *base);
-
-#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */
diff --git a/src/core/ext/census/rpc_metric_id.h b/src/core/ext/census/rpc_metric_id.h
deleted file mode 100644
index ea493d7288..0000000000
--- a/src/core/ext/census/rpc_metric_id.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
-#define GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H
-
-/* Metric ID's used for RPC measurements. */
-/* Count of client requests sent. */
-#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0)
-/* Count of server requests sent. */
-#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1)
-/* Client error counts. */
-#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2)
-/* Server error counts. */
-#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3)
-/* Client side request latency. */
-#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4)
-/* Server side request latency. */
-#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5)
-
-#endif /* GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H */
diff --git a/src/core/ext/census/trace_context.c b/src/core/ext/census/trace_context.c
deleted file mode 100644
index af92ae6d9e..0000000000
--- a/src/core/ext/census/trace_context.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/trace_context.h"
-
-#include <grpc/census.h>
-#include <grpc/support/log.h>
-#include <stdbool.h>
-
-#include "third_party/nanopb/pb_decode.h"
-#include "third_party/nanopb/pb_encode.h"
-
-// This function assumes the TraceContext is valid.
-size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t buf_size) {
- // Create a stream that will write to our buffer.
- pb_ostream_t stream = pb_ostream_from_buffer(buffer, buf_size);
-
- // encode message
- bool status = pb_encode(&stream, google_trace_TraceContext_fields, ctxt);
-
- if (!status) {
- gpr_log(GPR_DEBUG, "TraceContext encoding failed: %s",
- PB_GET_ERROR(&stream));
- return 0;
- }
-
- return stream.bytes_written;
-}
-
-bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t nbytes) {
- // Create a stream that reads nbytes from the buffer.
- pb_istream_t stream = pb_istream_from_buffer(buffer, nbytes);
-
- // decode message
- bool status = pb_decode(&stream, google_trace_TraceContext_fields, ctxt);
-
- if (!status) {
- gpr_log(GPR_DEBUG, "TraceContext decoding failed: %s",
- PB_GET_ERROR(&stream));
- return false;
- }
-
- // check fields
- if (!ctxt->has_trace_id_hi || !ctxt->has_trace_id_lo) {
- gpr_log(GPR_DEBUG, "Invalid TraceContext: missing trace_id");
- return false;
- }
- if (!ctxt->has_span_id) {
- gpr_log(GPR_DEBUG, "Invalid TraceContext: missing span_id");
- return false;
- }
-
- return true;
-}
diff --git a/src/core/ext/census/trace_context.h b/src/core/ext/census/trace_context.h
deleted file mode 100644
index a7233e6a2b..0000000000
--- a/src/core/ext/census/trace_context.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Functions for manipulating trace contexts as defined in
- src/proto/census/trace.proto */
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H
-
-#include "src/core/ext/census/gen/trace_context.pb.h"
-
-/* Span option flags. */
-#define SPAN_OPTIONS_IS_SAMPLED 0x01
-
-/* Maximum number of bytes required to encode a TraceContext (31)
-1 byte for trace_id field
-1 byte for trace_id length
-1 byte for trace_id.hi field
-8 bytes for trace_id.hi (uint64_t)
-1 byte for trace_id.lo field
-8 bytes for trace_id.lo (uint64_t)
-1 byte for span_id field
-8 bytes for span_id (uint64_t)
-1 byte for is_sampled field
-1 byte for is_sampled (bool) */
-#define TRACE_MAX_CONTEXT_SIZE 31
-
-/* Encode a trace context (ctxt) into proto format to the buffer provided. The
-size of buffer must be at least TRACE_MAX_CONTEXT_SIZE. On success, returns the
-number of bytes successfully encoded into buffer. On failure, returns 0. */
-size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t buf_size);
-
-/* Decode a proto-encoded TraceContext from the provided buffer into the
-TraceContext structure (ctxt). The function expects to be supplied the number
-of bytes to be read from buffer (nbytes). This function will also validate that
-the TraceContext has a span_id and a trace_id, and will return false if either
-of these is missing. Returns true on success and false otherwise. */
-bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer,
- const size_t nbytes);
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */
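For illustration, a round-trip sketch for the encode_trace_context()/decode_trace_context() pair removed above. This is hypothetical and not part of this change: the trace_id_hi/trace_id_lo/span_id fields and their has_ flags are assumed from the generated trace_context.pb.h, mirroring the checks in decode_trace_context().

    /* Hypothetical round-trip sketch (assumptions noted above). */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include "src/core/ext/census/trace_context.h"

    static bool trace_context_round_trip(void) {
      google_trace_TraceContext ctxt;
      memset(&ctxt, 0, sizeof(ctxt));
      ctxt.has_trace_id_hi = true;  /* assumed field/flag names */
      ctxt.trace_id_hi = 0x0123456789abcdefULL;
      ctxt.has_trace_id_lo = true;
      ctxt.trace_id_lo = 0xfedcba9876543210ULL;
      ctxt.has_span_id = true;
      ctxt.span_id = 42;

      uint8_t buf[TRACE_MAX_CONTEXT_SIZE];
      size_t encoded = encode_trace_context(&ctxt, buf, sizeof(buf));
      if (encoded == 0) return false;  /* encoding failed */

      google_trace_TraceContext decoded;
      memset(&decoded, 0, sizeof(decoded));
      /* decode_trace_context() also validates trace_id and span_id presence. */
      return decode_trace_context(&decoded, buf, encoded);
    }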
diff --git a/src/core/ext/census/trace_label.h b/src/core/ext/census/trace_label.h
deleted file mode 100644
index 97ce399eb5..0000000000
--- a/src/core/ext/census/trace_label.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H
-
-#include "src/core/ext/census/trace_string.h"
-
-/* Trace label (key/value pair) stores a label name and the label value. The
- value can be one of trace_string/int64_t/bool. */
-typedef struct trace_label {
- trace_string key;
- enum label_type {
- /* Unknown value for debugging/error purposes */
- LABEL_UNKNOWN = 0,
- /* A string value */
- LABEL_STRING = 1,
- /* An integer value. */
- LABEL_INT = 2,
- /* A boolean value. */
- LABEL_BOOL = 3,
- } value_type;
-
- union value {
- trace_string label_str;
- int64_t label_int;
- bool label_bool;
- } value;
-} trace_label;
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H */
diff --git a/src/core/ext/census/trace_propagation.h b/src/core/ext/census/trace_propagation.h
deleted file mode 100644
index eecfcb7d01..0000000000
--- a/src/core/ext/census/trace_propagation.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H
-
-#include "src/core/ext/census/tracing.h"
-
-/* Encoding and decoding functions for receiving and sending trace contexts
- over the wire. Only RPC libraries should be calling these
- functions. These functions return the number of bytes encoded/decoded
- (0 if a failure has occurred). buf_size indicates the size of the
- input/output buffer. trace_span_context is a struct that includes the
- trace ID, span ID, and a set of option flags (is_sampled, etc.). */
-
-/* Converts a span context to a binary byte buffer. */
-size_t trace_span_context_to_binary(const trace_span_context *ctxt,
- uint8_t *buf, size_t buf_size);
-
-/* Reads a binary byte buffer and populates a span context structure. */
-size_t binary_to_trace_span_context(const uint8_t *buf, size_t buf_size,
- trace_span_context *ctxt);
-
-/* Converts a span context to an http metadata compatible string. */
-size_t trace_span_context_to_http_format(const trace_span_context *ctxt,
- char *buf, size_t buf_size);
-
-/* Reads an http metadata compatible string and populates a span context
- structure. */
-size_t http_format_to_trace_span_context(const char *buf, size_t buf_size,
- trace_span_context *ctxt);
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */
diff --git a/src/core/ext/census/trace_string.h b/src/core/ext/census/trace_string.h
deleted file mode 100644
index e4da3f590d..0000000000
--- a/src/core/ext/census/trace_string.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_STRING_H
-
-#include <grpc/slice.h>
-
-/* String struct for tracing messages. Since this is a C API, we do not have
- access to a string class. This is intended for use by higher level
- languages which wrap around the C API, as most of them have a string class.
- This will also be more efficient when copying, as we have an explicitly
- specified length. Also, grpc_slice has reference counting which allows for
- interning. */
-typedef struct trace_string {
- char *string;
- size_t length;
-} trace_string;
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STRING_H */
diff --git a/src/core/ext/census/tracing.c b/src/core/ext/census/tracing.c
deleted file mode 100644
index 823c681abf..0000000000
--- a/src/core/ext/census/tracing.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/tracing.h"
-
-#include <grpc/census.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include "src/core/ext/census/mlog.h"
-
-void trace_start_span(const trace_span_context *span_ctxt,
- const trace_string name, const start_span_options *opts,
- trace_span_context *new_span_ctxt,
- bool has_remote_parent) {
- // Noop implementation.
-}
-
-void trace_add_span_annotation(const trace_string description,
- const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_add_span_network_event_annotation(const trace_string description,
- const trace_label *labels,
- const size_t n_labels,
- const gpr_timespec timestamp,
- bool sent, uint64_t id,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt) {
- // Noop implementation.
-}
-
-void trace_end_span(const trace_status *status, trace_span_context *span_ctxt) {
- // Noop implementation.
-}
diff --git a/src/core/ext/census/tracing.h b/src/core/ext/census/tracing.h
deleted file mode 100644
index 038c9e2790..0000000000
--- a/src/core/ext/census/tracing.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_TRACING_H
-#define GRPC_CORE_EXT_CENSUS_TRACING_H
-
-#include <grpc/support/time.h>
-#include <stdbool.h>
-#include "src/core/ext/census/trace_context.h"
-#include "src/core/ext/census/trace_label.h"
-#include "src/core/ext/census/trace_status.h"
-
-/* This is the low level tracing API that other languages will interface with.
- This is not intended to be accessed by the end-user; it has therefore been
- designed with performance in mind rather than ease of use. */
-
-/* The tracing level. */
-enum TraceLevel {
- /* Annotations on this context will be silently discarded. */
- NO_TRACING = 0,
- /* Annotations will not be saved to a persistent store. They will be
- available via local APIs only. This setting is not propagated to child
- spans. */
- TRANSIENT_TRACING = 1,
- /* Annotations are recorded for the entire distributed trace and they are
- saved to a persistent store. This setting is propagated to child spans. */
- PERSISTENT_TRACING = 2,
-};
-
-typedef struct trace_span_context {
- /* Trace span context stores Span ID, Trace ID, and option flags. */
- /* Trace ID is 128 bits split into 2 64-bit chunks (hi and lo). */
- uint64_t trace_id_hi;
- uint64_t trace_id_lo;
- /* Span ID is 64 bits. */
- uint64_t span_id;
- /* Span-options is 32-bit value which contains flag options. */
- uint32_t span_options;
-} trace_span_context;
-
-typedef struct start_span_options {
- /* If set, this will override the Span.local_start_time for the Span. */
- gpr_timespec local_start_timestamp;
-
- /* Linked spans can be used to identify spans that are linked to this span in
- a different trace. This can be used (for example) in batching operations,
- where a single batch handler processes multiple requests from different
- traces. If set, points to a list of Spans that are linked to the created Span. */
- trace_span_context *linked_spans;
- /* The number of linked spans. */
- size_t n_linked_spans;
-} start_span_options;
-
-/* Create a new child Span (or root if parent is NULL), with parent being the
- designated Span. The child span will have the provided name and starting
- span options (optional). The bool has_remote_parent marks whether the
- context refers to a remote parent span or not. */
-void trace_start_span(const trace_span_context *span_ctxt,
- const trace_string name, const start_span_options *opts,
- trace_span_context *new_span_ctxt,
- bool has_remote_parent);
-
-/* Add a new Annotation to the Span. Annotations consist of a description
- (trace_string) and a set of n labels (trace_label). This can be populated
- with arbitrary user data. */
-void trace_add_span_annotation(const trace_string description,
- const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt);
-
-/* Add a new NetworkEvent annotation to a Span. This function is only intended
- to be used by RPC systems (either client or server), not by higher level
- applications. The timestamp type will be system-defined, the sent argument
- designates whether this is a network send event (client request, server
- reply) or receive (server request, client reply). The id argument corresponds
- to Span.Annotation.NetworkEvent.id from the data model, and serves to uniquely
- identify each network message. */
-void trace_add_span_network_event(const trace_string description,
- const trace_label *labels,
- const size_t n_labels,
- const gpr_timespec timestamp, bool sent,
- uint64_t id, trace_span_context *span_ctxt);
-
-/* Add a set of labels to the Span. These will correspond to the field
-Span.labels in the data model. */
-void trace_add_span_labels(const trace_label *labels, const size_t n_labels,
- trace_span_context *span_ctxt);
-
-/* Mark the end of Span Execution with the given status. Only the timing of the
-first EndSpan call for a given Span will be recorded, and implementations are
-free to ignore all further calls using the Span. The status argument may
-optionally be NULL. */
-void trace_end_span(const trace_status *status, trace_span_context *span_ctxt);
-
-#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */
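For illustration, a sketch of driving the low-level tracing API declared above: starting a root span and ending it. This is hypothetical, not part of this change; a NULL parent context creates a root span per the trace_start_span() comment, the opts argument is described as optional, and passing a NULL status to trace_end_span() is assumed to be acceptable (the removed implementation was a no-op).

    /* Hypothetical usage sketch (assumptions noted above). */
    #include <stddef.h>
    #include <string.h>
    #include "src/core/ext/census/tracing.h"

    static void example_trace_one_operation(void) {
      trace_string name;
      name.string = (char *)"example.Operation";
      name.length = strlen(name.string);

      trace_span_context span;
      trace_start_span(NULL /* no parent: root span */, name,
                       NULL /* default start options */, &span,
                       false /* no remote parent */);

      /* ... the work being traced would happen here ... */

      trace_end_span(NULL /* status */, &span);
    }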
diff --git a/src/core/ext/census/window_stats.c b/src/core/ext/census/window_stats.c
deleted file mode 100644
index 0058e4bf9c..0000000000
--- a/src/core/ext/census/window_stats.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/census/window_stats.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpc/support/useful.h>
-#include <math.h>
-#include <stddef.h>
-#include <string.h>
-
-/* typedefs make typing long names easier. Use cws (for census_window_stats) */
-typedef census_window_stats_stat_info cws_stat_info;
-typedef struct census_window_stats_sum cws_sum;
-
-/* Each interval is composed of a number of buckets, which hold a count of
- entries and a single statistic */
-typedef struct census_window_stats_bucket {
- int64_t count;
- void *statistic;
-} cws_bucket;
-
-/* Each interval has a set of buckets, and the variables needed to keep
- track of their current state */
-typedef struct census_window_stats_interval_stats {
- /* The buckets. There will be 'granularity' + 1 of these. */
- cws_bucket *buckets;
- /* Index of the bucket containing the smallest time interval. */
- int bottom_bucket;
- /* The smallest time storable in the current window. */
- int64_t bottom;
- /* The largest time storable in the current window + 1ns */
- int64_t top;
- /* The width of each bucket in ns. */
- int64_t width;
-} cws_interval_stats;
-
-typedef struct census_window_stats {
- /* Number of intervals. */
- int nintervals;
- /* Number of buckets in each interval. 'granularity' + 1. */
- int nbuckets;
- /* Record of stat_info. */
- cws_stat_info stat_info;
- /* Stats for each interval. */
- cws_interval_stats *interval_stats;
- /* The time the newest stat was recorded. */
- int64_t newest_time;
-} window_stats;
-
-/* Calculate an actual bucket index from a logical index 'IDX'. Other
- parameters supply information on the interval struct and overall stats. */
-#define BUCKET_IDX(IS, IDX, WSTATS) \
- ((IS->bottom_bucket + (IDX)) % WSTATS->nbuckets)
-
-/* The maximum seconds value we can have in a valid timespec. More than this
- will result in overflow in timespec_to_ns(). This works out to ~292 years.
- TODO: consider using doubles instead of int64. */
-static int64_t max_seconds = (GPR_INT64_MAX - GPR_NS_PER_SEC) / GPR_NS_PER_SEC;
-
-static int64_t timespec_to_ns(const gpr_timespec ts) {
- if (ts.tv_sec > max_seconds) {
- return GPR_INT64_MAX - 1;
- }
- return ts.tv_sec * GPR_NS_PER_SEC + ts.tv_nsec;
-}
-
-static void cws_initialize_statistic(void *statistic,
- const cws_stat_info *stat_info) {
- if (stat_info->stat_initialize == NULL) {
- memset(statistic, 0, stat_info->stat_size);
- } else {
- stat_info->stat_initialize(statistic);
- }
-}
-
-/* Create and initialize a statistic */
-static void *cws_create_statistic(const cws_stat_info *stat_info) {
- void *stat = gpr_malloc(stat_info->stat_size);
- cws_initialize_statistic(stat, stat_info);
- return stat;
-}
-
-window_stats *census_window_stats_create(int nintervals,
- const gpr_timespec intervals[],
- int granularity,
- const cws_stat_info *stat_info) {
- window_stats *ret;
- int i;
- /* validate inputs */
- GPR_ASSERT(nintervals > 0 && granularity > 2 && intervals != NULL &&
- stat_info != NULL);
- for (i = 0; i < nintervals; i++) {
- int64_t ns = timespec_to_ns(intervals[i]);
- GPR_ASSERT(intervals[i].tv_sec >= 0 && intervals[i].tv_nsec >= 0 &&
- intervals[i].tv_nsec < GPR_NS_PER_SEC && ns >= 100 &&
- granularity * 10 <= ns);
- }
- /* Allocate and initialize relevant data structures */
- ret = (window_stats *)gpr_malloc(sizeof(window_stats));
- ret->nintervals = nintervals;
- ret->nbuckets = granularity + 1;
- ret->stat_info = *stat_info;
- ret->interval_stats =
- (cws_interval_stats *)gpr_malloc(nintervals * sizeof(cws_interval_stats));
- for (i = 0; i < nintervals; i++) {
- int64_t size_ns = timespec_to_ns(intervals[i]);
- cws_interval_stats *is = ret->interval_stats + i;
- cws_bucket *buckets = is->buckets =
- (cws_bucket *)gpr_malloc(ret->nbuckets * sizeof(cws_bucket));
- int b;
- for (b = 0; b < ret->nbuckets; b++) {
- buckets[b].statistic = cws_create_statistic(stat_info);
- buckets[b].count = 0;
- }
- is->bottom_bucket = 0;
- is->bottom = 0;
- is->width = size_ns / granularity;
- /* Check for possible overflow issues, and maximize interval size if the
- user requested something large enough. */
- if ((GPR_INT64_MAX - is->width) > size_ns) {
- is->top = size_ns + is->width;
- } else {
- is->top = GPR_INT64_MAX;
- is->width = GPR_INT64_MAX / (granularity + 1);
- }
- /* If size doesn't divide evenly, we can have a width slightly too small;
- better to have it slightly large. */
- if ((size_ns - (granularity + 1) * is->width) > 0) {
- is->width += 1;
- }
- }
- ret->newest_time = 0;
- return ret;
-}
-
-/* When we try adding a measurement above the current interval range, we
- need to "shift" the buckets sufficiently to cover the new range. */
-static void cws_shift_buckets(const window_stats *wstats,
- cws_interval_stats *is, int64_t when_ns) {
- int i;
- /* number of bucket time widths to "shift" */
- int shift;
- /* number of buckets to clear */
- int nclear;
- GPR_ASSERT(when_ns >= is->top);
- /* number of bucket time widths to "shift" */
- shift = ((when_ns - is->top) / is->width) + 1;
- /* number of buckets to clear - limited by actual number of buckets */
- nclear = GPR_MIN(shift, wstats->nbuckets);
- for (i = 0; i < nclear; i++) {
- int b = BUCKET_IDX(is, i, wstats);
- is->buckets[b].count = 0;
- cws_initialize_statistic(is->buckets[b].statistic, &wstats->stat_info);
- }
- /* adjust top/bottom times and current bottom bucket */
- is->bottom_bucket = BUCKET_IDX(is, shift, wstats);
- is->top += shift * is->width;
- is->bottom += shift * is->width;
-}
-
-void census_window_stats_add(window_stats *wstats, const gpr_timespec when,
- const void *stat_value) {
- int i;
- int64_t when_ns = timespec_to_ns(when);
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- cws_interval_stats *is = wstats->interval_stats + i;
- cws_bucket *bucket;
- if (when_ns < is->bottom) { /* Below smallest time in interval: drop */
- continue;
- }
- if (when_ns >= is->top) { /* above limit: shift buckets */
- cws_shift_buckets(wstats, is, when_ns);
- }
- /* Add the stat. */
- GPR_ASSERT(is->bottom <= when_ns && when_ns < is->top);
- bucket = is->buckets +
- BUCKET_IDX(is, (when_ns - is->bottom) / is->width, wstats);
- bucket->count++;
- wstats->stat_info.stat_add(bucket->statistic, stat_value);
- }
- if (when_ns > wstats->newest_time) {
- wstats->newest_time = when_ns;
- }
-}
-
-/* Add a specific bucket contents to an accumulating total. */
-static void cws_add_bucket_to_sum(cws_sum *sum, const cws_bucket *bucket,
- const cws_stat_info *stat_info) {
- sum->count += bucket->count;
- stat_info->stat_add(sum->statistic, bucket->statistic);
-}
-
-/* Add a proportion to an accumulating sum. */
-static void cws_add_proportion_to_sum(double p, cws_sum *sum,
- const cws_bucket *bucket,
- const cws_stat_info *stat_info) {
- sum->count += p * bucket->count;
- stat_info->stat_add_proportion(p, sum->statistic, bucket->statistic);
-}
-
-void census_window_stats_get_sums(const window_stats *wstats,
- const gpr_timespec when, cws_sum sums[]) {
- int i;
- int64_t when_ns = timespec_to_ns(when);
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- int when_bucket;
- int new_bucket;
- double last_proportion = 1.0;
- double bottom_proportion;
- cws_interval_stats *is = wstats->interval_stats + i;
- cws_sum *sum = sums + i;
- sum->count = 0;
- cws_initialize_statistic(sum->statistic, &wstats->stat_info);
- if (when_ns < is->bottom) {
- continue;
- }
- if (when_ns >= is->top) {
- cws_shift_buckets(wstats, is, when_ns);
- }
- /* Working out which buckets to use, and what proportion of each, can get
- complicated. Essentially there are two cases:
- 1) if the "top" bucket (new_bucket, where the newest additions to the
- stats recorded are entered) corresponds to 'when', then we need
- to take a proportion of it (if when < newest_time) or the full
- thing. We also (possibly) need to take a corresponding
- proportion of the bottom bucket.
- 2) In other cases, we just take a straight proportion.
- */
- when_bucket = (when_ns - is->bottom) / is->width;
- new_bucket = (wstats->newest_time - is->bottom) / is->width;
- if (new_bucket == when_bucket) {
- int64_t bottom_bucket_time = is->bottom + when_bucket * is->width;
- if (when_ns < wstats->newest_time) {
- last_proportion = (double)(when_ns - bottom_bucket_time) /
- (double)(wstats->newest_time - bottom_bucket_time);
- bottom_proportion =
- (double)(is->width - (when_ns - bottom_bucket_time)) / is->width;
- } else {
- bottom_proportion =
- (double)(is->width - (wstats->newest_time - bottom_bucket_time)) /
- is->width;
- }
- } else {
- last_proportion =
- (double)(when_ns + 1 - is->bottom - when_bucket * is->width) /
- is->width;
- bottom_proportion = 1.0 - last_proportion;
- }
- cws_add_proportion_to_sum(last_proportion, sum,
- is->buckets + BUCKET_IDX(is, when_bucket, wstats),
- &wstats->stat_info);
- if (when_bucket != 0) { /* last bucket isn't also bottom bucket */
- int b;
- /* Add all of "bottom" bucket if we are looking at a subset of the
- full interval, or a proportion if we are adding full interval. */
- cws_add_proportion_to_sum(
- (when_bucket == wstats->nbuckets - 1 ? bottom_proportion : 1.0), sum,
- is->buckets + is->bottom_bucket, &wstats->stat_info);
- /* Add all the remaining buckets (everything but top and bottom). */
- for (b = 1; b < when_bucket; b++) {
- cws_add_bucket_to_sum(sum, is->buckets + BUCKET_IDX(is, b, wstats),
- &wstats->stat_info);
- }
- }
- }
-}
-
-void census_window_stats_destroy(window_stats *wstats) {
- int i;
- GPR_ASSERT(wstats->interval_stats != NULL);
- for (i = 0; i < wstats->nintervals; i++) {
- int b;
- for (b = 0; b < wstats->nbuckets; b++) {
- gpr_free(wstats->interval_stats[i].buckets[b].statistic);
- }
- gpr_free(wstats->interval_stats[i].buckets);
- }
- gpr_free(wstats->interval_stats);
- /* Ensure any use-after free triggers assert. */
- wstats->interval_stats = NULL;
- gpr_free(wstats);
-}
diff --git a/src/core/ext/census/window_stats.h b/src/core/ext/census/window_stats.h
deleted file mode 100644
index ebe3732008..0000000000
--- a/src/core/ext/census/window_stats.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
-#define GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H
-
-#include <grpc/support/time.h>
-
-/* Keep rolling sums of a user-defined statistic (containing a number of
- measurements) over a number of time intervals ("windows"). For example,
- you can use a window_stats object to answer questions such as
- "Approximately how many RPCs/s did I receive over the past minute, and
- approximately how many bytes did I send out over that period?".
-
- The type of data to record, and the time intervals to keep are specified
- when creating the object via a call to census_window_stats_create().
-
- A window's interval is divided into one or more "buckets"; the interval
- must be divisible by the number of buckets. Internally, these buckets
- control the granularity of window_stats' measurements. Increasing the
- number of buckets lets the object respond more quickly to changes in the
- overall rate of data added into the object, at the cost of additional
- memory usage.
-
- Here's some code which keeps one minute/hour measurements for two values
- (latency in seconds and bytes transferred), with each interval divided into
- 4 buckets.
-
- typedef struct my_stat {
- double latency;
- int bytes;
- } my_stat;
-
- void add_my_stat(void* base, const void* addme) {
- my_stat* b = (my_stat*)base;
- const my_stat* a = (const my_stat*)addme;
- b->latency += a->latency;
- b->bytes += a->bytes;
- }
-
- void add_proportion_my_stat(double p, void* base, const void* addme) {
- my_stat* b = (my_stat*)base;
- const my_stat* a = (const my_stat*)addme;
- b->latency += p * a->latency;
- b->bytes += p * a->bytes;
- }
-
- #define kNumIntervals 2
- #define kMinInterval 0
- #define kHourInterval 1
- #define kNumBuckets 4
-
- const struct census_window_stats_stat_info kMyStatInfo
- = { sizeof(my_stat), NULL, add_my_stat, add_proportion_my_stat };
- gpr_timespec intervals[kNumIntervals] = {{60, 0}, {3600, 0}};
- my_stat stat;
- my_stat sums[kNumIntervals];
- census_window_stats_sums result[kNumIntervals];
- struct census_window_stats* stats
- = census_window_stats_create(kNumIntervals, intervals, kNumBuckets,
- &kMyStatInfo);
- // Record a new event, taking 15.3ms, transferring 1784 bytes.
- stat.latency = 0.153;
- stat.bytes = 1784;
- census_window_stats_add(stats, gpr_now(GPR_CLOCK_REALTIME), &stat);
- // Get sums and print them out
- result[kMinInterval].statistic = &sums[kMinInterval];
- result[kHourInterval].statistic = &sums[kHourInterval];
- census_window_stats_get_sums(stats, gpr_now(GPR_CLOCK_REALTIME), result);
- printf("%d events/min, average time %gs, average bytes %g\n",
- result[kMinInterval].count,
- (my_stat*)result[kMinInterval].statistic->latency /
- result[kMinInterval].count,
- (my_stat*)result[kMinInterval].statistic->bytes /
- result[kMinInterval].count
- );
- printf("%d events/hr, average time %gs, average bytes %g\n",
- result[kHourInterval].count,
- (my_stat*)result[kHourInterval].statistic->latency /
- result[kHourInterval].count,
- (my_stat*)result[kHourInterval].statistic->bytes /
- result[kHourInterval].count
- );
-*/
-
-/* Opaque structure for representing window_stats object */
-struct census_window_stats;
-
-/* Information provided by API user on the information they want to record */
-typedef struct census_window_stats_stat_info {
- /* Number of bytes in user-defined object. */
- size_t stat_size;
- /* Function to initialize a user-defined statistics object. If this is set
- * to NULL, then the object will be zero-initialized. */
- void (*stat_initialize)(void *stat);
- /* Function to add one user-defined statistics object ('addme') to 'base' */
- void (*stat_add)(void *base, const void *addme);
- /* As for previous function, but only add a proportion 'p'. This API will
- currently only use 'p' values in the range [0,1], but other values are
- possible in the future, and should be supported. */
- void (*stat_add_proportion)(double p, void *base, const void *addme);
-} census_window_stats_stat_info;
-
-/* Create a new window_stats object. 'nintervals' is the number of
- 'intervals', and must be >=1. 'granularity' is the number of buckets, with
- a larger number using more memory, but providing greater accuracy of
- results. 'granularity' should be > 2. We also require that each interval be
- at least 10 * 'granularity' nanoseconds in size. 'stat_info' contains
- information about the statistic to be gathered. Intervals greater than ~192
- years will be treated as essentially infinite in size. This function will
- GPR_ASSERT() if the object cannot be created or any of the parameters have
- invalid values. This function is thread-safe. */
-struct census_window_stats *census_window_stats_create(
- int nintervals, const gpr_timespec intervals[], int granularity,
- const census_window_stats_stat_info *stat_info);
-
-/* Add a new measurement (in 'stat_value'), as of a given time ('when').
- This function is thread-compatible. */
-void census_window_stats_add(struct census_window_stats *wstats,
- const gpr_timespec when, const void *stat_value);
-
-/* Structure used to record a single interval's sum for a given statistic */
-typedef struct census_window_stats_sum {
- /* Total count of samples. Note that because some internal interpolation
- is performed, the count of samples returned for each interval may not be an
- integral value. */
- double count;
- /* Sum for statistic */
- void *statistic;
-} census_window_stats_sums;
-
-/* Retrieve a set of all values stored in a window_stats object 'wstats'. The
- number of 'sums' MUST be the same as the number 'nintervals' used in
- census_window_stats_create(). This function is thread-compatible. */
-void census_window_stats_get_sums(const struct census_window_stats *wstats,
- const gpr_timespec when,
- struct census_window_stats_sum sums[]);
-
-/* Destroy a window_stats object. Once this function has been called, the
- object will no longer be usable from any of the above functions (and
- calling them will most likely result in a NULL-pointer dereference or
- assertion failure). This function is thread-compatible. */
-void census_window_stats_destroy(struct census_window_stats *wstats);
-
-#endif /* GRPC_CORE_EXT_CENSUS_WINDOW_STATS_H */
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
new file mode 100644
index 0000000000..466bf86bc0
--- /dev/null
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -0,0 +1,158 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/pollset.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+#define DEFAULT_POLL_INTERVAL_MS 5000
+
+typedef struct backup_poller {
+ grpc_timer polling_timer;
+ grpc_closure run_poller_closure;
+ grpc_closure shutdown_closure;
+ gpr_mu* pollset_mu;
+ grpc_pollset* pollset; // guarded by pollset_mu
+ bool shutting_down; // guarded by pollset_mu
+ gpr_refcount refs;
+ gpr_refcount shutdown_refs;
+} backup_poller;
+
+static gpr_once g_once = GPR_ONCE_INIT;
+static gpr_mu g_poller_mu;
+static backup_poller* g_poller = NULL; // guarded by g_poller_mu
+// g_poll_interval_ms is set only once, the first time
+// grpc_client_channel_start_backup_polling() is called; after that it is
+// treated as const.
+static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS;
+
+static void init_globals() {
+ gpr_mu_init(&g_poller_mu);
+ char* env = gpr_getenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS");
+ if (env != NULL) {
+ int poll_interval_ms = gpr_parse_nonnegative_int(env);
+ if (poll_interval_ms == -1) {
+ gpr_log(GPR_ERROR,
+ "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %s, "
+ "default value %d will be used.",
+ env, g_poll_interval_ms);
+ } else {
+ g_poll_interval_ms = poll_interval_ms;
+ }
+ }
+ gpr_free(env);
+}
+
+static void backup_poller_shutdown_unref(grpc_exec_ctx* exec_ctx,
+ backup_poller* p) {
+ if (gpr_unref(&p->shutdown_refs)) {
+ grpc_pollset_destroy(exec_ctx, p->pollset);
+ gpr_free(p->pollset);
+ gpr_free(p);
+ }
+}
+
+static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ backup_poller_shutdown_unref(exec_ctx, (backup_poller*)arg);
+}
+
+static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
+ if (gpr_unref(&g_poller->refs)) {
+ gpr_mu_lock(&g_poller_mu);
+ backup_poller* p = g_poller;
+ g_poller = NULL;
+ gpr_mu_unlock(&g_poller_mu);
+ gpr_mu_lock(p->pollset_mu);
+ p->shutting_down = true;
+ grpc_pollset_shutdown(exec_ctx, p->pollset,
+ GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller,
+ p, grpc_schedule_on_exec_ctx));
+ gpr_mu_unlock(p->pollset_mu);
+ grpc_timer_cancel(exec_ctx, &p->polling_timer);
+ }
+}
+
+static void run_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ backup_poller* p = (backup_poller*)arg;
+ if (error != GRPC_ERROR_NONE) {
+ if (error != GRPC_ERROR_CANCELLED) {
+ GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
+ }
+ backup_poller_shutdown_unref(exec_ctx, p);
+ return;
+ }
+ gpr_mu_lock(p->pollset_mu);
+ if (p->shutting_down) {
+ gpr_mu_unlock(p->pollset_mu);
+ backup_poller_shutdown_unref(exec_ctx, p);
+ return;
+ }
+ grpc_error* err = grpc_pollset_work(exec_ctx, p->pollset, NULL,
+ grpc_exec_ctx_now(exec_ctx));
+ gpr_mu_unlock(p->pollset_mu);
+ GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
+ grpc_timer_init(exec_ctx, &p->polling_timer,
+ grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+ &p->run_poller_closure);
+}
+
+void grpc_client_channel_start_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+ gpr_once_init(&g_once, init_globals);
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ gpr_mu_lock(&g_poller_mu);
+ if (g_poller == NULL) {
+ g_poller = (backup_poller*)gpr_zalloc(sizeof(backup_poller));
+ g_poller->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+ g_poller->shutting_down = false;
+ grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
+ gpr_ref_init(&g_poller->refs, 0);
+ // one for timer cancellation, one for pollset shutdown
+ gpr_ref_init(&g_poller->shutdown_refs, 2);
+ GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
+ grpc_schedule_on_exec_ctx);
+ grpc_timer_init(exec_ctx, &g_poller->polling_timer,
+ grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+ &g_poller->run_poller_closure);
+ }
+ gpr_ref(&g_poller->refs);
+ gpr_mu_unlock(&g_poller_mu);
+ grpc_pollset_set_add_pollset(exec_ctx, interested_parties, g_poller->pollset);
+}
+
+void grpc_client_channel_stop_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+ if (g_poll_interval_ms == 0) {
+ return;
+ }
+ grpc_pollset_set_del_pollset(exec_ctx, interested_parties, g_poller->pollset);
+ g_poller_unref(exec_ctx);
+}
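The backup poll interval above is read once from the GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS environment variable. As a hedged sketch (not part of this change), it could be overridden before the first client channel is created; gpr_setenv is assumed to be the setter counterpart of the gpr_getenv call used above, from the same env.h header.

    /* Hypothetical sketch (assumptions noted above). */
    #include "src/core/lib/support/env.h"

    static void disable_backup_polling_for_tests(void) {
      /* Must run before the first call to
         grpc_client_channel_start_backup_polling() latches the value; a value
         of 0 makes both start/stop entry points return early, disabling the
         backup poller entirely. */
      gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "0");
    }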
diff --git a/src/core/ext/census/grpc_filter.h b/src/core/ext/filters/client_channel/backup_poller.h
index baa7bb931a..e993d50639 100644
--- a/src/core/ext/census/grpc_filter.h
+++ b/src/core/ext/filters/client_channel/backup_poller.h
@@ -16,14 +16,19 @@
*
*/
-#ifndef GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
-#define GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
+#include <grpc/grpc.h>
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
-/* Census filters: provides tracing and stats collection functionalities. It
- needs to reside right below the surface filter in the channel stack. */
-extern const grpc_channel_filter grpc_client_census_filter;
-extern const grpc_channel_filter grpc_server_census_filter;
+/* Start polling \a interested_parties periodically in the timer thread */
+void grpc_client_channel_start_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
-#endif /* GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H */
+/* Stop polling \a interested_parties */
+void grpc_client_channel_stop_backup_polling(
+ grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.cc
index b83c95275f..31a8fc39ce 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.c
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/surface/channel.h"
+#include <inttypes.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -86,20 +88,20 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
- int delete = 0;
- state_watcher *w = pw;
+ bool should_delete = false;
+ state_watcher *w = (state_watcher *)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
case READY_TO_CALL_BACK:
GPR_UNREACHABLE_CODE(return );
case CALLING_BACK_AND_FINISHED:
- delete = 1;
+ should_delete = true;
break;
}
gpr_mu_unlock(&w->mu);
- if (delete) {
+ if (should_delete) {
delete_state_watcher(exec_ctx, w);
}
}
@@ -161,12 +163,12 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
- partly_done(exec_ctx, pw, true, GRPC_ERROR_REF(error));
+ partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
}
static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
grpc_error *error) {
- partly_done(exec_ctx, pw, false, GRPC_ERROR_REF(error));
+ partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
}
int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
@@ -186,18 +188,24 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
- gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC),
- &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timespec_to_millis_round_up(wa->deadline),
+ &wa->w->on_timeout);
gpr_free(wa);
}
+int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
+ grpc_channel_element *client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+ return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
+}
+
void grpc_channel_watch_connectivity_state(
grpc_channel *channel, grpc_connectivity_state last_observed_state,
gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- state_watcher *w = gpr_malloc(sizeof(*w));
+ state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -222,7 +230,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
- watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg *wa =
+ (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.cc
index 58e31d7b45..00c51ba543 100644
--- a/src/core/ext/filters/client_channel/client_channel.c
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -16,8 +16,11 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/ext/filters/client_channel/client_channel.h"
+#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
@@ -28,6 +31,7 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
+#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
@@ -68,7 +72,7 @@ typedef enum {
typedef struct {
gpr_refcount refs;
- gpr_timespec timeout;
+ grpc_millis timeout;
wait_for_ready_value wait_for_ready;
} method_parameters;
@@ -85,7 +89,7 @@ static void method_parameters_unref(method_parameters *method_params) {
}
static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
- method_parameters_unref(value);
+ method_parameters_unref((method_parameters *)value);
}
static bool parse_wait_for_ready(grpc_json *field,
@@ -98,17 +102,18 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
-static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
+static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
char *buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
char *decimal_point = strchr(buf, '.');
+ int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
- timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1);
- if (timeout->tv_nsec == -1) {
+ nanos = gpr_parse_nonnegative_int(decimal_point + 1);
+ if (nanos == -1) {
gpr_free(buf);
return false;
}
@@ -127,28 +132,30 @@ static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) {
gpr_free(buf);
return false;
}
- timeout->tv_nsec *= multiplier;
+ nanos *= multiplier;
}
- timeout->tv_sec = gpr_parse_nonnegative_int(buf);
+ int seconds = gpr_parse_nonnegative_int(buf);
gpr_free(buf);
- if (timeout->tv_sec == -1) return false;
+ if (seconds == -1) return false;
+ *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
return true;
}
static void *method_parameters_create_from_json(const grpc_json *json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
- gpr_timespec timeout = {0, 0, GPR_TIMESPAN};
+ grpc_millis timeout = 0;
for (grpc_json *field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL;
} else if (strcmp(field->key, "timeout") == 0) {
- if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate.
+ if (timeout > 0) return NULL; // Duplicate.
if (!parse_timeout(field, &timeout)) return NULL;
}
}
- method_parameters *value = gpr_malloc(sizeof(method_parameters));
+ method_parameters *value =
+ (method_parameters *)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -254,7 +261,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- lb_policy_connectivity_watcher *w = arg;
+ lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -281,7 +288,8 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
- lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
+ lb_policy_connectivity_watcher *w =
+ (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -310,7 +318,8 @@ typedef struct {
} service_config_parsing_state;
static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
- service_config_parsing_state *parsing_state = arg;
+ service_config_parsing_state *parsing_state =
+ (service_config_parsing_state *)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -365,14 +374,14 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
- char *lb_policy_name = NULL;
+ char *lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
grpc_lb_policy *new_lb_policy = NULL;
char *service_config_json = NULL;
@@ -380,6 +389,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_hash_table *method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
+ const char *lb_policy_name = NULL;
const grpc_arg *channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
@@ -391,7 +401,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
+ grpc_lb_addresses *addresses =
+ (grpc_lb_addresses *)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -469,7 +480,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
// Before we clean up, save a copy of lb_policy_name, since it might
// be pointing to data inside chand->resolver_result.
// The copy will be saved in chand->lb_policy_name below.
- lb_policy_name = gpr_strdup(lb_policy_name);
+ lb_policy_name_dup = gpr_strdup(lb_policy_name);
grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
chand->resolver_result = NULL;
}
@@ -477,8 +488,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"",
- chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "",
- service_config_json);
+ chand, lb_policy_name_dup,
+ lb_policy_name_changed ? " (changed)" : "", service_config_json);
}
// Now swap out fields in chand. Note that the new values may still
// be NULL if (e.g.) the resolver failed to return results or the
@@ -486,9 +497,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
//
// First, swap out the data used by cc_get_channel_info().
gpr_mu_lock(&chand->info_mu);
- if (lb_policy_name != NULL) {
+ if (lb_policy_name_dup != NULL) {
gpr_free(chand->info_lb_policy_name);
- chand->info_lb_policy_name = lb_policy_name;
+ chand->info_lb_policy_name = lb_policy_name_dup;
}
if (service_config_json != NULL) {
gpr_free(chand->info_service_config_json);
@@ -586,9 +597,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- grpc_transport_op *op = arg;
- grpc_channel_element *elem = op->handler_private.extra_arg;
- channel_data *chand = elem->channel_data;
+ grpc_transport_op *op = (grpc_transport_op *)arg;
+ grpc_channel_element *elem =
+ (grpc_channel_element *)op->handler_private.extra_arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -642,7 +654,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -662,7 +674,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -682,7 +694,7 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -701,6 +713,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
chand->interested_parties = grpc_pollset_set_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
+ grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@@ -712,8 +725,10 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer");
}
- grpc_client_channel_factory_ref(arg->value.pointer.p);
- chand->client_channel_factory = arg->value.pointer.p;
+ grpc_client_channel_factory_ref(
+ (grpc_client_channel_factory *)arg->value.pointer.p);
+ chand->client_channel_factory =
+ (grpc_client_channel_factory *)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -745,7 +760,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resolver *resolver = arg;
+ grpc_resolver *resolver = (grpc_resolver *)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
@@ -753,7 +768,7 @@ static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
@@ -777,6 +792,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
+ grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
@@ -796,7 +812,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
// send_message
// recv_trailing_metadata
// send_trailing_metadata
-#define MAX_WAITING_BATCHES 6
+// We also add room for a single cancel_stream batch.
+#define MAX_WAITING_BATCHES 7
/** Call data. Holds a pointer to grpc_subchannel_call and the
associated machinery to create such a pointer.
@@ -807,24 +824,27 @@ typedef struct client_channel_call_data {
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
- // and this struct both independently store a pointer to the call
- // stack and each has its own mutex. If/when we have time, find a way
- // to avoid this without breaking the grpc_deadline_state abstraction.
+ // and this struct both independently store pointers to the call stack
+ // and call combiner. If/when we have time, find a way to avoid this
+ // without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
grpc_slice path; // Request path.
gpr_timespec call_start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
+ gpr_arena *arena;
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
+
grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
- /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
- bit is 0), or a pointer to an error (if the lowest bit is 1) */
- gpr_atm subchannel_call_or_error;
- gpr_arena *arena;
+ grpc_subchannel_call *subchannel_call;
+ grpc_error *error;
grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
+ grpc_closure lb_pick_cancel_closure;
grpc_connected_subchannel *connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -832,10 +852,9 @@ typedef struct client_channel_call_data {
grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
+ grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch_payload *initial_metadata_payload;
-
- grpc_call_stack *owning_call;
+ grpc_transport_stream_op_batch *initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
@@ -843,99 +862,112 @@ typedef struct client_channel_call_data {
grpc_closure *original_on_complete;
} call_data;
-typedef struct {
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
-} call_or_error;
-
-static call_or_error get_call_or_error(call_data *p) {
- gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
- if (c == 0)
- return (call_or_error){NULL, NULL};
- else if (c & 1)
- return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
- else
- return (call_or_error){(grpc_subchannel_call *)c, NULL};
+grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ return calld->subchannel_call;
}
-static bool set_call_or_error(call_data *p, call_or_error coe) {
- // this should always be under a lock
- call_or_error existing = get_call_or_error(p);
- if (existing.error != GRPC_ERROR_NONE) {
- GRPC_ERROR_UNREF(coe.error);
- return false;
- }
- GPR_ASSERT(existing.subchannel_call == NULL);
- if (coe.error != GRPC_ERROR_NONE) {
- GPR_ASSERT(coe.subchannel_call == NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_add(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->initial_metadata_batch == NULL);
+ calld->initial_metadata_batch = batch;
} else {
- GPR_ASSERT(coe.subchannel_call != NULL);
- gpr_atm_rel_store(&p->subchannel_call_or_error,
- (gpr_atm)coe.subchannel_call);
+ GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+ batch;
}
- return true;
}
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *call_elem) {
- return get_call_or_error(call_elem->call_data).subchannel_call;
-}
-
-static void waiting_for_pick_batches_add_locked(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
- batch;
+// This is called via the call combiner, so access to calld is synchronized.
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ call_data *calld = (call_data *)arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
+ GRPC_ERROR_REF(error), calld->call_combiner);
+ }
}
-static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = elem->call_data;
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_error *error) {
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
+ "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, calld->waiting_for_pick_batches_count,
grpc_error_string(error));
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ fail_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_REF(error),
+ "waiting_for_pick_batches_fail");
+ }
+ if (calld->initial_metadata_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
+ exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ } else {
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "waiting_for_pick_batches_fail");
}
- calld->waiting_for_pick_batches_count = 0;
GRPC_ERROR_UNREF(error);
}
-static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- if (calld->waiting_for_pick_batches_count == 0) return;
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
- waiting_for_pick_batches_fail_locked(exec_ctx, elem,
- GRPC_ERROR_REF(coe.error));
- return;
+// This is called via the call combiner, so access to calld is synchronized.
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *ignored) {
+ call_data *calld = (call_data *)arg;
+ if (calld->waiting_for_pick_batches_count > 0) {
+ --calld->waiting_for_pick_batches_count;
+ grpc_subchannel_call_process_op(
+ exec_ctx, calld->subchannel_call,
+ calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
}
+}
+
+// This is called via the call combiner, so access to calld is synchronized.
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
- coe.subchannel_call);
+ chand, calld, calld->waiting_for_pick_batches_count,
+ calld->subchannel_call);
}
for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
- calld->waiting_for_pick_batches[i]);
- }
- calld->waiting_for_pick_batches_count = 0;
+ GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
+ run_pending_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ &calld->handle_pending_batch_in_call_combiner[i],
+ GRPC_ERROR_NONE,
+ "waiting_for_pick_batches_resume");
+ }
+ GPR_ASSERT(calld->initial_metadata_batch != NULL);
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+ calld->initial_metadata_batch);
}
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -945,18 +977,18 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
- calld->method_params = grpc_method_config_table_get(
+ calld->method_params = (method_parameters *)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
// If the deadline from the service config is shorter than the one
// from the client API, reset the deadline timer.
if (chand->deadline_checking_enabled &&
- gpr_time_cmp(calld->method_params->timeout,
- gpr_time_0(GPR_TIMESPAN)) != 0) {
- const gpr_timespec per_method_deadline =
- gpr_time_add(calld->call_start_time, calld->method_params->timeout);
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ calld->method_params->timeout != 0) {
+ const grpc_millis per_method_deadline =
+ grpc_timespec_to_millis_round_up(calld->call_start_time) +
+ calld->method_params->timeout;
+ if (per_method_deadline < calld->deadline) {
calld->deadline = per_method_deadline;
grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
}
@@ -968,194 +1000,98 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call = NULL;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
- .pollent = calld->pollent,
- .path = calld->path,
- .start_time = calld->call_start_time,
- .deadline = calld->deadline,
- .arena = calld->arena,
- .context = calld->subchannel_call_context};
+ calld->pollent, // pollent
+ calld->path, // path
+ calld->call_start_time, // start_time
+ calld->deadline, // deadline
+ calld->arena, // arena
+ calld->subchannel_call_context, // context
+ calld->call_combiner // call_combiner
+ };
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args,
+ &calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
- elem->channel_data, calld, subchannel_call,
- grpc_error_string(new_error));
+ chand, calld, calld->subchannel_call, grpc_error_string(new_error));
}
- GPR_ASSERT(set_call_or_error(
- calld, (call_or_error){.subchannel_call = subchannel_call}));
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
} else {
- waiting_for_pick_batches_resume_locked(exec_ctx, elem);
+ waiting_for_pick_batches_resume(exec_ctx, elem);
}
GRPC_ERROR_UNREF(error);
}
-static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
- call_or_error coe = get_call_or_error(calld);
+// Invoked when a pick is completed, on both success or failure.
+static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_error *error) {
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
- grpc_error *failure =
- error == GRPC_ERROR_NONE
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy")
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to create subchannel", &error, 1);
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = error == GRPC_ERROR_NONE
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Call dropped by load balancing policy")
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failed to create subchannel: error=%s", chand,
- calld, grpc_error_string(failure));
+ calld, grpc_error_string(calld->error));
}
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
- } else if (coe.error != GRPC_ERROR_NONE) {
- /* already cancelled before subchannel became ready */
- grpc_error *child_errors[] = {error, coe.error};
- grpc_error *cancellation_error =
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Cancelled before creating subchannel", child_errors,
- GPR_ARRAY_SIZE(child_errors));
- /* if due to deadline, attach the deadline exceeded status to the error */
- if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
- cancellation_error =
- grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_DEADLINE_EXCEEDED);
- }
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: cancelled before subchannel became ready: %s",
- chand, calld, grpc_error_string(cancellation_error));
- }
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
+ waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
} else {
/* Create call on subchannel. */
create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
GRPC_ERROR_UNREF(error);
}
-static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- call_data *calld = elem->call_data;
- grpc_subchannel_call *subchannel_call =
- get_call_or_error(calld).subchannel_call;
- if (subchannel_call == NULL) {
- return NULL;
- } else {
- return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
- }
+// A wrapper around pick_done_locked() that is used in cases where
+// either (a) the pick was deferred pending a resolver result or (b) the
+// pick was done asynchronously. Removes the call's polling entity from
+// chand->interested_parties before invoking pick_done_locked().
+static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem, grpc_error *error) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
+ pick_done_locked(exec_ctx, elem, error);
}
-/** Return true if subchannel is available immediately (in which case
- subchannel_ready_locked() should not be called), or false otherwise (in
- which case subchannel_ready_locked() should be called when the subchannel
- is available). */
-static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem);
-
-typedef struct {
- grpc_call_element *elem;
- bool cancelled;
- grpc_closure closure;
-} pick_after_resolver_result_args;
-
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args = arg;
- if (args->cancelled) {
- /* cancelled, do nothing */
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "call cancelled before resolver result");
- }
- } else {
- channel_data *chand = args->elem->channel_data;
- call_data *calld = args->elem->call_data;
- if (error != GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
- chand, calld);
- }
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
- } else {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
- chand, calld);
- }
- if (pick_subchannel_locked(exec_ctx, args->elem)) {
- subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
- }
- }
- }
- gpr_free(args);
-}
-
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: deferring pick pending resolver result", chand,
- calld);
- }
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
- args->elem = elem;
- GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
- args, grpc_combiner_scheduler(chand->combiner));
- grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
- &args->closure, GRPC_ERROR_NONE);
-}
-
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- // If we don't yet have a resolver result, then a closure for
- // pick_after_resolver_result_done_locked() will have been added to
- // chand->waiting_for_resolver_result_closures, and it may not be invoked
- // until after this call has been destroyed. We mark the operation as
- // cancelled, so that when pick_after_resolver_result_done_locked()
- // is called, it will be a no-op. We also immediately invoke
- // subchannel_ready_locked() to propagate the error back to the caller.
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
- closure != NULL; closure = closure->next_data.next) {
- pick_after_resolver_result_args *args = closure->cb_arg;
- if (!args->cancelled && args->elem == elem) {
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: "
- "cancelling pick waiting for resolver result",
- chand, calld);
- }
- args->cancelled = true;
- subchannel_ready_locked(exec_ctx, elem,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ chand, calld, calld->lb_policy);
}
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+ &calld->connected_subchannel,
+ GRPC_ERROR_REF(error));
}
- GRPC_ERROR_UNREF(error);
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
}
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy after invoking subchannel_ready_locked().
+// Unrefs the LB policy and invokes async_pick_done_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1163,28 +1099,51 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(calld->lb_policy != NULL);
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
- subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
}
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_lb_policy_pick_args *inputs) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
}
+ apply_service_config_to_call_locked(exec_ctx, elem);
+ // If the application explicitly set wait_for_ready, use that.
+ // Otherwise, if the service config specified a value for this
+ // method, use that.
+ uint32_t initial_metadata_flags =
+ calld->initial_metadata_batch->payload->send_initial_metadata
+ .send_initial_metadata_flags;
+ const bool wait_for_ready_set_from_api =
+ initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
+ const bool wait_for_ready_set_from_service_config =
+ calld->method_params != NULL &&
+ calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
+ if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
+ if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
+ initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ } else {
+ initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
+ }
+ }
+ const grpc_lb_policy_pick_args inputs = {
+ calld->initial_metadata_batch->payload->send_initial_metadata
+ .send_initial_metadata,
+ initial_metadata_flags, &calld->lb_token_mdelem};
// Keep a ref to the LB policy in calld while the pick is pending.
GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel");
calld->lb_policy = chand->lb_policy;
GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
grpc_combiner_scheduler(chand->combiner));
const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, chand->lb_policy, inputs, &calld->connected_subchannel,
+ exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
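With pick_subchannel_locked() folded into pick_callback_start_locked() above, the wait_for_ready precedence now lives in that function: an explicit per-call setting from the API wins, otherwise a value from the service config is applied. A small standalone sketch of just that precedence rule; the flag constants and apply_wait_for_ready are illustrative stand-ins for the GRPC_INITIAL_METADATA_* bits, not gRPC APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAIT_FOR_READY          (1u << 0) /* stand-in flag bits */
#define WAIT_FOR_READY_EXPLICIT (1u << 1)

typedef enum { WFR_UNSET, WFR_FALSE, WFR_TRUE } wait_for_ready_value;

/* Service config only wins when the application did not set the flag explicitly. */
static uint32_t apply_wait_for_ready(uint32_t flags, wait_for_ready_value cfg) {
  const bool set_from_api = (flags & WAIT_FOR_READY_EXPLICIT) != 0;
  if (!set_from_api && cfg != WFR_UNSET) {
    if (cfg == WFR_TRUE) {
      flags |= WAIT_FOR_READY;
    } else {
      flags &= ~WAIT_FOR_READY;
    }
  }
  return flags;
}

int main(void) {
  /* Service config turns wait_for_ready on when the API said nothing. */
  printf("%u\n", apply_wait_for_ready(0, WFR_TRUE)); /* 1 */
  /* An explicit API setting is left untouched. */
  printf("%u\n", apply_wait_for_ready(WAIT_FOR_READY_EXPLICIT, WFR_FALSE)); /* 2 */
  return 0;
}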
@@ -1194,160 +1153,187 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
calld->lb_policy = NULL;
+ } else {
+ GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
+ pick_callback_cancel_locked, elem,
+ grpc_combiner_scheduler(chand->combiner)));
}
return pick_done;
}
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- GPR_ASSERT(calld->lb_policy != NULL);
+typedef struct {
+ grpc_call_element *elem;
+ bool finished;
+ grpc_closure closure;
+ grpc_closure cancel_closure;
+} pick_after_resolver_result_args;
+
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
+ if (args->finished) {
+ gpr_free(args);
+ return;
+ }
+ // If we don't yet have a resolver result, then a closure for
+ // pick_after_resolver_result_done_locked() will have been added to
+ // chand->waiting_for_resolver_result_closures, and it may not be invoked
+ // until after this call has been destroyed. We mark the operation as
+ // finished, so that when pick_after_resolver_result_done_locked()
+ // is called, it will be a no-op. We also immediately invoke
+ // async_pick_done_locked() to propagate the error back to the caller.
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
- chand, calld, calld->lb_policy);
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: cancelling pick waiting for resolver result",
+ chand, calld);
}
- grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
- &calld->connected_subchannel, error);
+ // Note: Although we are not in the call combiner here, we are
+ // basically stealing the call combiner from the pending pick, so
+ // it's safe to call async_pick_done_locked() here -- we are
+ // essentially calling it here instead of calling it in
+ // pick_after_resolver_result_done_locked().
+ async_pick_done_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
-static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- GPR_TIMER_BEGIN("pick_subchannel", 0);
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
- bool pick_done = false;
- if (chand->lb_policy != NULL) {
- apply_service_config_to_call_locked(exec_ctx, elem);
- // If the application explicitly set wait_for_ready, use that.
- // Otherwise, if the service config specified a value for this
- // method, use that.
- uint32_t initial_metadata_flags =
- calld->initial_metadata_payload->send_initial_metadata
- .send_initial_metadata_flags;
- const bool wait_for_ready_set_from_api =
- initial_metadata_flags &
- GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
- const bool wait_for_ready_set_from_service_config =
- calld->method_params != NULL &&
- calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
- if (!wait_for_ready_set_from_api &&
- wait_for_ready_set_from_service_config) {
- if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
- initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
- } else {
- initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
- }
- }
- const grpc_lb_policy_pick_args inputs = {
- calld->initial_metadata_payload->send_initial_metadata
- .send_initial_metadata,
- initial_metadata_flags, &calld->lb_token_mdelem};
- pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
- } else if (chand->resolver != NULL) {
- if (!chand->started_resolving) {
- start_resolving_locked(exec_ctx, chand);
- }
- pick_after_resolver_result_start_locked(exec_ctx, elem);
- } else {
- subchannel_ready_locked(
- exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
- }
- GPR_TIMER_END("pick_subchannel", 0);
- return pick_done;
-}
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem);
-static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
+static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
- grpc_error *error_ignored) {
- GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
- grpc_transport_stream_op_batch *batch = arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- /* need to recheck that another thread hasn't set the call */
- call_or_error coe = get_call_or_error(calld);
- if (coe.error != GRPC_ERROR_NONE) {
+ grpc_error *error) {
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)arg;
+ if (args->finished) {
+ /* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG, "call cancelled before resolver result");
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
- goto done;
+ gpr_free(args);
+ return;
}
- if (coe.subchannel_call != NULL) {
+ args->finished = true;
+ grpc_call_element *elem = args->elem;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+ chand, calld);
+ }
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+ } else if (chand->lb_policy != NULL) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+ chand, calld);
+ }
+ if (pick_callback_start_locked(exec_ctx, elem)) {
+ // Even if the LB policy returns a result synchronously, we have
+ // already added our polling entity to chand->interested_parties
+ // in order to wait for the resolver result, so we need to
+ // remove it here. Therefore, we call async_pick_done_locked()
+ // instead of pick_done_locked().
+ async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
}
- // Add to waiting-for-pick list. If we succeed in getting a
- // subchannel call below, we'll handle this batch (along with any
- // other waiting batches) in waiting_for_pick_batches_resume_locked().
- waiting_for_pick_batches_add_locked(calld, batch);
- // If this is a cancellation, cancel the pending pick (if any) and
- // fail any pending batches.
- if (batch->cancel_stream) {
- grpc_error *error = batch->payload->cancel_stream.cancel_error;
+ // TODO(roth): It should be impossible for chand->lb_policy to be NULL
+ // here, so the rest of this code should never actually be executed.
+ // However, we have reports of a crash on iOS that triggers this case,
+ // so we are temporarily adding this to restore branches that were
+ // removed in https://github.com/grpc/grpc/pull/12297. Need to figure
+ // out what is actually causing this to occur and then figure out the
+ // right way to deal with it.
+ else if (chand->resolver != NULL) {
+ // No LB policy, so try again.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
- calld, grpc_error_string(error));
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: resolver returned but no LB policy, "
+ "trying again",
+ chand, calld);
}
- /* Stash a copy of cancel_error in our call data, so that we can use
- it for subsequent operations. This ensures that if the call is
- cancelled before any batches are passed down (e.g., if the deadline
- is in the past when the call starts), we can return the right
- error to the caller when the first batch does get passed down. */
- set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
- if (calld->lb_policy != NULL) {
- pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- } else {
- pick_after_resolver_result_cancel_locked(exec_ctx, elem,
- GRPC_ERROR_REF(error));
+ pick_after_resolver_result_start_locked(exec_ctx, elem);
+ } else {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
+ calld);
}
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
- goto done;
+ async_pick_done_locked(
+ exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
- /* if we don't have a subchannel, try to get one */
- if (batch->send_initial_metadata) {
- GPR_ASSERT(calld->connected_subchannel == NULL);
- calld->initial_metadata_payload = batch->payload;
- GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
- /* If a subchannel is not available immediately, the polling entity from
- call_data should be provided to channel_data's interested_parties, so
- that IO of the lb_policy and resolver could be done under it. */
- if (pick_subchannel_locked(exec_ctx, elem)) {
- // Pick was returned synchronously.
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
- if (calld->connected_subchannel == NULL) {
- grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy");
- set_call_or_error(calld,
- (call_or_error){.error = GRPC_ERROR_REF(error)});
- waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
- } else {
- // Create subchannel call.
- create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
- }
- } else {
- grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
- chand->interested_parties);
+}
+
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: deferring pick pending resolver result", chand,
+ calld);
+ }
+ pick_after_resolver_result_args *args =
+ (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ args->elem = elem;
+ GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
+ args, grpc_combiner_scheduler(chand->combiner));
+ grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
+ &args->closure, GRPC_ERROR_NONE);
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&args->cancel_closure,
+ pick_after_resolver_result_cancel_locked, args,
+ grpc_combiner_scheduler(chand->combiner)));
+}
+
+static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ GPR_ASSERT(calld->connected_subchannel == NULL);
+ if (chand->lb_policy != NULL) {
+ // We already have an LB policy, so ask it for a pick.
+ if (pick_callback_start_locked(exec_ctx, elem)) {
+ // Pick completed synchronously.
+ pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+ return;
}
+ } else {
+ // We do not yet have an LB policy, so wait for a resolver result.
+ if (chand->resolver == NULL) {
+ pick_done_locked(exec_ctx, elem,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+ return;
+ }
+ if (!chand->started_resolving) {
+ start_resolving_locked(exec_ctx, chand);
+ }
+ pick_after_resolver_result_start_locked(exec_ctx, elem);
}
-done:
- GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
- "start_transport_stream_op_batch");
- GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
+ // We need to wait for either a resolver result or for an async result
+ // from the LB policy. Add the polling entity from call_data to the
+ // channel_data's interested_parties, so that the I/O of the LB policy
+ // and resolver can be done under it. The polling entity will be
+ // removed in async_pick_done_locked().
+ grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+ chand->interested_parties);
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = arg;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1365,27 +1351,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_REF(error));
}
-/* The logic here is fairly complicated, due to (a) the fact that we
- need to handle the case where we receive the send op before the
- initial metadata op, and (b) the need for efficiency, especially in
- the streaming case.
-
- We use double-checked locking to initially see if initialization has been
- performed. If it has not, we acquire the combiner and perform initialization.
- If it has, we proceed on the fast path. */
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
- GRPC_TRACER_ON(grpc_trace_channel)) {
- grpc_call_log_op(GPR_INFO, elem, batch);
- }
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
}
+ GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+ // If we've previously been cancelled, immediately fail any new batches.
+ if (calld->error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ chand, calld, grpc_error_string(calld->error));
+ }
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+ goto done;
+ }
+ if (batch->cancel_stream) {
+ // Stash a copy of cancel_error in our call data, so that we can use
+ // it for subsequent operations. This ensures that if the call is
+ // cancelled before any batches are passed down (e.g., if the deadline
+ // is in the past when the call starts), we can return the right
+ // error to the caller when the first batch does get passed down.
+ GRPC_ERROR_UNREF(calld->error);
+ calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ calld, grpc_error_string(calld->error));
+ }
+ // If we have a subchannel call, send the cancellation batch down.
+ // Otherwise, fail all pending batches.
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+ } else {
+ waiting_for_pick_batches_add(calld, batch);
+ waiting_for_pick_batches_fail(exec_ctx, elem,
+ GRPC_ERROR_REF(calld->error));
+ }
+ goto done;
+ }
// Intercept on_complete for recv_trailing_metadata so that we can
// check retry throttle status.
if (batch->recv_trailing_metadata) {
@@ -1395,38 +1403,44 @@ static void cc_start_transport_stream_op_batch(
grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete;
}
- /* try to (atomically) get the call */
- call_or_error coe = get_call_or_error(calld);
- GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
- if (coe.error != GRPC_ERROR_NONE) {
+ // Check if we've already gotten a subchannel call.
+ // Note that once we have completed the pick, we do not need to enter
+ // the channel combiner, which is more efficient (especially for
+ // streaming calls).
+ if (calld->subchannel_call != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
- chand, calld, grpc_error_string(coe.error));
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+ calld, calld->subchannel_call);
}
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+ grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
goto done;
}
- if (coe.subchannel_call != NULL) {
+ // We do not yet have a subchannel call.
+ // Add the batch to the waiting-for-pick list.
+ waiting_for_pick_batches_add(calld, batch);
+ // For batches containing a send_initial_metadata op, enter the channel
+ // combiner to start a pick.
+ if (batch->send_initial_metadata) {
+ if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
+ chand, calld);
+ }
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+ elem, grpc_combiner_scheduler(chand->combiner)),
+ GRPC_ERROR_NONE);
+ } else {
+ // For all other batches, release the call combiner.
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
- calld, coe.subchannel_call);
+             "chand=%p calld=%p: saved batch, yielding call combiner", chand,
+ calld);
}
- grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
- goto done;
- }
- /* we failed; lock and figure out what to do */
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "batch does not include send_initial_metadata");
}
- GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
- batch->handler_private.extra_arg = elem;
- GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
- start_transport_stream_op_batch_locked, batch,
- grpc_combiner_scheduler(chand->combiner)),
- GRPC_ERROR_NONE);
done:
GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
}
@@ -1435,16 +1449,18 @@ done:
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
- calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
- calld->owning_call = args->call_stack;
+ calld->deadline = args->deadline;
calld->arena = args->arena;
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
if (chand->deadline_checking_enabled) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, calld->deadline);
}
return GRPC_ERROR_NONE;
}
@@ -1454,8 +1470,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1463,13 +1479,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
if (calld->method_params != NULL) {
method_parameters_unref(calld->method_params);
}
- call_or_error coe = get_call_or_error(calld);
- GRPC_ERROR_UNREF(coe.error);
- if (coe.subchannel_call != NULL) {
- grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
+ GRPC_ERROR_UNREF(calld->error);
+ if (calld->subchannel_call != NULL) {
+ grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
then_schedule_closure);
then_schedule_closure = NULL;
- GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
+ GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
"client_channel_destroy_call");
}
GPR_ASSERT(calld->lb_policy == NULL);
@@ -1490,7 +1505,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->pollent = pollent;
}
@@ -1508,14 +1523,13 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data),
cc_init_channel_elem,
cc_destroy_channel_elem,
- cc_get_peer,
cc_get_channel_info,
"client-channel",
};
static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- channel_data *chand = arg;
+ channel_data *chand = (channel_data *)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1529,7 +1543,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1600,7 +1614,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1615,9 +1629,9 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
-static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- external_connectivity_watcher *w = arg;
+static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
@@ -1630,13 +1644,13 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
- external_connectivity_watcher *w = arg;
+ external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
external_connectivity_watcher *found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
- GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
- grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
+ grpc_combiner_scheduler(w->chand->combiner));
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
} else {
@@ -1659,8 +1673,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *closure, grpc_closure *watcher_timer_init) {
- channel_data *chand = elem->channel_data;
- external_connectivity_watcher *w = gpr_zalloc(sizeof(*w));
+ channel_data *chand = (channel_data *)elem->channel_data;
+ external_connectivity_watcher *w =
+ (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index c99f0092e9..152fe2365a 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -28,6 +28,10 @@ extern grpc_tracer_flag grpc_client_channel_trace;
// Channel arg key for server URI string.
#define GRPC_ARG_SERVER_URI "grpc.server_uri"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A client channel is a channel that begins disconnected, and can connect
to some endpoint on demand. If that endpoint disconnects, it will be
connected to again later.
@@ -52,4 +56,8 @@ void grpc_client_channel_watch_connectivity_state(
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
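Editor's note: the `extern "C"` guards added to this header (and to the other client_channel headers below) keep the declared symbols on C linkage now that the corresponding .c files are compiled as C++. A minimal, self-contained sketch of the guard pattern — names here are hypothetical, not gRPC APIs:

    /* widget.h -- illustrative example of the guard pattern used above */
    #ifndef WIDGET_H
    #define WIDGET_H

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* Declared with C linkage, so a C++ translation unit that defines it
     * (widget.cc) and a C translation unit that calls it agree on the
     * unmangled symbol name "widget_init". */
    void widget_init(void);

    #ifdef __cplusplus
    }
    #endif

    #endif /* WIDGET_H */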
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.c b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 7220a8639e..57eac8f875 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.c
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
@@ -43,14 +43,13 @@ grpc_channel* grpc_client_channel_factory_create_channel(
}
static void* factory_arg_copy(void* factory) {
- grpc_client_channel_factory_ref(factory);
+ grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
return factory;
}
static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) {
- // TODO(roth): Remove local exec_ctx when
- // https://github.com/grpc/grpc/pull/8705 is merged.
- grpc_client_channel_factory_unref(exec_ctx, factory);
+ grpc_client_channel_factory_unref(exec_ctx,
+ (grpc_client_channel_factory*)factory);
}
static int factory_arg_cmp(void* factory1, void* factory2) {
@@ -64,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
factory, &factory_arg_vtable);
}
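Editor's note: most of the mechanical changes in the renamed files are explicit casts on `void *` conversions. C converts `void *` to any object pointer type implicitly, but C++ does not, so every `gpr_malloc`/`gpr_zalloc` result and every closure `arg` now needs a cast. A minimal, self-contained illustration (hypothetical type, not gRPC code):

    #include <stdlib.h>

    typedef struct { int count; } call_data_example; /* hypothetical type */

    static void on_done(void *arg) {
      /* In C, `call_data_example *d = arg;` compiles; once the file is built
       * as C++ (.cc), the explicit cast below is required. */
      call_data_example *d = (call_data_example *)arg;
      d->count++;
    }

    int main(void) {
      call_data_example *d = (call_data_example *)malloc(sizeof(*d));
      d->count = 0;
      on_done(d);
      free(d);
      return 0;
    }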
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index ce6266c769..4273c90058 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -27,6 +27,10 @@
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
typedef struct grpc_client_channel_factory_vtable
grpc_client_channel_factory_vtable;
@@ -74,4 +78,8 @@ grpc_channel *grpc_client_channel_factory_create_channel(
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory *factory);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index c32e83d012..4431d11519 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
char *default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
- grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY,
- default_authority);
+ grpc_arg arg = grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
@@ -65,7 +65,7 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
return true;
}
-void grpc_client_channel_init(void) {
+extern "C" void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
grpc_retry_throttle_map_init();
@@ -84,7 +84,7 @@ void grpc_client_channel_init(void) {
#endif
}
-void grpc_client_channel_shutdown(void) {
+extern "C" void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_proxy_mapper_registry_shutdown();
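Editor's note: the plugin entry points keep their unmangled names by marking the definitions themselves `extern "C"` once the defining file becomes a .cc, so C callers elsewhere in the tree continue to link against the same symbols. A brief sketch with hypothetical names:

    /* plugin.cc -- C-linkage definitions in a C++ translation unit */
    extern "C" void my_plugin_init(void) {
      /* The body may freely use C++; only the symbol name keeps C linkage. */
    }

    extern "C" void my_plugin_shutdown(void) {}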
diff --git a/src/core/ext/filters/client_channel/connector.c b/src/core/ext/filters/client_channel/connector.cc
index c258468e58..c258468e58 100644
--- a/src/core/ext/filters/client_channel/connector.c
+++ b/src/core/ext/filters/client_channel/connector.cc
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index 7f3d4a1cc0..b71e0aab00 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_connector grpc_connector;
typedef struct grpc_connector_vtable grpc_connector_vtable;
@@ -34,7 +38,7 @@ typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
/** deadline for connection */
- gpr_timespec deadline;
+ grpc_millis deadline;
/** channel arguments (to be passed to transport) */
const grpc_channel_args *channel_args;
} grpc_connect_in_args;
@@ -70,4 +74,8 @@ void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
grpc_error *why);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
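Editor's note: the connect deadline changes from the struct-based `gpr_timespec` to `grpc_millis`, a flat millisecond count; later hunks in this patch compute deadlines as `grpc_exec_ctx_now(exec_ctx) + timeout_ms`. The self-contained analogy below only shows the shape of that arithmetic using std::chrono; it is not gRPC's implementation:

    #include <chrono>
    #include <cstdint>

    using millis = int64_t;  // analogous role to grpc_millis in this patch

    static millis now_ms() {
      // Monotonic clock expressed as a flat millisecond count.
      return std::chrono::duration_cast<std::chrono::milliseconds>(
                 std::chrono::steady_clock::now().time_since_epoch())
          .count();
    }

    int main() {
      const millis timeout_ms = 250;
      // An absolute deadline is plain addition; no timespec normalization.
      const millis deadline = now_ms() + timeout_ms;
      return deadline > 0 ? 0 : 1;
    }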
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.c b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index 0952dc6d4e..418bb41ef6 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -124,7 +124,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -145,7 +145,7 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- http_connect_handshaker* handshaker = arg;
+ http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -281,7 +281,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
- headers = gpr_malloc(sizeof(grpc_http_header) * num_header_strings);
+ headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
+ num_header_strings);
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == NULL) {
@@ -308,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = server_name;
- request.http.method = "CONNECT";
+ request.http.method = (char*)"CONNECT";
request.http.path = server_name;
request.http.hdrs = headers;
request.http.hdr_count = num_headers;
@@ -333,7 +334,8 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_do_handshake};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
- http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
+ http_connect_handshaker* handshaker =
+ (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.h b/src/core/ext/filters/client_channel/http_connect_handshaker.h
index 928a23dc93..05a23cdba3 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.h
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.h
@@ -28,7 +28,15 @@
/// separated by colons.
#define GRPC_ARG_HTTP_CONNECT_HEADERS "grpc.http_connect_headers"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Registers handshaker factory.
void grpc_http_connect_register_handshaker_factory();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.cc
index ef3512ed83..a16b44d3dc 100644
--- a/src/core/ext/filters/client_channel/http_proxy.c
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -44,6 +44,8 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
GPR_ASSERT(user_cred != NULL);
char* proxy_name = NULL;
char* uri_str = gpr_getenv("http_proxy");
+ char** authority_strs = NULL;
+ size_t authority_nstrs;
if (uri_str == NULL) return NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
@@ -56,8 +58,6 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
goto done;
}
/* Split on '@' to separate user credentials from host */
- char** authority_strs = NULL;
- size_t authority_nstrs;
gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
if (authority_nstrs == 1) {
@@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
+ char* no_proxy_str = NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
if (uri == NULL || uri->path[0] == '\0') {
@@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
- if (uri != NULL) {
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- }
- return false;
+ goto no_use_proxy;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
- gpr_free(user_cred);
- grpc_uri_destroy(uri);
- return false;
+ goto no_use_proxy;
}
- char* no_proxy_str = gpr_getenv("no_proxy");
+ no_proxy_str = gpr_getenv("no_proxy");
if (no_proxy_str != NULL) {
static const char* NO_PROXY_SEPARATOR = ",";
bool use_proxy = true;
@@ -147,17 +142,12 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
gpr_free(no_proxy_hosts);
gpr_free(server_host);
gpr_free(server_port);
- if (!use_proxy) {
- grpc_uri_destroy(uri);
- gpr_free(*name_to_resolve);
- *name_to_resolve = NULL;
- return false;
- }
+ if (!use_proxy) goto no_use_proxy;
}
}
grpc_arg args_to_add[2];
args_to_add[0] = grpc_channel_arg_string_create(
- GRPC_ARG_HTTP_CONNECT_SERVER,
+ (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (user_cred != NULL) {
/* Use base64 encoding for user credentials as stated in RFC 7617 */
@@ -166,16 +156,22 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
char* header;
gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
gpr_free(encoded_user_cred);
- args_to_add[1] =
- grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
+ args_to_add[1] = grpc_channel_arg_string_create(
+ (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
gpr_free(header);
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
- gpr_free(user_cred);
grpc_uri_destroy(uri);
+ gpr_free(user_cred);
return true;
+no_use_proxy:
+ if (uri != NULL) grpc_uri_destroy(uri);
+ gpr_free(*name_to_resolve);
+ *name_to_resolve = NULL;
+ gpr_free(user_cred);
+ return false;
}
static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
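Editor's note: the rewrite of proxy_mapper_map_name() above replaces several duplicated early-return cleanup blocks with a single `no_use_proxy:` label that frees `uri`, `*name_to_resolve`, and `user_cred` in one place. A self-contained sketch of the same single-exit cleanup idiom, with hypothetical resources:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical parser showing the goto-based cleanup used above. */
    static bool parse_config(const char *text, char **out) {
      char *copy = NULL;
      char *scratch = (char *)malloc(16);
      *out = NULL;
      if (text == NULL) goto fail; /* every failure path jumps to one label */
      copy = (char *)malloc(strlen(text) + 1);
      if (copy == NULL) goto fail;
      strcpy(copy, text);
      *out = copy;
      free(scratch);
      return true;
    fail:
      free(scratch); /* free(NULL) is a no-op, so unconditional frees are safe */
      free(copy);
      *out = NULL;
      return false;
    }

    int main(void) {
      char *value = NULL;
      bool ok = parse_config("example", &value);
      if (ok) free(value);
      return 0;
    }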
diff --git a/src/core/ext/filters/client_channel/http_proxy.h b/src/core/ext/filters/client_channel/http_proxy.h
index 34694931d0..bdad03def3 100644
--- a/src/core/ext/filters/client_channel/http_proxy.h
+++ b/src/core/ext/filters/client_channel/http_proxy.h
@@ -19,6 +19,14 @@
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_register_http_proxy_mapper();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy.c b/src/core/ext/filters/client_channel/lb_policy.cc
index dd95a135cf..8e6673d737 100644
--- a/src/core/ext/filters/client_channel/lb_policy.c
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -67,7 +67,7 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_lb_policy *policy = arg;
+ grpc_lb_policy *policy = (grpc_lb_policy *)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 645d51e138..010299c2f4 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/connectivity_state.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** A load balancing policy: specified by a vtable and a struct (which
is expected to be extended to contain some parameters) */
typedef struct grpc_lb_policy grpc_lb_policy;
@@ -204,4 +208,8 @@ void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
const grpc_lb_policy_args *lb_policy_args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 568bb2ba8d..7ad322902b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -49,7 +49,7 @@ typedef struct {
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -59,7 +59,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- call_data *calld = arg;
+ call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -70,12 +70,13 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
- args->context[GRPC_GRPCLB_CLIENT_STATS].value);
+ (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
+ .value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
@@ -84,7 +85,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -98,7 +99,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
@@ -132,6 +133,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client_load_reporting"};
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index 51e30b20b8..c6a0d69c3f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -21,7 +21,15 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_client_load_reporting_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 6d01667f36..a5f8c59252 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -80,6 +80,7 @@
headers. Therefore, sockaddr.h must always be included first */
#include "src/core/lib/iomgr/sockaddr.h"
+#include <inttypes.h>
#include <limits.h>
#include <string.h>
@@ -101,6 +102,8 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -110,7 +113,6 @@
#include "src/core/lib/slice/slice_hash_table.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
@@ -122,6 +124,7 @@
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
+#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
@@ -137,7 +140,7 @@ static grpc_error *initial_metadata_add_lb_token(
}
static void destroy_client_stats(void *arg) {
- grpc_grpclb_client_stats_unref(arg);
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
}
typedef struct wrapped_rr_closure_arg {
@@ -181,7 +184,7 @@ typedef struct wrapped_rr_closure_arg {
* order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_rr_closure_arg *wc_arg = arg;
+ wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -245,7 +248,7 @@ static void add_pending_pick(pending_pick **root,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
grpc_closure *on_complete) {
- pending_pick *pp = gpr_zalloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -271,7 +274,7 @@ typedef struct pending_ping {
} pending_ping;
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = gpr_zalloc(sizeof(*pping));
+ pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -285,7 +288,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
-static const grpc_lb_policy_vtable glb_lb_policy_vtable;
+
typedef struct glb_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
@@ -298,6 +301,10 @@ typedef struct glb_lb_policy {
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
+  /** timeout in milliseconds before using fallback backend addresses.
+ * 0 means not using fallback. */
+ int lb_fallback_timeout_ms;
+
/** for communicating with the LB server */
grpc_channel *lb_channel;
@@ -324,6 +331,9 @@ typedef struct glb_lb_policy {
* Otherwise, we delegate to the RR policy. */
size_t serverlist_index;
+ /** stores the backend addresses from the resolver */
+ grpc_lb_addresses *fallback_backend_addresses;
+
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
@@ -335,27 +345,21 @@ typedef struct glb_lb_policy {
/** are we currently updating lb_call? */
bool updating_lb_call;
- /** are we currently updating lb_channel? */
- bool updating_lb_channel;
-
/** are we already watching the LB channel's connectivity? */
bool watching_lb_channel;
/** is \a lb_call_retry_timer active? */
bool retry_timer_active;
+ /** is \a lb_fallback_timer active? */
+ bool fallback_timer_active;
+
/** called upon changes to the LB channel's connectivity. */
grpc_closure lb_channel_on_connectivity_changed;
- /** args from the latest update received while already updating, or NULL */
- grpc_lb_policy_args *pending_update_args;
-
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
- /* Finished sending initial request. */
- grpc_closure lb_on_sent_initial_request;
-
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
@@ -366,6 +370,9 @@ typedef struct glb_lb_policy {
/* LB call retry timer callback. */
grpc_closure lb_on_call_retry;
+ /* LB fallback timer callback. */
+ grpc_closure lb_on_fallback;
+
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
@@ -384,19 +391,21 @@ typedef struct glb_lb_policy {
grpc_slice lb_call_status_details;
/** LB call retry backoff state */
- gpr_backoff lb_call_backoff_state;
+ grpc_backoff lb_call_backoff_state;
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
- bool initial_request_sent;
+ /** LB fallback timer */
+ grpc_timer lb_fallback_timer;
+
bool seen_initial_response;
/* Stats for client-side load reporting. Should be unreffed and
* recreated whenever lb_call is replaced. */
grpc_grpclb_client_stats *client_stats;
/* Interval and timer for next client load report. */
- gpr_timespec client_stats_report_interval;
+ grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
bool client_load_report_timer_pending;
bool last_client_load_report_counters_were_zero;
@@ -442,11 +451,11 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
static void *lb_token_copy(void *token) {
return token == NULL
? NULL
- : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
+ : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
if (token != NULL) {
- GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
+ GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
static int lb_token_cmp(void *token1, void *token2) {
@@ -535,6 +544,32 @@ static grpc_lb_addresses *process_serverlist_locked(
return lb_addresses;
}
+/* Returns the backend addresses extracted from the given addresses */
+static grpc_lb_addresses *extract_backend_addresses_locked(
+ grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+ /* first pass: count the number of backend addresses */
+ size_t num_backends = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (!addresses->addresses[i].is_balancer) {
+ ++num_backends;
+ }
+ }
+ /* second pass: actually populate the addresses and (empty) LB tokens */
+ grpc_lb_addresses *backend_addresses =
+ grpc_lb_addresses_create(num_backends, &lb_token_vtable);
+ size_t num_copied = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) continue;
+ const grpc_resolved_address *addr = &addresses->addresses[i].address;
+ grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
+ addr->len, false /* is_balancer */,
+ NULL /* balancer_name */,
+ (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ ++num_copied;
+ }
+ return backend_addresses;
+}
+
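Editor's note: extract_backend_addresses_locked() uses the common two-pass shape — count the matching entries first so the destination can be sized exactly, then copy them. A self-contained sketch of that structure with hypothetical types (not the grpc_lb_addresses API):

    #include <cstddef>
    #include <vector>

    struct addr_entry {  // hypothetical stand-in for one resolved address
      bool is_balancer;
      int payload;
    };

    // First pass counts non-balancer entries, second pass copies them,
    // mirroring the count-then-populate structure of the function above.
    static std::vector<addr_entry> extract_backends(
        const std::vector<addr_entry> &in) {
      size_t num_backends = 0;
      for (const addr_entry &e : in) {
        if (!e.is_balancer) ++num_backends;
      }
      std::vector<addr_entry> out;
      out.reserve(num_backends);
      for (const addr_entry &e : in) {
        if (e.is_balancer) continue;
        out.push_back(e);
      }
      return out;
    }

    int main() {
      std::vector<addr_entry> in = {{true, 1}, {false, 2}, {false, 3}};
      return extract_backends(in).size() == 2 ? 0 : 1;
    }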
static void update_lb_connectivity_status_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
@@ -576,7 +611,6 @@ static void update_lb_connectivity_status_locked(
case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
break;
- case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
@@ -602,35 +636,38 @@ static bool pick_from_internal_rr_locked(
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
const grpc_lb_policy_pick_args *pick_args, bool force_async,
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
- // Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server *server =
- glb_policy->serverlist->servers[glb_policy->serverlist_index++];
- if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
- glb_policy->serverlist_index = 0; // Wrap-around.
- }
- if (server->drop) {
- // Not using the RR policy, so unref it.
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
- (intptr_t)wc_arg->rr_policy);
+ // Check for drops if we are not using fallback backend addresses.
+ if (glb_policy->serverlist != NULL) {
+ // Look at the index into the serverlist to see if we should drop this call.
+ grpc_grpclb_server *server =
+ glb_policy->serverlist->servers[glb_policy->serverlist_index++];
+ if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
+ glb_policy->serverlist_index = 0; // Wrap-around.
}
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
- // Update client load reporting stats to indicate the number of
- // dropped calls. Note that we have to do this here instead of in
- // the client_load_reporting filter, because we do not create a
- // subchannel call (and therefore no client_load_reporting filter)
- // for dropped calls.
- grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token,
- wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != NULL);
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ if (server->drop) {
+ // Not using the RR policy, so unref it.
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
+ (intptr_t)wc_arg->rr_policy);
+ }
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+ // Update client load reporting stats to indicate the number of
+ // dropped calls. Note that we have to do this here instead of in
+ // the client_load_reporting filter, because we do not create a
+ // subchannel call (and therefore no client_load_reporting filter)
+ // for dropped calls.
+ grpc_grpclb_client_stats_add_call_dropped_locked(
+ server->load_balance_token, wc_arg->client_stats);
+ grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ if (force_async) {
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+ gpr_free(wc_arg->free_when_done);
+ return false;
+ }
gpr_free(wc_arg->free_when_done);
- return false;
+ return true;
}
- gpr_free(wc_arg->free_when_done);
- return true;
}
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
@@ -668,10 +705,20 @@ static bool pick_from_internal_rr_locked(
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- grpc_lb_addresses *addresses =
- process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ grpc_lb_addresses *addresses;
+ if (glb_policy->serverlist != NULL) {
+ GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
+ addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+ } else {
+ // If rr_handover_locked() is invoked when we haven't received any
+ // serverlist from the balancer, we use the fallback backends returned by
+ // the resolver. Note that the fallback backend list may be empty, in which
+ // case the new round_robin policy will keep the requested picks pending.
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
+ }
GPR_ASSERT(addresses != NULL);
- grpc_lb_policy_args *args = gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -727,7 +774,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
rr_connectivity_data *rr_connectivity =
- gpr_zalloc(sizeof(rr_connectivity_data));
+ (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -775,8 +822,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- GPR_ASSERT(glb_policy->serverlist != NULL &&
- glb_policy->serverlist->num_servers > 0);
if (glb_policy->shutting_down) return;
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
@@ -798,7 +843,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- rr_connectivity_data *rr_connectivity = arg;
+ rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -841,8 +886,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void *a, void *b) {
- const char *a_str = a;
- const char *b_str = b;
+ const char *a_str = (const char *)a;
+ const char *b_str = (const char *)b;
return strcmp(a_str, b_str);
}
@@ -869,7 +914,8 @@ static grpc_channel_args *build_lb_channel_args(
grpc_lb_addresses *lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
grpc_slice_hash_table_entry *targets_info_entries =
- gpr_zalloc(sizeof(*targets_info_entries) * num_grpclb_addrs);
+ (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
+ num_grpclb_addrs);
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -911,92 +957,6 @@ static grpc_channel_args *build_lb_channel_args(
return result;
}
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error);
-static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
- /* Count the number of gRPC-LB addresses. There must be at least one.
- * TODO(roth): For now, we ignore non-balancer addresses, but in the
- * future, we may change the behavior such that we fall back to using
- * the non-balancer addresses if we cannot reach any balancers. In the
- * fallback case, we should use the LB policy indicated by
- * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
- * unset, we should default to pick_first). */
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- return NULL;
- }
- grpc_lb_addresses *addresses = arg->value.pointer.p;
- size_t num_grpclb_addrs = 0;
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
- }
- if (num_grpclb_addrs == 0) return NULL;
-
- glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
-
- /* Get server name. */
- arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
- GPR_ASSERT(arg != NULL);
- GPR_ASSERT(arg->type == GRPC_ARG_STRING);
- grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
- GPR_ASSERT(uri->path[0] != '\0');
- glb_policy->server_name =
- gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
- glb_policy->server_name);
- }
- grpc_uri_destroy(uri);
-
- glb_policy->cc_factory = args->client_channel_factory;
- GPR_ASSERT(glb_policy->cc_factory != NULL);
-
- arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
- glb_policy->lb_call_timeout_ms =
- grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
-
- // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
- // since we use this to trigger the client_load_reporting filter.
- grpc_arg new_arg =
- grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
- static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
- glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
- args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
-
- /* Create a client channel over them to communicate with a LB service */
- glb_policy->response_generator =
- grpc_fake_resolver_response_generator_create();
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
- exec_ctx, addresses, glb_policy->response_generator, args->args);
- char *uri_str;
- gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
- glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
- exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
-
- /* Propagate initial resolution */
- grpc_fake_resolver_response_generator_set_response(
- exec_ctx, glb_policy->response_generator, lb_channel_args);
- grpc_channel_args_destroy(exec_ctx, lb_channel_args);
- gpr_free(uri_str);
- if (glb_policy->lb_channel == NULL) {
- gpr_free((void *)glb_policy->server_name);
- grpc_channel_args_destroy(exec_ctx, glb_policy->args);
- gpr_free(glb_policy);
- return NULL;
- }
- GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
- glb_lb_channel_on_connectivity_changed_cb, glb_policy,
- grpc_combiner_scheduler(args->combiner));
- grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
- grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
- "grpclb");
- return &glb_policy->base;
-}
-
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
GPR_ASSERT(glb_policy->pending_picks == NULL);
@@ -1010,11 +970,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
if (glb_policy->serverlist != NULL) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
- grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
- if (glb_policy->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
- gpr_free(glb_policy->pending_update_args);
+ if (glb_policy->fallback_backend_addresses != NULL) {
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
}
+ grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
+ grpc_subchannel_index_unref();
gpr_free(glb_policy);
}
@@ -1039,6 +999,10 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
glb_policy->retry_timer_active = false;
}
+ if (glb_policy->fallback_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+ glb_policy->fallback_timer_active = false;
+ }
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@@ -1062,15 +1026,19 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pp);
pp = next;
}
while (pping != NULL) {
pending_ping *next = pping->next;
- GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ gpr_free(pping);
pping = next;
}
}
@@ -1150,12 +1118,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
+ /* start a timer to fall back */
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
+ grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
+ glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->fallback_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
+ &glb_policy->lb_on_fallback);
+ }
+
glb_policy->started_picking = true;
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
}
@@ -1209,8 +1193,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg =
- gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg *wc_arg =
+ (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1237,10 +1221,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
- pick_done = false;
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
+ pick_done = false;
}
return pick_done;
}
@@ -1275,25 +1259,74 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &glb_policy->state_tracker, current, notify);
}
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+ glb_policy->retry_timer_active = false;
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+      gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->lb_call == NULL);
+ query_for_backends_locked(exec_ctx, glb_policy);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+}
+
+static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy) {
+ if (glb_policy->started_picking && glb_policy->updating_lb_call) {
+ if (glb_policy->retry_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+ }
+ if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
+ glb_policy->updating_lb_call = false;
+ } else if (!glb_policy->shutting_down) {
+ /* if we aren't shutting down, restart the LB client call after some time */
+ grpc_millis next_try =
+ grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
+ (void *)glb_policy);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
+ timeout);
+ } else {
+ gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
+ }
+ }
+ GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
+ lb_call_on_retry_timer_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ glb_policy->retry_timer_active = true;
+ grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+ &glb_policy->lb_on_call_retry);
+ }
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_server_status_received_locked");
+}
+
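Editor's note: maybe_restart_lb_call() re-arms the retry timer at the delay produced by grpc_backoff_step(), configured earlier in this file with a 1.6x multiplier, 20% jitter, and a 120-second cap (the GRPC_GRPCLB_RECONNECT_* constants). The self-contained sketch below shows one common way such a step can be computed; it is illustrative only, assumes a 1-second initial backoff, and does not reflect grpc_backoff's actual implementation, which is not part of this diff:

    #include <algorithm>
    #include <cstdint>
    #include <random>

    // Exponential-backoff step with jitter, using the multiplier, jitter
    // fraction, and cap defined above (1.6, 0.2, 120 s).
    static int64_t next_backoff_ms(int64_t current_ms) {
      const double multiplier = 1.6;
      const double jitter = 0.2;
      const int64_t max_ms = 120 * 1000;
      static std::mt19937_64 rng{42};
      std::uniform_real_distribution<double> dist(1.0 - jitter, 1.0 + jitter);
      double next = static_cast<double>(current_ms) * multiplier * dist(rng);
      return std::min<int64_t>(static_cast<int64_t>(next), max_ms);
    }

    int main() {
      int64_t delay_ms = 1000;  // assumed initial backoff for the sketch
      for (int attempt = 0; attempt < 5; ++attempt) {
        delay_ms = next_backoff_ms(delay_ms);
      }
      return delay_ms <= 120 * 1000 ? 0 : 1;
    }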
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
- const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- const gpr_timespec next_client_load_report_time =
- gpr_time_add(now, glb_policy->client_stats_report_interval);
+ const grpc_millis next_client_load_report_time =
+ grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
send_client_load_report_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
next_client_load_report_time,
- &glb_policy->client_load_report_closure, now);
+ &glb_policy->client_load_report_closure);
}
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1305,24 +1338,10 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
-static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = glb_policy->client_load_report_payload;
- GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
- client_load_report_done_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, &op, 1,
- &glb_policy->client_load_report_closure);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
grpc_grpclb_dropped_call_counts *drop_entries =
- request->client_stats.calls_finished_with_drop.arg;
+ (grpc_grpclb_dropped_call_counts *)
+ request->client_stats.calls_finished_with_drop.arg;
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
request->client_stats.num_calls_finished_with_client_failed_to_send ==
@@ -1333,11 +1352,14 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"client_load_report");
+ if (glb_policy->lb_call == NULL) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
return;
}
// Construct message payload.
@@ -1361,17 +1383,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- // If we've already sent the initial request, then we can go ahead and
- // sent the load report. Otherwise, we need to wait until the initial
- // request has been sent to send this
- // (see lb_on_sent_initial_request_locked() below).
- if (glb_policy->initial_request_sent) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
+ // Send load report message.
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_SEND_MESSAGE;
+ op.data.send_message.send_message = glb_policy->client_load_report_payload;
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
+ client_load_report_done_locked, glb_policy,
+ grpc_combiner_scheduler(glb_policy->base.combiner));
+ grpc_call_error call_error = grpc_call_start_batch_and_execute(
+ exec_ctx, glb_policy->lb_call, &op, 1,
+ &glb_policy->client_load_report_closure);
+ if (call_error != GRPC_CALL_OK) {
+ gpr_log(GPR_ERROR, "call_error=%d", call_error);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
}
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error);
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1387,12 +1415,10 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
- gpr_timespec deadline =
+ grpc_millis deadline =
glb_policy->lb_call_timeout_ms == 0
- ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
- : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
- GPR_TIMESPAN));
+ ? GRPC_MILLIS_INF_FUTURE
+ : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
@@ -1416,9 +1442,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
grpc_grpclb_request_destroy(request);
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
- lb_on_sent_initial_request_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1426,14 +1449,13 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
lb_on_response_received_locked, glb_policy,
grpc_combiner_scheduler(glb_policy->base.combiner));
- gpr_backoff_init(&glb_policy->lb_call_backoff_state,
- GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_GRPCLB_RECONNECT_JITTER,
- GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&glb_policy->lb_call_backoff_state,
+ GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_GRPCLB_RECONNECT_JITTER,
+ GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
- glb_policy->initial_request_sent = false;
glb_policy->seen_initial_response = false;
glb_policy->last_client_load_report_counters_were_zero = false;
}
@@ -1450,7 +1472,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
- if (!glb_policy->client_load_report_timer_pending) {
+ if (glb_policy->client_load_report_timer_pending) {
grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
}
}
@@ -1474,7 +1496,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_error call_error;
- grpc_op ops[4];
+ grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1495,13 +1517,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
op->flags = 0;
op->reserved = NULL;
op++;
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
- * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
- "lb_on_sent_initial_request_locked");
- call_error = grpc_call_start_batch_and_execute(
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
- &glb_policy->lb_on_sent_initial_request);
+ call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
+ ops, (size_t)(op - ops), NULL);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
@@ -1538,27 +1555,14 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
- glb_policy->initial_request_sent = true;
- // If we attempted to send a client load report before the initial
- // request was sent, send the load report now.
- if (glb_policy->client_load_report_payload != NULL) {
- do_send_client_load_report_locked(exec_ctx, glb_policy);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_sent_initial_request_locked");
-}
-
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
if (glb_policy->lb_response_payload != NULL) {
- gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
+ grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
@@ -1572,16 +1576,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
if (response->has_client_stats_report_interval) {
- glb_policy->client_stats_report_interval =
- gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
- grpc_grpclb_duration_to_timespec(
- &response->client_stats_report_interval));
+ glb_policy->client_stats_report_interval = GPR_MAX(
+ GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
+ &response->client_stats_report_interval));
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"received initial LB response message; "
- "client load reporting interval = %" PRId64 ".%09d sec",
- glb_policy->client_stats_report_interval.tv_sec,
- glb_policy->client_stats_report_interval.tv_nsec);
+ "client load reporting interval = %" PRIdPTR " milliseconds",
+ glb_policy->client_stats_report_interval);
}
/* take a weak ref (won't prevent calling of \a glb_shutdown() if the
* strong ref count goes to zero) to be unref'd in
@@ -1626,6 +1628,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+ } else {
+ /* or dispose of the fallback */
+ grpc_lb_addresses_destroy(exec_ctx,
+ glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses = NULL;
+ if (glb_policy->fallback_timer_active) {
+ grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+ glb_policy->fallback_timer_active = false;
+ }
}
/* and update the copy in the glb_lb_policy instance. This
* serverlist instance will be destroyed either upon the next
@@ -1636,9 +1647,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
} else {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Received empty server list. Picks will stay pending until "
- "a response with > 0 servers is received");
+ gpr_log(GPR_INFO, "Received empty server list, ignoring.");
}
grpc_grpclb_destroy_serverlist(serverlist);
}
@@ -1661,6 +1670,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
+ } else {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received_locked" weak ref taken in
@@ -1670,24 +1682,30 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
- glb_policy->retry_timer_active = false;
- if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
- (void *)glb_policy);
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+ glb_policy->fallback_timer_active = false;
+ /* If we receive a serverlist after the timer fires but before this callback
+ * actually runs, don't fall back. */
+ if (glb_policy->serverlist == NULL) {
+ if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO,
+ "Falling back to use backends from resolver (grpclb %p)",
+ (void *)glb_policy);
+ }
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GPR_ASSERT(glb_policy->lb_call == NULL);
- query_for_backends_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "grpclb_fallback_timer");
}
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
@@ -1701,66 +1719,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
/* We need to perform cleanups no matter what. */
lb_call_destroy_locked(exec_ctx, glb_policy);
- if (glb_policy->started_picking && glb_policy->updating_lb_call) {
- if (glb_policy->retry_timer_active) {
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
- }
- if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
- glb_policy->updating_lb_call = false;
- } else if (!glb_policy->shutting_down) {
- /* if we aren't shutting down, restart the LB client call after some time */
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try =
- gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
- (void *)glb_policy);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG,
- "... retry_timer_active in %" PRId64 ".%09d seconds.",
- timeout.tv_sec, timeout.tv_nsec);
- } else {
- gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
- }
- }
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
- GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
- lb_call_on_retry_timer_locked, glb_policy,
- grpc_combiner_scheduler(glb_policy->base.combiner));
- glb_policy->retry_timer_active = true;
- grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
- &glb_policy->lb_on_call_retry, now);
+ // If the load report timer is still pending, we wait for it to be
+ // called before restarting the call. Otherwise, we restart the call
+ // here.
+ if (!glb_policy->client_load_report_timer_pending) {
+ maybe_restart_lb_call(exec_ctx, glb_policy);
+ }
+}
+
+static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
+ glb_lb_policy *glb_policy,
+ const grpc_lb_addresses *addresses) {
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+ grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+ if (glb_policy->lb_fallback_timeout_ms > 0 &&
+ !glb_policy->fallback_timer_active) {
+ rr_handover_locked(exec_ctx, glb_policy);
}
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
- "lb_on_server_status_received_locked");
}
static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_lb_policy_args *args) {
glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
- if (glb_policy->updating_lb_channel) {
- if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO,
- "Update already in progress for grpclb %p. Deferring update.",
- (void *)glb_policy);
- }
- if (glb_policy->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx,
- glb_policy->pending_update_args->args);
- gpr_free(glb_policy->pending_update_args);
- }
- glb_policy->pending_update_args =
- gpr_zalloc(sizeof(*glb_policy->pending_update_args));
- glb_policy->pending_update_args->client_channel_factory =
- args->client_channel_factory;
- glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
- glb_policy->pending_update_args->combiner = args->combiner;
- return;
- }
-
- glb_policy->updating_lb_channel = true;
- // Propagate update to lb_channel (pick first).
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1778,18 +1760,26 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
"ignoring.",
(void *)glb_policy);
}
+ return;
+ }
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
+ // If a non-empty serverlist hasn't been received from the balancer,
+ // propagate the update to fallback_backend_addresses.
+ if (glb_policy->serverlist == NULL) {
+ fallback_update_locked(exec_ctx, glb_policy, addresses);
}
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
GPR_ASSERT(glb_policy->lb_channel != NULL);
+ // Propagate updates to the LB channel (pick_first) through the fake
+ // resolver.
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
- /* Propagate updates to the LB channel through the fake resolver */
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
-
+ // Start watching the LB channel connectivity for connection, if not
+ // already doing so.
if (!glb_policy->watching_lb_channel) {
- // Watch the LB channel connectivity for connection.
glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
glb_policy->lb_channel, true /* try to connect */);
grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
@@ -1812,13 +1802,12 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
- glb_lb_policy *glb_policy = arg;
+ glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
switch (glb_policy->lb_channel_connectivity) {
- case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */
@@ -1835,24 +1824,15 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
break;
}
case GRPC_CHANNEL_IDLE:
- // lb channel inactive (probably shutdown prior to update). Restart lb
- // call to kick the lb channel into gear.
- GPR_ASSERT(glb_policy->lb_call == NULL);
+ // lb channel inactive (probably shutdown prior to update). Restart lb
+ // call to kick the lb channel into gear.
/* fallthrough */
case GRPC_CHANNEL_READY:
if (glb_policy->lb_call != NULL) {
- glb_policy->updating_lb_channel = false;
glb_policy->updating_lb_call = true;
grpc_call_cancel(glb_policy->lb_call, NULL);
- // lb_on_server_status_received will pick up the cancel and reinit
+ // lb_on_server_status_received() will pick up the cancel and reinit
// lb_call.
- if (glb_policy->pending_update_args != NULL) {
- grpc_lb_policy_args *args = glb_policy->pending_update_args;
- glb_policy->pending_update_args = NULL;
- glb_update_locked(exec_ctx, &glb_policy->base, args);
- grpc_channel_args_destroy(exec_ctx, args->args);
- gpr_free(args);
- }
} else if (glb_policy->started_picking && !glb_policy->shutting_down) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
@@ -1883,6 +1863,93 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_notify_on_state_change_locked,
glb_update_locked};
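
The vtable above is how every LB policy in this tree dispatches: the concrete policy struct embeds its grpc_lb_policy base as the first member, which is what makes the casts from grpc_lb_policy * (or void *) back to glb_lb_policy * throughout these hunks well defined. A self-contained sketch of that layout-plus-vtable pattern, using made-up names rather than gRPC's:

#include <stdio.h>

typedef struct base_policy {
  const struct policy_vtable *vtable;
} base_policy;

typedef struct policy_vtable {
  void (*shutdown)(base_policy *p);
} policy_vtable;

typedef struct my_policy {
  base_policy base; /* must be first: base_policy* and my_policy* alias */
  int num_picks;
} my_policy;

static void my_shutdown(base_policy *p) {
  my_policy *self = (my_policy *)p; /* valid because base is the first member */
  printf("shutting down after %d picks\n", self->num_picks);
}

static const policy_vtable my_vtable = {my_shutdown};

int main(void) {
  my_policy p = {{&my_vtable}, 42};
  p.base.vtable->shutdown(&p.base); /* dispatch through the base pointer */
  return 0;
}
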
+static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ /* Count the number of gRPC-LB addresses. There must be at least one. */
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ return NULL;
+ }
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ size_t num_grpclb_addrs = 0;
+ for (size_t i = 0; i < addresses->num_addresses; ++i) {
+ if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+ }
+ if (num_grpclb_addrs == 0) return NULL;
+
+ glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+
+ /* Get server name. */
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(arg != NULL);
+ GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+ grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ glb_policy->server_name =
+ gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
+ if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+ gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
+ glb_policy->server_name);
+ }
+ grpc_uri_destroy(uri);
+
+ glb_policy->cc_factory = args->client_channel_factory;
+ GPR_ASSERT(glb_policy->cc_factory != NULL);
+
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+ glb_policy->lb_call_timeout_ms =
+ grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
+
+ arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
+ glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
+ arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
+
+ // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+ // since we use this to trigger the client_load_reporting filter.
+ grpc_arg new_arg = grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
+ static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+ glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+
+ /* Extract the backend addresses (may be empty) from the resolver for
+ * fallback. */
+ glb_policy->fallback_backend_addresses =
+ extract_backend_addresses_locked(exec_ctx, addresses);
+
+  /* Create a client channel over them to communicate with an LB service */
+ glb_policy->response_generator =
+ grpc_fake_resolver_response_generator_create();
+ grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ exec_ctx, addresses, glb_policy->response_generator, args->args);
+ char *uri_str;
+ gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
+ glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
+ exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+
+ /* Propagate initial resolution */
+ grpc_fake_resolver_response_generator_set_response(
+ exec_ctx, glb_policy->response_generator, lb_channel_args);
+ grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+ gpr_free(uri_str);
+ if (glb_policy->lb_channel == NULL) {
+ gpr_free((void *)glb_policy->server_name);
+ grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+ gpr_free(glb_policy);
+ return NULL;
+ }
+ grpc_subchannel_index_ref();
+ GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
+ glb_lb_channel_on_connectivity_changed_cb, glb_policy,
+ grpc_combiner_scheduler(args->combiner));
+ grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
+ grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
+ "grpclb");
+ return &glb_policy->base;
+}
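
Both timeouts read above use a {default, min, max} triple, so an absent channel arg yields the default (0 for the call timeout, GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS for fallback) and out-of-range values are clamped. The real helper is grpc_channel_arg_get_integer; the function below is only a hand-rolled illustration of the same resolution rule:

#include <limits.h>

typedef struct {
  int default_value;
  int min_value;
  int max_value;
} int_options;

/* Illustrative only: present == 0 models a channel arg that was not set. */
static int get_int_option(int present, int value, int_options opts) {
  if (!present) return opts.default_value;
  if (value < opts.min_value) return opts.min_value;
  if (value > opts.max_value) return opts.max_value;
  return value;
}

/* get_int_option(0, 0, (int_options){0, 0, INT_MAX})      -> 0 (default)
 * get_int_option(1, -5, (int_options){10000, 0, INT_MAX}) -> 0 (clamped up) */
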
+
static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
@@ -1913,7 +1980,7 @@ static bool maybe_add_client_load_reporting_filter(
return true;
}
-void grpc_lb_policy_grpclb_init() {
+extern "C" void grpc_lb_policy_grpclb_init() {
grpc_register_lb_policy(grpc_glb_lb_factory_create());
grpc_register_tracer(&grpc_lb_glb_trace);
#ifndef NDEBUG
@@ -1925,4 +1992,4 @@ void grpc_lb_policy_grpclb_init() {
(void *)&grpc_client_load_reporting_filter);
}
-void grpc_lb_policy_grpclb_shutdown() {}
+extern "C" void grpc_lb_policy_grpclb_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
index 63ad66c5e9..15c8a680b7 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@@ -21,9 +21,17 @@
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
* subchannel to pick. */
grpc_lb_policy_factory *grpc_glb_lb_factory_create();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */
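
These guards matter because the policy implementation now compiles as C++ while other callers still build as C; wrapping the declarations in extern "C" keeps a single, unmangled linkage for both sides. The same pattern in isolation, as a hypothetical header rather than a gRPC one:

/* my_api.h -- includable from both C and C++ translation units */
#ifndef MY_API_H
#define MY_API_H

#ifdef __cplusplus
extern "C" {
#endif

int my_api_do_work(int x); /* always exported with C linkage */

#ifdef __cplusplus
}
#endif

#endif /* MY_API_H */
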
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index f2967182e2..f2967182e2 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index 6120bf53f7..e8599d1f51 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -23,6 +23,10 @@
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/slice/slice_hash_table.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Create the channel used for communicating with an LB service.
* Note that an LB *service* may be comprised of several LB *servers*.
*
@@ -40,5 +44,9 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
grpc_fake_resolver_response_generator *response_generator,
const grpc_channel_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 2681b2a079..2681b2a079 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
index 5b62623145..903120ca7d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
@@ -42,7 +42,8 @@ struct grpc_grpclb_client_stats {
};
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
- grpc_grpclb_client_stats* client_stats = gpr_zalloc(sizeof(*client_stats));
+ grpc_grpclb_client_stats* client_stats =
+ (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -88,7 +89,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == NULL) {
client_stats->drop_token_counts =
- gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts));
+ (grpc_grpclb_dropped_call_counts*)gpr_zalloc(
+ sizeof(grpc_grpclb_dropped_call_counts));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -103,9 +105,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
- drop_token_counts->token_counts =
- gpr_realloc(drop_token_counts->token_counts,
- new_num_entries * sizeof(grpc_grpclb_drop_token_count));
+ drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
+ drop_token_counts->token_counts,
+ new_num_entries * sizeof(grpc_grpclb_drop_token_count));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);
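
The only substantive change in this file is the explicit casts on gpr_zalloc/gpr_realloc results and on void * callback arguments: C converts void * to any object pointer implicitly, but C++ (which these renamed .cc files now compile as) refuses to, so every allocation site needs the cast. A standalone illustration of the difference, independent of gRPC:

#include <stdlib.h>

typedef struct point { int x, y; } point;

point *make_point(void) {
  /* Legal C, rejected by a C++ compiler:
   *   point *p = malloc(sizeof(*p));
   * The explicit cast compiles in both languages: */
  point *p = (point *)malloc(sizeof(*p));
  p->x = 0;
  p->y = 0;
  return p;
}
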
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index c51e2a431a..b38c076f38 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -23,6 +23,10 @@
#include <grpc/impl/codegen/grpc_types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_grpclb_client_stats grpc_grpclb_client_stats;
typedef struct {
@@ -61,5 +65,9 @@ void grpc_grpclb_client_stats_get_locked(
void grpc_grpclb_dropped_call_counts_destroy(
grpc_grpclb_dropped_call_counts* drop_entries);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
*/
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 6fa29f326e..4d5fb2081c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- grpc_grpclb_serverlist *sl = *arg;
+ grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,9 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
- decode_serverlist_arg *dec_arg = *arg;
+ decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
- grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
+ grpc_grpclb_server *server =
+ (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,7 +60,8 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
}
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
- grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -78,14 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- char *str = *arg;
+ char *str = (char *)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
return pb_encode_string(stream, (uint8_t *)str, strlen(str));
}
static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
- grpc_grpclb_dropped_call_counts *drop_entries = *arg;
+ grpc_grpclb_dropped_call_counts *drop_entries =
+ (grpc_grpclb_dropped_call_counts *)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -104,7 +107,8 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats) {
- grpc_grpclb_request *req = gpr_zalloc(sizeof(grpc_grpclb_request));
+ grpc_grpclb_request *req =
+ (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -144,7 +148,8 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
if (request->has_client_stats) {
grpc_grpclb_dropped_call_counts *drop_entries =
- request->client_stats.calls_finished_with_drop.arg;
+ (grpc_grpclb_dropped_call_counts *)
+ request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
gpr_free(request);
@@ -166,7 +171,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
grpc_grpclb_initial_response *initial_res =
- gpr_malloc(sizeof(grpc_grpclb_initial_response));
+ (grpc_grpclb_initial_response *)gpr_malloc(
+ sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -179,7 +185,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *sl =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -193,7 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = gpr_zalloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ sl->servers = (grpc_grpclb_server **)gpr_zalloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -226,13 +234,16 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist *copy =
+ (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
- copy->servers = gpr_malloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
+ copy->servers = (grpc_grpclb_server **)gpr_malloc(
+ sizeof(grpc_grpclb_server *) * sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
- copy->servers[i] = gpr_malloc(sizeof(grpc_grpclb_server));
+ copy->servers[i] =
+ (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
@@ -288,13 +299,10 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb) {
- gpr_timespec duration;
- duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0;
- duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0;
- duration.clock_type = GPR_TIMESPAN;
- return duration;
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
+ return (grpc_millis)(
+ (duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
+ (duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
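
The replacement returns whole milliseconds instead of a gpr_timespec: seconds are scaled by GPR_MS_PER_SEC (1000) and nanos divided by GPR_NS_PER_MS (1000000), with any sub-millisecond remainder truncated. A tiny restatement of the arithmetic with plain integers, for checking:

#include <stdint.h>

static int64_t duration_to_millis(int64_t seconds, int32_t nanos) {
  return seconds * 1000 + nanos / 1000000;
}
/* duration_to_millis(1, 250000000) == 1250; nanos below 1 ms are dropped. */
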
void grpc_grpclb_initial_response_destroy(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index c4a98492c9..56b9c096d0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -81,8 +81,7 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
const grpc_grpclb_duration *rhs);
-gpr_timespec grpc_grpclb_duration_to_timespec(
- grpc_grpclb_duration *duration_pb);
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
deleted file mode 100644
index a50ba09bf5..0000000000
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c
+++ /dev/null
@@ -1,710 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-
-#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
-#include "src/core/ext/filters/client_channel/subchannel.h"
-#include "src/core/ext/filters/client_channel/subchannel_index.h"
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/iomgr/combiner.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/transport/connectivity_state.h"
-
-grpc_tracer_flag grpc_lb_pick_first_trace =
- GRPC_TRACER_INITIALIZER(false, "pick_first");
-
-typedef struct pending_pick {
- struct pending_pick *next;
- uint32_t initial_metadata_flags;
- grpc_connected_subchannel **target;
- grpc_closure *on_complete;
-} pending_pick;
-
-typedef struct {
- /** base policy: must be first */
- grpc_lb_policy base;
- /** all our subchannels */
- grpc_subchannel **subchannels;
- grpc_subchannel **new_subchannels;
- size_t num_subchannels;
- size_t num_new_subchannels;
-
- grpc_closure connectivity_changed;
-
- /** remaining members are protected by the combiner */
-
- /** the selected channel */
- grpc_connected_subchannel *selected;
-
- /** the subchannel key for \a selected, or NULL if \a selected not set */
- const grpc_subchannel_key *selected_key;
-
- /** have we started picking? */
- bool started_picking;
- /** are we shut down? */
- bool shutdown;
- /** are we updating the selected subchannel? */
- bool updating_selected;
- /** are we updating the subchannel candidates? */
- bool updating_subchannels;
- /** args from the latest update received while already updating, or NULL */
- grpc_lb_policy_args *pending_update_args;
- /** which subchannel are we watching? */
- size_t checking_subchannel;
- /** what is the connectivity of that channel? */
- grpc_connectivity_state checking_connectivity;
- /** list of picks that are waiting on connectivity */
- pending_pick *pending_picks;
-
- /** our connectivity state tracker */
- grpc_connectivity_state_tracker state_tracker;
-} pick_first_lb_policy;
-
-static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- GPR_ASSERT(p->pending_picks == NULL);
- for (size_t i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first_destroy");
- }
- if (p->selected != NULL) {
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
- "picked_first_destroy");
- }
- grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
- if (p->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
- gpr_free(p->pending_update_args);
- }
- gpr_free(p->subchannels);
- gpr_free(p->new_subchannels);
- gpr_free(p);
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
- }
-}
-
-static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- p->shutdown = true;
- pp = p->pending_picks;
- p->pending_picks = NULL;
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
- /* cancel subscription */
- if (p->selected != NULL) {
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
- } else if (p->num_subchannels > 0 && p->started_picking) {
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
- &p->connectivity_changed);
- }
- while (pp != NULL) {
- pending_pick *next = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- pp = next;
- }
-}
-
-static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
- pending_pick *next = pp->next;
- if (pp->target == target) {
- *target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick Cancelled", &error, 1));
- gpr_free(pp);
- } else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
- }
- pp = next;
- }
- GRPC_ERROR_UNREF(error);
-}
-
-static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- uint32_t initial_metadata_flags_mask,
- uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
- pp = p->pending_picks;
- p->pending_picks = NULL;
- while (pp != NULL) {
- pending_pick *next = pp->next;
- if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
- initial_metadata_flags_eq) {
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick Cancelled", &error, 1));
- gpr_free(pp);
- } else {
- pp->next = p->pending_picks;
- p->pending_picks = pp;
- }
- pp = next;
- }
- GRPC_ERROR_UNREF(error);
-}
-
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- p->started_picking = true;
- if (p->subchannels != NULL) {
- GPR_ASSERT(p->num_subchannels > 0);
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- }
-}
-
-static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- if (!p->started_picking) {
- start_picking_locked(exec_ctx, p);
- }
-}
-
-static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp;
-
- /* Check atomically for a selected channel */
- if (p->selected != NULL) {
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
- return 1;
- }
-
- /* No subchannel selected yet, so try again */
- if (!p->started_picking) {
- start_picking_locked(exec_ctx, p);
- }
- pp = gpr_malloc(sizeof(*pp));
- pp->next = p->pending_picks;
- pp->target = target;
- pp->initial_metadata_flags = pick_args->initial_metadata_flags;
- pp->on_complete = on_complete;
- p->pending_picks = pp;
- return 0;
-}
-
-static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- size_t num_subchannels = p->num_subchannels;
- grpc_subchannel **subchannels = p->subchannels;
-
- p->num_subchannels = 0;
- p->subchannels = NULL;
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
-
- for (size_t i = 0; i < num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
- }
- gpr_free(subchannels);
-}
-
-static grpc_connectivity_state pf_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- return grpc_connectivity_state_get(&p->state_tracker, error);
-}
-
-static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
- current, notify);
-}
-
-static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- if (p->selected) {
- grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
- } else {
- GRPC_CLOSURE_SCHED(exec_ctx, closure,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
- }
-}
-
-/* unsubscribe all subchannels */
-static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
- if (p->num_subchannels > 0) {
- GPR_ASSERT(p->selected == NULL);
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p",
- (void *)p, (void *)p->subchannels[p->checking_subchannel]);
- }
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
- &p->connectivity_changed);
- p->updating_subchannels = true;
- } else if (p->selected != NULL) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG,
- "Pick First %p unsubscribing from selected subchannel %p",
- (void *)p, (void *)p->selected);
- }
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
- p->updating_selected = true;
- }
-}
-
-/* true upon success */
-static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
- const grpc_arg *arg =
- grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
- if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
- if (p->subchannels == NULL) {
- // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
- "pf_update_missing");
- } else {
- // otherwise, keep using the current subchannel list (ignore this update).
- gpr_log(GPR_ERROR,
- "No valid LB addresses channel arg for Pick First %p update, "
- "ignoring.",
- (void *)p);
- }
- return;
- }
- const grpc_lb_addresses *addresses = arg->value.pointer.p;
- if (addresses->num_addresses == 0) {
- // Empty update. Unsubscribe from all current subchannels and put the
- // channel in TRANSIENT_FAILURE.
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
- "pf_update_empty");
- stop_connectivity_watchers(exec_ctx, p);
- return;
- }
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void *)p, (unsigned long)addresses->num_addresses);
- }
- grpc_subchannel_args *sc_args =
- gpr_zalloc(sizeof(*sc_args) * addresses->num_addresses);
- /* We remove the following keys in order for subchannel keys belonging to
- * subchannels point to the same address to match. */
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
- GRPC_ARG_LB_ADDRESSES};
- size_t sc_args_count = 0;
-
- /* Create list of subchannel args for new addresses in \a args. */
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- // If there were any balancer, we would have chosen grpclb policy instead.
- GPR_ASSERT(!addresses->addresses[i].is_balancer);
- if (addresses->addresses[i].user_data != NULL) {
- gpr_log(GPR_ERROR,
- "This LB policy doesn't support user data. It will be ignored");
- }
- grpc_arg addr_arg =
- grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
- 1);
- gpr_free(addr_arg.value.string);
- sc_args[sc_args_count++].args = new_args;
- }
-
- /* Check if p->selected is amongst them. If so, we are done. */
- if (p->selected != NULL) {
- GPR_ASSERT(p->selected_key != NULL);
- for (size_t i = 0; i < sc_args_count; i++) {
- grpc_subchannel_key *ith_sc_key = grpc_subchannel_key_create(&sc_args[i]);
- const bool found_selected =
- grpc_subchannel_key_compare(p->selected_key, ith_sc_key) == 0;
- grpc_subchannel_key_destroy(exec_ctx, ith_sc_key);
- if (found_selected) {
- // The currently selected subchannel is in the update: we are done.
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Pick First %p found already selected subchannel %p amongst "
- "updates. Update done.",
- (void *)p, (void *)p->selected);
- }
- for (size_t j = 0; j < sc_args_count; j++) {
- grpc_channel_args_destroy(exec_ctx,
- (grpc_channel_args *)sc_args[j].args);
- }
- gpr_free(sc_args);
- return;
- }
- }
- }
- // We only check for already running updates here because if the previous
- // steps were successful, the update can be considered done without any
- // interference (ie, no callbacks were scheduled).
- if (p->updating_selected || p->updating_subchannels) {
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Update already in progress for pick first %p. Deferring update.",
- (void *)p);
- }
- if (p->pending_update_args != NULL) {
- grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
- gpr_free(p->pending_update_args);
- }
- p->pending_update_args = gpr_zalloc(sizeof(*p->pending_update_args));
- p->pending_update_args->client_channel_factory =
- args->client_channel_factory;
- p->pending_update_args->args = grpc_channel_args_copy(args->args);
- p->pending_update_args->combiner = args->combiner;
- return;
- }
- /* Create the subchannels for the new subchannel args/addresses. */
- grpc_subchannel **new_subchannels =
- gpr_zalloc(sizeof(*new_subchannels) * sc_args_count);
- size_t num_new_subchannels = 0;
- for (size_t i = 0; i < sc_args_count; i++) {
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
- exec_ctx, args->client_channel_factory, &sc_args[i]);
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- char *address_uri =
- grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_INFO,
- "Pick First %p created subchannel %p for address uri %s",
- (void *)p, (void *)subchannel, address_uri);
- gpr_free(address_uri);
- }
- grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)sc_args[i].args);
- if (subchannel != NULL) new_subchannels[num_new_subchannels++] = subchannel;
- }
- gpr_free(sc_args);
- if (num_new_subchannels == 0) {
- gpr_free(new_subchannels);
- // Empty update. Unsubscribe from all current subchannels and put the
- // channel in TRANSIENT_FAILURE.
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid addresses in update"),
- "pf_update_no_valid_addresses");
- stop_connectivity_watchers(exec_ctx, p);
- return;
- }
-
- /* Destroy the current subchannels. Repurpose pf_shutdown/destroy. */
- stop_connectivity_watchers(exec_ctx, p);
-
- /* Save new subchannels. The switch over will happen in
- * pf_connectivity_changed_locked */
- if (p->updating_selected || p->updating_subchannels) {
- p->num_new_subchannels = num_new_subchannels;
- p->new_subchannels = new_subchannels;
- } else { /* nothing is updating. Get things moving from here */
- p->num_subchannels = num_new_subchannels;
- p->subchannels = new_subchannels;
- p->new_subchannels = NULL;
- p->num_new_subchannels = 0;
- if (p->started_picking) {
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- }
- }
-}
-
-static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- pick_first_lb_policy *p = arg;
- grpc_subchannel *selected_subchannel;
- pending_pick *pp;
-
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(
- GPR_DEBUG,
- "Pick First %p connectivity changed. Updating selected: %d; Updating "
- "subchannels: %d; Checking %lu index (%lu total); State: %d; ",
- (void *)p, p->updating_selected, p->updating_subchannels,
- (unsigned long)p->checking_subchannel,
- (unsigned long)p->num_subchannels, p->checking_connectivity);
- }
- bool restart = false;
- if (p->updating_selected && error != GRPC_ERROR_NONE) {
- /* Captured the unsubscription for p->selected */
- GPR_ASSERT(p->selected != NULL);
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected,
- "pf_update_connectivity");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p",
- (void *)p, (void *)p->selected);
- }
- p->updating_selected = false;
- if (p->num_new_subchannels == 0) {
- p->selected = NULL;
- return;
- }
- restart = true;
- }
- if (p->updating_subchannels && error != GRPC_ERROR_NONE) {
- /* Captured the unsubscription for the checking subchannel */
- GPR_ASSERT(p->selected == NULL);
- for (size_t i = 0; i < p->num_subchannels; i++) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i],
- "pf_update_connectivity");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p,
- (void *)p->subchannels[i]);
- }
- }
- gpr_free(p->subchannels);
- p->subchannels = NULL;
- p->num_subchannels = 0;
- p->updating_subchannels = false;
- if (p->num_new_subchannels == 0) return;
- restart = true;
- }
- if (restart) {
- p->selected = NULL;
- p->selected_key = NULL;
- GPR_ASSERT(p->new_subchannels != NULL);
- GPR_ASSERT(p->num_new_subchannels > 0);
- p->num_subchannels = p->num_new_subchannels;
- p->subchannels = p->new_subchannels;
- p->num_new_subchannels = 0;
- p->new_subchannels = NULL;
- if (p->started_picking) {
- /* If we were picking, continue to do so over the new subchannels,
- * starting from the 0th index. */
- p->checking_subchannel = 0;
- p->checking_connectivity = GRPC_CHANNEL_IDLE;
- /* reuses the weak ref from start_picking_locked */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- }
- if (p->pending_update_args != NULL) {
- const grpc_lb_policy_args *args = p->pending_update_args;
- p->pending_update_args = NULL;
- pf_update_locked(exec_ctx, &p->base, args);
- }
- return;
- }
- GRPC_ERROR_REF(error);
- if (p->shutdown) {
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
- GRPC_ERROR_UNREF(error);
- return;
- } else if (p->selected != NULL) {
- if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
- /* if the selected channel goes bad, we're done */
- p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN;
- }
- grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
- p->checking_connectivity, GRPC_ERROR_REF(error),
- "selected_changed");
- if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) {
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, p->base.interested_parties,
- &p->checking_connectivity, &p->connectivity_changed);
- } else {
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
- }
- } else {
- loop:
- switch (p->checking_connectivity) {
- case GRPC_CHANNEL_INIT:
- GPR_UNREACHABLE_CODE(return );
- case GRPC_CHANNEL_READY:
- grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
- GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
- "connecting_ready");
- selected_subchannel = p->subchannels[p->checking_subchannel];
- p->selected = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected_subchannel),
- "picked_first");
-
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Pick First %p selected subchannel %p (connected %p)",
- (void *)p, (void *)selected_subchannel, (void *)p->selected);
- }
- p->selected_key = grpc_subchannel_get_key(selected_subchannel);
- /* drop the pick list: we are connected now */
- GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
- destroy_subchannels_locked(exec_ctx, p);
- /* update any calls that were waiting for a pick */
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked");
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO,
- "Servicing pending pick with selected subchannel %p",
- (void *)p->selected);
- }
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
- grpc_connected_subchannel_notify_on_state_change(
- exec_ctx, p->selected, p->base.interested_parties,
- &p->checking_connectivity, &p->connectivity_changed);
- break;
- case GRPC_CHANNEL_TRANSIENT_FAILURE:
- p->checking_subchannel =
- (p->checking_subchannel + 1) % p->num_subchannels;
- if (p->checking_subchannel == 0) {
- /* only trigger transient failure when we've tried all alternatives
- */
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_REF(error), "connecting_transient_failure");
- }
- GRPC_ERROR_UNREF(error);
- p->checking_connectivity = grpc_subchannel_check_connectivity(
- p->subchannels[p->checking_subchannel], &error);
- if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- } else {
- goto loop;
- }
- break;
- case GRPC_CHANNEL_CONNECTING:
- case GRPC_CHANNEL_IDLE:
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
- GRPC_ERROR_REF(error), "connecting_changed");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, p->subchannels[p->checking_subchannel],
- p->base.interested_parties, &p->checking_connectivity,
- &p->connectivity_changed);
- break;
- case GRPC_CHANNEL_SHUTDOWN:
- p->num_subchannels--;
- GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
- p->subchannels[p->num_subchannels]);
- GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
- "pick_first");
- if (p->num_subchannels == 0) {
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick first exhausted channels", &error, 1),
- "no_more_channels");
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
- "pick_first_connectivity");
- } else {
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_REF(error), "subchannel_failed");
- p->checking_subchannel %= p->num_subchannels;
- GRPC_ERROR_UNREF(error);
- p->checking_connectivity = grpc_subchannel_check_connectivity(
- p->subchannels[p->checking_subchannel], &error);
- goto loop;
- }
- }
- }
-
- GRPC_ERROR_UNREF(error);
-}
-
-static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
- pf_destroy,
- pf_shutdown_locked,
- pf_pick_locked,
- pf_cancel_pick_locked,
- pf_cancel_picks_locked,
- pf_ping_one_locked,
- pf_exit_idle_locked,
- pf_check_connectivity_locked,
- pf_notify_on_state_change_locked,
- pf_update_locked};
-
-static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
-
-static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
-
-static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
- GPR_ASSERT(args->client_channel_factory != NULL);
- pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
- if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
- }
- pf_update_locked(exec_ctx, &p->base, args);
- grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
- GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p,
- grpc_combiner_scheduler(args->combiner));
- return &p->base;
-}
-
-static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
- pick_first_factory_ref, pick_first_factory_unref, create_pick_first,
- "pick_first"};
-
-static grpc_lb_policy_factory pick_first_lb_policy_factory = {
- &pick_first_factory_vtable};
-
-static grpc_lb_policy_factory *pick_first_lb_factory_create() {
- return &pick_first_lb_policy_factory;
-}
-
-/* Plugin registration */
-
-void grpc_lb_policy_pick_first_init() {
- grpc_register_lb_policy(pick_first_lb_factory_create());
- grpc_register_tracer(&grpc_lb_pick_first_trace);
-}
-
-void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
new file mode 100644
index 0000000000..f0c66c68e1
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -0,0 +1,617 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+grpc_tracer_flag grpc_lb_pick_first_trace =
+ GRPC_TRACER_INITIALIZER(false, "pick_first");
+
+typedef struct pending_pick {
+ struct pending_pick *next;
+ uint32_t initial_metadata_flags;
+ grpc_connected_subchannel **target;
+ grpc_closure *on_complete;
+} pending_pick;
+
+typedef struct {
+ /** base policy: must be first */
+ grpc_lb_policy base;
+ /** all our subchannels */
+ grpc_lb_subchannel_list *subchannel_list;
+ /** latest pending subchannel list */
+ grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ /** selected subchannel in \a subchannel_list */
+ grpc_lb_subchannel_data *selected;
+ /** have we started picking? */
+ bool started_picking;
+ /** are we shut down? */
+ bool shutdown;
+ /** list of picks that are waiting on connectivity */
+ pending_pick *pending_picks;
+ /** our connectivity state tracker */
+ grpc_connectivity_state_tracker state_tracker;
+} pick_first_lb_policy;
+
+static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
+ GPR_ASSERT(p->pending_picks == NULL);
+ grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ gpr_free(p);
+ grpc_subchannel_index_unref();
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
+ }
+}
+
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
+ grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
+ }
+ p->shutdown = true;
+ pending_pick *pp;
+ while ((pp = p->pending_picks) != NULL) {
+ p->pending_picks = pp->next;
+ *pp->target = NULL;
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
+ gpr_free(pp);
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+ "shutdown");
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "pf_shutdown");
+ p->subchannel_list = NULL;
+ }
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
+ p->latest_pending_subchannel_list = NULL;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
+}
+
+static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_connected_subchannel **target,
+ grpc_error *error) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if (pp->target == target) {
+ *target = NULL;
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ uint32_t initial_metadata_flags_mask,
+ uint32_t initial_metadata_flags_eq,
+ grpc_error *error) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ pending_pick *pp = p->pending_picks;
+ p->pending_picks = NULL;
+ while (pp != NULL) {
+ pending_pick *next = pp->next;
+ if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
+ initial_metadata_flags_eq) {
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
+ gpr_free(pp);
+ } else {
+ pp->next = p->pending_picks;
+ p->pending_picks = pp;
+ }
+ pp = next;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
+static void start_picking_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
+ p->started_picking = true;
+ if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
+ p->subchannel_list->checking_subchannel = 0;
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ p->subchannel_list, "connectivity_watch+start_picking");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &p->subchannel_list->subchannels[0]);
+ }
+}
+
+static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ if (!p->started_picking) {
+ start_picking_locked(exec_ctx, p);
+ }
+}
+
+static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ const grpc_lb_policy_pick_args *pick_args,
+ grpc_connected_subchannel **target,
+ grpc_call_context_element *context, void **user_data,
+ grpc_closure *on_complete) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ // If we have a selected subchannel already, return synchronously.
+ if (p->selected != NULL) {
+ *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
+ "picked");
+ return 1;
+ }
+ // No subchannel selected yet, so handle asynchronously.
+ if (!p->started_picking) {
+ start_picking_locked(exec_ctx, p);
+ }
+ pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pp->next = p->pending_picks;
+ pp->target = target;
+ pp->initial_metadata_flags = pick_args->initial_metadata_flags;
+ pp->on_complete = on_complete;
+ p->pending_picks = pp;
+ return 0;
+}
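
pf_pick_locked either completes synchronously (returns 1 with *target set once a subchannel is selected) or parks the pick on the pending_picks singly linked list and returns 0; the connectivity callback later drains that list once a subchannel reports READY. The same push-then-drain shape in isolation, with hypothetical names:

#include <stdlib.h>

typedef struct waiter {
  struct waiter *next;
  void (*on_ready)(void *user_data);
  void *user_data;
} waiter;

/* O(1) push onto the head of the list, like prepending to pending_picks. */
static void enqueue_waiter(waiter **head, void (*cb)(void *), void *ud) {
  waiter *w = (waiter *)malloc(sizeof(*w));
  w->next = *head;
  w->on_ready = cb;
  w->user_data = ud;
  *head = w;
}

/* Invoke and free every parked waiter once the resource is available. */
static void drain_waiters(waiter **head) {
  waiter *w;
  while ((w = *head) != NULL) {
    *head = w->next;
    w->on_ready(w->user_data);
    free(w);
  }
}
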
+
+static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
+ pick_first_lb_policy *p) {
+ for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
+ grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
+ if (p->selected != sd) {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "selected_different_subchannel");
+ }
+ }
+}
+
+static grpc_connectivity_state pf_check_connectivity_locked(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ return grpc_connectivity_state_get(&p->state_tracker, error);
+}
+
+static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy *pol,
+ grpc_connectivity_state *current,
+ grpc_closure *notify) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
+ current, notify);
+}
+
+static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+ grpc_closure *closure) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ if (p->selected) {
+ grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
+ closure);
+ } else {
+ GRPC_CLOSURE_SCHED(exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
+ }
+}
+
+static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
+
+static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ const grpc_lb_policy_args *args) {
+ pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
+ const grpc_arg *arg =
+ grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+ if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ if (p->subchannel_list == NULL) {
+ // If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
+ "pf_update_missing");
+ } else {
+ // Otherwise, keep using the current subchannel list (ignore this update).
+ gpr_log(GPR_ERROR,
+ "No valid LB addresses channel arg for Pick First %p update, "
+ "ignoring.",
+ (void *)p);
+ }
+ return;
+ }
+ const grpc_lb_addresses *addresses =
+ (const grpc_lb_addresses *)arg->value.pointer.p;
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
+ (void *)p, (unsigned long)addresses->num_addresses);
+ }
+ grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
+ pf_connectivity_changed_locked);
+ if (subchannel_list->num_subchannels == 0) {
+ // Empty update or no valid subchannels. Unsubscribe from all current
+ // subchannels and put the channel in TRANSIENT_FAILURE.
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
+ "pf_update_empty");
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_empty_update");
+ }
+ p->subchannel_list = subchannel_list; // Empty list.
+ p->selected = NULL;
+ return;
+ }
+ if (p->selected == NULL) {
+ // We don't yet have a selected subchannel, so replace the current
+ // subchannel list immediately.
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "pf_update_before_selected");
+ }
+ p->subchannel_list = subchannel_list;
+ } else {
+ // We do have a selected subchannel.
+ // Check if it's present in the new list. If so, we're done.
+ for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ if (sd->subchannel == p->selected->subchannel) {
+ // The currently selected subchannel is in the update: we are done.
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_INFO,
+ "Pick First %p found already selected subchannel %p "
+ "at update index %" PRIuPTR " of %" PRIuPTR "; update done",
+ p, p->selected->subchannel, i,
+ subchannel_list->num_subchannels);
+ }
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ subchannel_list, "connectivity_watch+replace_selected");
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "pf_update_includes_selected");
+ }
+ p->subchannel_list = subchannel_list;
+ if (p->selected->connected_subchannel != NULL) {
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "pf_update_includes_selected");
+ }
+ p->selected = sd;
+ destroy_unselected_subchannels_locked(exec_ctx, p);
+ // If there was a previously pending update (which may or may
+ // not have contained the currently selected subchannel), drop
+ // it, so that it doesn't override what we've done here.
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "pf_update_includes_selected+outdated");
+ p->latest_pending_subchannel_list = NULL;
+ }
+ return;
+ }
+ }
+ // Not keeping the previous selected subchannel, so set the latest
+ // pending subchannel list to the new subchannel list. We will wait
+ // for it to report READY before swapping it into the current
+ // subchannel list.
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG,
+ "Pick First %p Shutting down latest pending subchannel list "
+ "%p, about to be replaced by newer latest %p",
+ (void *)p, (void *)p->latest_pending_subchannel_list,
+ (void *)subchannel_list);
+ }
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "sl_outdated_dont_smash");
+ }
+ p->latest_pending_subchannel_list = subchannel_list;
+ }
+ // If we've started picking, start trying to connect to the first
+ // subchannel in the new list.
+ if (p->started_picking) {
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ subchannel_list, "connectivity_watch+update");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &subchannel_list->subchannels[0]);
+ }
+}
+
+static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
+ pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG,
+ "Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
+ " of %" PRIuPTR
+ "), subchannel_list %p: state=%s p->shutdown=%d "
+ "sd->subchannel_list->shutting_down=%d error=%s",
+ (void *)p, (void *)sd->subchannel,
+ sd->subchannel_list->checking_subchannel,
+ sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
+ grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
+ p->shutdown, sd->subchannel_list->shutting_down,
+ grpc_error_string(error));
+ }
+ // If the policy is shutting down, unref and return.
+ if (p->shutdown) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_shutdown");
+ return;
+ }
+ // If the subchannel list is shutting down, stop watching.
+ if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_sl_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_sl_shutdown");
+ return;
+ }
+ // If we're still here, the notification must be for a subchannel in
+ // either the current or latest pending subchannel lists.
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
+ sd->subchannel_list == p->latest_pending_subchannel_list);
+ // Update state.
+ sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
+ // Handle updates for the currently selected subchannel.
+ if (p->selected == sd) {
+ // If the new state is anything other than READY and there is a
+ // pending update, switch to the pending update.
+ if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
+ p->latest_pending_subchannel_list != NULL) {
+ p->selected = NULL;
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
+ p->subchannel_list = p->latest_pending_subchannel_list;
+ p->latest_pending_subchannel_list = NULL;
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
+ } else {
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* if the selected subchannel goes bad, we're done */
+ sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
+ }
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ sd->curr_connectivity_state,
+ GRPC_ERROR_REF(error), "selected_changed");
+ if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ } else {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
+ shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
+ }
+ }
+ return;
+ }
+ // If we get here, there are two possible cases:
+ // 1. We do not currently have a selected subchannel, and the update is
+ // for a subchannel in p->subchannel_list that we're trying to
+ // connect to. The goal here is to find a subchannel that we can
+ // select.
+ // 2. We do currently have a selected subchannel, and the update is
+ // for a subchannel in p->latest_pending_subchannel_list. The
+ // goal here is to find a subchannel from the update that we can
+ // select in place of the current one.
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
+ sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ }
+ while (true) {
+ switch (sd->curr_connectivity_state) {
+ case GRPC_CHANNEL_READY: {
+ // Case 2. Promote p->latest_pending_subchannel_list to
+ // p->subchannel_list.
+ if (sd->subchannel_list == p->latest_pending_subchannel_list) {
+ GPR_ASSERT(p->subchannel_list != NULL);
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "finish_update");
+ p->subchannel_list = p->latest_pending_subchannel_list;
+ p->latest_pending_subchannel_list = NULL;
+ }
+ // Cases 1 and 2.
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
+ "connecting_ready");
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(sd->subchannel),
+ "connected");
+ p->selected = sd;
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
+ (void *)sd->subchannel);
+ }
+ // Drop all other subchannels, since we are now connected.
+ destroy_unselected_subchannels_locked(exec_ctx, p);
+ // Update any calls that were waiting for a pick.
+ pending_pick *pp;
+ while ((pp = p->pending_picks)) {
+ p->pending_picks = pp->next;
+ *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ p->selected->connected_subchannel, "picked");
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_INFO,
+ "Servicing pending pick with selected subchannel %p",
+ (void *)p->selected);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+ gpr_free(pp);
+ }
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ case GRPC_CHANNEL_TRANSIENT_FAILURE: {
+ do {
+ sd->subchannel_list->checking_subchannel =
+ (sd->subchannel_list->checking_subchannel + 1) %
+ sd->subchannel_list->num_subchannels;
+ sd = &sd->subchannel_list
+ ->subchannels[sd->subchannel_list->checking_subchannel];
+ } while (sd->subchannel == NULL);
+ // Case 1: Only set state to TRANSIENT_FAILURE if we've tried
+ // all subchannels.
+ if (sd->subchannel_list->checking_subchannel == 0 &&
+ sd->subchannel_list == p->subchannel_list) {
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error), "connecting_transient_failure");
+ }
+ sd->curr_connectivity_state =
+ grpc_subchannel_check_connectivity(sd->subchannel, &error);
+ GRPC_ERROR_UNREF(error);
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ // Reuses the connectivity refs from the previous watch.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ break; // Go back to top of loop.
+ }
+ case GRPC_CHANNEL_CONNECTING:
+ case GRPC_CHANNEL_IDLE: {
+ // Only update connectivity state in case 1.
+ if (sd->subchannel_list == p->subchannel_list) {
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
+ GRPC_ERROR_REF(error), "connecting_changed");
+ }
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ case GRPC_CHANNEL_SHUTDOWN: {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "pf_candidate_shutdown");
+ // Advance to next subchannel and check its state.
+ grpc_lb_subchannel_data *original_sd = sd;
+ do {
+ sd->subchannel_list->checking_subchannel =
+ (sd->subchannel_list->checking_subchannel + 1) %
+ sd->subchannel_list->num_subchannels;
+ sd = &sd->subchannel_list
+ ->subchannels[sd->subchannel_list->checking_subchannel];
+ } while (sd->subchannel == NULL && sd != original_sd);
+ if (sd == original_sd) {
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
+ shutdown_locked(exec_ctx, p,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick first exhausted channels", &error, 1));
+ return;
+ }
+ if (sd->subchannel_list == p->subchannel_list) {
+ grpc_connectivity_state_set(
+ exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_REF(error), "subchannel_failed");
+ }
+ sd->curr_connectivity_state =
+ grpc_subchannel_check_connectivity(sd->subchannel, &error);
+ GRPC_ERROR_UNREF(error);
+ if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ // Reuses the connectivity refs from the previous watch.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+ return;
+ }
+ // For any other state, go back to top of loop.
+ // We will reuse the connectivity refs from the previous watch.
+ }
+ }
+ }
+}
+
+static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
+ pf_destroy,
+ pf_shutdown_locked,
+ pf_pick_locked,
+ pf_cancel_pick_locked,
+ pf_cancel_picks_locked,
+ pf_ping_one_locked,
+ pf_exit_idle_locked,
+ pf_check_connectivity_locked,
+ pf_notify_on_state_change_locked,
+ pf_update_locked};
+
+static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
+
+static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
+
+static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
+ grpc_lb_policy_factory *factory,
+ grpc_lb_policy_args *args) {
+ GPR_ASSERT(args->client_channel_factory != NULL);
+ pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
+ if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
+ gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
+ }
+ pf_update_locked(exec_ctx, &p->base, args);
+ grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
+ grpc_subchannel_index_ref();
+ return &p->base;
+}
+
+static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
+ pick_first_factory_ref, pick_first_factory_unref, create_pick_first,
+ "pick_first"};
+
+static grpc_lb_policy_factory pick_first_lb_policy_factory = {
+ &pick_first_factory_vtable};
+
+static grpc_lb_policy_factory *pick_first_lb_factory_create() {
+ return &pick_first_lb_policy_factory;
+}
+
+/* Plugin registration */
+
+extern "C" void grpc_lb_policy_pick_first_init() {
+ grpc_register_lb_policy(pick_first_lb_factory_create());
+ grpc_register_tracer(&grpc_lb_pick_first_trace);
+}
+
+extern "C" void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 0d70409713..fae40e378b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -28,8 +28,10 @@
#include <grpc/support/alloc.h>
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -63,12 +65,11 @@ typedef struct pending_pick {
grpc_closure *on_complete;
} pending_pick;
-typedef struct rr_subchannel_list rr_subchannel_list;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- rr_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list *subchannel_list;
/** have we started picking? */
bool started_picking;
@@ -88,156 +89,9 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
- rr_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list *latest_pending_subchannel_list;
} round_robin_lb_policy;
-typedef struct {
- /** backpointer to owning subchannel list */
- rr_subchannel_list *subchannel_list;
- /** subchannel itself */
- grpc_subchannel *subchannel;
- /** notification that connectivity has changed on subchannel */
- grpc_closure connectivity_changed_closure;
- /** last observed connectivity. Not updated by
- * \a grpc_subchannel_notify_on_state_change. Used to determine the previous
- * state while processing the new state in \a rr_connectivity_changed */
- grpc_connectivity_state prev_connectivity_state;
- /** current connectivity state. Updated by \a
- * grpc_subchannel_notify_on_state_change */
- grpc_connectivity_state curr_connectivity_state;
- /** connectivity state to be updated by the watcher, not guarded by
- * the combiner. Will be moved to curr_connectivity_state inside of
- * the combiner by rr_connectivity_changed_locked(). */
- grpc_connectivity_state pending_connectivity_state_unsafe;
- /** the subchannel's target user data */
- void *user_data;
- /** vtable to operate over \a user_data */
- const grpc_lb_user_data_vtable *user_data_vtable;
-} subchannel_data;
-
-struct rr_subchannel_list {
- /** backpointer to owning policy */
- round_robin_lb_policy *policy;
-
- /** all our subchannels */
- size_t num_subchannels;
- subchannel_data *subchannels;
-
- /** how many subchannels are in state READY */
- size_t num_ready;
- /** how many subchannels are in state TRANSIENT_FAILURE */
- size_t num_transient_failures;
- /** how many subchannels are in state SHUTDOWN */
- size_t num_shutdown;
- /** how many subchannels are in state IDLE */
- size_t num_idle;
-
- /** There will be one ref for each entry in subchannels for which there is a
- * pending connectivity state watcher callback. */
- gpr_refcount refcount;
-
- /** Is this list shutting down? This may be true due to the shutdown of the
- * policy itself or because a newer update has arrived while this one hadn't
- * finished processing. */
- bool shutting_down;
-};
-
-static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
- size_t num_subchannels) {
- rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
- subchannel_list->policy = p;
- subchannel_list->subchannels =
- gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
- subchannel_list->num_subchannels = num_subchannels;
- gpr_ref_init(&subchannel_list->refcount, 1);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels",
- (void *)p, (void *)subchannel_list, (unsigned long)num_subchannels);
- }
- return subchannel_list;
-}
-
-static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
- rr_subchannel_list *subchannel_list) {
- GPR_ASSERT(subchannel_list->shutting_down);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Destroying subchannel_list %p",
- (void *)subchannel_list->policy, (void *)subchannel_list);
- }
- for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &subchannel_list->subchannels[i];
- if (sd->subchannel != NULL) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel,
- "rr_subchannel_list_destroy");
- }
- sd->subchannel = NULL;
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
- sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = NULL;
- }
- }
- gpr_free(subchannel_list->subchannels);
- gpr_free(subchannel_list);
-}
-
-static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list,
- const char *reason) {
- gpr_ref_non_zero(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list,
- (unsigned long)(count - 1), (unsigned long)count, reason);
- }
-}
-
-static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- rr_subchannel_list *subchannel_list,
- const char *reason) {
- const bool done = gpr_unref(&subchannel_list->refcount);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
- gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list,
- (unsigned long)(count + 1), (unsigned long)count, reason);
- }
- if (done) {
- rr_subchannel_list_destroy(exec_ctx, subchannel_list);
- }
-}
-
-/** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The
- * watcher's callback will ultimately unref \a subchannel_list. */
-static void rr_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list,
- const char *reason) {
- GPR_ASSERT(!subchannel_list->shutting_down);
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)",
- (void *)subchannel_list->policy, (void *)subchannel_list, reason);
- }
- GPR_ASSERT(!subchannel_list->shutting_down);
- subchannel_list->shutting_down = true;
- for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &subchannel_list->subchannels[i];
- if (sd->subchannel != NULL) { // if subchannel isn't shutdown, unsubscribe.
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(
- GPR_DEBUG,
- "[RR %p] Unsubscribing from subchannel %p as part of shutting down "
- "subchannel_list %p",
- (void *)subchannel_list->policy, (void *)sd->subchannel,
- (void *)subchannel_list);
- }
- grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
- NULL,
- &sd->connectivity_changed_closure);
- }
- }
- rr_subchannel_list_unref(exec_ctx, subchannel_list, reason);
-}
-
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY.
@@ -297,8 +151,8 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void *)p, (unsigned long)last_ready_index,
(void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
- (void *)grpc_subchannel_get_connected_subchannel(
- p->subchannel_list->subchannels[last_ready_index].subchannel));
+ (void *)p->subchannel_list->subchannels[last_ready_index]
+ .connected_subchannel);
}
}
@@ -308,41 +162,47 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
(void *)pol, (void *)pol);
}
+ GPR_ASSERT(p->subchannel_list == NULL);
+ GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+ grpc_subchannel_index_unref();
gpr_free(p);
}
-static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
+ grpc_error *error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p",
- (void *)pol, (void *)pol);
+ gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
pending_pick *pp;
- while ((pp = p->pending_picks)) {
+ while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
- GRPC_CLOSURE_SCHED(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+ GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
gpr_free(pp);
}
- grpc_connectivity_state_set(
- exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
- const bool latest_is_current =
- p->subchannel_list == p->latest_pending_subchannel_list;
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_shutdown_rr_shutdown");
- p->subchannel_list = NULL;
- if (!latest_is_current && p->latest_pending_subchannel_list != NULL &&
- !p->latest_pending_subchannel_list->shutting_down) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx,
- p->latest_pending_subchannel_list,
- "sl_shutdown_pending_rr_shutdown");
+ grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+ GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+ "rr_shutdown");
+ if (p->subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_rr_shutdown");
+ p->subchannel_list = NULL;
+ }
+ if (p->latest_pending_subchannel_list != NULL) {
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list,
+ "sl_shutdown_pending_rr_shutdown");
p->latest_pending_subchannel_list = NULL;
}
+ GRPC_ERROR_UNREF(error);
+}
+
+static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
+ round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ shutdown_locked(exec_ctx, p,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
@@ -397,13 +257,10 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
- subchannel_data *sd = &p->subchannel_list->subchannels[i];
- GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked");
- rr_subchannel_list_ref(sd->subchannel_list, "started_picking");
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
+ "connectivity_watch");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &p->subchannel_list->subchannels[i]);
}
}
@@ -429,10 +286,10 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
- subchannel_data *sd = &p->subchannel_list->subchannels[next_ready_index];
- *target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(sd->subchannel),
- "rr_picked");
+ grpc_lb_subchannel_data *sd =
+ &p->subchannel_list->subchannels[next_ready_index];
+ *target =
+ GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
if (user_data != NULL) {
*user_data = sd->user_data;
}
@@ -453,7 +310,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = gpr_malloc(sizeof(*pp));
+ pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -463,8 +320,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void update_state_counters_locked(subchannel_data *sd) {
- rr_subchannel_list *subchannel_list = sd->subchannel_list;
+static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
+ grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@@ -478,6 +335,7 @@ static void update_state_counters_locked(subchannel_data *sd) {
GPR_ASSERT(subchannel_list->num_idle > 0);
--subchannel_list->num_idle;
}
+ sd->prev_connectivity_state = sd->curr_connectivity_state;
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
++subchannel_list->num_ready;
} else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
@@ -490,12 +348,12 @@ static void update_state_counters_locked(subchannel_data *sd) {
}
/** Sets the policy's connectivity status based on that of the passed-in \a sd
- * (the subchannel_data associted with the updated subchannel) and the
+ * (the grpc_lb_subchannel_data associated with the updated subchannel) and the
* subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -517,8 +375,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
- rr_subchannel_list *subchannel_list = sd->subchannel_list;
- round_robin_lb_policy *p = subchannel_list->policy;
+ grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
+ round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@@ -535,8 +393,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"rr_shutdown");
p->shutdown = true;
+ new_state = GRPC_CHANNEL_SHUTDOWN;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- new_state = GRPC_CHANNEL_SHUTDOWN;
gpr_log(GPR_INFO,
"[RR %p] Shutting down: all subchannels have gone into shutdown",
(void *)p);
@@ -559,8 +417,9 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- subchannel_data *sd = arg;
- round_robin_lb_policy *p = sd->subchannel_list->policy;
+ grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
+ round_robin_lb_policy *p =
+ (round_robin_lb_policy *)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
@@ -575,71 +434,50 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
// If the policy is shutting down, unref and return.
if (p->shutdown) {
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "pol_shutdown+started_picking");
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown");
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_shutdown");
return;
}
- if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) {
- // the subchannel list associated with sd has been discarded. This callback
- // corresponds to the unsubscription. The unrefs correspond to the picking
- // ref (start_picking_locked or update_started_picking).
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "sl_shutdown+started_picking");
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking");
- return;
- }
- // Dispose of outdated subchannel lists.
- if (sd->subchannel_list != p->subchannel_list &&
- sd->subchannel_list != p->latest_pending_subchannel_list) {
- char *reason = NULL;
- if (sd->subchannel_list->shutting_down) {
- reason = "sl_outdated_straggler";
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason);
- } else {
- reason = "sl_outdated";
- rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list,
- reason);
- }
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, reason);
+ // If the subchannel list is shutting down, stop watching.
+ if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_sl_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_sl_shutdown");
return;
}
+ // If we're still here, the notification must be for a subchannel in
+ // either the current or latest pending subchannel lists.
+ GPR_ASSERT(sd->subchannel_list == p->subchannel_list ||
+ sd->subchannel_list == p->latest_pending_subchannel_list);
// Now that we're inside the combiner, copy the pending connectivity
// state (which was set by the connectivity state watcher) to
// curr_connectivity_state, which is what we use inside of the combiner.
sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
// Update state counters and determine new overall state.
update_state_counters_locked(sd);
- sd->prev_connectivity_state = sd->curr_connectivity_state;
const grpc_connectivity_state new_policy_connectivity_state =
update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
// If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
// policy's state is SHUTDOWN, clean up.
if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
- sd->subchannel = NULL;
- if (sd->user_data != NULL) {
- GPR_ASSERT(sd->user_data_vtable != NULL);
- sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
- sd->user_data = NULL;
- }
+ grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "rr_connectivity_shutdown");
+ grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
- // the policy is shutting down. Flush all the pending picks...
- pending_pick *pp;
- while ((pp = p->pending_picks)) {
- p->pending_picks = pp->next;
- *pp->target = NULL;
- GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
- gpr_free(pp);
- }
+ shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
}
- rr_subchannel_list_unref(exec_ctx, sd->subchannel_list,
- "sd_shutdown+started_picking");
- // unref the "rr_connectivity_update" weak ref from start_picking.
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
- "rr_connectivity_sd_shutdown");
} else { // sd not in SHUTDOWN
if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
+ if (sd->connected_subchannel == NULL) {
+ sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_subchannel_get_connected_subchannel(sd->subchannel),
+ "connected");
+ }
if (sd->subchannel_list != p->subchannel_list) {
// promote sd->subchannel_list to p->subchannel_list.
// sd->subchannel_list must be equal to
@@ -660,8 +498,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_phase_out_shutdown");
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
}
p->subchannel_list = p->latest_pending_subchannel_list;
p->latest_pending_subchannel_list = NULL;
@@ -671,7 +509,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
- subchannel_data *selected =
+ grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
@@ -682,8 +520,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected->subchannel),
- "rr_picked");
+ selected->connected_subchannel, "rr_picked");
if (pp->user_data != NULL) {
*pp->user_data = selected->user_data;
}
@@ -698,12 +535,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(pp);
}
}
- /* renew notification: reuses the "rr_connectivity_update" weak ref on the
- * policy as well as the sd->subchannel_list ref. */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ // Renew notification.
+ grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
}
}
@@ -727,13 +560,12 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
- subchannel_data *selected =
+ grpc_lb_subchannel_data *selected =
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
- grpc_subchannel_get_connected_subchannel(selected->subchannel),
- "rr_picked");
+ selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
- GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
@@ -746,134 +578,68 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const grpc_arg *arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+ gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
+ // If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
+ // Otherwise, keep using the current subchannel list (ignore this update).
if (p->subchannel_list == NULL) {
- // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
"rr_update_missing");
- } else {
- // otherwise, keep using the current subchannel list (ignore this update).
- gpr_log(GPR_ERROR,
- "[RR %p] No valid LB addresses channel arg for update, ignoring.",
- (void *)p);
}
return;
}
- grpc_lb_addresses *addresses = arg->value.pointer.p;
+ grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Update with %lu addressees (shutdown: %d)",
- (void *)p, (unsigned long)addresses->num_addresses, p->shutdown);
+ gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
+ addresses->num_addresses);
}
- rr_subchannel_list *subchannel_list =
- rr_subchannel_list_create(p, addresses->num_addresses);
- if (addresses->num_addresses == 0) {
+ grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
+ rr_connectivity_changed_locked);
+ if (subchannel_list->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"rr_update_empty");
if (p->subchannel_list != NULL) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "sl_shutdown_empty_update");
+ grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+ "sl_shutdown_empty_update");
}
p->subchannel_list = subchannel_list; // empty list
return;
}
- size_t subchannel_index = 0;
- if (p->latest_pending_subchannel_list != NULL && p->started_picking) {
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG,
- "[RR %p] Shutting down latest pending subchannel list %p, about "
- "to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
- }
- rr_subchannel_list_shutdown_and_unref(
- exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
- }
- p->latest_pending_subchannel_list = subchannel_list;
- grpc_subchannel_args sc_args;
- /* We need to remove the LB addresses in order to be able to compare the
- * subchannel keys of subchannels from a different batch of addresses. */
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
- GRPC_ARG_LB_ADDRESSES};
- /* Create subchannels for addresses in the update. */
- for (size_t i = 0; i < addresses->num_addresses; i++) {
- // If there were any balancer, we would have chosen grpclb policy instead.
- GPR_ASSERT(!addresses->addresses[i].is_balancer);
- memset(&sc_args, 0, sizeof(grpc_subchannel_args));
- grpc_arg addr_arg =
- grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
- 1);
- gpr_free(addr_arg.value.string);
- sc_args.args = new_args;
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
- exec_ctx, args->client_channel_factory, &sc_args);
- grpc_channel_args_destroy(exec_ctx, new_args);
- grpc_error *error;
- // Get the connectivity state of the subchannel. Already existing ones may
- // be in a state other than INIT.
- const grpc_connectivity_state subchannel_connectivity_state =
- grpc_subchannel_check_connectivity(subchannel, &error);
- if (error != GRPC_ERROR_NONE) {
- // The subchannel is in error (e.g. shutting down). Ignore it.
- GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error");
- GRPC_ERROR_UNREF(error);
- continue;
- }
- if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- char *address_uri =
- grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(
- GPR_DEBUG,
- "[RR %p] index %lu: Created subchannel %p for address uri %s into "
- "subchannel_list %p. Connectivity state %s",
- (void *)p, (unsigned long)subchannel_index, (void *)subchannel,
- address_uri, (void *)subchannel_list,
- grpc_connectivity_state_name(subchannel_connectivity_state));
- gpr_free(address_uri);
- }
- subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++];
- sd->subchannel_list = subchannel_list;
- sd->subchannel = subchannel;
- GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
- rr_connectivity_changed_locked, sd,
- grpc_combiner_scheduler(args->combiner));
- /* use some sentinel value outside of the range of
- * grpc_connectivity_state to signal an undefined previous state. We
- * won't be referring to this value again and it'll be overwritten after
- * the first call to rr_connectivity_changed_locked */
- sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
- sd->curr_connectivity_state = subchannel_connectivity_state;
- sd->user_data_vtable = addresses->user_data_vtable;
- if (sd->user_data_vtable != NULL) {
- sd->user_data =
- sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ if (p->started_picking) {
+ if (p->latest_pending_subchannel_list != NULL) {
+ if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+ gpr_log(GPR_DEBUG,
+ "[RR %p] Shutting down latest pending subchannel list %p, "
+ "about to be replaced by newer latest %p",
+ (void *)p, (void *)p->latest_pending_subchannel_list,
+ (void *)subchannel_list);
+ }
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
}
- if (p->started_picking) {
- rr_subchannel_list_ref(sd->subchannel_list, "update_started_picking");
- GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity_update");
- /* 2. Watch every new subchannel. A subchannel list becomes active the
+ p->latest_pending_subchannel_list = subchannel_list;
+ for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
+ /* Watch every new subchannel. A subchannel list becomes active the
* moment one of its subchannels is READY. At that moment, we swap
* p->subchannel_list for sd->subchannel_list, provided the subchannel
* list is still valid (ie, isn't shutting down) */
- grpc_subchannel_notify_on_state_change(
- exec_ctx, sd->subchannel, p->base.interested_parties,
- &sd->pending_connectivity_state_unsafe,
- &sd->connectivity_changed_closure);
+ grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
+ "connectivity_watch");
+ grpc_lb_subchannel_data_start_connectivity_watch(
+ exec_ctx, &subchannel_list->subchannels[i]);
}
- }
- if (!p->started_picking) {
+ } else {
// The policy isn't picking yet. Save the update for later, disposing of
// previous version if any.
if (p->subchannel_list != NULL) {
- rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
- "rr_update_before_started_picking");
+ grpc_lb_subchannel_list_shutdown_and_unref(
+ exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
}
p->subchannel_list = subchannel_list;
- p->latest_pending_subchannel_list = NULL;
}
}
@@ -897,8 +663,9 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- round_robin_lb_policy *p = gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
+ grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
@@ -922,9 +689,9 @@ static grpc_lb_policy_factory *round_robin_lb_factory_create() {
/* Plugin registration */
-void grpc_lb_policy_round_robin_init() {
+extern "C" void grpc_lb_policy_round_robin_init() {
grpc_register_lb_policy(round_robin_lb_factory_create());
grpc_register_tracer(&grpc_lb_round_robin_trace);
}
-void grpc_lb_policy_round_robin_shutdown() {}
+extern "C" void grpc_lb_policy_round_robin_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
new file mode 100644
index 0000000000..08ea4f480b
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -0,0 +1,265 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_data *sd,
+ const char *reason) {
+ if (sd->subchannel != NULL) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
+ " of %" PRIuPTR " (subchannel %p): unreffing subchannel",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
+ sd->subchannel = NULL;
+ if (sd->connected_subchannel != NULL) {
+ GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
+ reason);
+ sd->connected_subchannel = NULL;
+ }
+ if (sd->user_data != NULL) {
+ GPR_ASSERT(sd->user_data_vtable != NULL);
+ sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+ sd->user_data = NULL;
+ }
+ }
+}
+
+void grpc_lb_subchannel_data_start_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): requesting connectivity change notification",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ sd->connectivity_notification_pending = true;
+ grpc_subchannel_notify_on_state_change(
+ exec_ctx, sd->subchannel, sd->subchannel_list->policy->interested_parties,
+ &sd->pending_connectivity_state_unsafe,
+ &sd->connectivity_changed_closure);
+}
+
+void grpc_lb_subchannel_data_stop_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): stopping connectivity watch",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
+ }
+ GPR_ASSERT(sd->connectivity_notification_pending);
+ sd->connectivity_notification_pending = false;
+}
+
+grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
+ const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+ grpc_iomgr_cb_func connectivity_changed_cb) {
+ grpc_lb_subchannel_list *subchannel_list =
+ (grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
+ if (GRPC_TRACER_ON(*tracer)) {
+ gpr_log(GPR_DEBUG,
+ "[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
+ tracer->name, p, subchannel_list, addresses->num_addresses);
+ }
+ subchannel_list->policy = p;
+ subchannel_list->tracer = tracer;
+ gpr_ref_init(&subchannel_list->refcount, 1);
+ subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
+ sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
+ // We need to remove the LB addresses in order to be able to compare the
+ // subchannel keys of subchannels from a different batch of addresses.
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
+ GRPC_ARG_LB_ADDRESSES};
+ // Create a subchannel for each address.
+ grpc_subchannel_args sc_args;
+ size_t subchannel_index = 0;
+ for (size_t i = 0; i < addresses->num_addresses; i++) {
+ // If there were any balancers, we would have chosen the grpclb policy instead.
+ GPR_ASSERT(!addresses->addresses[i].is_balancer);
+ memset(&sc_args, 0, sizeof(grpc_subchannel_args));
+ grpc_arg addr_arg =
+ grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
+ gpr_free(addr_arg.value.string);
+ sc_args.args = new_args;
+ grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
+ exec_ctx, args->client_channel_factory, &sc_args);
+ grpc_channel_args_destroy(exec_ctx, new_args);
+ if (subchannel == NULL) {
+ // Subchannel could not be created.
+ if (GRPC_TRACER_ON(*tracer)) {
+ char *address_uri =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] could not create subchannel for address uri %s, "
+ "ignoring",
+ tracer->name, subchannel_list->policy, address_uri);
+ gpr_free(address_uri);
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(*tracer)) {
+ char *address_uri =
+ grpc_sockaddr_to_uri(&addresses->addresses[i].address);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
+ ": Created subchannel %p for address uri %s",
+ tracer->name, p, subchannel_list, subchannel_index, subchannel,
+ address_uri);
+ gpr_free(address_uri);
+ }
+ grpc_lb_subchannel_data *sd =
+ &subchannel_list->subchannels[subchannel_index++];
+ sd->subchannel_list = subchannel_list;
+ sd->subchannel = subchannel;
+ GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure,
+ connectivity_changed_cb, sd,
+ grpc_combiner_scheduler(args->combiner));
+ // We assume that the current state is IDLE. If not, we'll get a
+ // callback telling us that.
+ sd->prev_connectivity_state = GRPC_CHANNEL_IDLE;
+ sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
+ sd->pending_connectivity_state_unsafe = GRPC_CHANNEL_IDLE;
+ sd->user_data_vtable = addresses->user_data_vtable;
+ if (sd->user_data_vtable != NULL) {
+ sd->user_data =
+ sd->user_data_vtable->copy(addresses->addresses[i].user_data);
+ }
+ }
+ subchannel_list->num_subchannels = subchannel_index;
+ subchannel_list->num_idle = subchannel_index;
+ return subchannel_list;
+}
+
+static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list) {
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list);
+ }
+ for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+ "subchannel_list_destroy");
+ }
+ gpr_free(subchannel_list->subchannels);
+ gpr_free(subchannel_list);
+}
+
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ gpr_ref_non_zero(&subchannel_list->refcount);
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p REF %lu->%lu (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, (unsigned long)(count - 1), (unsigned long)count,
+ reason);
+ }
+}
+
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ const bool done = gpr_unref(&subchannel_list->refcount);
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
+ gpr_log(GPR_DEBUG, "[%s %p] subchannel_list %p UNREF %lu->%lu (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, (unsigned long)(count + 1), (unsigned long)count,
+ reason);
+ }
+ if (done) {
+ subchannel_list_destroy(exec_ctx, subchannel_list);
+ }
+}
+
+void grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ grpc_lb_subchannel_list *subchannel_list, const char *reason) {
+ GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
+ grpc_lb_subchannel_list_ref(subchannel_list, reason);
+}
+
+void grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
+ grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+}
+
+static void subchannel_data_cancel_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
+ if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
+ gpr_log(
+ GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): canceling connectivity watch (%s)",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel, reason);
+ }
+ grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
+ &sd->connectivity_changed_closure);
+}
+
+void grpc_lb_subchannel_list_shutdown_and_unref(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason) {
+ if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
+ gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
+ subchannel_list->tracer->name, subchannel_list->policy,
+ subchannel_list, reason);
+ }
+ GPR_ASSERT(!subchannel_list->shutting_down);
+ subchannel_list->shutting_down = true;
+ for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
+ grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ // If there's a pending notification for this subchannel, cancel it;
+ // the callback is responsible for unreffing the subchannel.
+ // Otherwise, unref the subchannel directly.
+ if (sd->connectivity_notification_pending) {
+ subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
+ } else if (sd->subchannel != NULL) {
+ grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
+ }
+ }
+ grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+}
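
For orientation, a minimal sketch of how an owning LB policy might invoke the
shutdown helper defined above when it is destroyed or superseded by a newer
address update. The my_policy type and field names are illustration-only
assumptions, not part of this change:

  static void my_policy_shutdown_locked(grpc_exec_ctx *exec_ctx,
                                        grpc_lb_policy *pol) {
    my_policy *p = (my_policy *)pol;  /* hypothetical policy struct */
    if (p->subchannel_list != NULL) {
      /* Marks the list as shutting down, cancels pending connectivity
         watches, and drops this policy's ref; any outstanding watch
         callbacks release the remaining refs. */
      grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
                                                 "my_policy_shutdown");
      p->subchannel_list = NULL;
    }
  }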
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
new file mode 100644
index 0000000000..9d5984260f
--- /dev/null
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H
+
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+// TODO(roth): This code is intended to be shared between pick_first and
+// round_robin. However, the interface needs more work to provide clean
+// encapsulation. For example, the structs here have some fields that are
+// only used in one of the two (e.g., the state counters in
+// grpc_lb_subchannel_list and the prev_connectivity_state field in
+// grpc_lb_subchannel_data are only used in round_robin, and the
+// checking_subchannel field in grpc_lb_subchannel_list is only used by
+// pick_first). Also, there is probably some code duplication between the
+// connectivity state notification callback code in both pick_first and
+// round_robin that could be refactored and moved here. This should be
+// cleaned up in a future PR.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
+
+typedef struct {
+ /** backpointer to owning subchannel list */
+ grpc_lb_subchannel_list *subchannel_list;
+ /** subchannel itself */
+ grpc_subchannel *subchannel;
+ grpc_connected_subchannel *connected_subchannel;
+ /** Is a connectivity notification pending? */
+ bool connectivity_notification_pending;
+ /** notification that connectivity has changed on subchannel */
+ grpc_closure connectivity_changed_closure;
+  /** previous and current connectivity states. Updated by
+ * \a connectivity_changed_closure based on
+ * \a pending_connectivity_state_unsafe. */
+ grpc_connectivity_state prev_connectivity_state;
+ grpc_connectivity_state curr_connectivity_state;
+ /** connectivity state to be updated by
+ * grpc_subchannel_notify_on_state_change(), not guarded by
+ * the combiner. To be copied to \a curr_connectivity_state by
+ * \a connectivity_changed_closure. */
+ grpc_connectivity_state pending_connectivity_state_unsafe;
+ /** the subchannel's target user data */
+ void *user_data;
+ /** vtable to operate over \a user_data */
+ const grpc_lb_user_data_vtable *user_data_vtable;
+} grpc_lb_subchannel_data;
+
+/// Unrefs the subchannel contained in sd.
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_data *sd,
+ const char *reason);
+
+/// Starts watching the connectivity state of the subchannel.
+/// The connectivity_changed_cb callback must invoke either
+/// grpc_lb_subchannel_data_stop_connectivity_watch() or call
+/// grpc_lb_subchannel_data_start_connectivity_watch() again.
+void grpc_lb_subchannel_data_start_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+
+/// Stops watching the connectivity state of the subchannel.
+void grpc_lb_subchannel_data_stop_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+
+struct grpc_lb_subchannel_list {
+ /** backpointer to owning policy */
+ grpc_lb_policy *policy;
+
+ grpc_tracer_flag *tracer;
+
+ /** all our subchannels */
+ size_t num_subchannels;
+ grpc_lb_subchannel_data *subchannels;
+
+ /** Index into subchannels of the one we're currently checking.
+ * Used when connecting to subchannels serially instead of in parallel. */
+ // TODO(roth): When we have time, we can probably make this go away
+ // and compute the index dynamically by subtracting
+ // subchannel_list->subchannels from the subchannel_data pointer.
+ size_t checking_subchannel;
+
+ /** how many subchannels are in state READY */
+ size_t num_ready;
+ /** how many subchannels are in state TRANSIENT_FAILURE */
+ size_t num_transient_failures;
+ /** how many subchannels are in state SHUTDOWN */
+ size_t num_shutdown;
+ /** how many subchannels are in state IDLE */
+ size_t num_idle;
+
+ /** There will be one ref for each entry in subchannels for which there is a
+ * pending connectivity state watcher callback. */
+ gpr_refcount refcount;
+
+ /** Is this list shutting down? This may be true due to the shutdown of the
+   * policy itself or because a newer update arrived before this one finished
+   * processing. */
+ bool shutting_down;
+};
+
+grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
+ grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
+ const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+ grpc_iomgr_cb_func connectivity_changed_cb);
+
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
+ grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+/// Takes and releases refs needed for a connectivity notification.
+/// This includes a ref to subchannel_list and a weak ref to the LB policy.
+void grpc_lb_subchannel_list_ref_for_connectivity_watch(
+ grpc_lb_subchannel_list *subchannel_list, const char *reason);
+void grpc_lb_subchannel_list_unref_for_connectivity_watch(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+/// Marks subchannel_list as discarded and unsubscribes all its subchannels.
+/// The connectivity state notification callback will ultimately unref it.
+void grpc_lb_subchannel_list_shutdown_and_unref(
+ grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
+ const char *reason);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */
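
To make the start/stop contract above concrete, here is a rough sketch of a
connectivity callback as a policy such as round_robin might register it via
grpc_lb_subchannel_list_create(). The callback name and the elided
policy-update step are assumptions for illustration only:

  static void my_connectivity_changed_cb(grpc_exec_ctx *exec_ctx, void *arg,
                                         grpc_error *error) {
    grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
    /* Copy the state written outside the combiner into the guarded field. */
    sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
    if (sd->subchannel_list->shutting_down || error != GRPC_ERROR_NONE) {
      /* Stop the watch and drop the refs taken by
         grpc_lb_subchannel_list_ref_for_connectivity_watch(). */
      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "watch_cancelled");
      grpc_lb_subchannel_list_unref_for_connectivity_watch(
          exec_ctx, sd->subchannel_list, "watch_cancelled");
      return;
    }
    /* ...update the policy's state counters / picker from
       sd->prev_connectivity_state and sd->curr_connectivity_state... */
    grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
  }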
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index 538d8d65ed..05ab43d0b6 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.c
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -28,11 +28,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
- grpc_lb_addresses* addresses = gpr_zalloc(sizeof(grpc_lb_addresses));
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
- addresses->addresses = gpr_zalloc(addresses_size);
+ addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
return addresses;
}
@@ -55,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) {
}
void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
- void* address, size_t address_len,
+ const void* address, size_t address_len,
bool is_balancer, const char* balancer_name,
void* user_data) {
GPR_ASSERT(index < addresses->num_addresses);
@@ -125,13 +126,14 @@ void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
}
static void* lb_addresses_copy(void* addresses) {
- return grpc_lb_addresses_copy(addresses);
+ return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
}
static void lb_addresses_destroy(grpc_exec_ctx* exec_ctx, void* addresses) {
- grpc_lb_addresses_destroy(exec_ctx, addresses);
+ grpc_lb_addresses_destroy(exec_ctx, (grpc_lb_addresses*)addresses);
}
static int lb_addresses_cmp(void* addresses1, void* addresses2) {
- return grpc_lb_addresses_cmp(addresses1, addresses2);
+ return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
+ (grpc_lb_addresses*)addresses2);
}
static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp};
@@ -139,7 +141,7 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = {
grpc_arg grpc_lb_addresses_create_channel_arg(
const grpc_lb_addresses* addresses) {
return grpc_channel_arg_pointer_create(
- GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
+ (char*)GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable);
}
grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
@@ -148,7 +150,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES);
if (lb_addresses_arg == NULL || lb_addresses_arg->type != GRPC_ARG_POINTER)
return NULL;
- return lb_addresses_arg->value.pointer.p;
+ return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p;
}
void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) {
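
The casts added in this file (and across the .c -> .cc renames that follow)
are there because C++ rejects the implicit void* conversions that C allows on
the results of gpr_zalloc()/gpr_realloc(). A minimal illustration, using a
hypothetical my_struct:

  typedef struct { int x; } my_struct;
  my_struct *a = gpr_zalloc(sizeof(my_struct));               /* valid C, rejected by C++ */
  my_struct *b = (my_struct *)gpr_zalloc(sizeof(my_struct));  /* valid in both */

The (char*) cast on GRPC_ARG_LB_ADDRESSES has the same motivation: grpc_arg
keys are plain char*, and C++ does not allow a string literal to convert to a
non-const char* implicitly.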
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 9d9fb143df..8790ffdda3 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -29,6 +29,10 @@
// Channel arg key for grpc_lb_addresses.
#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
@@ -73,7 +77,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
- void *address, size_t address_len,
+ const void *address, size_t address_len,
bool is_balancer, const char *balancer_name,
void *user_data);
@@ -130,4 +134,8 @@ grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.c b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index f2460f8304..f2460f8304 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.c
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index f5995687cf..55154cb02a 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Initialize the registry and set \a default_factory as the factory to be
* returned when no name is provided in a lookup */
void grpc_lb_policy_registry_init(void);
@@ -37,4 +41,8 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
grpc_lb_policy_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
diff --git a/src/core/ext/filters/client_channel/parse_address.c b/src/core/ext/filters/client_channel/parse_address.cc
index 2152b5a1e9..2152b5a1e9 100644
--- a/src/core/ext/filters/client_channel/parse_address.c
+++ b/src/core/ext/filters/client_channel/parse_address.cc
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index c90a827da5..27d06a1cb3 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -24,6 +24,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
@@ -45,4 +49,8 @@ bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
bool log_errors);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.c b/src/core/ext/filters/client_channel/proxy_mapper.cc
index c6ea5fc680..c6ea5fc680 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper.cc
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.h b/src/core/ext/filters/client_channel/proxy_mapper.h
index a13861ccaf..bb8259f854 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_proxy_mapper grpc_proxy_mapper;
typedef struct {
@@ -71,4 +75,8 @@ bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.c b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index 5f43a0596a..09967eea3c 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
- list->list = gpr_realloc(
+ list->list = (grpc_proxy_mapper**)gpr_realloc(
list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.h b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
index 99e54d1a78..39c607cefc 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/proxy_mapper.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_proxy_mapper_registry_init();
void grpc_proxy_mapper_registry_shutdown();
@@ -41,4 +45,8 @@ bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx,
grpc_resolved_address** new_address,
grpc_channel_args** new_args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */
diff --git a/src/core/ext/filters/client_channel/resolver.c b/src/core/ext/filters/client_channel/resolver.cc
index 8401504fcf..8401504fcf 100644
--- a/src/core/ext/filters/client_channel/resolver.c
+++ b/src/core/ext/filters/client_channel/resolver.cc
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index ae9c8f66fe..73fbbbbc3b 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/iomgr/iomgr.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
@@ -87,4 +91,8 @@ void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_channel_args **result,
grpc_closure *on_complete);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H */
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index f1480bb1ae..5f7ab987cb 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -32,13 +32,13 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/json/json.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/service_config.h"
@@ -89,7 +89,7 @@ typedef struct {
bool have_retry_timer;
grpc_timer retry_timer;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_lb_addresses *lb_addresses;
@@ -137,14 +137,14 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
ares_dns_resolver *r = (ares_dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@@ -204,7 +204,7 @@ static char *choose_service_config(char *service_config_choice_json) {
int random_pct = rand() % 100;
int percentage;
if (sscanf(field->value, "%d", &percentage) != 1 ||
- random_pct > percentage) {
+ random_pct > percentage || percentage == 0) {
service_config_json = NULL;
break;
}
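
The added percentage == 0 check fixes a selection edge case: random_pct is
drawn from [0, 99], so a choice configured with 0 percent could previously
still be picked whenever the draw happened to be 0. The rule rewritten as a
standalone predicate (a sketch; the helper name is assumed):

  #include <stdbool.h>
  #include <stdlib.h>

  /* Returns true when a service-config choice with the given canary
     percentage should be applied to this channel. */
  static bool config_choice_selected(int percentage) {
    if (percentage == 0) return false;  /* explicitly disabled */
    int random_pct = rand() % 100;      /* uniform over 0..99 */
    return random_pct <= percentage;    /* kept with roughly percentage% odds */
  }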
@@ -227,7 +227,7 @@ static char *choose_service_config(char *service_config_choice_json) {
static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- ares_dns_resolver *r = arg;
+ ares_dns_resolver *r = (ares_dns_resolver *)arg;
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
@@ -249,7 +249,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- GRPC_ARG_SERVICE_CONFIG, service_config_string);
+ (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
const char *lb_policy_name =
@@ -257,7 +257,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
+ (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
}
}
}
@@ -271,22 +271,20 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
- &r->dns_ares_on_retry_timer_locked, now);
+ &r->dns_ares_on_retry_timer_locked);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -307,7 +305,7 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
} else {
dns_ares_maybe_finish_next_locked(exec_ctx, r);
@@ -363,7 +361,8 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
const char *path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
- ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
+ ares_dns_resolver *r =
+ (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
@@ -380,11 +379,11 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
dns_ares_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
@@ -424,7 +423,7 @@ static grpc_resolver_factory *dns_ares_resolver_factory_create() {
return &dns_resolver_factory;
}
-void grpc_resolver_dns_ares_init(void) {
+extern "C" void grpc_resolver_dns_ares_init(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
@@ -440,7 +439,7 @@ void grpc_resolver_dns_ares_init(void) {
gpr_free(resolver);
}
-void grpc_resolver_dns_ares_shutdown(void) {
+extern "C" void grpc_resolver_dns_ares_shutdown(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_ares_cleanup();
@@ -450,8 +449,8 @@ void grpc_resolver_dns_ares_shutdown(void) {
#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
-void grpc_resolver_dns_ares_init(void) {}
+extern "C" void grpc_resolver_dns_ares_init(void) {}
-void grpc_resolver_dns_ares_shutdown(void) {}
+extern "C" void grpc_resolver_dns_ares_shutdown(void) {}
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
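
The gpr_backoff -> grpc_backoff switch in this resolver (and in the native
resolver further down) also changes the time units of the retry machinery:
grpc_backoff_step() yields an absolute grpc_millis deadline on the exec_ctx
clock, and grpc_timer_init() now takes that deadline directly instead of a
(deadline, now) pair. Condensed from the hunks above, the retry scheduling
reduces to:

  grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
  grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);  /* only for logging */
  grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
                  &r->dns_ares_on_retry_timer_locked);           /* absolute deadline */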
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 386012d2ed..3d4309f2fa 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -22,6 +22,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
@@ -49,5 +53,9 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
grpc_ares_ev_driver *ev_driver);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index b696344eab..c30cc93b6f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -20,6 +20,7 @@
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
#include <ares.h>
+#include <sys/ioctl.h>
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"
@@ -37,8 +38,6 @@
typedef struct fd_node {
/** the owner of this fd node */
grpc_ares_ev_driver *ev_driver;
- /** the grpc_fd owned by this fd node */
- grpc_fd *grpc_fd;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@@ -50,10 +49,14 @@ typedef struct fd_node {
/** mutex guarding the rest of the state */
gpr_mu mu;
+ /** the grpc_fd owned by this fd node */
+ grpc_fd *fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
bool writable_registered;
+ /** if the fd is being shut down */
+ bool shutting_down;
} fd_node;
struct grpc_ares_ev_driver {
@@ -96,22 +99,34 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
- gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
gpr_mu_destroy(&fdn->mu);
- grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->grpc_fd);
/* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
immediately by another thread, and should not be closed by the following
grpc_fd_orphan. */
- grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, NULL, true /* already_closed */,
+ grpc_fd_orphan(exec_ctx, fdn->fd, NULL, NULL, true /* already_closed */,
"c-ares query finished");
gpr_free(fdn);
}
+static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+ gpr_mu_lock(&fdn->mu);
+ fdn->shutting_down = true;
+ if (!fdn->readable_registered && !fdn->writable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ } else {
+ grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "c-ares fd shutdown"));
+ gpr_mu_unlock(&fdn->mu);
+ }
+}
+
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
- *ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
+ *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@@ -150,9 +165,8 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
ev_driver->shutting_down = true;
fd_node *fn = ev_driver->fds;
while (fn != NULL) {
- grpc_fd_shutdown(
- exec_ctx, fn->grpc_fd,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
+ grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@@ -165,7 +179,7 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
dummy_head.next = *head;
fd_node *node = &dummy_head;
while (node->next != NULL) {
- if (grpc_fd_wrapped_fd(node->next->grpc_fd) == fd) {
+ if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
fd_node *ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
@@ -176,18 +190,33 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
return NULL;
}
+/* Check if \a fd is still readable */
+static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
+ int fd) {
+ size_t bytes_available = 0;
+ return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
+}
+
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
+ const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
+ if (fdn->shutting_down && !fdn->writable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ grpc_ares_ev_driver_unref(ev_driver);
+ return;
+ }
gpr_mu_unlock(&fdn->mu);
- gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "readable on %d", fd);
if (error == GRPC_ERROR_NONE) {
- ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->grpc_fd),
- ARES_SOCKET_BAD);
+ do {
+ ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD);
+ } while (grpc_ares_is_fd_still_readable(ev_driver, fd));
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@@ -205,16 +234,22 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- fd_node *fdn = arg;
+ fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
+ const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
+ if (fdn->shutting_down && !fdn->readable_registered) {
+ gpr_mu_unlock(&fdn->mu);
+ fd_node_destroy(exec_ctx, fdn);
+ grpc_ares_ev_driver_unref(ev_driver);
+ return;
+ }
gpr_mu_unlock(&fdn->mu);
- gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ gpr_log(GPR_DEBUG, "writable on %d", fd);
if (error == GRPC_ERROR_NONE) {
- ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
- grpc_fd_wrapped_fd(fdn->grpc_fd));
+ ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
// timed out. The pending lookups made on this ev_driver will be cancelled
@@ -251,19 +286,19 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (fdn == NULL) {
char *fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = gpr_malloc(sizeof(fd_node));
+ fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
- fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
+ fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
fdn->readable_registered = false;
fdn->writable_registered = false;
+ fdn->shutting_down = false;
gpr_mu_init(&fdn->mu);
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
grpc_schedule_on_exec_ctx);
- grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
- fdn->grpc_fd);
+ grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set, fdn->fd);
gpr_free(fd_name);
}
fdn->next = new_list;
@@ -274,9 +309,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
!fdn->readable_registered) {
grpc_ares_ev_driver_ref(ev_driver);
- gpr_log(GPR_DEBUG, "notify read on: %d",
- grpc_fd_wrapped_fd(fdn->grpc_fd));
- grpc_fd_notify_on_read(exec_ctx, fdn->grpc_fd, &fdn->read_closure);
+ gpr_log(GPR_DEBUG, "notify read on: %d", grpc_fd_wrapped_fd(fdn->fd));
+ grpc_fd_notify_on_read(exec_ctx, fdn->fd, &fdn->read_closure);
fdn->readable_registered = true;
}
// Register write_closure if the socket is writable and write_closure
@@ -284,9 +318,9 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
!fdn->writable_registered) {
gpr_log(GPR_DEBUG, "notify write on: %d",
- grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_fd_wrapped_fd(fdn->fd));
grpc_ares_ev_driver_ref(ev_driver);
- grpc_fd_notify_on_write(exec_ctx, fdn->grpc_fd, &fdn->write_closure);
+ grpc_fd_notify_on_write(exec_ctx, fdn->fd, &fdn->write_closure);
fdn->writable_registered = true;
}
gpr_mu_unlock(&fdn->mu);
@@ -299,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
while (ev_driver->fds != NULL) {
fd_node *cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
- fd_node_destroy(exec_ctx, cur);
+ fd_node_shutdown(exec_ctx, cur);
}
ev_driver->fds = new_list;
// If the ev driver has no working fd, all the tasks are done.
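
Two behavioral changes in this file are easy to miss among the field renames:
fds that drop out of the ares socket set are now shut down (fd_node_shutdown)
and destroyed only once their read/write callbacks have unregistered, and the
read path keeps calling ares_process_fd() until the kernel reports no unread
bytes. The drain probe is a plain FIONREAD ioctl; a self-contained version
(helper name assumed) would be:

  #include <stdbool.h>
  #include <sys/ioctl.h>

  /* True while the kernel still reports unread bytes on fd, so the caller
     keeps handing the socket back to ares_process_fd(). */
  static bool fd_has_pending_bytes(int fd) {
    int bytes_available = 0;
    return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
  }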
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index e65723a63b..04379975e1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -123,8 +123,8 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
static grpc_ares_hostbyname_request *create_hostbyname_request(
grpc_ares_request *parent_request, char *host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request *hr =
- gpr_zalloc(sizeof(grpc_ares_hostbyname_request));
+ grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
+ sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
hr->port = port;
@@ -158,9 +158,9 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses =
- gpr_realloc((*lb_addresses)->addresses,
- sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
+ (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
+ (*lb_addresses)->addresses,
+ sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@@ -174,7 +174,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
- hr->is_balancer ? strdup(hr->host) : NULL /* balancer_name */,
+ hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET6_ADDRSTRLEN];
ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN);
@@ -195,7 +195,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
grpc_lb_addresses_set_address(
*lb_addresses, i, &addr, addr_len,
hr->is_balancer /* is_balancer */,
- hr->is_balancer ? strdup(hr->host) : NULL /* balancer_name */,
+ hr->is_balancer ? hr->host : NULL /* balancer_name */,
NULL /* user_data */);
char output[INET_ADDRSTRLEN];
ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN);
@@ -275,14 +275,15 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
gpr_log(GPR_DEBUG, "on_txt_done_cb");
char *error_msg;
grpc_ares_request *r = (grpc_ares_request *)arg;
+ const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
+ struct ares_txt_ext *result = NULL;
+ struct ares_txt_ext *reply = NULL;
+ grpc_error *error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
- struct ares_txt_ext *reply = NULL;
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
// Find service config in TXT record.
- const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
- struct ares_txt_ext *result;
for (result = reply; result != NULL; result = result->next) {
if (result->record_start &&
memcmp(result->txt, g_service_config_attribute_prefix, prefix_len) ==
@@ -293,12 +294,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
- *r->service_config_json_out = gpr_realloc(
+ *r->service_config_json_out = (char *)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@@ -313,7 +314,7 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
fail:
gpr_asprintf(&error_msg, "C-ares TXT lookup status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -331,6 +332,9 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
char **service_config_json) {
grpc_error *error = GRPC_ERROR_NONE;
+ grpc_ares_hostbyname_request *hr = NULL;
+ grpc_ares_request *r = NULL;
+ ares_channel *channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -360,7 +364,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- grpc_ares_request *r = gpr_zalloc(sizeof(grpc_ares_request));
+ r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -368,7 +372,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
r->service_config_json_out = service_config_json;
r->success = false;
r->error = GRPC_ERROR_NONE;
- ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
// If dns_server is specified, use it.
if (dns_server != NULL) {
@@ -409,12 +413,12 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
gpr_ref_init(&r->pending_queries, 1);
if (grpc_ipv6_loopback_available()) {
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
- r, host, strhtons(port), false /* is_balancer */);
+ hr = create_hostbyname_request(r, host, strhtons(port),
+ false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr);
}
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
- r, host, strhtons(port), false /* is_balancer */);
+ hr = create_hostbyname_request(r, host, strhtons(port),
+ false /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr);
if (check_grpclb) {
/* Query the SRV record */
@@ -502,10 +506,11 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
- *resolved_addresses = gpr_zalloc(sizeof(grpc_resolved_addresses));
+ *resolved_addresses =
+ (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = gpr_zalloc(sizeof(grpc_resolved_address) *
- (*resolved_addresses)->naddrs);
+ (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
+ sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],
@@ -525,7 +530,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
grpc_resolve_address_ares_request *r =
- gpr_zalloc(sizeof(grpc_resolve_address_ares_request));
+ (grpc_resolve_address_ares_request *)gpr_zalloc(
+ sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 108333047d..38fbea9aac 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_ares_request grpc_ares_request;
/* Asynchronously resolve \a name. Use \a default_port if a port isn't
@@ -65,5 +69,9 @@ grpc_error *grpc_ares_init(void);
it has been called the same number of times as grpc_ares_init(). */
void grpc_ares_cleanup(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index f2587c4520..f2587c4520 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 5ea75f0554..e669b6dfc7 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -16,6 +16,9 @@
*
*/
+#include <grpc/support/port_platform.h>
+
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -24,11 +27,11 @@
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
@@ -67,7 +70,7 @@ typedef struct {
grpc_timer retry_timer;
grpc_closure on_retry;
/** retry backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** currently resolving addresses */
grpc_resolved_addresses *addresses;
@@ -110,7 +113,7 @@ static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
if (!r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
@@ -123,7 +126,7 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
r->next_completion = on_complete;
r->target_result = target_result;
if (r->resolved_version == 0 && !r->resolving) {
- gpr_backoff_reset(&r->backoff_state);
+ grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
} else {
dns_maybe_finish_next_locked(exec_ctx, r);
@@ -150,6 +153,9 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
+ GRPC_ERROR_REF(error);
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
grpc_lb_addresses *addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
@@ -164,23 +170,21 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
- gpr_timespec timeout = gpr_time_sub(next_try, now);
+ grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
+ grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
- if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
- timeout.tv_nsec);
+ if (timeout > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
grpc_combiner_scheduler(r->base.combiner));
- grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now);
+ grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);
}
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
@@ -188,6 +192,7 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
r->resolved_result = result;
r->resolved_version++;
dns_maybe_finish_next_locked(exec_ctx, r);
+ GRPC_ERROR_UNREF(error);
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
@@ -251,11 +256,11 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
- gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
- GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
- GRPC_DNS_RECONNECT_JITTER,
- GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
- GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;
}
@@ -289,7 +294,7 @@ static grpc_resolver_factory *dns_resolver_factory_create() {
return &dns_resolver_factory;
}
-void grpc_resolver_dns_native_init(void) {
+extern "C" void grpc_resolver_dns_native_init(void) {
char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
@@ -307,4 +312,4 @@ void grpc_resolver_dns_native_init(void) {
gpr_free(resolver);
}
-void grpc_resolver_dns_native_shutdown(void) {}
+extern "C" void grpc_resolver_dns_native_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 3ff081a514..ed5b1011fb 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -159,7 +159,7 @@ typedef struct set_response_closure_arg {
static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- set_response_closure_arg* closure_arg = arg;
+ set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
@@ -178,7 +178,8 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
- set_response_closure_arg* closure_arg = gpr_zalloc(sizeof(*closure_arg));
+ set_response_closure_arg* closure_arg =
+ (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
closure_arg->generator = generator;
closure_arg->next_response = grpc_channel_args_copy(next_response);
GRPC_CLOSURE_SCHED(exec_ctx,
@@ -209,7 +210,7 @@ grpc_arg grpc_fake_resolver_response_generator_arg(
grpc_fake_resolver_response_generator* generator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
- arg.key = GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
+ arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
arg.value.pointer.p = generator;
arg.value.pointer.vtable = &response_generator_arg_vtable;
return arg;
@@ -257,8 +258,8 @@ static const grpc_resolver_factory_vtable fake_resolver_factory_vtable = {
static grpc_resolver_factory fake_resolver_factory = {
&fake_resolver_factory_vtable};
-void grpc_resolver_fake_init(void) {
+extern "C" void grpc_resolver_fake_init(void) {
grpc_register_resolver_type(&fake_resolver_factory);
}
-void grpc_resolver_fake_shutdown(void) {}
+extern "C" void grpc_resolver_fake_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index c084ef2a58..95c3bafed8 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
"grpc.fake_resolver.response_generator"
@@ -56,5 +60,9 @@ grpc_fake_resolver_response_generator_ref(
void grpc_fake_resolver_response_generator_unref(
grpc_fake_resolver_response_generator* generator);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
*/
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index 7ceb8f40a1..dda9542325 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -211,7 +211,7 @@ DECL_FACTORY(unix);
DECL_FACTORY(ipv4);
DECL_FACTORY(ipv6);
-void grpc_resolver_sockaddr_init(void) {
+extern "C" void grpc_resolver_sockaddr_init(void) {
grpc_register_resolver_type(&ipv4_resolver_factory);
grpc_register_resolver_type(&ipv6_resolver_factory);
#ifdef GRPC_HAVE_UNIX_SOCKET
@@ -219,4 +219,4 @@ void grpc_resolver_sockaddr_init(void) {
#endif
}
-void grpc_resolver_sockaddr_shutdown(void) {}
+extern "C" void grpc_resolver_sockaddr_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver_factory.c b/src/core/ext/filters/client_channel/resolver_factory.cc
index 6f0a7c1e36..6f0a7c1e36 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.c
+++ b/src/core/ext/filters/client_channel/resolver_factory.cc
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index 6bd7929d4e..c8b2c58db3 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -24,6 +24,10 @@
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
@@ -67,4 +71,8 @@ grpc_resolver *grpc_resolver_factory_create_resolver(
char *grpc_resolver_factory_get_default_authority(
grpc_resolver_factory *factory, grpc_uri *uri);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
diff --git a/src/core/ext/filters/client_channel/resolver_registry.c b/src/core/ext/filters/client_channel/resolver_registry.cc
index 1a0fb0bc3c..1a0fb0bc3c 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.c
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 692490543d..06d0b99a35 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -22,6 +22,10 @@
#include "src/core/ext/filters/client_channel/resolver_factory.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
@@ -66,4 +70,8 @@ char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target);
char *grpc_resolver_factory_add_default_prefix_if_needed(
grpc_exec_ctx *exec_ctx, const char *target);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
diff --git a/src/core/ext/filters/client_channel/retry_throttle.c b/src/core/ext/filters/client_channel/retry_throttle.cc
index 0c7a3ae651..09dcade089 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.c
+++ b/src/core/ext/filters/client_channel/retry_throttle.cc
@@ -99,7 +99,7 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
int max_milli_tokens, int milli_token_ratio,
grpc_server_retry_throttle_data* old_throttle_data) {
grpc_server_retry_throttle_data* throttle_data =
- gpr_malloc(sizeof(*throttle_data));
+ (grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data));
memset(throttle_data, 0, sizeof(*throttle_data));
gpr_ref_init(&throttle_data->refs, 1);
throttle_data->max_milli_tokens = max_milli_tokens;
@@ -131,20 +131,22 @@ static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
//
static void* copy_server_name(void* key, void* unused) {
- return gpr_strdup(key);
+ return gpr_strdup((const char*)key);
}
static long compare_server_name(void* key1, void* key2, void* unused) {
- return strcmp(key1, key2);
+ return strcmp((const char*)key1, (const char*)key2);
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
- grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data* throttle_data =
+ (grpc_server_retry_throttle_data*)value;
return grpc_server_retry_throttle_data_ref(throttle_data);
}
@@ -175,7 +177,8 @@ grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio) {
gpr_mu_lock(&g_mu);
grpc_server_retry_throttle_data* throttle_data =
- gpr_avl_get(g_avl, (char*)server_name, NULL);
+ (grpc_server_retry_throttle_data*)gpr_avl_get(g_avl, (char*)server_name,
+ NULL);
if (throttle_data == NULL) {
// Entry not found. Create a new one.
throttle_data = grpc_server_retry_throttle_data_create(
diff --git a/src/core/ext/filters/client_channel/retry_throttle.h b/src/core/ext/filters/client_channel/retry_throttle.h
index bf99297e98..399383df78 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/src/core/ext/filters/client_channel/retry_throttle.h
@@ -21,6 +21,10 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Tracks retry throttling data for an individual server name.
typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data;
@@ -47,4 +51,8 @@ void grpc_retry_throttle_map_shutdown();
grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
const char* server_name, int max_milli_tokens, int milli_token_ratio);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */
diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.cc
index 5788819331..5710a22178 100644
--- a/src/core/ext/filters/client_channel/subchannel.c
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -18,6 +18,7 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
+#include <inttypes.h>
#include <limits.h>
#include <string.h>
@@ -30,13 +31,14 @@
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
+#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/support/backoff.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/connectivity_state.h"
@@ -116,9 +118,9 @@ struct grpc_subchannel {
external_state_watcher root_external_state_watcher;
/** next connect attempt time */
- gpr_timespec next_attempt;
+ grpc_millis next_attempt;
/** backoff state */
- gpr_backoff backoff_state;
+ grpc_backoff backoff_state;
/** do we have an active alarm? */
bool have_alarm;
/** have we started the backoff loop */
@@ -157,7 +159,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_connected_subchannel *c = arg;
+ grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
@@ -181,7 +183,7 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
@@ -290,21 +292,24 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
- c = gpr_zalloc(sizeof(*c));
+ GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
+ c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
+ c->filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(grpc_channel_filter *) * c->num_filters);
memcpy((void *)c->filters, args->filters,
sizeof(grpc_channel_filter *) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
- grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
+ grpc_resolved_address *addr =
+ (grpc_resolved_address *)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
@@ -339,31 +344,27 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
"grpc.testing.fixed_reconnect_backoff_ms")) {
fixed_reconnect_backoff = true;
initial_backoff_ms = min_backoff_ms = max_backoff_ms =
- grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
+ grpc_channel_arg_get_integer(&c->args->args[i],
+ {initial_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
min_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){min_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {min_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
max_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){max_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {max_backoff_ms, 100, INT_MAX});
} else if (0 == strcmp(c->args->args[i].key,
GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
fixed_reconnect_backoff = false;
initial_backoff_ms = grpc_channel_arg_get_integer(
- &c->args->args[i],
- (grpc_integer_options){initial_backoff_ms, 100, INT_MAX});
+ &c->args->args[i], {initial_backoff_ms, 100, INT_MAX});
}
}
}
- gpr_backoff_init(
+ grpc_backoff_init(
&c->backoff_state, initial_backoff_ms,
fixed_reconnect_backoff ? 1.0
: GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
@@ -400,7 +401,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- external_state_watcher *w = arg;
+ external_state_watcher *w = (external_state_watcher *)arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
@@ -416,7 +417,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -427,8 +428,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
- c->next_attempt =
- gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC));
+ c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
} else {
@@ -463,24 +463,22 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
c->connecting = true;
GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!c->backoff_begun) {
c->backoff_begun = true;
- c->next_attempt = gpr_backoff_begin(&c->backoff_state, now);
+ c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
} else {
GPR_ASSERT(!c->have_alarm);
c->have_alarm = true;
- gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now);
- if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <=
- 0) {
+ const grpc_millis time_til_next =
+ c->next_attempt - grpc_exec_ctx_now(exec_ctx);
+ if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately");
} else {
- gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds",
- time_til_next.tv_sec, time_til_next.tv_nsec);
+ gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now);
+ grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm);
}
}
@@ -501,7 +499,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = gpr_malloc(sizeof(*w));
+ w = (external_state_watcher *)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -533,7 +531,7 @@ void grpc_connected_subchannel_process_transport_op(
static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
grpc_error *error) {
- state_watcher *sw = p;
+ state_watcher *sw = (state_watcher *)p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@@ -623,7 +621,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@@ -660,7 +658,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_subchannel *c = arg;
+ grpc_subchannel *c = (grpc_subchannel *)arg;
grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
@@ -696,7 +694,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_subchannel_call *c = call;
+ grpc_subchannel_call *c = (grpc_subchannel_call *)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
@@ -724,20 +722,14 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call) {
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- return top_elem->filter->get_peer(exec_ctx, top_elem);
-}
-
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call,
- grpc_transport_stream_op_batch *op) {
+ grpc_transport_stream_op_batch *batch) {
GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
- top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, op);
+ GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
+ top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
@@ -756,17 +748,20 @@ grpc_error *grpc_connected_subchannel_create_call(
const grpc_connected_subchannel_call_args *args,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_arena_alloc(
+ *call = (grpc_subchannel_call *)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- const grpc_call_element_args call_args = {.call_stack = callstk,
- .server_transport_data = NULL,
- .context = args->context,
- .path = args->path,
- .start_time = args->start_time,
- .deadline = args->deadline,
- .arena = args->arena};
+ const grpc_call_element_args call_args = {
+ callstk, /* call_stack */
+ NULL, /* server_transport_data */
+ args->context, /* context */
+ args->path, /* path */
+ args->start_time, /* start_time */
+ args->deadline, /* deadline */
+ args->arena, /* arena */
+ args->call_combiner /* call_combiner */
+ };
grpc_error *error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
@@ -811,6 +806,6 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
return grpc_channel_arg_string_create(
- GRPC_ARG_SUBCHANNEL_ADDRESS,
+ (char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}
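
Note: the backoff changes in this file replace gpr_timespec arithmetic with a single grpc_millis deadline compared against grpc_exec_ctx_now(), so "how long until the next attempt" becomes plain integer subtraction. A standalone sketch of that arithmetic follows (hypothetical names; the real grpc_backoff also handles details such as jitter and min/max bounds, omitted here):

/* --- illustrative sketch, not part of the diff --- */
#include <stdint.h>
#include <stdio.h>

typedef int64_t millis; /* stand-in for grpc_millis */

typedef struct {
  millis initial_ms;
  millis max_ms;
  double multiplier;
  millis current_ms; /* current backoff interval */
} backoff;

/* First attempt: next deadline is now + the initial interval. */
static millis backoff_begin(backoff *b, millis now) {
  b->current_ms = b->initial_ms;
  return now + b->current_ms;
}

/* Later attempts: grow the interval geometrically, clamped to the maximum. */
static millis backoff_step(backoff *b, millis now) {
  b->current_ms = (millis)(b->current_ms * b->multiplier);
  if (b->current_ms > b->max_ms) b->current_ms = b->max_ms;
  return now + b->current_ms;
}

int main(void) {
  backoff b = {1000, 120000, 1.6, 0};
  millis now = 0;
  millis next_attempt = backoff_begin(&b, now);
  for (int i = 0; i < 5; i++) {
    now = next_attempt; /* pretend each attempt fails right at its deadline */
    next_attempt = backoff_step(&b, now);
    printf("retry in %lld ms\n", (long long)(next_attempt - now));
  }
  return 0;
}
/* --- end sketch --- */
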
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 6d2abb04df..1cd73f3ff4 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -26,6 +26,10 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Channel arg containing a grpc_resolved_address to connect to.
#define GRPC_ARG_SUBCHANNEL_ADDRESS "grpc.subchannel_address"
@@ -103,9 +107,10 @@ typedef struct {
grpc_polling_entity *pollent;
grpc_slice path;
gpr_timespec start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
grpc_call_context_element *context;
+ grpc_call_combiner *call_combiner;
} grpc_connected_subchannel_call_args;
grpc_error *grpc_connected_subchannel_create_call(
@@ -122,8 +127,8 @@ void grpc_connected_subchannel_process_transport_op(
grpc_connectivity_state grpc_subchannel_check_connectivity(
grpc_subchannel *channel, grpc_error **error);
-/** call notify when the connectivity state of a channel changes from *state.
- Updates *state with the new state of the channel */
+/** Calls notify when the connectivity state of a channel becomes different
+ from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
@@ -150,10 +155,6 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call,
grpc_transport_stream_op_batch *op);
-/** continue querying for peer */
-char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call);
-
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
@@ -191,4 +192,8 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args);
/// Caller is responsible for freeing the string.
grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H */
diff --git a/src/core/ext/filters/client_channel/subchannel_index.c b/src/core/ext/filters/client_channel/subchannel_index.cc
index ababd05d84..1f466ec0b8 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.c
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -34,6 +34,8 @@ static gpr_avl g_subchannel_index;
static gpr_mu g_mu;
+static gpr_refcount g_refcount;
+
struct grpc_subchannel_key {
grpc_subchannel_args args;
};
@@ -43,11 +45,11 @@ static bool g_force_creation = false;
static grpc_subchannel_key *create_key(
const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
- grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
+ grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters =
- gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
+ k->args.filters = (const grpc_channel_filter **)gpr_malloc(
+ sizeof(*k->args.filters) * k->args.filter_count);
memcpy((grpc_channel_filter *)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
@@ -88,46 +90,61 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
static void sck_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- grpc_subchannel_key_destroy(exec_ctx, p);
+ grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p);
}
static void *sck_avl_copy(void *p, void *unused) {
- return subchannel_key_copy(p);
+ return subchannel_key_copy((grpc_subchannel_key *)p);
}
static long sck_avl_compare(void *a, void *b, void *unused) {
- return grpc_subchannel_key_compare(a, b);
+ return grpc_subchannel_key_compare((grpc_subchannel_key *)a,
+ (grpc_subchannel_key *)b);
}
static void scv_avl_destroy(void *p, void *user_data) {
grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, p, "subchannel_index");
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
+ "subchannel_index");
}
static void *scv_avl_copy(void *p, void *unused) {
- GRPC_SUBCHANNEL_WEAK_REF(p, "subchannel_index");
+ GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index");
return p;
}
static const gpr_avl_vtable subchannel_avl_vtable = {
- .destroy_key = sck_avl_destroy,
- .copy_key = sck_avl_copy,
- .compare_keys = sck_avl_compare,
- .destroy_value = scv_avl_destroy,
- .copy_value = scv_avl_copy};
+ sck_avl_destroy, // destroy_key
+ sck_avl_copy, // copy_key
+ sck_avl_compare, // compare_keys
+ scv_avl_destroy, // destroy_value
+ scv_avl_copy // copy_value
+};
void grpc_subchannel_index_init(void) {
g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable);
gpr_mu_init(&g_mu);
+ gpr_ref_init(&g_refcount, 1);
}
void grpc_subchannel_index_shutdown(void) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_mu_destroy(&g_mu);
- gpr_avl_unref(g_subchannel_index, &exec_ctx);
- grpc_exec_ctx_finish(&exec_ctx);
+  // TODO(juanlishen): This refcounting mechanism may lead to memory leakage.
+  // To solve that, we should force polling to flush any pending callbacks,
+  // then shut down safely.
+ grpc_subchannel_index_unref();
+}
+
+void grpc_subchannel_index_unref(void) {
+ if (gpr_unref(&g_refcount)) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_subchannel_index, &exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
}
+void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
+
grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
grpc_subchannel_key *key) {
// Lock, and take a reference to the subchannel index.
@@ -137,7 +154,7 @@ grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- gpr_avl_get(index, key, exec_ctx), "index_find");
+ (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
@@ -159,7 +176,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = gpr_avl_get(index, key, exec_ctx);
+ c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -207,7 +224,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel *c = gpr_avl_get(index, key, exec_ctx);
+ grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;
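
Note: with this change grpc_subchannel_index_shutdown only drops the reference taken at init time; whichever caller releases the last reference via grpc_subchannel_index_unref tears down the AVL and mutex. A self-contained sketch of that last-unref-destroys pattern (C11 atomics and hypothetical names; gpr_refcount provides equivalent semantics):

/* --- illustrative sketch, not part of the diff --- */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_long g_refs;
static char *g_state; /* stand-in for the global index */

static void index_init(void) {
  atomic_store(&g_refs, 1); /* the reference owned by init/shutdown */
  g_state = (char *)malloc(64);
}

static void index_ref(void) { /* only legal while the count is non-zero */
  atomic_fetch_add(&g_refs, 1);
}

static void index_unref(void) {
  if (atomic_fetch_sub(&g_refs, 1) == 1) {
    /* Last reference released: now it is safe to destroy shared state. */
    free(g_state);
    g_state = NULL;
    printf("index destroyed\n");
  }
}

static void index_shutdown(void) { index_unref(); } /* drops the init ref only */

int main(void) {
  index_init();
  index_ref();      /* e.g. a channel that still uses the index */
  index_shutdown(); /* no destruction yet: the channel's ref is live */
  index_unref();    /* channel releases its ref: destruction happens here */
  return 0;
}
/* --- end sketch --- */
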
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 98d882a453..05c3878379 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -21,6 +21,10 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** \file Provides an index of active subchannels so that they can be
shared amongst channels */
@@ -59,6 +63,13 @@ void grpc_subchannel_index_init(void);
/** Shutdown the subchannel index (global) */
void grpc_subchannel_index_shutdown(void);
+/** Increment the refcount (non-zero) of subchannel index (global). */
+void grpc_subchannel_index_ref(void);
+
+/** Decrement the refcount of subchannel index (global). If the refcount drops
+ to zero, unref the subchannel index and destroy its mutex. */
+void grpc_subchannel_index_unref(void);
+
/** \em TEST ONLY.
* If \a force_creation is true, all key comparisons will be false, resulting in
* new subchannels always being created. Otherwise, the keys will be compared as
@@ -71,4 +82,8 @@ void grpc_subchannel_index_shutdown(void);
* force_creation set. */
void grpc_subchannel_index_test_only_set_force_creation(bool force_creation);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
diff --git a/src/core/ext/filters/client_channel/uri_parser.c b/src/core/ext/filters/client_channel/uri_parser.cc
index e841928760..fb4fb8e694 100644
--- a/src/core/ext/filters/client_channel/uri_parser.c
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = gpr_malloc(pfx_len + 1);
+ line_prefix = (char *)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -156,7 +156,8 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
- uri->query_parts_values = gpr_malloc(uri->num_query_parts * sizeof(char **));
+ uri->query_parts_values =
+ (char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char **query_param_parts;
size_t num_query_param_parts;
@@ -269,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
- uri = gpr_zalloc(sizeof(*uri));
+ uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index 05ca2e00e4..e78da5928b 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -22,6 +22,10 @@
#include <stddef.h>
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
char *scheme;
char *authority;
@@ -47,4 +51,8 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
/** destroy a uri */
void grpc_uri_destroy(grpc_uri *uri);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.cc
index 6789903c95..dc194ec068 100644
--- a/src/core/ext/filters/deadline/deadline_filter.c
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -34,75 +34,95 @@
// grpc_deadline_state
//
+// The on_complete callback used when sending a cancel_error batch down the
+// filter stack. Yields the call combiner when the batch returns.
+static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "got on_complete from cancel_stream batch");
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
+}
+
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
+static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
+ grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
+ deadline_state, grpc_schedule_on_exec_ctx));
+ batch->cancel_stream = true;
+ batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+}
+
// Timer callback.
static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
if (error != GRPC_ERROR_CANCELLED) {
- grpc_call_element_signal_error(
- exec_ctx, elem,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
+ grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
+ GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
+ send_cancel_op_in_call_combiner, elem,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &deadline_state->timer_callback, error,
+ "deadline exceeded -- sending cancel_stream op");
+ } else {
+ GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
+ "deadline_timer");
}
- GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
// Starts the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
- gpr_timespec deadline) {
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) {
+ grpc_millis deadline) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) {
return;
}
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
- grpc_deadline_timer_state cur_state;
grpc_closure* closure = NULL;
-retry:
- cur_state =
- (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
- switch (cur_state) {
+ switch (deadline_state->timer_state) {
case GRPC_DEADLINE_STATE_PENDING:
// Note: We do not start the timer if there is already a timer
return;
case GRPC_DEADLINE_STATE_FINISHED:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_FINISHED,
- GRPC_DEADLINE_STATE_PENDING)) {
- // If we've already created and destroyed a timer, we always create a
- // new closure: we have no other guarantee that the inlined closure is
- // not in use (it may hold a pending call to timer_callback)
- closure = GRPC_CLOSURE_CREATE(timer_callback, elem,
- grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ // If we've already created and destroyed a timer, we always create a
+ // new closure: we have no other guarantee that the inlined closure is
+ // not in use (it may hold a pending call to timer_callback)
+ closure =
+ GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx);
break;
case GRPC_DEADLINE_STATE_INITIAL:
- if (gpr_atm_rel_cas(&deadline_state->timer_state,
- GRPC_DEADLINE_STATE_INITIAL,
- GRPC_DEADLINE_STATE_PENDING)) {
- closure =
- GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
- elem, grpc_schedule_on_exec_ctx);
- } else {
- goto retry;
- }
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING;
+ closure =
+ GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback,
+ elem, grpc_schedule_on_exec_ctx);
break;
}
- GPR_ASSERT(closure);
+ GPR_ASSERT(closure != NULL);
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
- grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure);
}
// Cancels the deadline timer.
+// This is called via the call combiner, so access to deadline_state is
+// synchronized.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
- if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING,
- GRPC_DEADLINE_STATE_FINISHED)) {
+ if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
+ deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
} else {
    // timer was either in STATE_INITIAL (nothing to cancel)
@@ -131,26 +151,42 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
// Callback and associated state for starting the timer after call stack
// initialization has been completed.
struct start_timer_after_init_state {
+ bool in_call_combiner;
grpc_call_element* elem;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_closure closure;
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- struct start_timer_after_init_state* state = arg;
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)arg;
+ grpc_deadline_state* deadline_state =
+ (grpc_deadline_state*)state->elem->call_data;
+ if (!state->in_call_combiner) {
+ // We are initially called without holding the call combiner, so we
+ // need to bounce ourselves into it.
+ state->in_call_combiner = true;
+ GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+ &state->closure, GRPC_ERROR_REF(error),
+ "scheduling deadline timer");
+ return;
+ }
start_timer_if_needed(exec_ctx, state->elem, state->deadline);
gpr_free(state);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+ "done scheduling deadline timer");
}
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
- gpr_timespec deadline) {
+ grpc_call_combiner* call_combiner,
+ grpc_millis deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
deadline_state->call_stack = call_stack;
+ deadline_state->call_combiner = call_combiner;
// Deadline will always be infinite on servers, so the timer will only be
// set on clients with a finite deadline.
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
- if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
+ if (deadline != GRPC_MILLIS_INF_FUTURE) {
// When the deadline passes, we indicate the failure by sending down
// an op with cancel_error set. However, we can't send down any ops
// until after the call stack is fully initialized. If we start the
@@ -158,7 +194,8 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
- struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
+ struct start_timer_after_init_state* state =
+ (struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
@@ -174,7 +211,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
}
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- gpr_timespec new_deadline) {
+ grpc_millis new_deadline) {
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
cancel_timer_if_needed(exec_ctx, deadline_state);
start_timer_if_needed(exec_ctx, elem, new_deadline);
@@ -232,7 +269,8 @@ typedef struct server_call_data {
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
const grpc_call_element_args* args) {
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline);
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
+ args->call_combiner, args->deadline);
return GRPC_ERROR_NONE;
}
@@ -310,7 +348,6 @@ const grpc_channel_filter grpc_client_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
@@ -325,7 +362,6 @@ const grpc_channel_filter grpc_server_deadline_filter = {
0, // sizeof(channel_data)
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"deadline",
};
@@ -346,7 +382,7 @@ static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
: true;
}
-void grpc_deadline_filter_init(void) {
+extern "C" void grpc_deadline_filter_init(void) {
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
@@ -355,4 +391,4 @@ void grpc_deadline_filter_init(void) {
maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
}
-void grpc_deadline_filter_shutdown(void) {}
+extern "C" void grpc_deadline_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 420bf7065a..e665dc53ee 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -20,6 +20,10 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/timer.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum grpc_deadline_timer_state {
GRPC_DEADLINE_STATE_INITIAL,
GRPC_DEADLINE_STATE_PENDING,
@@ -31,7 +35,8 @@ typedef enum grpc_deadline_timer_state {
typedef struct grpc_deadline_state {
// We take a reference to the call stack for the timer callback.
grpc_call_stack* call_stack;
- gpr_atm timer_state;
+ grpc_call_combiner* call_combiner;
+ grpc_deadline_timer_state timer_state;
grpc_timer timer;
grpc_closure timer_callback;
// Closure to invoke when the call is complete.
@@ -50,7 +55,9 @@ typedef struct grpc_deadline_state {
// assumes elem->call_data is zero'd
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_call_stack* call_stack,
- gpr_timespec deadline);
+ grpc_call_combiner* call_combiner,
+ grpc_millis deadline);
+
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem);
@@ -61,8 +68,10 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// to ensure that the timer callback is not invoked while it is in the
// process of being reset, which means that attempting to increase the
// deadline may result in the timer being called twice.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- gpr_timespec new_deadline);
+ grpc_millis new_deadline);
// To be called from the client-side filter's start_transport_stream_op_batch()
// method. Ensures that the deadline timer is cancelled when the call
@@ -70,6 +79,8 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
//
// Note: It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
+//
+// Note: Must be called while holding the call combiner.
void grpc_deadline_state_client_start_transport_stream_op_batch(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op_batch* op);
@@ -83,4 +94,8 @@ bool grpc_deadline_checking_enabled(const grpc_channel_args* args);
extern const grpc_channel_filter grpc_client_deadline_filter;
extern const grpc_channel_filter grpc_server_deadline_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H */
diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.cc
index 3ca01a41b5..6208089f2e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.c
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -36,6 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -138,8 +139,8 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -153,8 +154,8 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_error *error) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@@ -215,13 +216,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
return;
}
// There may or may not be more to read, but we don't care. If we got
@@ -233,7 +234,7 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
- char *payload_bytes = gpr_malloc(slice_buffer->length + 1);
+ char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -299,10 +300,9 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
static void hc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
- GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
if (batch->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -414,7 +414,7 @@ static void hc_start_transport_stream_op_batch(
done:
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
+ exec_ctx, calld->send_message_batch, error, calld->call_combiner);
} else if (!batch_will_be_handled_asynchronously) {
grpc_call_next_op(exec_ctx, elem, batch);
}
@@ -426,6 +426,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -535,7 +536,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -551,7 +552,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}
@@ -565,6 +566,5 @@ const grpc_channel_filter grpc_http_client_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-client"};
diff --git a/src/core/ext/filters/http/client/http_client_filter.h b/src/core/ext/filters/http/client/http_client_filter.h
index ec8177c436..9ed8e76915 100644
--- a/src/core/ext/filters/http/client/http_client_filter.h
+++ b/src/core/ext/filters/http/client/http_client_filter.h
@@ -20,10 +20,18 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Processes metadata on the client side for HTTP2 transports */
extern const grpc_channel_filter grpc_http_client_filter;
/* Channel arg to determine maximum size of payload eligible for GET request */
#define GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET "grpc.max_payload_size_for_get"
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_HTTP_CLIENT_FILTER_H */
diff --git a/src/core/ext/filters/http/http_filters_plugin.c b/src/core/ext/filters/http/http_filters_plugin.cc
index a5c1b92054..8f5b856317 100644
--- a/src/core/ext/filters/http/http_filters_plugin.c
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -44,7 +44,7 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter *filtarg = arg;
+ optional_filter *filtarg = (optional_filter *)arg;
const grpc_channel_args *channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
@@ -64,7 +64,7 @@ static bool maybe_add_required_filter(grpc_exec_ctx *exec_ctx,
: true;
}
-void grpc_http_filters_init(void) {
+extern "C" void grpc_http_filters_init(void) {
grpc_register_tracer(&grpc_compression_trace);
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
@@ -86,4 +86,4 @@ void grpc_http_filters_init(void) {
maybe_add_required_filter, (void *)&grpc_http_server_filter);
}
-void grpc_http_filters_shutdown(void) {}
+extern "C" void grpc_http_filters_shutdown(void) {}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index a32819bfe4..f785e1355d 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.c
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -35,35 +35,29 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/static_metadata.h"
-#define INITIAL_METADATA_UNSEEN 0
-#define HAS_COMPRESSION_ALGORITHM 2
-#define NO_COMPRESSION_ALGORITHM 4
-
-#define CANCELLED_BIT ((gpr_atm)1)
+typedef enum {
+ // Initial metadata not yet seen.
+ INITIAL_METADATA_UNSEEN = 0,
+ // Initial metadata seen; compression algorithm set.
+ HAS_COMPRESSION_ALGORITHM,
+ // Initial metadata seen; no compression algorithm set.
+ NO_COMPRESSION_ALGORITHM,
+} initial_metadata_state;
typedef struct call_data {
- grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
grpc_linked_mdelem accept_stream_encoding_storage;
- uint32_t remaining_slice_bytes;
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
-
- /* Atomic recording the state of initial metadata; allowed values:
- INITIAL_METADATA_UNSEEN - initial metadata op not seen
- HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm
- set
- NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm
- set
- pointer - a stalled op containing a send_message that's waiting on initial
- metadata
- pointer | CANCELLED_BIT - request was cancelled with error pointed to */
- gpr_atm send_initial_metadata_state;
-
+ initial_metadata_state send_initial_metadata_state;
+ grpc_error *cancel_error;
+ grpc_closure start_send_message_batch_in_call_combiner;
grpc_transport_stream_op_batch *send_message_batch;
+ grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
grpc_closure *original_send_message_on_complete;
grpc_closure send_message_on_complete;
@@ -88,17 +82,17 @@ typedef struct channel_data {
static bool skip_compression(grpc_call_element *elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
- return 1;
+ return true;
}
if (has_compression_algorithm) {
if (calld->compression_algorithm == GRPC_COMPRESS_NONE) {
- return 1;
+ return true;
}
- return 0; /* we have an actual call-specific algorithm */
+ return false; /* we have an actual call-specific algorithm */
}
/* no per-call compression override */
return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE;
@@ -112,8 +106,8 @@ static grpc_error *process_send_initial_metadata(
static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
- call_data *calld = elem->call_data;
- channel_data *channeld = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
*has_compression_algorithm = false;
grpc_stream_compression_algorithm stream_compression_algorithm =
GRPC_STREAM_COMPRESS_NONE;
@@ -226,6 +220,18 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
+static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
+ call_data *calld = (call_data *)elem->call_data;
+ // Note: The call to grpc_call_next_op() results in yielding the
+ // call combiner, so we need to clear calld->send_message_batch
+ // before we do that.
+ grpc_transport_stream_op_batch *send_message_batch =
+ calld->send_message_batch;
+ calld->send_message_batch = NULL;
+ grpc_call_next_op(exec_ctx, elem, send_message_batch);
+}
+
static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
@@ -234,11 +240,11 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&tmp);
uint32_t send_flags =
calld->send_message_batch->payload->send_message.send_message->flags;
- const bool did_compress = grpc_msg_compress(
- exec_ctx, calld->compression_algorithm, &calld->slices, &tmp);
+ bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
+ &calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name;
+ const char *algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
@@ -252,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name;
+ const char *algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG,
@@ -273,7 +279,19 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
calld->original_send_message_on_complete =
calld->send_message_batch->on_complete;
calld->send_message_batch->on_complete = &calld->send_message_on_complete;
- grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
+ send_message_batch_continue(exec_ctx, elem);
+}
+
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *arg,
+ grpc_error *error) {
+ call_data *calld = (call_data *)arg;
+ if (calld->send_message_batch != NULL) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
+ calld->call_combiner);
+ calld->send_message_batch = NULL;
+ }
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
@@ -293,21 +311,25 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
+static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem) {
call_data *calld = (call_data *)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) return error;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
break;
}
}
- return GRPC_ERROR_NONE;
}
// Async callback for grpc_byte_stream_next().
@@ -315,142 +337,118 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ return;
+ }
error = pull_slice_from_send_message(exec_ctx, calld);
- if (error != GRPC_ERROR_NONE) goto fail;
+ if (error != GRPC_ERROR_NONE) {
+ // Closure callback; does not take ownership of error.
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+ GRPC_ERROR_UNREF(error);
+ return;
+ }
if (calld->slices.length ==
calld->send_message_batch->payload->send_message.send_message->length) {
finish_send_message(exec_ctx, elem);
} else {
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // this function again.
- error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) goto fail;
+ continue_reading_send_message(exec_ctx, elem);
}
- return;
-fail:
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch,
- bool has_compression_algorithm) {
+static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *unused) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- if (!skip_compression(elem, batch->payload->send_message.send_message->flags,
- has_compression_algorithm)) {
- calld->send_message_batch = batch;
- // This will either finish reading all of the data and invoke
- // finish_send_message(), or else it will make an async call to
- // grpc_byte_stream_next(), which will eventually result in calling
- // on_send_message_next_done().
- grpc_error *error = continue_reading_send_message(exec_ctx, elem);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, calld->send_message_batch, error);
- }
+ if (skip_compression(
+ elem,
+ calld->send_message_batch->payload->send_message.send_message->flags,
+ calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
+ send_message_batch_continue(exec_ctx, elem);
} else {
- /* pass control down the stack */
- grpc_call_next_op(exec_ctx, elem, batch);
+ continue_reading_send_message(exec_ctx, elem);
}
}
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
-
+ call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
-
+ // Handle cancel_stream.
if (batch->cancel_stream) {
- // TODO(roth): As part of the upcoming call combiner work, change
- // this to call grpc_byte_stream_shutdown() on the incoming byte
- // stream, to cancel any in-flight calls to grpc_byte_stream_next().
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- gpr_atm cur = gpr_atm_full_xchg(
- &calld->send_initial_metadata_state,
- CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error);
- switch (cur) {
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- case INITIAL_METADATA_UNSEEN:
- break;
- default:
- if ((cur & CANCELLED_BIT) == 0) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, (grpc_transport_stream_op_batch *)cur,
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
- } else {
- GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
- }
- break;
+ GRPC_ERROR_UNREF(calld->cancel_error);
+ calld->cancel_error =
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+ if (calld->send_message_batch != NULL) {
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
+ } else {
+ grpc_byte_stream_shutdown(
+ exec_ctx,
+ calld->send_message_batch->payload->send_message.send_message,
+ GRPC_ERROR_REF(calld->cancel_error));
+ }
}
+ } else if (calld->cancel_error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(
+ exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
+ calld->call_combiner);
+ goto done;
}
-
+ // Handle send_initial_metadata.
if (batch->send_initial_metadata) {
+ GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
grpc_error *error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- error);
- return;
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
+ goto done;
}
- gpr_atm cur;
- retry_send_im:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
- cur != NO_COMPRESSION_ALGORITHM);
- if ((cur & CANCELLED_BIT) == 0) {
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- has_compression_algorithm
- ? HAS_COMPRESSION_ALGORITHM
- : NO_COMPRESSION_ALGORITHM)) {
- goto retry_send_im;
- }
- if (cur != INITIAL_METADATA_UNSEEN) {
- start_send_message_batch(exec_ctx, elem,
- (grpc_transport_stream_op_batch *)cur,
- has_compression_algorithm);
- }
+ calld->send_initial_metadata_state = has_compression_algorithm
+ ? HAS_COMPRESSION_ALGORITHM
+ : NO_COMPRESSION_ALGORITHM;
+ // If we had previously received a batch containing a send_message op,
+ // handle it now. Note that we need to re-enter the call combiner
+ // for this, since we can't send two batches down while holding the
+ // call combiner, since the connected_channel filter (at the bottom of
+ // the call stack) will release the call combiner for each batch it sees.
+ if (calld->send_message_batch != NULL) {
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
+ &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
+ "starting send_message after send_initial_metadata");
}
}
+ // Handle send_message.
if (batch->send_message) {
- gpr_atm cur;
- retry_send:
- cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
- switch (cur) {
- case INITIAL_METADATA_UNSEEN:
- if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
- (gpr_atm)batch)) {
- goto retry_send;
- }
- break;
- case HAS_COMPRESSION_ALGORITHM:
- case NO_COMPRESSION_ALGORITHM:
- start_send_message_batch(exec_ctx, elem, batch,
- cur == HAS_COMPRESSION_ALGORITHM);
- break;
- default:
- if (cur & CANCELLED_BIT) {
- grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, batch,
- GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
- } else {
- /* >1 send_message concurrently */
- GPR_UNREACHABLE_CODE(break);
- }
+ GPR_ASSERT(calld->send_message_batch == NULL);
+ calld->send_message_batch = batch;
+ // If we have not yet seen send_initial_metadata, then we have to
+ // wait. We save the batch in calld and then drop the call
+ // combiner, which we'll have to pick up again later when we get
+ // send_initial_metadata.
+ if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
+ GRPC_CALL_COMBINER_STOP(
+ exec_ctx, calld->call_combiner,
+ "send_message batch pending send_initial_metadata");
+ goto done;
}
+ start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
} else {
- /* pass control down the stack */
+ // Pass control down the stack.
grpc_call_next_op(exec_ctx, elem, batch);
}
-
+done:
GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
@@ -458,16 +456,16 @@ static void compress_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
-
- /* initialize members */
+ call_data *calld = (call_data *)elem->call_data;
+ calld->call_combiner = args->call_combiner;
+ calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
+ GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
+ start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
elem, grpc_schedule_on_exec_ctx);
-
return GRPC_ERROR_NONE;
}
@@ -475,21 +473,16 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- /* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
- gpr_atm imstate =
- gpr_atm_no_barrier_load(&calld->send_initial_metadata_state);
- if (imstate & CANCELLED_BIT) {
- GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT));
- }
+ GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *channeld = elem->channel_data;
+ channel_data *channeld = (channel_data *)elem->channel_data;
/* Configuration for message compression */
channeld->enabled_algorithms_bitset =
@@ -550,6 +543,5 @@ const grpc_channel_filter grpc_message_compress_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
- "compress"};
+ "message_compress"};
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.h b/src/core/ext/filters/http/message_compress/message_compress_filter.h
index c121a391eb..92771d9858 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.h
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.h
@@ -23,6 +23,10 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Compression filter for outgoing data.
*
* See <grpc/compression.h> for the available compression settings.
@@ -47,5 +51,9 @@
extern const grpc_channel_filter grpc_message_compress_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H \
*/
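
The extern "C" guards added to this header (and repeated across the other headers in this commit) are what let the renamed .cc files keep linking against symbols that are still defined with C linkage. A minimal, standalone illustration of the pattern, using hypothetical names (my_header.h, my_flag) rather than the real gRPC symbols:

/* my_header.h */
#ifdef __cplusplus
extern "C" {
#endif

extern const int my_flag; /* definition lives in an object compiled as C */

#ifdef __cplusplus
}
#endif

/* user.cc */
#include "my_header.h"
int read_flag(void) { return my_flag; } /* links, because the declaration
                                           carries C linkage even when the
                                           including file is C++ */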
diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.cc
index b145f12aff..03958136b4 100644
--- a/src/core/ext/filters/http/server/http_server_filter.c
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -32,6 +32,8 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -81,18 +83,18 @@ static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
}
static void add_error(const char *error_name, grpc_error **cumulative,
- grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+ grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
*cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
}
- *cumulative = grpc_error_add_child(*cumulative, new);
+ *cumulative = grpc_error_add_child(*cumulative, new_err);
}
static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_metadata_batch *b) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed processing incoming headers";
@@ -261,8 +263,8 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -274,14 +276,18 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
- GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+ // Re-enter call combiner for recv_message_ready, since the surface
+ // code will release the call combiner for each callback it receives.
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ calld->recv_message_ready, GRPC_ERROR_REF(err),
+ "resuming recv_message_ready from on_complete");
calld->recv_message_ready = NULL;
calld->payload_bin_delivered = true;
}
@@ -290,20 +296,25 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (calld->seen_path_with_query) {
- /* do nothing. This is probably a GET request, and payload will be returned
- in hs_on_complete callback. */
+ // Do nothing. This is probably a GET request, and payload will be
+ // returned in hs_on_complete callback.
+ // Note that we release the call combiner here, so that other
+ // callbacks can run.
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "pausing recv_message_ready until on_complete");
} else {
GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
}
}
-static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->send_initial_metadata) {
grpc_error *error = GRPC_ERROR_NONE;
@@ -323,10 +334,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_initial_metadata.send_initial_metadata));
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
if (op->recv_initial_metadata) {
@@ -359,21 +367,25 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_error *error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
- if (error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
- return;
- }
+ if (error != GRPC_ERROR_NONE) return error;
}
+
+ return GRPC_ERROR_NONE;
}
-static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- GPR_TIMER_BEGIN("hs_start_transport_op", 0);
- hs_mutate_op(exec_ctx, elem, op);
- grpc_call_next_op(exec_ctx, elem, op);
- GPR_TIMER_END("hs_start_transport_op", 0);
+static void hs_start_transport_stream_op_batch(
+ grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_transport_stream_op_batch *op) {
+ call_data *calld = (call_data *)elem->call_data;
+ GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
+ grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
+ calld->call_combiner);
+ } else {
+ grpc_call_next_op(exec_ctx, elem, op);
+ }
+ GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
}
/* Constructor for call_data */
@@ -381,8 +393,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* initialize members */
+ calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem,
@@ -397,7 +410,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
@@ -414,7 +427,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
- hs_start_transport_op,
+ hs_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
init_call_elem,
@@ -423,6 +436,5 @@ const grpc_channel_filter grpc_http_server_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"http-server"};
diff --git a/src/core/ext/filters/http/server/http_server_filter.h b/src/core/ext/filters/http/server/http_server_filter.h
index c0f678a329..4b38cc5bf7 100644
--- a/src/core/ext/filters/http/server/http_server_filter.h
+++ b/src/core/ext/filters/http/server/http_server_filter.h
@@ -21,7 +21,15 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Processes metadata on the server side for HTTP2 transports */
extern const grpc_channel_filter grpc_http_server_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_HTTP_SERVER_HTTP_SERVER_FILTER_H */
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index 08474efb2e..ca8a3b2a13 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -24,8 +24,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
-#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -56,8 +56,8 @@ typedef struct channel_data {
static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@@ -88,7 +88,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -111,7 +111,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -141,7 +141,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -176,8 +176,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_mdelem md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -189,7 +189,7 @@ static void lr_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
@@ -213,7 +213,7 @@ static void lr_start_transport_stream_op_batch(
GPR_TIMER_END("lr_start_transport_stream_op_batch", 0);
}
-const grpc_channel_filter grpc_load_reporting_filter = {
+const grpc_channel_filter grpc_server_load_reporting_filter = {
lr_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(call_data),
@@ -223,6 +223,5 @@ const grpc_channel_filter grpc_load_reporting_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"load_reporting"};
diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.h b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
index 1a5424e43a..94d19cc249 100644
--- a/src/core/ext/filters/load_reporting/load_reporting_filter.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
@@ -16,12 +16,21 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
-#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H
+#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H
+#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_stack.h"
-extern const grpc_channel_filter grpc_load_reporting_filter;
+#ifdef __cplusplus
+extern "C" {
+#endif
-#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_FILTER_H */
+extern const grpc_channel_filter grpc_server_load_reporting_filter;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \
+ */
diff --git a/src/core/ext/filters/load_reporting/load_reporting.c b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
index 9745763c91..223fb3ee8b 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.c
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
@@ -25,8 +25,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
-#include "src/core/ext/filters/load_reporting/load_reporting.h"
-#include "src/core/ext/filters/load_reporting/load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_filter.h"
+#include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/call.h"
@@ -37,28 +37,34 @@ static bool is_load_reporting_enabled(const grpc_channel_args *a) {
grpc_channel_args_find(a, GRPC_ARG_ENABLE_LOAD_REPORTING), false);
}
-static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_server_load_reporting_filter(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- if (is_load_reporting_enabled(args)) {
- return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter->name);
+ const bool already_has_load_reporting_filter =
+ !grpc_channel_stack_builder_iterator_is_end(it);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ if (is_load_reporting_enabled(args) && !already_has_load_reporting_filter) {
+ return grpc_channel_stack_builder_prepend_filter(builder, filter, NULL,
+ NULL);
}
return true;
}
grpc_arg grpc_load_reporting_enable_arg() {
- return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1);
+ return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
+ 1);
}
/* Plugin registration */
-void grpc_load_reporting_plugin_init(void) {
+extern "C" void grpc_server_load_reporting_plugin_init(void) {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
- maybe_add_load_reporting_filter,
- (void *)&grpc_load_reporting_filter);
+ maybe_add_server_load_reporting_filter,
+ (void *)&grpc_server_load_reporting_filter);
}
-void grpc_load_reporting_plugin_shutdown() {}
+extern "C" void grpc_server_load_reporting_plugin_shutdown() {}
diff --git a/src/core/ext/filters/load_reporting/load_reporting.h b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
index fc04d2826a..65e254eb53 100644
--- a/src/core/ext/filters/load_reporting/load_reporting.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
@@ -16,13 +16,17 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H
-#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H
+#ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H
+#define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Identifiers for the invocation point of the user's LR callback */
typedef enum grpc_load_reporting_source {
GRPC_LR_POINT_UNKNOWN = 0,
@@ -55,4 +59,9 @@ typedef struct grpc_load_reporting_call_data {
/** Return a \a grpc_arg enabling load reporting */
grpc_arg grpc_load_reporting_enable_arg();
-#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_LOAD_REPORTING_H */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \
+ */
diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.cc
index 7d748b9c32..ade2e5bc82 100644
--- a/src/core/ext/filters/max_age/max_age_filter.c
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -33,9 +33,9 @@
#define MAX_CONNECTION_AGE_JITTER 0.1
#define MAX_CONNECTION_AGE_INTEGER_OPTIONS \
- (grpc_integer_options) { DEFAULT_MAX_CONNECTION_AGE_MS, 1, INT_MAX }
+ { DEFAULT_MAX_CONNECTION_AGE_MS, 1, INT_MAX }
#define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \
- (grpc_integer_options) { DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
+ { DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX }
typedef struct channel_data {
/* We take a reference to the channel stack for the timer callback */
@@ -56,11 +56,11 @@ typedef struct channel_data {
max_connection_idle */
grpc_timer max_idle_timer;
/* Allowed max time a channel may have no outstanding rpcs */
- gpr_timespec max_connection_idle;
+ grpc_millis max_connection_idle;
/* Allowed max time a channel may exist */
- gpr_timespec max_connection_age;
+ grpc_millis max_connection_age;
/* Allowed grace period after the channel reaches its max age */
- gpr_timespec max_connection_age_grace;
+ grpc_millis max_connection_age_grace;
/* Closure to run when the channel's idle duration reaches max_connection_idle
and should be closed gracefully */
grpc_closure close_max_idle_channel;
@@ -99,10 +99,9 @@ static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_idle_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle),
- &chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &chand->max_idle_timer,
+ grpc_exec_ctx_now(exec_ctx) + chand->max_connection_idle,
+ &chand->close_max_idle_channel);
}
}
@@ -123,10 +122,9 @@ static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
- grpc_timer_init(
- exec_ctx, &chand->max_age_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age),
- &chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &chand->max_age_timer,
+ grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age,
+ &chand->close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
grpc_transport_op* op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &chand->channel_connectivity_changed,
@@ -144,11 +142,12 @@ static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
gpr_mu_lock(&chand->max_age_timer_mu);
chand->max_age_grace_timer_pending = true;
GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
- grpc_timer_init(exec_ctx, &chand->max_age_grace_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- chand->max_connection_age_grace),
- &chand->force_close_max_age_channel,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(
+ exec_ctx, &chand->max_age_grace_timer,
+ chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
+ ? GRPC_MILLIS_INF_FUTURE
+ : grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age_grace,
+ &chand->force_close_max_age_channel);
gpr_mu_unlock(&chand->max_age_timer_mu);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
"max_age start_max_age_grace_timer_after_goaway_op");
@@ -249,7 +248,8 @@ static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
connection storms. Note that the MAX_CONNECTION_AGE option without jitter
would not create connection storms by itself, but if there happened to be a
connection storm it could cause it to repeat at a fixed period. */
-static int add_random_max_connection_age_jitter(int value) {
+static grpc_millis
+add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
/* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and
1 + MAX_CONNECTION_AGE_JITTER */
double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX +
@@ -257,7 +257,9 @@ static int add_random_max_connection_age_jitter(int value) {
double result = multiplier * value;
/* INT_MAX - 0.5 converts the value to float, so that result will not be
cast to int implicitly before the comparison. */
- return result > INT_MAX - 0.5 ? INT_MAX : (int)result;
+ return result > ((double)GRPC_MILLIS_INF_FUTURE) - 0.5
+ ? GRPC_MILLIS_INF_FUTURE
+ : (grpc_millis)result;
}
/* Constructor for call_data. */
@@ -273,7 +275,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
- channel_data* chand = elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
decrease_call_count(exec_ctx, chand);
}
@@ -287,46 +289,36 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
chand->max_age_grace_timer_pending = false;
chand->channel_stack = args->channel_stack;
chand->max_connection_age =
- DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(add_random_max_connection_age_jitter(
- DEFAULT_MAX_CONNECTION_AGE_MS),
- GPR_TIMESPAN);
+ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+ DEFAULT_MAX_CONNECTION_AGE_MS);
chand->max_connection_age_grace =
DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS,
- GPR_TIMESPAN);
- chand->max_connection_idle =
- DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN);
+ ? GRPC_MILLIS_INF_FUTURE
+ : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS;
+ chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : DEFAULT_MAX_CONNECTION_IDLE_MS;
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS);
chand->max_connection_age =
- value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(
- add_random_max_connection_age_jitter(value), GPR_TIMESPAN);
+ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(
+ value);
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0,
- INT_MAX});
+ {DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX});
chand->max_connection_age_grace =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_IDLE_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS);
chand->max_connection_idle =
- value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
}
}
GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel,
@@ -349,8 +341,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
channel_connectivity_changed, chand,
grpc_schedule_on_exec_ctx);
- if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
+ if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) {
/* When the channel reaches its max age, we send down an op with
goaway_error set. However, we can't send down any ops until after the
channel stack is fully initialized. If we start the timer here, we have
@@ -367,8 +358,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
/* Initialize the number of calls as 1, so that the max_idle_timer will not
start until start_max_idle_timer_after_init is invoked. */
gpr_atm_rel_store(&chand->call_count, 1);
- if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) !=
- 0) {
+ if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
GRPC_CHANNEL_STACK_REF(chand->channel_stack,
"max_age start_max_idle_timer_after_init");
GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,
@@ -391,7 +381,6 @@ const grpc_channel_filter grpc_max_age_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"max_age"};
@@ -403,7 +392,7 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
bool enable =
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_AGE_MS),
- MAX_CONNECTION_AGE_INTEGER_OPTIONS) != INT_MAX &&
+ MAX_CONNECTION_AGE_INTEGER_OPTIONS) != INT_MAX ||
grpc_channel_arg_get_integer(
grpc_channel_args_find(channel_args, GRPC_ARG_MAX_CONNECTION_IDLE_MS),
MAX_CONNECTION_IDLE_INTEGER_OPTIONS) != INT_MAX;
@@ -415,10 +404,10 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
}
}
-void grpc_max_age_filter_init(void) {
+extern "C" void grpc_max_age_filter_init(void) {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_max_age_filter, NULL);
}
-void grpc_max_age_filter_shutdown(void) {}
+extern "C" void grpc_max_age_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/max_age/max_age_filter.h b/src/core/ext/filters/max_age/max_age_filter.h
index 68fb4a4ca5..eeeefd695e 100644
--- a/src/core/ext/filters/max_age/max_age_filter.h
+++ b/src/core/ext/filters/max_age/max_age_filter.h
@@ -19,6 +19,14 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_max_age_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_MAX_AGE_MAX_AGE_FILTER_H */
diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.cc
index 846c7df69a..5dc131b9f6 100644
--- a/src/core/ext/filters/message_size/message_size_filter.c
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -68,6 +68,7 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
}
typedef struct call_data {
+ grpc_call_combiner* call_combiner;
message_size_limits limits;
// Receive closures are chained: we inject this closure as the
// recv_message_ready up-call on transport_stream_op, and remember to
@@ -131,7 +132,8 @@ static void start_transport_stream_op_batch(
exec_ctx, op,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_RESOURCE_EXHAUSTED));
+ GRPC_STATUS_RESOURCE_EXHAUSTED),
+ calld->call_combiner);
gpr_free(message_string);
return;
}
@@ -152,6 +154,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
const grpc_call_element_args* args) {
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
+ calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = NULL;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -259,7 +262,6 @@ const grpc_channel_filter grpc_message_size_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"message_size"};
@@ -286,7 +288,7 @@ static bool maybe_add_message_size_filter(grpc_exec_ctx* exec_ctx,
}
}
-void grpc_message_size_filter_init(void) {
+extern "C" void grpc_message_size_filter_init(void) {
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_message_size_filter, NULL);
@@ -298,4 +300,4 @@ void grpc_message_size_filter_init(void) {
maybe_add_message_size_filter, NULL);
}
-void grpc_message_size_filter_shutdown(void) {}
+extern "C" void grpc_message_size_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/message_size/message_size_filter.h b/src/core/ext/filters/message_size/message_size_filter.h
index d3667f7003..da325d6f89 100644
--- a/src/core/ext/filters/message_size/message_size_filter.h
+++ b/src/core/ext/filters/message_size/message_size_filter.h
@@ -19,6 +19,14 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_message_size_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H */
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
index b4d2cb4b8c..f77ed02421 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@@ -177,7 +177,6 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
0,
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"workaround_cronet_compression"};
@@ -197,7 +196,7 @@ static bool register_workaround_cronet_compression(
builder, &grpc_workaround_cronet_compression_filter, NULL, NULL);
}
-void grpc_workaround_cronet_compression_filter_init(void) {
+extern "C" void grpc_workaround_cronet_compression_filter_init(void) {
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, GRPC_WORKAROUND_PRIORITY_HIGH,
register_workaround_cronet_compression, NULL);
@@ -205,4 +204,4 @@ void grpc_workaround_cronet_compression_filter_init(void) {
parse_user_agent);
}
-void grpc_workaround_cronet_compression_filter_shutdown(void) {}
+extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
index 9dae4f0734..c8b07df63e 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
@@ -19,7 +19,15 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_workaround_cronet_compression_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_CRONET_COMPRESSION_FILTER_H \
*/
diff --git a/src/core/ext/filters/workarounds/workaround_utils.c b/src/core/ext/filters/workarounds/workaround_utils.cc
index e600fbee67..e600fbee67 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.c
+++ b/src/core/ext/filters/workarounds/workaround_utils.cc
diff --git a/src/core/ext/filters/workarounds/workaround_utils.h b/src/core/ext/filters/workarounds/workaround_utils.h
index 2ad7a876d5..3913cae6b2 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.h
+++ b/src/core/ext/filters/workarounds/workaround_utils.h
@@ -24,6 +24,10 @@
#define GRPC_WORKAROUND_PRIORITY_HIGH 10001
#define GRPC_WORKAROUND_PROIRITY_LOW 9999
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_workaround_user_agent_md {
bool workaround_active[GRPC_MAX_WORKAROUND_ID];
} grpc_workaround_user_agent_md;
@@ -34,4 +38,8 @@ typedef bool (*user_agent_parser)(grpc_mdelem);
void grpc_register_workaround(uint32_t id, user_agent_parser parser);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H */
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.c b/src/core/ext/transport/chttp2/alpn/alpn.cc
index ca2e801ec8..ca2e801ec8 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.c
+++ b/src/core/ext/transport/chttp2/alpn/alpn.cc
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.h b/src/core/ext/transport/chttp2/alpn/alpn.h
index 379af4b24c..99b928ea59 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.h
+++ b/src/core/ext/transport/chttp2/alpn/alpn.h
@@ -21,6 +21,10 @@
#include <string.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Returns 1 if the version is supported, 0 otherwise. */
int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size);
@@ -31,4 +35,8 @@ size_t grpc_chttp2_num_alpn_versions(void);
* grpc_chttp2_num_alpn_versions()) */
const char *grpc_chttp2_get_alpn_version_index(size_t i);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H */
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
index 983691bbad..74839f2156 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
@@ -93,8 +93,8 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- chttp2_connector *c = args->user_data;
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ chttp2_connector *c = (chttp2_connector *)args->user_data;
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
@@ -115,6 +115,8 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
memset(c->result, 0, sizeof(*c->result));
} else {
+ grpc_endpoint_delete_from_pollset_set(exec_ctx, args->endpoint,
+ c->args.interested_parties);
c->result->transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 1);
GPR_ASSERT(c->result->transport);
@@ -136,6 +138,8 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->handshake_mgr = grpc_handshake_manager_create();
grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args,
c->handshake_mgr);
+ grpc_endpoint_add_to_pollset_set(exec_ctx, c->endpoint,
+ c->args.interested_parties);
grpc_handshake_manager_do_handshake(
exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args,
c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
@@ -143,7 +147,7 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- chttp2_connector *c = arg;
+ chttp2_connector *c = (chttp2_connector *)arg;
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting);
c->connecting = false;
@@ -161,7 +165,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
}
gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, arg);
+ chttp2_connector_unref(exec_ctx, (grpc_connector *)arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
start_handshake_locked(exec_ctx, c);
@@ -198,7 +202,7 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_connect};
grpc_connector *grpc_chttp2_connector_create() {
- chttp2_connector *c = gpr_zalloc(sizeof(*c));
+ chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.h b/src/core/ext/transport/chttp2/client/chttp2_connector.h
index e258892cfc..63f264e0ef 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.h
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.h
@@ -19,8 +19,16 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "src/core/ext/filters/client_channel/connector.h"
grpc_connector* grpc_chttp2_connector_create();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
index cccb347bf1..6410a6043d 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
@@ -55,7 +55,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
- GRPC_ARG_SERVER_URI,
+ (char *)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
index 0346d50b6c..dd88136f7b 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
@@ -42,7 +42,7 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
(target, fd, args));
grpc_arg default_authority_arg = grpc_channel_arg_string_create(
- GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
+ (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority");
grpc_channel_args *final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
index d4580f15f5..fe296cf4ff 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
@@ -86,7 +86,8 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path);
- const char *value = grpc_slice_hash_table_get(targets_info, key);
+ const char *value =
+ (const char *)grpc_slice_hash_table_get(targets_info, key);
if (value != NULL) target_name_to_check = gpr_strdup(value);
grpc_slice_unref_internal(exec_ctx, key);
}
@@ -127,7 +128,8 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(exec_ctx, new_args_from_connector);
}
- grpc_subchannel_args *final_sc_args = gpr_malloc(sizeof(*final_sc_args));
+ grpc_subchannel_args *final_sc_args =
+ (grpc_subchannel_args *)gpr_malloc(sizeof(*final_sc_args));
memcpy(final_sc_args, args, sizeof(*args));
final_sc_args->args = new_args;
return final_sc_args;
@@ -164,7 +166,7 @@ static grpc_channel *client_channel_factory_create_channel(
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
- GRPC_ARG_SERVER_URI,
+ (char *)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
const char *to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args *new_args =
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.cc
index f207155900..7ac7f4ece8 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.cc
@@ -20,6 +20,7 @@
#include <grpc/grpc.h>
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -52,7 +53,7 @@ typedef struct {
} server_state;
typedef struct {
- server_state *server_state;
+ server_state *svr_state;
grpc_pollset *accepting_pollset;
grpc_tcp_server_acceptor *acceptor;
grpc_handshake_manager *handshake_mgr;
@@ -60,10 +61,11 @@ typedef struct {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- server_connection_state *connection_state = args->user_data;
- gpr_mu_lock(&connection_state->server_state->mu);
- if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ server_connection_state *connection_state =
+ (server_connection_state *)args->user_data;
+ gpr_mu_lock(&connection_state->svr_state->mu);
+ if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) {
const char *error_str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);
@@ -88,7 +90,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport *transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
grpc_server_setup_transport(
- exec_ctx, connection_state->server_state->server, transport,
+ exec_ctx, connection_state->svr_state->server, transport,
connection_state->accepting_pollset, args->args);
grpc_chttp2_transport_start_reading(exec_ctx, transport,
args->read_buffer);
@@ -96,11 +98,11 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
}
}
grpc_handshake_manager_pending_list_remove(
- &connection_state->server_state->pending_handshake_mgrs,
+ &connection_state->svr_state->pending_handshake_mgrs,
connection_state->handshake_mgr);
- gpr_mu_unlock(&connection_state->server_state->mu);
+ gpr_mu_unlock(&connection_state->svr_state->mu);
grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
- grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
+ grpc_tcp_server_unref(exec_ctx, connection_state->svr_state->tcp_server);
gpr_free(connection_state->acceptor);
gpr_free(connection_state);
}
@@ -108,7 +110,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
if (state->shutdown) {
gpr_mu_unlock(&state->mu);
@@ -123,8 +125,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
server_connection_state *connection_state =
- gpr_malloc(sizeof(*connection_state));
- connection_state->server_state = state;
+ (server_connection_state *)gpr_malloc(sizeof(*connection_state));
+ connection_state->svr_state = state;
connection_state->accepting_pollset = accepting_pollset;
connection_state->acceptor = acceptor;
connection_state->handshake_mgr = handshake_mgr;
@@ -132,8 +134,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
connection_state->handshake_mgr);
// TODO(roth): We should really get this timeout value from channel
// args instead of hard-coding it.
- const gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(120, GPR_TIMESPAN));
+ const grpc_millis deadline =
+ grpc_exec_ctx_now(exec_ctx) + 120 * GPR_MS_PER_SEC;
grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr,
tcp, state->args, deadline, acceptor,
on_handshake_done, connection_state);
@@ -143,7 +145,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
void *arg, grpc_pollset **pollsets,
size_t pollset_count) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = false;
gpr_mu_unlock(&state->mu);
@@ -153,7 +155,7 @@ static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
@@ -178,7 +180,7 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
grpc_server *server, void *arg,
grpc_closure *destroy_done) {
- server_state *state = arg;
+ server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = true;
state->server_destroy_listener_done = destroy_done;
@@ -200,6 +202,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
grpc_error *err = GRPC_ERROR_NONE;
server_state *state = NULL;
grpc_error **errors = NULL;
+ size_t naddrs = 0;
*port_num = -1;
@@ -208,7 +211,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = gpr_zalloc(sizeof(*state));
+ state = (server_state *)gpr_zalloc(sizeof(*state));
GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
@@ -224,8 +227,8 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
state->shutdown = true;
gpr_mu_init(&state->mu);
- const size_t naddrs = resolved->naddrs;
- errors = gpr_malloc(sizeof(*errors) * naddrs);
+ naddrs = resolved->naddrs;
+ errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =
grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h
index ed968496f9..2ac155160f 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.h
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.h
@@ -23,10 +23,18 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Adds a port to \a server. Sets \a port_num to the port number.
/// Takes ownership of \a args.
grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
grpc_server *server, const char *addr,
grpc_channel_args *args, int *port_num);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
index d42b2d123e..d42b2d123e 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
index e647067f73..e647067f73 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
index 5ad63aaf1f..e74a138d23 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
@@ -40,6 +40,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_error *err = GRPC_ERROR_NONE;
grpc_server_security_connector *sc = NULL;
int port_num = 0;
+ grpc_security_status status;
+ grpc_channel_args *args = NULL;
GRPC_API_TRACE(
"grpc_server_add_secure_http2_port("
"server=%p, addr=%s, creds=%p)",
@@ -50,7 +52,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
"No credentials specified for secure server port (creds==NULL)");
goto done;
}
- grpc_security_status status =
+ status =
grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc);
if (status != GRPC_SECURITY_OK) {
char *msg;
@@ -66,7 +68,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_arg args_to_add[2];
args_to_add[0] = grpc_server_credentials_to_arg(creds);
args_to_add[1] = grpc_security_connector_to_arg(&sc->base);
- grpc_channel_args *args =
+ args =
grpc_channel_args_copy_and_add(grpc_server_get_channel_args(server),
args_to_add, GPR_ARRAY_SIZE(args_to_add));
// Add server port.
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.c b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
index 5a99cbeffc..5a99cbeffc 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h
index 047b33d587..1c0b2b7e97 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h
@@ -22,6 +22,10 @@
#include <grpc/slice.h>
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct grpc_base64_decode_context {
/* input/output: */
uint8_t *input_cur;
@@ -49,4 +53,8 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
grpc_slice input,
size_t output_length);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.c b/src/core/ext/transport/chttp2/transport/bin_encoder.cc
index 42d481b3c0..42d481b3c0 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.cc
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.h b/src/core/ext/transport/chttp2/transport/bin_encoder.h
index a8f36a345a..0be3633354 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.h
@@ -21,6 +21,10 @@
#include <grpc/slice.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* base64 encode a slice. Returns a new slice, does not take ownership of the
input */
grpc_slice grpc_chttp2_base64_encode(grpc_slice input);
@@ -36,4 +40,8 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input);
return y; */
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
index 78551df9c3..ac9ae5c395 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
@@ -20,12 +20,13 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/metadata.h"
-void grpc_chttp2_plugin_init(void) {
+extern "C" void grpc_chttp2_plugin_init(void) {
grpc_register_tracer(&grpc_http_trace);
grpc_register_tracer(&grpc_flowctl_trace);
+ grpc_register_tracer(&grpc_trace_http2_stream_state);
#ifndef NDEBUG
grpc_register_tracer(&grpc_trace_chttp2_refcount);
#endif
}
-void grpc_chttp2_plugin_shutdown(void) {}
+extern "C" void grpc_chttp2_plugin_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 7541bd5c92..02fc53122d 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -18,6 +18,9 @@
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
+#include <grpc/support/port_platform.h>
+
+#include <inttypes.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
@@ -34,6 +37,7 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/compression/stream_compression.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/timer.h"
@@ -50,7 +54,6 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
-#define DEFAULT_WINDOW 65535
#define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024)
#define MAX_WINDOW 0x7fffffffu
#define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024)
@@ -63,6 +66,11 @@
#define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
#define KEEPALIVE_TIME_BACKOFF_MULTIPLIER 2
+#define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */
+#define DEFAULT_MAX_PING_STRIKES 2
+
static int g_default_client_keepalive_time_ms =
DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
static int g_default_client_keepalive_timeout_ms =
@@ -74,6 +82,13 @@ static int g_default_server_keepalive_timeout_ms =
static bool g_default_keepalive_permit_without_calls =
DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
+static int g_default_min_sent_ping_interval_without_data_ms =
+ DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_min_recv_ping_interval_without_data_ms =
+ DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
+static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
+
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http");
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl");
@@ -83,8 +98,6 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
GRPC_TRACER_INITIALIZER(false, "chttp2_refcount");
#endif
-static const grpc_transport_vtable vtable;
-
/* forward declarations of various callbacks that we'll build closures around */
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@@ -138,25 +151,23 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
+static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t);
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
+static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx *exec_ctx,
+ void *tp, grpc_error *error);
static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error);
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
grpc_closure *on_initiate,
grpc_closure *on_complete);
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
-#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
-#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
-#define DEFAULT_MAX_PING_STRIKES 2
-#define DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
-
/** keepalive-relevant functions */
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -210,6 +221,9 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
t->write_cb_pool = next;
}
+ t->flow_control.Destroy();
+
+ GRPC_ERROR_UNREF(t->closed_with_error);
gpr_free(t->ping_acks);
gpr_free(t->peer_string);
gpr_free(t);
@@ -247,6 +261,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
+static const grpc_transport_vtable *get_vtable(void);
+
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, bool is_client) {
@@ -256,7 +272,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
- t->base.vtable = &vtable;
+ t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
@@ -265,9 +281,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
- t->flow_control.remote_window = DEFAULT_WINDOW;
- t->flow_control.announced_window = DEFAULT_WINDOW;
- t->flow_control.t = t;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
@@ -292,6 +305,9 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
+ GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
+ next_bdp_ping_timer_expired_locked, t,
+ grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
t, grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
@@ -304,18 +320,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
- grpc_bdp_estimator_init(&t->flow_control.bdp_estimator, t->peer_string);
- t->flow_control.last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC);
- grpc_pid_controller_init(
- &t->flow_control.pid_controller,
- (grpc_pid_controller_args){.gain_p = 4,
- .gain_i = 8,
- .gain_d = 0,
- .initial_control_value = log2(DEFAULT_WINDOW),
- .min_control_value = -1,
- .max_control_value = 25,
- .integral_range = 10});
-
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
@@ -339,13 +343,11 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
window -- this should by rights be 0 */
t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
t->sent_local_settings = 0;
- t->write_buffer_size = DEFAULT_WINDOW;
- t->flow_control.enable_bdp_probe = true;
+ t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
- grpc_chttp2_initiate_write(exec_ctx, t, "initial_write");
}
/* configure http2 the way we like it */
@@ -354,50 +356,40 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
- queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- DEFAULT_WINDOW);
queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
- t->ping_policy = (grpc_chttp2_repeated_ping_policy){
- .max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA,
- .min_time_between_pings =
- gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN),
- .max_ping_strikes = DEFAULT_MAX_PING_STRIKES,
- .min_ping_interval_without_data = gpr_time_from_millis(
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, GPR_TIMESPAN),
- };
+ t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
+ t->ping_policy.min_sent_ping_interval_without_data =
+ g_default_min_sent_ping_interval_without_data_ms;
+ t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
+ t->ping_policy.min_recv_ping_interval_without_data =
+ g_default_min_recv_ping_interval_without_data_ms;
/* Keepalive setting */
if (t->is_client) {
- t->keepalive_time =
- g_default_client_keepalive_time_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_client_keepalive_time_ms,
- GPR_TIMESPAN);
- t->keepalive_timeout =
- g_default_client_keepalive_timeout_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_client_keepalive_timeout_ms,
- GPR_TIMESPAN);
+ t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_client_keepalive_time_ms;
+ t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_client_keepalive_timeout_ms;
} else {
- t->keepalive_time =
- g_default_server_keepalive_time_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_server_keepalive_time_ms,
- GPR_TIMESPAN);
- t->keepalive_timeout =
- g_default_server_keepalive_timeout_ms == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(g_default_server_keepalive_timeout_ms,
- GPR_TIMESPAN);
+ t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_server_keepalive_time_ms;
+ t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
+ ? GRPC_MILLIS_INF_FUTURE
+ : g_default_server_keepalive_timeout_ms;
}
t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls;
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
+ bool enable_bdp = true;
+
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
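The keepalive fields switch from gpr_timespec to grpc_millis in the hunk above: deadlines become plain millisecond integers on the exec_ctx clock, and an INT_MAX channel-arg value maps to an infinite-future sentinel instead of gpr_inf_future(GPR_TIMESPAN). A reduced sketch of that conversion; the typedef and sentinel below are assumptions standing in for the new time API:

    #include <climits>
    #include <cstdint>

    typedef int64_t grpc_millis;                                  /* assumption */
    static const grpc_millis GRPC_MILLIS_INF_FUTURE = INT64_MAX;  /* assumption */

    /* INT_MAX in the channel arg means "disabled": store the sentinel rather
       than converting to a timespec. */
    static grpc_millis keepalive_time_from_arg(int value_ms) {
      return value_ms == INT_MAX ? GRPC_MILLIS_INF_FUTURE : (grpc_millis)value_ms;
    }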
@@ -427,65 +419,62 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PINGS_BETWEEN_DATA, 0, INT_MAX});
+ {g_default_max_pings_without_data, 0, INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PING_STRIKES, 0, INT_MAX});
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS)) {
- t->ping_policy.min_time_between_pings = gpr_time_from_millis(
+ &channel_args->args[i], {g_default_max_ping_strikes, 0, INT_MAX});
+ } else if (0 ==
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_sent_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, 0,
- INT_MAX}),
- GPR_TIMESPAN);
+ grpc_integer_options{
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX});
} else if (0 ==
- strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS)) {
- t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_recv_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, 0, INT_MAX}),
- GPR_TIMESPAN);
+ grpc_integer_options{
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){0, 0, MAX_WRITE_BUFFER_SIZE});
+ &channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE});
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
- t->flow_control.enable_bdp_probe = grpc_channel_arg_get_integer(
- &channel_args->args[i], (grpc_integer_options){1, 0, 1});
+ enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){t->is_client
- ? g_default_client_keepalive_time_ms
- : g_default_server_keepalive_time_ms,
- 1, INT_MAX});
- t->keepalive_time = value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ grpc_integer_options{t->is_client
+ ? g_default_client_keepalive_time_ms
+ : g_default_server_keepalive_time_ms,
+ 1, INT_MAX});
+ t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){t->is_client
- ? g_default_client_keepalive_timeout_ms
- : g_default_server_keepalive_timeout_ms,
- 0, INT_MAX});
- t->keepalive_timeout = value == INT_MAX
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis(value, GPR_TIMESPAN);
+ grpc_integer_options{t->is_client
+ ? g_default_client_keepalive_timeout_ms
+ : g_default_server_keepalive_timeout_ms,
+ 0, INT_MAX});
+ t->keepalive_timeout =
+ value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls =
- (uint32_t)grpc_channel_arg_get_integer(
- &channel_args->args[i], (grpc_integer_options){0, 0, 1});
+ (uint32_t)grpc_channel_arg_get_integer(&channel_args->args[i],
+ {0, 0, 1});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_OPTIMIZATION_TARGET)) {
if (channel_args->args[i].type != GRPC_ARG_STRING) {
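The (grpc_integer_options){...} casts disappear throughout this hunk because C99 compound literals are not valid C++; the replacement is aggregate initialization, written either as a bare braced argument or as grpc_integer_options{...}. A small stand-alone illustration; the struct and function here are stand-ins, not the real gRPC API:

    struct integer_options {   /* stand-in for grpc_integer_options */
      int default_value;
      int min_value;
      int max_value;
    };

    static int get_integer(int candidate, integer_options options) {
      if (candidate < options.min_value) return options.min_value;
      if (candidate > options.max_value) return options.max_value;
      return candidate;
    }

    int demo() {
      /* C:   get_integer(x, (integer_options){0, 0, 1});
         C++: either form below compiles and means the same thing. */
      int a = get_integer(5, {0, 0, 1});
      int b = get_integer(5, integer_options{0, 0, 1});
      return a + b;
    }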
@@ -556,39 +545,44 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
- GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
- t->opt_target == GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT
- ? grpc_executor_scheduler
- : grpc_schedule_on_exec_ctx);
+ t->flow_control.Init(exec_ctx, t, enable_bdp);
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
+ /* No pings allowed before receiving a header or data frame. */
+ t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
- t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
/* Start keepalive pings */
- if (gpr_time_cmp(t->keepalive_time, gpr_inf_future(GPR_TIMESPAN)) != 0) {
+ if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
inflight keepalive timers */
inflight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
- grpc_chttp2_initiate_write(exec_ctx, t, "init");
+ if (enable_bdp) {
+ GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
+ schedule_bdp_ping_locked(exec_ctx, t);
+
+ grpc_chttp2_act_on_flowctl_action(
+ exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, NULL);
+ }
+
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
post_benign_reclaimer(exec_ctx, t);
}
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
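The (grpc_chttp2_transport *)tp-style casts that start appearing here, and recur through the rest of the file, exist because C++ does not implicitly convert void * to other object pointers the way C does; the combiner callbacks still receive their arguments as void *. In miniature, with hypothetical names:

    #include <cstdlib>

    struct transport_like { int refs; };

    static void callback(void *tp) {
      /* C accepts: struct transport_like *t = tp;  C++ requires a cast. */
      transport_like *t = (transport_like *)tp;
      t->refs++;
    }

    int main() {
      transport_like *t = (transport_like *)calloc(1, sizeof(*t));
      callback(t);   /* passing T* as void* is still implicit in C++ */
      int refs = t->refs;
      free(t);
      return refs == 1 ? 0 : 1;
    }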
@@ -609,7 +603,9 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_error *error) {
- if (!t->closed) {
+ end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
+ cancel_pings(exec_ctx, t, GRPC_ERROR_REF(error));
+ if (t->closed_with_error == GRPC_ERROR_NONE) {
if (!grpc_error_has_clear_grpc_status(error)) {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE);
@@ -624,10 +620,16 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_error_add_child(t->close_transport_on_writes_finished, error);
return;
}
- t->closed = 1;
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
+ t->closed_with_error = GRPC_ERROR_REF(error);
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
- grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+ if (t->ping_state.is_delayed_ping_timer_set) {
+ grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
+ }
+ if (t->have_next_bdp_ping_timer) {
+ grpc_timer_cancel(exec_ctx, &t->next_bdp_ping_timer);
+ }
switch (t->keepalive_state) {
case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
@@ -647,8 +649,8 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close");
}
- end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
- cancel_pings(exec_ctx, t, GRPC_ERROR_REF(error));
+ GPR_ASSERT(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE);
+ grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
@@ -688,12 +690,15 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
- s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ s->deadline = GRPC_MILLIS_INF_FUTURE;
GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_init(&s->frame_storage);
+ grpc_slice_buffer_init(&s->compressed_data_buffer);
+ grpc_slice_buffer_init(&s->decompressed_data_buffer);
s->pending_byte_stream = false;
+ s->decompressed_header_bytes = 0;
GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
grpc_combiner_scheduler(t->combiner));
@@ -706,7 +711,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
post_destructive_reclaimer(exec_ctx, t);
}
- s->flow_control.s = s;
+ s->flow_control.Init(t->flow_control.get(), s);
GPR_TIMER_END("init_stream", 0);
return 0;
@@ -714,7 +719,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@@ -727,14 +732,8 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_slice_buffer_destroy_internal(exec_ctx,
&s->unprocessed_incoming_frames_buffer);
grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage);
- if (s->compressed_data_buffer) {
- grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer);
- gpr_free(s->compressed_data_buffer);
- }
- if (s->decompressed_data_buffer) {
- grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer);
- gpr_free(s->decompressed_data_buffer);
- }
+ grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer);
+ grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer);
grpc_chttp2_list_remove_stalled_by_transport(t, s);
grpc_chttp2_list_remove_stalled_by_stream(t, s);
@@ -763,7 +762,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_ERROR_UNREF(s->write_closed_error);
GRPC_ERROR_UNREF(s->byte_stream_error);
- grpc_chttp2_flowctl_destroy_stream(&t->flow_control, &s->flow_control);
+ s->flow_control.Destroy();
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
@@ -798,7 +797,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id) {
- return grpc_chttp2_stream_map_find(&t->stream_map, id);
+ return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
@@ -850,13 +849,89 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
+static void inc_initiate_write_reason(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_initiate_write_reason reason) {
+ switch (reason) {
+ case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED(
+ exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx);
+ break;
+ case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+ GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx);
+ break;
+ }
+}
+
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason) {
+ grpc_chttp2_transport *t,
+ grpc_chttp2_initiate_write_reason reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
+ inc_initiate_write_reason(exec_ctx, reason);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+ grpc_chttp2_initiate_write_reason_string(reason));
+ t->is_first_write_in_batch = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_SCHED(
exec_ctx,
@@ -867,7 +942,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- reason);
+ grpc_chttp2_initiate_write_reason_string(reason));
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
break;
@@ -875,52 +950,98 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
-void grpc_chttp2_become_writable(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type stream_write_type, const char *reason) {
- if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s) {
+ if (t->closed_with_error == GRPC_ERROR_NONE &&
+ grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
- switch (stream_write_type) {
- case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
- case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
- grpc_chttp2_initiate_write(exec_ctx, t, reason);
- break;
+}
+
+static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
+ bool early_results_scheduled,
+ bool partial_write) {
+ /* if it's not the first write in a batch, always offload to the executor:
+ we'll probably end up queuing against the kernel anyway, so we'll likely
+ get better latency overall if we switch writing work elsewhere and continue
+ with application work above */
+ if (!t->is_first_write_in_batch) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ }
+ /* equivalently, if it's a partial write, we *know* we're going to be taking a
+ thread jump to write it because of the above, may as well do so
+ immediately */
+ if (partial_write) {
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ }
+ switch (t->opt_target) {
+ case GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT:
+ /* executor gives us the largest probability of being able to batch a
+ * write with others on this transport */
+ return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
+ case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY:
+ return grpc_schedule_on_exec_ctx;
+ }
+ GPR_UNREACHABLE_CODE(return NULL);
+}
+
+#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
+static const char *begin_writing_desc(bool partial, bool inlined) {
+ switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
+ case WRITE_STATE_TUPLE_TO_INT(false, false):
+ return "begin write in background";
+ case WRITE_STATE_TUPLE_TO_INT(false, true):
+ return "begin write in current thread";
+ case WRITE_STATE_TUPLE_TO_INT(true, false):
+ return "begin partial write in background";
+ case WRITE_STATE_TUPLE_TO_INT(true, true):
+ return "begin partial write in current thread";
}
+ GPR_UNREACHABLE_CODE(return "bad state tuple");
}
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
- switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
- : grpc_chttp2_begin_write(exec_ctx, t)) {
- case GRPC_CHTTP2_NOTHING_TO_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
- "begin writing nothing");
- GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
- break;
- case GRPC_CHTTP2_PARTIAL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
- "begin writing partial");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
- case GRPC_CHTTP2_FULL_WRITE:
- set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "begin writing");
- GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
- break;
+ grpc_chttp2_begin_write_result r;
+ if (t->closed_with_error != GRPC_ERROR_NONE) {
+ r.writing = false;
+ } else {
+ r = grpc_chttp2_begin_write(exec_ctx, t);
+ }
+ if (r.writing) {
+ if (r.partial) {
+ GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
+ }
+ if (!t->is_first_write_in_batch) {
+ GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
+ }
+ grpc_closure_scheduler *scheduler =
+ write_scheduler(t, r.early_results_scheduled, r.partial);
+ if (scheduler != grpc_schedule_on_exec_ctx) {
+ GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
+ }
+ set_write_state(
+ exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
+ : GRPC_CHTTP2_WRITE_STATE_WRITING,
+ begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
+ write_action, t, scheduler),
+ GRPC_ERROR_NONE);
+ } else {
+ GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
+ set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+ "begin writing nothing");
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
- grpc_chttp2_transport *t = gt;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
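write_action_begin_locked now chooses where the endpoint write actually runs: continuation writes in a batch and partial writes go to the executor (the thread will likely block against the kernel anyway), while a first, complete write on a latency-tuned transport stays inline on the current thread. The decision reduces to roughly the following sketch; the enum and scheduler names are placeholders, not the real gRPC types:

    enum class Target { THROUGHPUT, LATENCY };
    enum class Scheduler { INLINE, EXECUTOR };

    static Scheduler pick_write_scheduler(bool first_write_in_batch,
                                          bool partial_write, Target target) {
      if (!first_write_in_batch) return Scheduler::EXECUTOR;  /* batch continuation */
      if (partial_write) return Scheduler::EXECUTOR;          /* will thread-jump anyway */
      return target == Target::THROUGHPUT ? Scheduler::EXECUTOR : Scheduler::INLINE;
    }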
@@ -932,7 +1053,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -957,7 +1078,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
- "continue writing [!covered]");
+ "continue writing");
+ t->is_first_write_in_batch = false;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_RUN(
exec_ctx,
@@ -1009,14 +1131,12 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR,
"Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug "
"data equal to \"too_many_pings\"");
- double current_keepalive_time_ms =
- gpr_timespec_to_micros(t->keepalive_time) / 1000;
+ double current_keepalive_time_ms = (double)t->keepalive_time;
t->keepalive_time =
current_keepalive_time_ms > INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER
- ? gpr_inf_future(GPR_TIMESPAN)
- : gpr_time_from_millis((int64_t)(current_keepalive_time_ms *
- KEEPALIVE_TIME_BACKOFF_MULTIPLIER),
- GPR_TIMESPAN);
+ ? GRPC_MILLIS_INF_FUTURE
+ : (grpc_millis)(current_keepalive_time_ms *
+ KEEPALIVE_TIME_BACKOFF_MULTIPLIER);
}
/* lie: use transient failure from the transport to indicate goaway has been
@@ -1059,9 +1179,9 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
post_destructive_reclaimer(exec_ctx, t);
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "new_stream");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM);
}
/* cancel out streams that will never be started */
while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@@ -1110,12 +1230,14 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
- gpr_log(GPR_DEBUG,
- "complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
- closure,
- (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
- (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
- desc, errstr);
+ gpr_log(
+ GPR_DEBUG,
+ "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
+ "write_state=%s",
+ t, closure,
+ (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
+ (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc,
+ errstr, write_state_name(t->write_state));
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@@ -1156,9 +1278,9 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "op.send_message");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
}
}
@@ -1190,15 +1312,19 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
} else {
grpc_chttp2_write_cb *cb = t->write_cb_pool;
if (cb == NULL) {
- cb = gpr_malloc(sizeof(*cb));
+ cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
- cb->next = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb;
+ grpc_chttp2_write_cb **list =
+ s->fetching_send_message->flags & GRPC_WRITE_THROUGH
+ ? &s->on_write_finished_cbs
+ : &s->on_flow_controlled_cbs;
+ cb->next = *list;
+ *list = cb;
}
s->fetching_send_message = NULL;
return; /* early out */
@@ -1218,7 +1344,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error) {
- grpc_chttp2_stream *s = gs;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
grpc_chttp2_transport *t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
@@ -1253,11 +1379,14 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
- grpc_transport_stream_op_batch *op = stream_op;
- grpc_chttp2_stream *s = op->handler_private.extra_arg;
+ grpc_transport_stream_op_batch *op =
+ (grpc_transport_stream_op_batch *)stream_op;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
+ GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx);
+
if (GRPC_TRACER_ON(grpc_http_trace)) {
char *str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
@@ -1291,20 +1420,25 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->cancel_stream) {
+ GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx);
grpc_chttp2_cancel_stream(exec_ctx, t, s,
op_payload->cancel_stream.cancel_error);
}
if (op->send_initial_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx);
GPR_ASSERT(s->send_initial_metadata_finished == NULL);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
/* Identify stream compression */
- if ((s->stream_compression_send_enabled =
- (op_payload->send_initial_metadata.send_initial_metadata->idx.named
- .content_encoding != NULL)) == true) {
- s->compressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
- grpc_slice_buffer_init(s->compressed_data_buffer);
+ if (op_payload->send_initial_metadata.send_initial_metadata->idx.named
+ .content_encoding == NULL ||
+ grpc_stream_compression_method_parse(
+ GRPC_MDVALUE(
+ op_payload->send_initial_metadata.send_initial_metadata->idx
+ .named.content_encoding->md),
+ true, &s->stream_compression_method) == 0) {
+ s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
}
s->send_initial_metadata_finished = add_closure_barrier(on_complete);
@@ -1316,8 +1450,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
if (t->is_client) {
- s->deadline =
- gpr_time_min(s->deadline, s->send_initial_metadata->deadline);
+ s->deadline = GPR_MIN(s->deadline, s->send_initial_metadata->deadline);
}
if (metadata_size > metadata_peer_limit) {
grpc_chttp2_cancel_stream(
@@ -1337,7 +1470,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (!s->write_closed) {
if (t->is_client) {
- if (!t->closed) {
+ if (t->closed_with_error == GRPC_ERROR_NONE) {
GPR_ASSERT(s->id == 0);
grpc_chttp2_list_add_waiting_for_concurrency(t, s);
maybe_start_some_streams(exec_ctx, t);
@@ -1345,20 +1478,19 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"),
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Transport closed", &t->closed_with_error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);
- grpc_chttp2_stream_write_type write_type =
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED;
- if (op->send_message &&
- (op->payload->send_message.send_message->flags &
- GRPC_WRITE_BUFFER_HINT)) {
- write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ if (!(op->send_message &&
+ (op->payload->send_message.send_message->flags &
+ GRPC_WRITE_BUFFER_HINT))) {
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA);
}
- grpc_chttp2_become_writable(exec_ctx, t, s, write_type,
- "op.send_initial_metadata");
}
} else {
s->send_initial_metadata = NULL;
@@ -1370,17 +1502,31 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"send_initial_metadata_finished");
}
}
+ if (op_payload->send_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
}
if (op->send_message) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx);
+ GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(
+ exec_ctx, op->payload->send_message.send_message->length);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
if (s->write_closed) {
+ // Return an error unless the client has already received trailing
+ // metadata from the server, since an application using a
+ // streaming call might send another message before getting a
+ // recv_message failure, breaking out of its loop, and then
+ // starting recv_trailing_metadata.
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Attempt to send message after stream was closed",
- &s->write_closed_error, 1),
+ t->is_client && s->received_trailing_metadata
+ ? GRPC_ERROR_NONE
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Attempt to send message after stream was closed",
+ &s->write_closed_error, 1),
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
@@ -1410,6 +1556,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
if (op->send_trailing_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx);
GPR_ASSERT(s->send_trailing_metadata_finished == NULL);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
@@ -1451,14 +1598,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "op.send_trailing_metadata");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA);
}
}
}
if (op->recv_initial_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx);
GPR_ASSERT(s->recv_initial_metadata_ready == NULL);
s->recv_initial_metadata_ready =
op_payload->recv_initial_metadata.recv_initial_metadata_ready;
@@ -1466,10 +1614,15 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
op_payload->recv_initial_metadata.recv_initial_metadata;
s->trailing_metadata_available =
op_payload->recv_initial_metadata.trailing_metadata_available;
+ if (op_payload->recv_initial_metadata.peer_string != NULL) {
+ gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
+ (gpr_atm)gpr_strdup(t->peer_string));
+ }
grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
}
if (op->recv_message) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx);
size_t already_received;
GPR_ASSERT(s->recv_message_ready == NULL);
GPR_ASSERT(!s->pending_byte_stream);
@@ -1478,19 +1631,17 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->id != 0) {
if (!s->read_closed) {
already_received = s->frame_storage.length;
- grpc_chttp2_flowctl_incoming_bs_update(
- &t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES,
- already_received);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control),
- t, s);
+ s->flow_control->IncomingByteStreamUpdate(GRPC_HEADER_SIZE_IN_BYTES,
+ already_received);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx,
+ s->flow_control->MakeAction(), t, s);
}
}
grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
}
if (op->recv_trailing_metadata) {
+ GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx);
GPR_ASSERT(s->recv_trailing_metadata_finished == NULL);
s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
s->recv_trailing_metadata =
@@ -1515,16 +1666,14 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
if (!t->is_client) {
if (op->send_initial_metadata) {
- gpr_timespec deadline =
+ grpc_millis deadline =
op->payload->send_initial_metadata.send_initial_metadata->deadline;
- GPR_ASSERT(0 ==
- gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
+ GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
if (op->send_trailing_metadata) {
- gpr_timespec deadline =
+ grpc_millis deadline =
op->payload->send_trailing_metadata.send_trailing_metadata->deadline;
- GPR_ASSERT(0 ==
- gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline));
+ GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE);
}
}
@@ -1548,39 +1697,43 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
/* callback remaining pings: they're not allowed to call into the transport,
and maybe they hold resources that need to be freed */
- for (size_t i = 0; i < GRPC_CHTTP2_PING_TYPE_COUNT; i++) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[i];
- for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
- grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
- }
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
+ for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
+ grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
}
GRPC_ERROR_UNREF(error);
}
static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
grpc_closure *on_initiate, grpc_closure *on_ack) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
+ if (t->closed_with_error != GRPC_ERROR_NONE) {
+ GRPC_CLOSURE_SCHED(exec_ctx, on_initiate,
+ GRPC_ERROR_REF(t->closed_with_error));
+ GRPC_CLOSURE_SCHED(exec_ctx, on_ack, GRPC_ERROR_REF(t->closed_with_error));
+ return;
+ }
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
GRPC_ERROR_NONE);
- if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
- GRPC_ERROR_NONE)) {
- grpc_chttp2_initiate_write(exec_ctx, t, "send_ping");
- }
+ grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
+ GRPC_ERROR_NONE);
}
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
- grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
+ if (error == GRPC_ERROR_NONE) {
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+ }
}
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint64_t id) {
- grpc_chttp2_ping_queue *pq =
- &t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT];
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
if (pq->inflight_id != id) {
char *from = grpc_endpoint_get_peer(t->ep);
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id);
@@ -1589,7 +1742,8 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
- grpc_chttp2_initiate_write(exec_ctx, t, "continue_pings");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS);
}
}
@@ -1598,17 +1752,18 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
grpc_slice slice;
- grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL,
- &slice, &http_error);
+ grpc_error_get_status(exec_ctx, error, GRPC_MILLIS_INF_FUTURE, NULL, &slice,
+ &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
grpc_slice_ref_internal(slice), &t->qbuf);
- grpc_chttp2_initiate_write(exec_ctx, t, "goaway_sent");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
- gpr_log(GPR_DEBUG, "PING strike");
+ t->ping_recv_state.ping_strikes++;
if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes &&
t->ping_policy.max_ping_strikes != 0) {
send_goaway(exec_ctx, t,
@@ -1617,15 +1772,18 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
/*The transport will be closed after the write is done */
close_transport_locked(
- exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"));
+ exec_ctx, t, grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
}
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
- grpc_transport_op *op = stream_op;
- grpc_chttp2_transport *t = op->handler_private.extra_arg;
+ grpc_transport_op *op = (grpc_transport_op *)stream_op;
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->goaway_error) {
@@ -1647,8 +1805,9 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
}
if (op->send_ping) {
- send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL,
- op->send_ping);
+ send_ping_locked(exec_ctx, t, NULL, op->send_ping);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}
if (op->on_connectivity_state_change != NULL) {
@@ -1724,20 +1883,22 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
&s->frame_storage);
s->unprocessed_incoming_frames_decompressed = false;
}
- if (s->stream_compression_recv_enabled &&
- !s->unprocessed_incoming_frames_decompressed) {
- GPR_ASSERT(s->decompressed_data_buffer->length == 0);
+ if (!s->unprocessed_incoming_frames_decompressed &&
+ s->stream_decompression_method !=
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS) {
+ GPR_ASSERT(s->decompressed_data_buffer.length == 0);
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx =
grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
- if (!grpc_stream_decompress(s->stream_decompression_ctx,
- &s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer, NULL,
- GRPC_HEADER_SIZE_IN_BYTES,
- &end_of_context)) {
+ if (!grpc_stream_decompress(
+ s->stream_decompression_ctx,
+ &s->unprocessed_incoming_frames_buffer,
+ &s->decompressed_data_buffer, NULL,
+ GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes,
+ &end_of_context)) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
&s->frame_storage);
grpc_slice_buffer_reset_and_unref_internal(
@@ -1745,9 +1906,13 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Stream decompression error.");
} else {
+ s->decompressed_header_bytes += s->decompressed_data_buffer.length;
+ if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) {
+ s->decompressed_header_bytes = 0;
+ }
error = grpc_deframe_unprocessed_incoming_frames(
- exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL,
- s->recv_message);
+ exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer,
+ NULL, s->recv_message);
if (end_of_context) {
grpc_stream_compression_context_destroy(
s->stream_decompression_ctx);
@@ -1796,15 +1961,14 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
bool pending_data = s->pending_byte_stream ||
s->unprocessed_incoming_frames_buffer.length > 0;
- if (s->stream_compression_recv_enabled && s->read_closed &&
- s->frame_storage.length > 0 && !pending_data && !s->seen_error &&
- s->recv_trailing_metadata_finished != NULL) {
+ if (s->read_closed && s->frame_storage.length > 0 && !pending_data &&
+ !s->seen_error && s->recv_trailing_metadata_finished != NULL) {
/* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and
* maybe decompress the next 5 bytes in the stream. */
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->frame_storage,
@@ -1817,6 +1981,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
} else {
if (s->unprocessed_incoming_frames_buffer.length > 0) {
s->unprocessed_incoming_frames_decompressed = true;
+ pending_data = true;
}
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
@@ -1824,8 +1989,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
}
}
- if (s->read_closed && s->frame_storage.length == 0 &&
- (!pending_data || s->seen_error) &&
+ if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
s->recv_trailing_metadata_finished != NULL) {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
@@ -1838,7 +2002,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@@ -1889,11 +2054,13 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
if (!s->read_closed || !s->write_closed) {
if (s->id != 0) {
grpc_http2_error_code http_error;
- grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error);
+ grpc_error_get_status(exec_ctx, due_to_error, s->deadline, NULL, NULL,
+ &http_error);
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, "rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
}
}
if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
@@ -1906,7 +2073,7 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_status_code status;
grpc_slice slice;
- grpc_error_get_status(error, s->deadline, &status, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL);
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
@@ -1969,6 +2136,21 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
+static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
+ grpc_error *error) {
+ while (*list) {
+ grpc_chttp2_write_cb *cb = *list;
+ *list = cb->next;
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
+ GRPC_ERROR_REF(error),
+ "on_write_finished_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+ }
+ GRPC_ERROR_UNREF(error);
+}
+
void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
@@ -1988,16 +2170,9 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
- while (s->on_write_finished_cbs) {
- grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
- s->on_write_finished_cbs = cb->next;
- grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
- GRPC_ERROR_REF(error),
- "on_write_finished_cb");
- cb->next = t->write_cb_pool;
- t->write_cb_pool = cb;
- }
- GRPC_ERROR_UNREF(error);
+ flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
+ GRPC_ERROR_REF(error));
+ flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
@@ -2063,7 +2238,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_slice slice;
- grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, s->deadline, &grpc_status, &slice,
+ NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@@ -2206,7 +2382,8 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
&s->stats.outgoing));
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
- grpc_chttp2_initiate_write(exec_ctx, t, "close_from_api");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
}
typedef struct {
@@ -2216,8 +2393,8 @@ typedef struct {
} cancel_stream_cb_args;
static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
- cancel_stream_cb_args *args = user_data;
- grpc_chttp2_stream *s = stream;
+ cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
@@ -2233,56 +2410,44 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
* INPUT PROCESSING - PARSING
*/
-void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_flowctl_action action,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- switch (action.send_stream_update) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
- break;
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- "immediate stream flowctl");
- break;
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- grpc_chttp2_become_writable(exec_ctx, t, s,
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
- "queue stream flowctl");
+template <class F>
+static void WithUrgency(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_core::chttp2::FlowControlAction::Urgency urgency,
+ grpc_chttp2_initiate_write_reason reason, F action) {
+ switch (urgency) {
+ case grpc_core::chttp2::FlowControlAction::Urgency::NO_ACTION_NEEDED:
break;
- }
- switch (action.send_transport_update) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
- break;
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- grpc_chttp2_initiate_write(exec_ctx, t, "immediate transport flowctl");
- break;
- // this is the same as no action b/c every time the transport enters the
- // writing path it will maybe do an update
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
+ case grpc_core::chttp2::FlowControlAction::Urgency::UPDATE_IMMEDIATELY:
+ grpc_chttp2_initiate_write(exec_ctx, t, reason);
+ // fallthrough
+ case grpc_core::chttp2::FlowControlAction::Urgency::QUEUE_UPDATE:
+ action();
break;
}
- if (action.send_setting_update != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- if (action.initial_window_size > 0) {
- queue_setting_update(exec_ctx, t,
- GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
- (uint32_t)action.initial_window_size);
- }
- if (action.max_frame_size > 0) {
- queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
- (uint32_t)action.max_frame_size);
- }
- if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) {
- grpc_chttp2_initiate_write(exec_ctx, t, "immediate setting update");
- }
- }
- if (action.need_ping) {
- GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
- grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator);
- send_ping_locked(exec_ctx, t,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
- &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked);
- }
+}
+
+void grpc_chttp2_act_on_flowctl_action(
+ grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s) {
+ WithUrgency(
+ exec_ctx, t, action.send_stream_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
+ [exec_ctx, t, s]() { grpc_chttp2_mark_stream_writable(exec_ctx, t, s); });
+ WithUrgency(exec_ctx, t, action.send_transport_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, []() {});
+ WithUrgency(exec_ctx, t, action.send_initial_window_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
+ [exec_ctx, t, &action]() {
+ queue_setting_update(exec_ctx, t,
+ GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+ action.initial_window_size());
+ });
+ WithUrgency(
+ exec_ctx, t, action.send_max_frame_size_update(),
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [exec_ctx, t, &action]() {
+ queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+ action.max_frame_size());
+ });
}
static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
@@ -2319,7 +2484,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
GRPC_ERROR_REF(error);
@@ -2332,14 +2497,13 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
}
GPR_SWAP(grpc_error *, err, error);
GRPC_ERROR_UNREF(err);
- if (!t->closed) {
+ if (t->closed_with_error == GRPC_ERROR_NONE) {
GPR_TIMER_BEGIN("reading_action.parse", 0);
size_t i = 0;
grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
- grpc_bdp_estimator_add_incoming_bytes(
- &t->flow_control.bdp_estimator,
+ t->flow_control->bdp_estimator()->AddIncomingBytes(
(int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i]));
errors[1] =
grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
@@ -2356,29 +2520,31 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("reading_action.parse", 0);
GPR_TIMER_BEGIN("post_parse_locked", 0);
- if (t->flow_control.initial_window_update != 0) {
- if (t->flow_control.initial_window_update > 0) {
+ if (t->initial_window_update != 0) {
+ if (t->initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "unstalled");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
}
}
- t->flow_control.initial_window_update = 0;
+ t->initial_window_update = 0;
}
GPR_TIMER_END("post_parse_locked", 0);
}
GPR_TIMER_BEGIN("post_reading_action_locked", 0);
bool keep_reading = false;
- if (error == GRPC_ERROR_NONE && t->closed) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed");
+ if (error == GRPC_ERROR_NONE && t->closed_with_error != GRPC_ERROR_NONE) {
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Transport closed", &t->closed_with_error, 1);
}
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
t->endpoint_reading = 0;
- } else if (!t->closed) {
+ } else if (t->closed_with_error == GRPC_ERROR_NONE) {
keep_reading = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading");
}
@@ -2387,9 +2553,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (keep_reading) {
grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
&t->read_action_locked);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_bdp_action(&t->flow_control), t,
- NULL);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, t->flow_control->MakeAction(),
+ t, NULL);
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
} else {
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@@ -2402,28 +2567,60 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("reading_action_locked", 0);
}
+// t is reffed prior to calling the first time, and once the callback chain
+// that it kicks off finishes, it is unreffed
+static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ t->flow_control->bdp_estimator()->SchedulePing();
+ send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
+ &t->finish_bdp_ping_locked);
+}
+
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
- gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
+ gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
+ grpc_error_string(error));
}
/* Reset the keepalive ping timer */
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) {
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
}
- grpc_bdp_estimator_start_ping(&t->flow_control.bdp_estimator);
+ t->flow_control->bdp_estimator()->StartPing();
}
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
- gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
+ gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
+ grpc_error_string(error));
}
- grpc_bdp_estimator_complete_ping(&t->flow_control.bdp_estimator);
-
- GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
+ if (error != GRPC_ERROR_NONE) {
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
+ return;
+ }
+ grpc_millis next_ping =
+ t->flow_control->bdp_estimator()->CompletePing(exec_ctx);
+ grpc_chttp2_act_on_flowctl_action(
+ exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, nullptr);
+ GPR_ASSERT(!t->have_next_bdp_ping_timer);
+ t->have_next_bdp_ping_timer = true;
+ grpc_timer_init(exec_ctx, &t->next_bdp_ping_timer, next_ping,
+ &t->next_bdp_ping_timer_expired_locked);
+}
+
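+// Fires when the delay chosen by the BDP estimator elapses: schedules the
+// next BDP ping, or drops the "bdp_ping" transport ref if the timer was
+// cancelled (e.g. at shutdown).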
+static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx *exec_ctx,
+ void *tp, grpc_error *error) {
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+ GPR_ASSERT(t->have_next_bdp_ping_timer);
+ t->have_next_bdp_ping_timer = false;
+ if (error != GRPC_ERROR_NONE) {
+ GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
+ return;
+ }
+ schedule_bdp_ping_locked(exec_ctx, t);
}
void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
@@ -2433,9 +2630,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
for (i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
- &args->args[i],
- (grpc_integer_options){g_default_client_keepalive_time_ms, 1,
- INT_MAX});
+ &args->args[i], {g_default_client_keepalive_time_ms, 1, INT_MAX});
if (is_client) {
g_default_client_keepalive_time_ms = value;
} else {
@@ -2445,8 +2640,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&args->args[i],
- (grpc_integer_options){g_default_client_keepalive_timeout_ms, 0,
- INT_MAX});
+ {g_default_client_keepalive_timeout_ms, 0, INT_MAX});
if (is_client) {
g_default_client_keepalive_timeout_ms = value;
} else {
@@ -2457,8 +2651,31 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
g_default_keepalive_permit_without_calls =
(uint32_t)grpc_channel_arg_get_integer(
&args->args[i],
- (grpc_integer_options){g_default_keepalive_permit_without_calls,
- 0, 1});
+ {g_default_keepalive_permit_without_calls, 0, 1});
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
+ g_default_max_ping_strikes = grpc_channel_arg_get_integer(
+ &args->args[i], {g_default_max_ping_strikes, 0, INT_MAX});
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
+ g_default_max_pings_without_data = grpc_channel_arg_get_integer(
+ &args->args[i], {g_default_max_pings_without_data, 0, INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_sent_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ {g_default_min_sent_ping_interval_without_data_ms, 0, INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_recv_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ {g_default_min_recv_ping_interval_without_data_ms, 0, INT_MAX});
}
}
}
@@ -2466,58 +2683,55 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
- if (t->destroying || t->closed) {
+ if (t->destroying || t->closed_with_error != GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
} else if (error == GRPC_ERROR_NONE) {
if (t->keepalive_permit_without_calls ||
grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
- send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE,
- &t->start_keepalive_ping_locked,
+ send_ping_locked(exec_ctx, t, &t->start_keepalive_ping_locked,
&t->finish_keepalive_ping_locked);
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
} else {
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
} else if (error == GRPC_ERROR_CANCELLED) {
/* The keepalive ping timer may be cancelled by bdp */
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
}
static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
- grpc_timer_init(
- exec_ctx, &t->keepalive_watchdog_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout),
- &t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_timeout,
+ &t->keepalive_watchdog_fired_locked);
}
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
- grpc_timer_init(
- exec_ctx, &t->keepalive_ping_timer,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time),
- &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
+ grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+ &t->init_keepalive_ping_locked);
}
}
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
@@ -2525,12 +2739,15 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
- close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "keepalive watchdog timeout"));
+ close_transport_locked(
+ exec_ctx, t,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "keepalive watchdog timeout"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL));
}
} else {
/* The watchdog timer should have been cancelled by
@@ -2592,7 +2809,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_UNREF(s->byte_stream_error);
s->byte_stream_error = GRPC_ERROR_NONE;
grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error));
- s->byte_stream_error = error;
+ s->byte_stream_error = GRPC_ERROR_REF(error);
}
}
@@ -2606,19 +2823,17 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = argp;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)argp;
grpc_chttp2_transport *t = bs->transport;
grpc_chttp2_stream *s = bs->stream;
size_t cur_length = s->frame_storage.length;
if (!s->read_closed) {
- grpc_chttp2_flowctl_incoming_bs_update(&t->flow_control, &s->flow_control,
- bs->next_action.max_size_hint,
- cur_length);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx,
- grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control), t,
- s);
+ s->flow_control->IncomingByteStreamUpdate(bs->next_action.max_size_hint,
+ cur_length);
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, s->flow_control->MakeAction(),
+ t, s);
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
if (s->frame_storage.length > 0) {
@@ -2689,24 +2904,23 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
grpc_error *error;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
- if (s->stream_compression_recv_enabled &&
- !s->unprocessed_incoming_frames_decompressed) {
+ if (!s->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!s->stream_decompression_ctx) {
s->stream_decompression_ctx = grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_DECOMPRESS);
+ s->stream_decompression_method);
}
if (!grpc_stream_decompress(s->stream_decompression_ctx,
&s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer, NULL, MAX_SIZE_T,
- &end_of_context)) {
+ &s->decompressed_data_buffer, NULL,
+ MAX_SIZE_T, &end_of_context)) {
error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error.");
return error;
}
GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer,
- s->decompressed_data_buffer);
+ &s->decompressed_data_buffer);
s->unprocessed_incoming_frames_decompressed = true;
if (end_of_context) {
grpc_stream_compression_context_destroy(s->stream_decompression_ctx);
@@ -2816,7 +3030,8 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs = byte_stream;
+ grpc_chttp2_incoming_byte_stream *bs =
+ (grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
grpc_chttp2_transport *t = s->t;
@@ -2831,7 +3046,8 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
uint32_t frame_size, uint32_t flags) {
grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
- gpr_malloc(sizeof(*incoming_byte_stream));
+ (grpc_chttp2_incoming_byte_stream *)gpr_malloc(
+ sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
incoming_byte_stream->remaining_bytes = frame_size;
incoming_byte_stream->base.flags = flags;
@@ -2872,7 +3088,7 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
@@ -2902,11 +3118,12 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_chttp2_transport *t = arg;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
- grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
+ grpc_chttp2_stream *s =
+ (grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@@ -2932,16 +3149,56 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
/*******************************************************************************
- * INTEGRATION GLUE
+ * MONITORING
*/
-static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
+const char *grpc_chttp2_initiate_write_reason_string(
+ grpc_chttp2_initiate_write_reason reason) {
+ switch (reason) {
+ case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
+ return "INITIAL_WRITE";
+ case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
+ return "START_NEW_STREAM";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
+ return "SEND_MESSAGE";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
+ return "SEND_INITIAL_METADATA";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
+ return "SEND_TRAILING_METADATA";
+ case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
+ return "RETRY_SEND_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
+ return "CONTINUE_PINGS";
+ case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
+ return "GOAWAY_SENT";
+ case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
+ return "RST_STREAM";
+ case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
+ return "CLOSE_FROM_API";
+ case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
+ return "STREAM_FLOW_CONTROL";
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
+ return "TRANSPORT_FLOW_CONTROL";
+ case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
+ return "SEND_SETTINGS";
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
+ return "FLOW_CONTROL_UNSTALLED_BY_SETTING";
+ case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
+ return "FLOW_CONTROL_UNSTALLED_BY_UPDATE";
+ case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
+ return "APPLICATION_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
+ return "KEEPALIVE_PING";
+ case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
+ return "TRANSPORT_FLOW_CONTROL_UNSTALLED";
+ case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
+ return "PING_RESPONSE";
+ case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
+ return "FORCE_RST_STREAM";
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
}
-/*******************************************************************************
- * MONITORING
- */
static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *t) {
return ((grpc_chttp2_transport *)t)->ep;
@@ -2956,13 +3213,15 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
perform_transport_op,
destroy_stream,
destroy_transport,
- chttp2_get_peer,
chttp2_get_endpoint};
+static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
+
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
- grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
+ grpc_chttp2_transport *t =
+ (grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 0c4e2a91c0..321fca4c82 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -23,8 +23,13 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
+extern grpc_tracer_flag grpc_trace_http2_stream_state;
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_chttp2_refcount;
@@ -40,4 +45,8 @@ void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
grpc_slice_buffer *read_buffer);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H */
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.c b/src/core/ext/transport/chttp2/transport/flow_control.c
deleted file mode 100644
index cec99f6fb6..0000000000
--- a/src/core/ext/transport/chttp2/transport/flow_control.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/transport/chttp2/transport/internal.h"
-
-#include <math.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/support/string.h"
-
-static uint32_t grpc_chttp2_target_announced_window(
- const grpc_chttp2_transport_flowctl* tfc);
-
-#ifndef NDEBUG
-
-typedef struct {
- int64_t remote_window;
- int64_t target_window;
- int64_t announced_window;
- int64_t remote_window_delta;
- int64_t local_window_delta;
- int64_t announced_window_delta;
- uint32_t local_init_window;
- uint32_t local_max_frame;
-} shadow_flow_control;
-
-static void pretrace(shadow_flow_control* shadow_fc,
- grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- shadow_fc->remote_window = tfc->remote_window;
- shadow_fc->target_window = grpc_chttp2_target_announced_window(tfc);
- shadow_fc->announced_window = tfc->announced_window;
- if (sfc != NULL) {
- shadow_fc->remote_window_delta = sfc->remote_window_delta;
- shadow_fc->local_window_delta = sfc->local_window_delta;
- shadow_fc->announced_window_delta = sfc->announced_window_delta;
- }
-}
-
-#define TRACE_PADDING 30
-
-static char* fmt_int64_diff_str(int64_t old, int64_t new) {
- char* str;
- if (old != new) {
- gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old, new);
- } else {
- gpr_asprintf(&str, "%" PRId64 "", old);
- }
- char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
- gpr_free(str);
- return str_lp;
-}
-
-static char* fmt_uint32_diff_str(uint32_t old, uint32_t new) {
- char* str;
- if (new > 0 && old != new) {
- gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old, new);
- } else {
- gpr_asprintf(&str, "%" PRIu32 "", old);
- }
- char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING);
- gpr_free(str);
- return str_lp;
-}
-
-static void posttrace(shadow_flow_control* shadow_fc,
- grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc, char* reason) {
- uint32_t acked_local_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- uint32_t remote_window =
- tfc->t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- char* trw_str =
- fmt_int64_diff_str(shadow_fc->remote_window, tfc->remote_window);
- char* tlw_str = fmt_int64_diff_str(shadow_fc->target_window,
- grpc_chttp2_target_announced_window(tfc));
- char* taw_str =
- fmt_int64_diff_str(shadow_fc->announced_window, tfc->announced_window);
- char* srw_str;
- char* slw_str;
- char* saw_str;
- if (sfc != NULL) {
- srw_str = fmt_int64_diff_str(shadow_fc->remote_window_delta + remote_window,
- sfc->remote_window_delta + remote_window);
- slw_str =
- fmt_int64_diff_str(shadow_fc->local_window_delta + acked_local_window,
- sfc->local_window_delta + acked_local_window);
- saw_str = fmt_int64_diff_str(
- shadow_fc->announced_window_delta + acked_local_window,
- sfc->announced_window_delta + acked_local_window);
- } else {
- srw_str = gpr_leftpad("", ' ', TRACE_PADDING);
- slw_str = gpr_leftpad("", ' ', TRACE_PADDING);
- saw_str = gpr_leftpad("", ' ', TRACE_PADDING);
- }
- gpr_log(GPR_DEBUG,
- "%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s",
- tfc, sfc != NULL ? sfc->s->id : 0, tfc->t->is_client ? "cli" : "svr",
- reason, trw_str, tlw_str, taw_str, srw_str, slw_str, saw_str);
- gpr_free(trw_str);
- gpr_free(tlw_str);
- gpr_free(taw_str);
- gpr_free(srw_str);
- gpr_free(slw_str);
- gpr_free(saw_str);
-}
-
-static char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) {
- switch (urgency) {
- case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
- return "no action";
- case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
- return "update immediately";
- case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
- return "queue update";
- default:
- GPR_UNREACHABLE_CODE(return "unknown");
- }
- GPR_UNREACHABLE_CODE(return "unknown");
-}
-
-static void trace_action(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_flowctl_action action) {
- char* iw_str = fmt_uint32_diff_str(
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
- action.initial_window_size);
- char* mf_str = fmt_uint32_diff_str(
- tfc->t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- action.max_frame_size);
- gpr_log(GPR_DEBUG, "t[%s], s[%s], settings[%s] iw:%s mf:%s",
- urgency_to_string(action.send_transport_update),
- urgency_to_string(action.send_stream_update),
- urgency_to_string(action.send_setting_update), iw_str, mf_str);
- gpr_free(iw_str);
- gpr_free(mf_str);
-}
-
-#define PRETRACE(tfc, sfc) \
- shadow_flow_control shadow_fc; \
- GRPC_FLOW_CONTROL_IF_TRACING(pretrace(&shadow_fc, tfc, sfc))
-#define POSTTRACE(tfc, sfc, reason) \
- GRPC_FLOW_CONTROL_IF_TRACING(posttrace(&shadow_fc, tfc, sfc, reason))
-#define TRACEACTION(tfc, action) \
- GRPC_FLOW_CONTROL_IF_TRACING(trace_action(tfc, action))
-#else
-#define PRETRACE(tfc, sfc)
-#define POSTTRACE(tfc, sfc, reason)
-#define TRACEACTION(tfc, action)
-#endif
-
-/* How many bytes of incoming flow control would we like to advertise */
-static uint32_t grpc_chttp2_target_announced_window(
- const grpc_chttp2_transport_flowctl* tfc) {
- return (uint32_t)GPR_MIN(
- (int64_t)((1u << 31) - 1),
- tfc->announced_stream_total_over_incoming_window +
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
-}
-
-// we have sent data on the wire, we must track this in our bookkeeping for the
-// remote peer's flow control.
-void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- int64_t size) {
- PRETRACE(tfc, sfc);
- tfc->remote_window -= size;
- sfc->remote_window_delta -= size;
- POSTTRACE(tfc, sfc, " data sent");
-}
-
-static void announced_window_delta_preupdate(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- if (sfc->announced_window_delta > 0) {
- tfc->announced_stream_total_over_incoming_window -=
- sfc->announced_window_delta;
- } else {
- tfc->announced_stream_total_under_incoming_window +=
- -sfc->announced_window_delta;
- }
-}
-
-static void announced_window_delta_postupdate(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
- if (sfc->announced_window_delta > 0) {
- tfc->announced_stream_total_over_incoming_window +=
- sfc->announced_window_delta;
- } else {
- tfc->announced_stream_total_under_incoming_window -=
- -sfc->announced_window_delta;
- }
-}
-
-// We have received data from the wire. We must track this in our own flow
-// control bookkeeping.
-// Returns an error if the incoming frame violates our flow control.
-grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- int64_t incoming_frame_size) {
- uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- uint32_t acked_init_window =
- tfc->t->settings[GRPC_ACKED_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- PRETRACE(tfc, sfc);
- if (incoming_frame_size > tfc->announced_window) {
- char* msg;
- gpr_asprintf(&msg,
- "frame of size %" PRId64 " overflows local window of %" PRId64,
- incoming_frame_size, tfc->announced_window);
- grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
- gpr_free(msg);
- return err;
- }
-
- if (sfc != NULL) {
- int64_t acked_stream_window =
- sfc->announced_window_delta + acked_init_window;
- int64_t sent_stream_window = sfc->announced_window_delta + sent_init_window;
- if (incoming_frame_size > acked_stream_window) {
- if (incoming_frame_size <= sent_stream_window) {
- gpr_log(
- GPR_ERROR,
- "Incoming frame of size %" PRId64
- " exceeds local window size of %" PRId64
- ".\n"
- "The (un-acked, future) window size would be %" PRId64
- " which is not exceeded.\n"
- "This would usually cause a disconnection, but allowing it due to"
- "broken HTTP2 implementations in the wild.\n"
- "See (for example) https://github.com/netty/netty/issues/6520.",
- incoming_frame_size, acked_stream_window, sent_stream_window);
- } else {
- char* msg;
- gpr_asprintf(&msg, "frame of size %" PRId64
- " overflows local window of %" PRId64,
- incoming_frame_size, acked_stream_window);
- grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
- gpr_free(msg);
- return err;
- }
- }
-
- announced_window_delta_preupdate(tfc, sfc);
- sfc->announced_window_delta -= incoming_frame_size;
- announced_window_delta_postupdate(tfc, sfc);
- sfc->local_window_delta -= incoming_frame_size;
- }
-
- tfc->announced_window -= incoming_frame_size;
-
- POSTTRACE(tfc, sfc, " data recv");
- return GRPC_ERROR_NONE;
-}
-
-// Returns a non zero announce integer if we should send a transport window
-// update
-uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl* tfc) {
- PRETRACE(tfc, NULL);
- uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
- uint32_t threshold_to_send_transport_window_update =
- tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4
- : target_announced_window / 2;
- if (tfc->announced_window <= threshold_to_send_transport_window_update &&
- tfc->announced_window != target_announced_window) {
- uint32_t announce = (uint32_t)GPR_CLAMP(
- target_announced_window - tfc->announced_window, 0, UINT32_MAX);
- tfc->announced_window += announce;
- POSTTRACE(tfc, NULL, "t updt sent");
- return announce;
- }
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "%p[0][%s] will not send transport update", tfc,
- tfc->t->is_client ? "cli" : "svr"));
- return 0;
-}
-
-// Returns a non zero announce integer if we should send a stream window update
-uint32_t grpc_chttp2_flowctl_maybe_send_stream_update(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
- PRETRACE(tfc, sfc);
- if (sfc->local_window_delta > sfc->announced_window_delta) {
- uint32_t announce = (uint32_t)GPR_CLAMP(
- sfc->local_window_delta - sfc->announced_window_delta, 0, UINT32_MAX);
- announced_window_delta_preupdate(tfc, sfc);
- sfc->announced_window_delta += announce;
- announced_window_delta_postupdate(tfc, sfc);
- POSTTRACE(tfc, sfc, "s updt sent");
- return announce;
- }
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "%p[%u][%s] will not send stream update", tfc,
- sfc->s->id, tfc->t->is_client ? "cli" : "svr"));
- return 0;
-}
-
-// we have received a WINDOW_UPDATE frame for a transport
-void grpc_chttp2_flowctl_recv_transport_update(
- grpc_chttp2_transport_flowctl* tfc, uint32_t size) {
- PRETRACE(tfc, NULL);
- tfc->remote_window += size;
- POSTTRACE(tfc, NULL, "t updt recv");
-}
-
-// we have received a WINDOW_UPDATE frame for a stream
-void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- uint32_t size) {
- PRETRACE(tfc, sfc);
- sfc->remote_window_delta += size;
- POSTTRACE(tfc, sfc, "s updt recv");
-}
-
-void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc,
- size_t max_size_hint,
- size_t have_already) {
- PRETRACE(tfc, sfc);
- uint32_t max_recv_bytes;
- uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-
- /* clamp max recv hint to an allowable size */
- if (max_size_hint >= UINT32_MAX - sent_init_window) {
- max_recv_bytes = UINT32_MAX - sent_init_window;
- } else {
- max_recv_bytes = (uint32_t)max_size_hint;
- }
-
- /* account for bytes already received but unknown to higher layers */
- if (max_recv_bytes >= have_already) {
- max_recv_bytes -= (uint32_t)have_already;
- } else {
- max_recv_bytes = 0;
- }
-
- /* add some small lookahead to keep pipelines flowing */
- GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
- if (sfc->local_window_delta < max_recv_bytes) {
- uint32_t add_max_recv_bytes =
- (uint32_t)(max_recv_bytes - sfc->local_window_delta);
- sfc->local_window_delta += add_max_recv_bytes;
- }
- POSTTRACE(tfc, sfc, "app st recv");
-}
-
-void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl* tfc,
- grpc_chttp2_stream_flowctl* sfc) {
- announced_window_delta_preupdate(tfc, sfc);
-}
-
-// Returns an urgency with which to make an update
-static grpc_chttp2_flowctl_urgency delta_is_significant(
- const grpc_chttp2_transport_flowctl* tfc, int32_t value,
- grpc_chttp2_setting_id setting_id) {
- int64_t delta = (int64_t)value -
- (int64_t)tfc->t->settings[GRPC_LOCAL_SETTINGS][setting_id];
- // TODO(ncteisen): tune this
- if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
- return GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
- } else {
- return GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED;
- }
-}
-
-// Takes in a target and uses the pid controller to return a stabilized
-// guess at the new bdp.
-static double get_pid_controller_guess(grpc_chttp2_transport_flowctl* tfc,
- double target) {
- double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec dt_timespec = gpr_time_sub(now, tfc->last_pid_update);
- double dt = (double)dt_timespec.tv_sec + dt_timespec.tv_nsec * 1e-9;
- if (dt > 0.1) {
- dt = 0.1;
- }
- double log2_bdp_guess =
- grpc_pid_controller_update(&tfc->pid_controller, bdp_error, dt);
- tfc->last_pid_update = now;
- return pow(2, log2_bdp_guess);
-}
-
-// Take in a target and modifies it based on the memory pressure of the system
-static double get_target_under_memory_pressure(
- grpc_chttp2_transport_flowctl* tfc, double target) {
- // do not increase window under heavy memory pressure.
- double memory_pressure = grpc_resource_quota_get_memory_pressure(
- grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep)));
- if (memory_pressure > 0.8) {
- target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1);
- }
- return target;
-}
-
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) {
- grpc_chttp2_flowctl_action action;
- memset(&action, 0, sizeof(action));
- uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc);
- if (tfc->announced_window < target_announced_window / 2) {
- action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
- }
- // TODO(ncteisen): tune this
- if (sfc != NULL && !sfc->s->read_closed) {
- uint32_t sent_init_window =
- tfc->t->settings[GRPC_SENT_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
- if ((int64_t)sfc->local_window_delta >
- (int64_t)sfc->announced_window_delta &&
- (int64_t)sfc->announced_window_delta + sent_init_window <=
- sent_init_window / 2) {
- action.send_stream_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY;
- } else if (sfc->local_window_delta > sfc->announced_window_delta) {
- action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE;
- }
- }
- TRACEACTION(tfc, action);
- return action;
-}
-
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
- grpc_chttp2_transport_flowctl* tfc) {
- grpc_chttp2_flowctl_action action;
- memset(&action, 0, sizeof(action));
- if (tfc->enable_bdp_probe) {
- action.need_ping = grpc_bdp_estimator_need_ping(&tfc->bdp_estimator);
-
- // get bdp estimate and update initial_window accordingly.
- int64_t estimate = -1;
- int32_t bdp = -1;
- if (grpc_bdp_estimator_get_estimate(&tfc->bdp_estimator, &estimate)) {
- double target = 1 + log2((double)estimate);
-
- // target might change based on how much memory pressure we are under
- // TODO(ncteisen): experiment with setting target to be huge under low
- // memory pressure.
- target = get_target_under_memory_pressure(tfc, target);
-
- // run our target through the pid controller to stabilize change.
- // TODO(ncteisen): experiment with other controllers here.
- double bdp_guess = get_pid_controller_guess(tfc, target);
-
- // Though initial window 'could' drop to 0, we keep the floor at 128
- bdp = GPR_MAX((int32_t)bdp_guess, 128);
-
- grpc_chttp2_flowctl_urgency init_window_update_urgency =
- delta_is_significant(tfc, bdp,
- GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
- if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- action.send_setting_update = init_window_update_urgency;
- action.initial_window_size = (uint32_t)bdp;
- }
- }
-
- // get bandwidth estimate and update max_frame accordingly.
- double bw_dbl = -1;
- if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) {
- // we target the max of BDP or bandwidth in microseconds.
- int32_t frame_size = (int32_t)GPR_CLAMP(
- GPR_MAX((int32_t)bw_dbl / 1000, bdp), 16384, 16777215);
- grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant(
- tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE);
- if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) {
- if (frame_size_urgency > action.send_setting_update) {
- action.send_setting_update = frame_size_urgency;
- }
- action.max_frame_size = (uint32_t)frame_size;
- }
- }
- }
-
- TRACEACTION(tfc, action);
- return action;
-}
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc
new file mode 100644
index 0000000000..40545bc74b
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/flow_control.cc
@@ -0,0 +1,381 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/ext/transport/chttp2/transport/flow_control.h"
+
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/support/string.h"
+
+namespace grpc_core {
+namespace chttp2 {
+
+namespace {
+
+static constexpr const int kTracePadding = 30;
+
+static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) {
+ char* str;
+ if (old_val != new_val) {
+ gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old_val, new_val);
+ } else {
+ gpr_asprintf(&str, "%" PRId64 "", old_val);
+ }
+ char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
+ gpr_free(str);
+ return str_lp;
+}
+
+static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) {
+ char* str;
+ if (new_val > 0 && old_val != new_val) {
+ gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old_val, new_val);
+ } else {
+ gpr_asprintf(&str, "%" PRIu32 "", old_val);
+ }
+ char* str_lp = gpr_leftpad(str, ' ', kTracePadding);
+ gpr_free(str);
+ return str_lp;
+}
+} // namespace
+
+void FlowControlTrace::Init(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc) {
+ tfc_ = tfc;
+ sfc_ = sfc;
+ reason_ = reason;
+ remote_window_ = tfc->remote_window();
+ target_window_ = tfc->target_window();
+ announced_window_ = tfc->announced_window();
+ if (sfc != nullptr) {
+ remote_window_delta_ = sfc->remote_window_delta();
+ local_window_delta_ = sfc->local_window_delta();
+ announced_window_delta_ = sfc->announced_window_delta();
+ }
+}
+
+void FlowControlTrace::Finish() {
+ uint32_t acked_local_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ uint32_t remote_window =
+ tfc_->transport()->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ char* trw_str = fmt_int64_diff_str(remote_window_, tfc_->remote_window());
+ char* tlw_str = fmt_int64_diff_str(target_window_, tfc_->target_window());
+ char* taw_str =
+ fmt_int64_diff_str(announced_window_, tfc_->announced_window());
+ char* srw_str;
+ char* slw_str;
+ char* saw_str;
+ if (sfc_ != nullptr) {
+ srw_str = fmt_int64_diff_str(remote_window_delta_ + remote_window,
+ sfc_->remote_window_delta() + remote_window);
+ slw_str = fmt_int64_diff_str(local_window_delta_ + acked_local_window,
+ sfc_->local_window_delta() + acked_local_window);
+ saw_str = fmt_int64_diff_str(announced_window_delta_ + acked_local_window,
+ sfc_->announced_window_delta() + acked_local_window);
+ } else {
+ srw_str = gpr_leftpad("", ' ', kTracePadding);
+ slw_str = gpr_leftpad("", ' ', kTracePadding);
+ saw_str = gpr_leftpad("", ' ', kTracePadding);
+ }
+ gpr_log(GPR_DEBUG,
+ "%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s",
+ tfc_, sfc_ != nullptr ? sfc_->stream()->id : 0,
+ tfc_->transport()->is_client ? "cli" : "svr", reason_, trw_str,
+ tlw_str, taw_str, srw_str, slw_str, saw_str);
+ gpr_free(trw_str);
+ gpr_free(tlw_str);
+ gpr_free(taw_str);
+ gpr_free(srw_str);
+ gpr_free(slw_str);
+ gpr_free(saw_str);
+}
+
+const char* FlowControlAction::UrgencyString(Urgency u) {
+ switch (u) {
+ case Urgency::NO_ACTION_NEEDED:
+ return "no action";
+ case Urgency::UPDATE_IMMEDIATELY:
+ return "update immediately";
+ case Urgency::QUEUE_UPDATE:
+ return "queue update";
+ default:
+ GPR_UNREACHABLE_CODE(return "unknown");
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
+}
+
+void FlowControlAction::Trace(grpc_chttp2_transport* t) const {
+ char* iw_str = fmt_uint32_diff_str(
+ t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+ initial_window_size_);
+ char* mf_str = fmt_uint32_diff_str(
+ t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ max_frame_size_);
+ gpr_log(GPR_DEBUG, "t[%s], s[%s], iw:%s:%s mf:%s:%s",
+ UrgencyString(send_transport_update_),
+ UrgencyString(send_stream_update_),
+ UrgencyString(send_initial_window_update_), iw_str,
+ UrgencyString(send_max_frame_size_update_), mf_str);
+ gpr_free(iw_str);
+ gpr_free(mf_str);
+}
+
+TransportFlowControl::TransportFlowControl(grpc_exec_ctx* exec_ctx,
+ const grpc_chttp2_transport* t,
+ bool enable_bdp_probe)
+ : t_(t),
+ enable_bdp_probe_(enable_bdp_probe),
+ bdp_estimator_(t->peer_string),
+ pid_controller_(grpc_core::PidController::Args()
+ .set_gain_p(4)
+ .set_gain_i(8)
+ .set_gain_d(0)
+ .set_initial_control_value(TargetLogBdp())
+ .set_min_control_value(-1)
+ .set_max_control_value(25)
+ .set_integral_range(10)),
+ last_pid_update_(grpc_exec_ctx_now(exec_ctx)) {}
+
+uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) {
+ FlowControlTrace trace("t updt sent", this, nullptr);
+ const uint32_t target_announced_window = (const uint32_t)target_window();
+ if ((writing_anyway || announced_window_ <= target_announced_window / 2) &&
+ announced_window_ != target_announced_window) {
+ const uint32_t announce = (uint32_t)GPR_CLAMP(
+ target_announced_window - announced_window_, 0, UINT32_MAX);
+ announced_window_ += announce;
+ return announce;
+ }
+ return 0;
+}
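+// Example (hypothetical numbers): with target_window() == 65535 and
+// announced_window_ == 30000, a non-forced call above announces 35535 and
+// restores the window to the target; at announced_window_ == 40000 it
+// returns 0 unless writing_anyway is true.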
+
+grpc_error* TransportFlowControl::ValidateRecvData(
+ int64_t incoming_frame_size) {
+ if (incoming_frame_size > announced_window_) {
+ char* msg;
+ gpr_asprintf(&msg,
+ "frame of size %" PRId64 " overflows local window of %" PRId64,
+ incoming_frame_size, announced_window_);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ return err;
+ }
+ return GRPC_ERROR_NONE;
+}
+
+StreamFlowControl::StreamFlowControl(TransportFlowControl* tfc,
+ const grpc_chttp2_stream* s)
+ : tfc_(tfc), s_(s) {}
+
+grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
+ FlowControlTrace trace(" data recv", tfc_, this);
+
+ grpc_error* error = GRPC_ERROR_NONE;
+ error = tfc_->ValidateRecvData(incoming_frame_size);
+ if (error != GRPC_ERROR_NONE) return error;
+
+ uint32_t sent_init_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ uint32_t acked_init_window =
+ tfc_->transport()->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+
+ int64_t acked_stream_window = announced_window_delta_ + acked_init_window;
+ int64_t sent_stream_window = announced_window_delta_ + sent_init_window;
+ if (incoming_frame_size > acked_stream_window) {
+ if (incoming_frame_size <= sent_stream_window) {
+ gpr_log(GPR_ERROR,
+ "Incoming frame of size %" PRId64
+ " exceeds local window size of %" PRId64
+ ".\n"
+ "The (un-acked, future) window size would be %" PRId64
+ " which is not exceeded.\n"
+ "This would usually cause a disconnection, but allowing it due to"
+ "broken HTTP2 implementations in the wild.\n"
+ "See (for example) https://github.com/netty/netty/issues/6520.",
+ incoming_frame_size, acked_stream_window, sent_stream_window);
+ } else {
+ char* msg;
+ gpr_asprintf(&msg, "frame of size %" PRId64
+ " overflows local window of %" PRId64,
+ incoming_frame_size, acked_stream_window);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ return err;
+ }
+ }
+
+ UpdateAnnouncedWindowDelta(tfc_, -incoming_frame_size);
+ local_window_delta_ -= incoming_frame_size;
+ tfc_->CommitRecvData(incoming_frame_size);
+ return GRPC_ERROR_NONE;
+}
+
+uint32_t StreamFlowControl::MaybeSendUpdate() {
+ FlowControlTrace trace("s updt sent", tfc_, this);
+ if (local_window_delta_ > announced_window_delta_) {
+ uint32_t announce = (uint32_t)GPR_CLAMP(
+ local_window_delta_ - announced_window_delta_, 0, UINT32_MAX);
+ UpdateAnnouncedWindowDelta(tfc_, announce);
+ return announce;
+ }
+ return 0;
+}
+
+void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint,
+ size_t have_already) {
+ FlowControlTrace trace("app st recv", tfc_, this);
+ uint32_t max_recv_bytes;
+ uint32_t sent_init_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+
+ /* clamp max recv hint to an allowable size */
+ if (max_size_hint >= UINT32_MAX - sent_init_window) {
+ max_recv_bytes = UINT32_MAX - sent_init_window;
+ } else {
+ max_recv_bytes = (uint32_t)max_size_hint;
+ }
+
+ /* account for bytes already received but unknown to higher layers */
+ if (max_recv_bytes >= have_already) {
+ max_recv_bytes -= (uint32_t)have_already;
+ } else {
+ max_recv_bytes = 0;
+ }
+
+ /* add some small lookahead to keep pipelines flowing */
+ GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window);
+ if (local_window_delta_ < max_recv_bytes) {
+ uint32_t add_max_recv_bytes =
+ (uint32_t)(max_recv_bytes - local_window_delta_);
+ local_window_delta_ += add_max_recv_bytes;
+ }
+}
+
+// Takes in a target and modifies it based on the memory pressure of the system
+static double AdjustForMemoryPressure(grpc_resource_quota* quota,
+ double target) {
+ // do not increase window under heavy memory pressure.
+ double memory_pressure = grpc_resource_quota_get_memory_pressure(quota);
+ static const double kLowMemPressure = 0.1;
+ static const double kZeroTarget = 22;
+ static const double kHighMemPressure = 0.8;
+ static const double kMaxMemPressure = 0.9;
+ if (memory_pressure < kLowMemPressure && target < kZeroTarget) {
+ target = (target - kZeroTarget) * memory_pressure / kLowMemPressure +
+ kZeroTarget;
+ } else if (memory_pressure > kHighMemPressure) {
+ target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) /
+ (kMaxMemPressure - kHighMemPressure));
+ }
+ return target;
+}
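+// Worked example (hypothetical pressure readings): at memory_pressure 0.85
+// the scale factor is 1 - (0.85 - 0.8) / (0.9 - 0.8) = 0.5, halving the
+// target; at 0.9 or above the target is driven to 0, and below 0.1 a small
+// target is pulled up toward kZeroTarget (22).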
+
+double TransportFlowControl::TargetLogBdp() {
+ return AdjustForMemoryPressure(
+ grpc_resource_user_quota(grpc_endpoint_get_resource_user(t_->ep)),
+ 1 + log2(bdp_estimator_.EstimateBdp()));
+}
+
+double TransportFlowControl::SmoothLogBdp(grpc_exec_ctx* exec_ctx,
+ double value) {
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ double bdp_error = value - pid_controller_.last_control_value();
+ const double dt = (double)(now - last_pid_update_) * 1e-3;
+ last_pid_update_ = now;
+ return pid_controller_.Update(bdp_error, dt);
+}
+
+FlowControlAction::Urgency TransportFlowControl::DeltaUrgency(
+ int32_t value, grpc_chttp2_setting_id setting_id) {
+ int64_t delta =
+ (int64_t)value - (int64_t)t_->settings[GRPC_LOCAL_SETTINGS][setting_id];
+ // TODO(ncteisen): tune this
+ if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) {
+ return FlowControlAction::Urgency::QUEUE_UPDATE;
+ } else {
+ return FlowControlAction::Urgency::NO_ACTION_NEEDED;
+ }
+}
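+// Example (hypothetical numbers): with a local INITIAL_WINDOW_SIZE setting of
+// 65535, a proposed value of 131072 gives delta = 65537 >= 131072 / 5, so the
+// update is queued; a proposed value of 70000 gives delta = 4465, which is
+// within a fifth of the value, so no action is taken.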
+
+FlowControlAction TransportFlowControl::PeriodicUpdate(
+ grpc_exec_ctx* exec_ctx) {
+ FlowControlAction action;
+ if (enable_bdp_probe_) {
+ // get bdp estimate and update initial_window accordingly.
+ // target might change based on how much memory pressure we are under
+ // TODO(ncteisen): experiment with setting target to be huge under low
+ // memory pressure.
+ const double target = pow(2, SmoothLogBdp(exec_ctx, TargetLogBdp()));
+
+ // Though initial window 'could' drop to 0, we keep the floor at 128
+ target_initial_window_size_ = (int32_t)GPR_CLAMP(target, 128, INT32_MAX);
+
+ action.set_send_initial_window_update(
+ DeltaUrgency(target_initial_window_size_,
+ GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE),
+ target_initial_window_size_);
+
+ // get bandwidth estimate and update max_frame accordingly.
+ double bw_dbl = bdp_estimator_.EstimateBandwidth();
+ // we target the max of BDP or bandwidth in microseconds.
+ int32_t frame_size = (int32_t)GPR_CLAMP(
+ GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000,
+ target_initial_window_size_),
+ 16384, 16777215);
+ action.set_send_max_frame_size_update(
+ DeltaUrgency(frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE),
+ frame_size);
+ }
+ return UpdateAction(action);
+}
+
+FlowControlAction StreamFlowControl::UpdateAction(FlowControlAction action) {
+ // TODO(ncteisen): tune this
+ if (!s_->read_closed) {
+ uint32_t sent_init_window =
+ tfc_->transport()->settings[GRPC_SENT_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ if (local_window_delta_ > announced_window_delta_ &&
+ announced_window_delta_ + sent_init_window <= sent_init_window / 2) {
+ action.set_send_stream_update(
+ FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
+ } else if (local_window_delta_ > announced_window_delta_) {
+ action.set_send_stream_update(FlowControlAction::Urgency::QUEUE_UPDATE);
+ }
+ }
+
+ return action;
+}
+
+} // namespace chttp2
+} // namespace grpc_core
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.h b/src/core/ext/transport/chttp2/transport/flow_control.h
new file mode 100644
index 0000000000..7dd348ed5f
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/flow_control.h
@@ -0,0 +1,336 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
+#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H
+
+#include <stdint.h>
+
+#include <grpc/support/useful.h>
+#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
+#include "src/core/lib/support/manual_constructor.h"
+#include "src/core/lib/transport/bdp_estimator.h"
+#include "src/core/lib/transport/pid_controller.h"
+
+struct grpc_chttp2_transport;
+struct grpc_chttp2_stream;
+
+extern "C" grpc_tracer_flag grpc_flowctl_trace;
+
+namespace grpc {
+namespace testing {
+class TrickledCHTTP2; // to make this a friend
+} // namespace testing
+} // namespace grpc
+
+namespace grpc_core {
+namespace chttp2 {
+
+static constexpr uint32_t kDefaultWindow = 65535;
+
+class TransportFlowControl;
+class StreamFlowControl;
+
+class FlowControlAction {
+ public:
+ enum class Urgency : uint8_t {
+ // Nothing to be done.
+ NO_ACTION_NEEDED = 0,
+ // Initiate a write to update the initial window immediately.
+ UPDATE_IMMEDIATELY,
+ // Push the flow control update into a send buffer, to be sent
+ // out the next time a write is initiated.
+ QUEUE_UPDATE,
+ };
+
+ Urgency send_stream_update() const { return send_stream_update_; }
+ Urgency send_transport_update() const { return send_transport_update_; }
+ Urgency send_initial_window_update() const {
+ return send_initial_window_update_;
+ }
+ Urgency send_max_frame_size_update() const {
+ return send_max_frame_size_update_;
+ }
+ uint32_t initial_window_size() const { return initial_window_size_; }
+ uint32_t max_frame_size() const { return max_frame_size_; }
+
+ FlowControlAction& set_send_stream_update(Urgency u) {
+ send_stream_update_ = u;
+ return *this;
+ }
+ FlowControlAction& set_send_transport_update(Urgency u) {
+ send_transport_update_ = u;
+ return *this;
+ }
+ FlowControlAction& set_send_initial_window_update(Urgency u,
+ uint32_t update) {
+ send_initial_window_update_ = u;
+ initial_window_size_ = update;
+ return *this;
+ }
+ FlowControlAction& set_send_max_frame_size_update(Urgency u,
+ uint32_t update) {
+ send_max_frame_size_update_ = u;
+ max_frame_size_ = update;
+ return *this;
+ }
+
+ static const char* UrgencyString(Urgency u);
+ void Trace(grpc_chttp2_transport* t) const;
+
+ private:
+ Urgency send_stream_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_transport_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_initial_window_update_ = Urgency::NO_ACTION_NEEDED;
+ Urgency send_max_frame_size_update_ = Urgency::NO_ACTION_NEEDED;
+ uint32_t initial_window_size_ = 0;
+ uint32_t max_frame_size_ = 0;
+};
+
+class FlowControlTrace {
+ public:
+ FlowControlTrace(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc) {
+ if (enabled_) Init(reason, tfc, sfc);
+ }
+
+ ~FlowControlTrace() {
+ if (enabled_) Finish();
+ }
+
+ private:
+ void Init(const char* reason, TransportFlowControl* tfc,
+ StreamFlowControl* sfc);
+ void Finish();
+
+ const bool enabled_ = GRPC_TRACER_ON(grpc_flowctl_trace);
+
+ TransportFlowControl* tfc_;
+ StreamFlowControl* sfc_;
+ const char* reason_;
+ int64_t remote_window_;
+ int64_t target_window_;
+ int64_t announced_window_;
+ int64_t remote_window_delta_;
+ int64_t local_window_delta_;
+ int64_t announced_window_delta_;
+};
+
+class TransportFlowControl {
+ public:
+ TransportFlowControl(grpc_exec_ctx* exec_ctx, const grpc_chttp2_transport* t,
+ bool enable_bdp_probe);
+ ~TransportFlowControl() {}
+
+ bool bdp_probe() const { return enable_bdp_probe_; }
+
+ // returns an announce if we should send a transport update to our peer,
+ // else returns zero; writing_anyway indicates if a write would happen
+ // regardless of the send - if it is false and this function returns non-zero,
+ // this announce will cause a write to occur
+ uint32_t MaybeSendUpdate(bool writing_anyway);
+
+ // Reads the flow control data and returns an actionable struct that will
+ // tell chttp2 exactly what it needs to do
+ FlowControlAction MakeAction() { return UpdateAction(FlowControlAction()); }
+
+ // Call periodically (at a low-ish rate, 100ms - 10s makes sense)
+ // to perform more complex flow control calculations and return an action
+ // to let chttp2 change its parameters
+ FlowControlAction PeriodicUpdate(grpc_exec_ctx* exec_ctx);
+
+ void StreamSentData(int64_t size) { remote_window_ -= size; }
+
+ grpc_error* ValidateRecvData(int64_t incoming_frame_size);
+ void CommitRecvData(int64_t incoming_frame_size) {
+ announced_window_ -= incoming_frame_size;
+ }
+
+ grpc_error* RecvData(int64_t incoming_frame_size) {
+ FlowControlTrace trace(" data recv", this, nullptr);
+ grpc_error* error = ValidateRecvData(incoming_frame_size);
+ if (error != GRPC_ERROR_NONE) return error;
+ CommitRecvData(incoming_frame_size);
+ return GRPC_ERROR_NONE;
+ }
+
+ // we have received a WINDOW_UPDATE frame for a transport
+ void RecvUpdate(uint32_t size) {
+ FlowControlTrace trace("t updt recv", this, nullptr);
+ remote_window_ += size;
+ }
+
+ int64_t remote_window() const { return remote_window_; }
+ int64_t target_window() const {
+ return (uint32_t)GPR_MIN((int64_t)((1u << 31) - 1),
+ announced_stream_total_over_incoming_window_ +
+ target_initial_window_size_);
+ }
+ int64_t announced_window() const { return announced_window_; }
+
+ const grpc_chttp2_transport* transport() const { return t_; }
+
+ void PreUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
+ if (delta > 0) {
+ announced_stream_total_over_incoming_window_ -= delta;
+ } else {
+ announced_stream_total_under_incoming_window_ += -delta;
+ }
+ }
+
+ void PostUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) {
+ if (delta > 0) {
+ announced_stream_total_over_incoming_window_ += delta;
+ } else {
+ announced_stream_total_under_incoming_window_ -= -delta;
+ }
+ }
+
+ BdpEstimator* bdp_estimator() { return &bdp_estimator_; }
+
+ void TestOnlyForceHugeWindow() {
+ announced_window_ = 1024 * 1024 * 1024;
+ remote_window_ = 1024 * 1024 * 1024;
+ }
+
+ private:
+ friend class ::grpc::testing::TrickledCHTTP2;
+ double TargetLogBdp();
+ double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value);
+ FlowControlAction::Urgency DeltaUrgency(int32_t value,
+ grpc_chttp2_setting_id setting_id);
+
+ FlowControlAction UpdateAction(FlowControlAction action) {
+ if (announced_window_ < target_window() / 2) {
+ action.set_send_transport_update(
+ FlowControlAction::Urgency::UPDATE_IMMEDIATELY);
+ }
+ return action;
+ }
+
+ const grpc_chttp2_transport* const t_;
+
+ /** Our bookkeeping for the remote peer's available window */
+ int64_t remote_window_ = kDefaultWindow;
+
+ /** calculating what we should give for local window:
+ we track the total amount of flow control over initial window size
+ across all streams: this is data that we want to receive right now (it
+ has an outstanding read)
+ and the total amount of flow control under initial window size across all
+ streams: this is data we've read early
+ we want to adjust incoming_window such that:
+ incoming_window = total_over - max(bdp - total_under, 0) */
+ int64_t announced_stream_total_over_incoming_window_ = 0;
+ int64_t announced_stream_total_under_incoming_window_ = 0;
+
+ /** This is our window according to what we have sent to our remote peer. The
+ * difference between this and target window is what we use to decide when
+ * to send WINDOW_UPDATE frames. */
+ int64_t announced_window_ = kDefaultWindow;
+
+ int32_t target_initial_window_size_ = kDefaultWindow;
+
+ /** should we probe bdp? */
+ const bool enable_bdp_probe_;
+
+ /* bdp estimation */
+ grpc_core::BdpEstimator bdp_estimator_;
+
+ /* pid controller */
+ grpc_core::PidController pid_controller_;
+ grpc_millis last_pid_update_ = 0;
+};
+
+class StreamFlowControl {
+ public:
+ StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s);
+ ~StreamFlowControl() {
+ tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ }
+
+ FlowControlAction UpdateAction(FlowControlAction action);
+ FlowControlAction MakeAction() { return UpdateAction(tfc_->MakeAction()); }
+
+ // we have sent data on the wire, we must track this in our bookkeeping for
+ // the remote peer's flow control.
+ void SentData(int64_t outgoing_frame_size) {
+ FlowControlTrace tracer(" data sent", tfc_, this);
+ tfc_->StreamSentData(outgoing_frame_size);
+ remote_window_delta_ -= outgoing_frame_size;
+ }
+
+ // we have received data from the wire
+ grpc_error* RecvData(int64_t incoming_frame_size);
+
+ // returns an announce if we should send a stream update to our peer, else
+ // returns zero
+ uint32_t MaybeSendUpdate();
+
+ // we have received a WINDOW_UPDATE frame for a stream
+ void RecvUpdate(uint32_t size) {
+ FlowControlTrace trace("s updt recv", tfc_, this);
+ remote_window_delta_ += size;
+ }
+
+ // the application is asking for a certain amount of bytes
+ void IncomingByteStreamUpdate(size_t max_size_hint, size_t have_already);
+
+ int64_t remote_window_delta() const { return remote_window_delta_; }
+ int64_t local_window_delta() const { return local_window_delta_; }
+ int64_t announced_window_delta() const { return announced_window_delta_; }
+
+ const grpc_chttp2_stream* stream() const { return s_; }
+
+ void TestOnlyForceHugeWindow() {
+ announced_window_delta_ = 1024 * 1024 * 1024;
+ local_window_delta_ = 1024 * 1024 * 1024;
+ remote_window_delta_ = 1024 * 1024 * 1024;
+ }
+
+ private:
+ friend class ::grpc::testing::TrickledCHTTP2;
+ TransportFlowControl* const tfc_;
+ const grpc_chttp2_stream* const s_;
+
+ void UpdateAnnouncedWindowDelta(TransportFlowControl* tfc, int64_t change) {
+ tfc->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ announced_window_delta_ += change;
+ tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_);
+ }
+
+ /** window available for us to send to peer, over or under the initial
+ * window
+ * size of the transport... ie:
+ * remote_window = remote_window_delta + transport.initial_window_size */
+ int64_t remote_window_delta_ = 0;
+
+ /** window available for peer to send to us (as a delta on
+ * transport.initial_window_size)
+ * local_window = local_window_delta + transport.initial_window_size */
+ int64_t local_window_delta_ = 0;
+
+ /** window available for peer to send to us over this stream that we have
+ * announced to the peer */
+ int64_t announced_window_delta_ = 0;
+};
+
+} // namespace chttp2
+} // namespace grpc_core
+
+#endif
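A rough sketch (not the actual chttp2 writer path, and assuming flow_control.h is on the include path) of how a caller is expected to consume the Urgency values above: UPDATE_IMMEDIATELY both records the update and kicks off a write, QUEUE_UPDATE only records it for the next write that happens anyway. The handler function and its two output flags are hypothetical.

  void sketch_handle_stream_action(
      const grpc_core::chttp2::FlowControlAction& action,
      bool* queue_stream_window_update, bool* initiate_write_now) {
    using Urgency = grpc_core::chttp2::FlowControlAction::Urgency;
    switch (action.send_stream_update()) {
      case Urgency::UPDATE_IMMEDIATELY:
        *queue_stream_window_update = true;
        *initiate_write_now = true;  // start a write just for this update
        break;
      case Urgency::QUEUE_UPDATE:
        *queue_stream_window_update = true;  // piggyback on the next write
        break;
      case Urgency::NO_ACTION_NEEDED:
        break;
    }
    // send_transport_update(), send_initial_window_update() and
    // send_max_frame_size_update() would be handled the same way.
  }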
diff --git a/src/core/ext/transport/chttp2/transport/frame.h b/src/core/ext/transport/chttp2/transport/frame.h
index dba4c004ec..e7debdad79 100644
--- a/src/core/ext/transport/chttp2/transport/frame.h
+++ b/src/core/ext/transport/chttp2/transport/frame.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/error.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* defined in internal.h */
typedef struct grpc_chttp2_stream grpc_chttp2_stream;
typedef struct grpc_chttp2_transport grpc_chttp2_transport;
@@ -43,4 +47,8 @@ typedef struct grpc_chttp2_transport grpc_chttp2_transport;
#define GRPC_CHTTP2_DATA_FLAG_PADDED 8
#define GRPC_CHTTP2_FLAG_HAS_PRIORITY 0x20
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H */
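The guards added here (and to the other headers below) follow the usual pattern for headers included from both C and C++ translation units during the .c -> .cc migration: declarations keep C linkage, so the converted files still link against objects built from the remaining C sources. Illustrative shape only, with a hypothetical declaration:

  /* some_transport_api.h -- hypothetical example of the guard pattern */
  #ifdef __cplusplus
  extern "C" {
  #endif

  void grpc_sketch_api(int arg);  /* C linkage in both C and C++ builds */

  #ifdef __cplusplus
  }
  #endif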
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.cc
index 222d2177b2..73aaab1802 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.cc
@@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
if (cur != end) {
grpc_slice_buffer_undo_take_first(
- &s->unprocessed_incoming_frames_buffer,
+ slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
}
grpc_slice_unref_internal(exec_ctx, slice);
@@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
p->state = GRPC_CHTTP2_DATA_FH_0;
cur += p->frame_size;
grpc_slice_buffer_undo_take_first(
- &s->unprocessed_incoming_frames_buffer,
+ slices,
grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
grpc_slice_unref_internal(exec_ctx, slice);
return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index 3f1c787847..81ec5361a3 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -28,6 +28,10 @@
#include "src/core/lib/transport/byte_stream.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_CHTTP2_DATA_FH_0,
GRPC_CHTTP2_DATA_FH_1,
@@ -80,4 +84,8 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
grpc_slice_buffer *slices, grpc_slice *slice_out,
grpc_byte_stream **stream_out);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
index 4bce84f21c..78ec08e177 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
@@ -46,7 +46,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
gpr_free(p->debug_data);
p->debug_length = length - 8;
- p->debug_data = gpr_malloc(p->debug_length);
+ p->debug_data = (char *)gpr_malloc(p->debug_length);
p->debug_pos = 0;
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_ERROR_NONE;
@@ -60,7 +60,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_goaway_parser *p = parser;
+ grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser;
switch (p->state) {
case GRPC_CHTTP2_GOAWAY_LSI0:
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index abc48f30c6..7b3aa45f3f 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -25,6 +25,10 @@
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_CHTTP2_GOAWAY_LSI0,
GRPC_CHTTP2_GOAWAY_LSI1,
@@ -60,4 +64,8 @@ void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
grpc_slice_buffer *slice_buffer);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.cc
index 3d7c6fbfad..1cfa883ee1 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc
@@ -75,7 +75,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_ping_parser *p = parser;
+ grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
@@ -89,10 +89,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
} else {
if (!t->is_client) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec next_allowed_ping =
- gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- t->ping_policy.min_ping_interval_without_data);
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis next_allowed_ping =
+ t->ping_recv_state.last_ping_recv_time +
+ t->ping_policy.min_recv_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@@ -100,11 +100,10 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
no less than two hours. When there is no outstanding streams, we
restrict the number of PINGS equivalent to TCP Keep-Alive. */
next_allowed_ping =
- gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- gpr_time_from_seconds(7200, GPR_TIMESPAN));
+ t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
- if (gpr_time_cmp(next_allowed_ping, now) > 0) {
+ if (next_allowed_ping > now) {
grpc_chttp2_add_ping_strike(exec_ctx, t);
}
@@ -113,11 +112,12 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (!g_disable_ping_ack) {
if (t->ping_ack_count == t->ping_ack_capacity) {
t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3);
- t->ping_acks = gpr_realloc(
+ t->ping_acks = (uint64_t *)gpr_realloc(
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
- grpc_chttp2_initiate_write(exec_ctx, t, "ping response");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
}
}
}
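The frame_ping.cc hunk above replaces gpr_timespec arithmetic with plain grpc_millis (int64 millisecond) values. A standalone sketch of the same rate-limit decision, with hypothetical names, to show why the comparison collapses to an integer greater-than:

  #include <cstdint>

  typedef int64_t millis;  // stands in for grpc_millis here

  // Returns true when a received PING should count as a ping strike.
  bool sketch_ping_too_soon(millis now, millis last_ping_recv,
                            millis min_recv_interval, bool no_active_streams) {
    millis next_allowed = last_ping_recv + min_recv_interval;
    if (no_active_streams) {
      // idle connections fall back to a TCP-keepalive-like 2 hour interval
      next_allowed = last_ping_recv + 7200 * 1000;
    }
    return next_allowed > now;
  }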
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index 5969ace9bd..ffc2f0cf2f 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -23,6 +23,10 @@
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
uint8_t byte;
uint8_t is_ack;
@@ -41,4 +45,8 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
/* Test-only function for disabling ping ack */
void grpc_set_disable_ping_ack(bool disable_ping_ack);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
index 689dc8935c..0133b6efa2 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@@ -77,7 +77,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_rst_stream_parser *p = parser;
+ grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index d088221b52..102ffdb3f3 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
uint8_t byte;
uint8_t reason_bytes[4];
@@ -40,4 +44,8 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.cc
index 057d3d9ed3..db0245bb57 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc
@@ -44,7 +44,8 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
return out;
}
-grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
+grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings,
+ const uint32_t *new_settings,
uint32_t force_mask, size_t count) {
size_t i;
uint32_t n = 0;
@@ -52,21 +53,21 @@ grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *new,
uint8_t *p;
for (i = 0; i < count; i++) {
- n += (new[i] != old[i] || (force_mask & (1u << i)) != 0);
+ n += (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0);
}
output = GRPC_SLICE_MALLOC(9 + 6 * n);
p = fill_header(GRPC_SLICE_START_PTR(output), 6 * n, 0);
for (i = 0; i < count; i++) {
- if (new[i] != old[i] || (force_mask & (1u << i)) != 0) {
+ if (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0) {
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i] >> 8);
*p++ = (uint8_t)(grpc_setting_id_to_wire_id[i]);
- *p++ = (uint8_t)(new[i] >> 24);
- *p++ = (uint8_t)(new[i] >> 16);
- *p++ = (uint8_t)(new[i] >> 8);
- *p++ = (uint8_t)(new[i]);
- old[i] = new[i];
+ *p++ = (uint8_t)(new_settings[i] >> 24);
+ *p++ = (uint8_t)(new_settings[i] >> 16);
+ *p++ = (uint8_t)(new_settings[i] >> 8);
+ *p++ = (uint8_t)(new_settings[i]);
+ old_settings[i] = new_settings[i];
}
}
@@ -111,7 +112,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_settings_parser *parser = p;
+ grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p;
const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
const uint8_t *end = GRPC_SLICE_END_PTR(slice);
char *msg;
@@ -201,13 +202,13 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
}
if (id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE &&
parser->incoming_settings[id] != parser->value) {
- t->flow_control.initial_window_update +=
+ t->initial_window_update +=
(int64_t)parser->value - parser->incoming_settings[id];
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_flowctl_trace)) {
gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change",
t, t->is_client ? "cli" : "svr",
- (int)t->flow_control.initial_window_update);
+ (int)t->initial_window_update);
}
}
parser->incoming_settings[id] = parser->value;
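The grpc_chttp2_settings_create() rename above is purely a C-to-C++ compatibility fix: 'new' is a keyword in C++, so a parameter with that name stops compiling the moment the file becomes a .cc; 'old' is renamed alongside it for symmetry. A hypothetical declaration showing the before and after:

  #include <cstddef>
  #include <cstdint>

  // Legal C, rejected by a C++ compiler because 'new' is a keyword:
  //   void sketch_diff_settings(uint32_t *old, const uint32_t *new, size_t n);
  // Compiles in both languages after the rename:
  void sketch_diff_settings(uint32_t *old_settings, const uint32_t *new_settings,
                            size_t n);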
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index 47479d675d..3364da1520 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -25,6 +25,10 @@
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_CHTTP2_SPS_ID0,
GRPC_CHTTP2_SPS_ID1,
@@ -58,4 +62,8 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
index 65f3b01d77..15eaf59285 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
@@ -70,7 +70,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
- grpc_chttp2_window_update_parser *p = parser;
+ grpc_chttp2_window_update_parser *p =
+ (grpc_chttp2_window_update_parser *)parser;
while (p->byte != 4 && cur != end) {
p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
@@ -95,21 +96,22 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
if (t->incoming_stream_id != 0) {
if (s != NULL) {
- grpc_chttp2_flowctl_recv_stream_update(
- &t->flow_control, &s->flow_control, received_update);
+ s->flow_control->RecvUpdate(received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
- grpc_chttp2_become_writable(
- exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
- "stream.read_flow_control");
+ grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE);
}
}
} else {
- bool was_zero = t->flow_control.remote_window <= 0;
- grpc_chttp2_flowctl_recv_transport_update(&t->flow_control,
- received_update);
- bool is_zero = t->flow_control.remote_window <= 0;
+ bool was_zero = t->flow_control->remote_window() <= 0;
+ t->flow_control->RecvUpdate(received_update);
+ bool is_zero = t->flow_control->remote_window() <= 0;
if (was_zero && !is_zero) {
- grpc_chttp2_initiate_write(exec_ctx, t, "new_global_flow_control");
+ grpc_chttp2_initiate_write(
+ exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED);
}
}
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index 698da4e351..400f9f5398 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
uint8_t byte;
uint8_t is_connection_update;
@@ -39,4 +43,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_slice slice, int is_last);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index a0e748e7b1..0ea50e394b 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -33,6 +33,7 @@
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/metadata.h"
@@ -51,10 +52,12 @@
#define MAX_DECODER_SPACE_USAGE 512
static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL};
-static const grpc_slice terminal_slice = {&terminal_slice_refcount,
- .data.refcounted = {0, 0}};
+static const grpc_slice terminal_slice = {
+ &terminal_slice_refcount, /* refcount */
+ {{0, 0}} /* data.refcounted */
+};
-extern grpc_tracer_flag grpc_http_trace;
+extern "C" grpc_tracer_flag grpc_http_trace;
typedef struct {
int is_first_frame;
@@ -175,24 +178,19 @@ static void evict_entry(grpc_chttp2_hpack_compressor *c) {
c->table_elems--;
}
-/* add an element to the decoder table */
-static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
- grpc_mdelem elem) {
- GPR_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
-
- uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
- uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
- uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
+// Reserve space in the table for the new element, evicting entries if needed.
+// Returns the new index of the element, or 0 to indicate that the element
+// should not be added to the table.
+static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor *c,
+ size_t elem_size) {
uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
- size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem);
-
GPR_ASSERT(elem_size < 65536);
if (elem_size > c->max_table_size) {
while (c->table_size > 0) {
evict_entry(c);
}
- return;
+ return 0;
}
/* Reserve space for this element in the remote table: if this overflows
@@ -206,37 +204,26 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
c->table_size = (uint16_t)(c->table_size + elem_size);
c->table_elems++;
- /* Store this element into {entries,indices}_elem */
- if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem)) {
- /* already there: update with new index */
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)],
- elem)) {
- /* already there (cuckoo): update with new index */
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) {
- /* not there, but a free element: add */
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) {
- /* not there (cuckoo), but a free element: add */
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
- } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
- /* not there: replace oldest */
- GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
- c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
- } else {
- /* not there: replace oldest */
- GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
- c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ return new_index;
+}
+
+/* dummy function */
+static void add_nothing(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c, grpc_mdelem elem,
+ size_t elem_size) {}
+
+// Add a key to the dynamic table. Both the key and the value will be added
+// to the table at the decoder.
+static void add_key_with_index(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem elem, uint32_t new_index) {
+ if (new_index == 0) {
+ return;
}
- /* do exactly the same for the key (so we can find by that again too) */
+ uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
+ /* Store the key into {entries,indices}_keys */
if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)],
GRPC_MDKEY(elem))) {
c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
@@ -269,8 +256,67 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
}
}
-static void emit_indexed(grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
+/* add an element to the decoder table */
+static void add_elem_with_index(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem elem, uint32_t new_index) {
+ if (new_index == 0) {
+ return;
+ }
+ GPR_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
+
+ uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
+ uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
+ uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
+
+ /* Store this element into {entries,indices}_elem */
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem)) {
+ /* already there: update with new index */
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)],
+ elem)) {
+ /* already there (cuckoo): update with new index */
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) {
+ /* not there, but a free element: add */
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) {
+ /* not there (cuckoo), but a free element: add */
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
+ /* not there: replace oldest */
+ GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
+ } else {
+ /* not there: replace oldest */
+ GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
+ c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
+ }
+
+ add_key_with_index(exec_ctx, c, elem, new_index);
+}
+
+static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem elem, size_t elem_size) {
+ uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
+ add_elem_with_index(exec_ctx, c, elem, new_index);
+}
+
+static void add_key(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
+ grpc_mdelem elem, size_t elem_size) {
+ uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
+ add_key_with_index(exec_ctx, c, elem, new_index);
+}
+
+static void emit_indexed(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx);
uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
len);
@@ -282,30 +328,31 @@ typedef struct {
bool insert_null_before_wire_value;
} wire_value;
-static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
+static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem,
+ bool true_binary_enabled) {
+ wire_value wire_val;
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
if (true_binary_enabled) {
- return (wire_value){
- .huffman_prefix = 0x00,
- .insert_null_before_wire_value = true,
- .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx);
+ wire_val.huffman_prefix = 0x00;
+ wire_val.insert_null_before_wire_value = true;
+ wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
+
} else {
- return (wire_value){
- .huffman_prefix = 0x80,
- .insert_null_before_wire_value = false,
- .data = grpc_chttp2_base64_encode_and_huffman_compress(
- GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx);
+ wire_val.huffman_prefix = 0x80;
+ wire_val.insert_null_before_wire_value = false;
+ wire_val.data =
+ grpc_chttp2_base64_encode_and_huffman_compress(GRPC_MDVALUE(elem));
}
} else {
/* TODO(ctiller): opportunistically compress non-binary headers */
- return (wire_value){
- .huffman_prefix = 0x00,
- .insert_null_before_wire_value = false,
- .data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)),
- };
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
+ wire_val.huffman_prefix = 0x00;
+ wire_val.insert_null_before_wire_value = false;
+ wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
}
+ return wire_val;
}
static size_t wire_value_length(wire_value v) {
@@ -317,11 +364,14 @@ static void add_wire_value(framer_state *st, wire_value v) {
add_header_data(st, v.data);
}
-static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@@ -333,11 +383,14 @@ static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
uint32_t key_index, grpc_mdelem elem,
framer_state *st) {
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
size_t len_val = wire_value_length(value);
uint32_t len_val_len;
GPR_ASSERT(len_val <= UINT32_MAX);
@@ -349,10 +402,16 @@ static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem elem, framer_state *st) {
+static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
+ uint32_t unused_index, grpc_mdelem elem,
+ framer_state *st) {
+ GPR_ASSERT(unused_index == 0);
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx);
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -367,10 +426,16 @@ static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor *c,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
- grpc_mdelem elem, framer_state *st) {
+static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_hpack_compressor *c,
+ uint32_t unused_index, grpc_mdelem elem,
+ framer_state *st) {
+ GPR_ASSERT(unused_index == 0);
+ GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx);
+ GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
- wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
+ wire_value value =
+ get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
uint32_t len_val = (uint32_t)wire_value_length(value);
uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -410,9 +475,14 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
"Reserved header (colon-prefixed) happening after regular ones.");
}
- if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(elem)) {
+ if (GRPC_TRACER_ON(grpc_http_trace)) {
char *k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
- char *v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
+ char *v = NULL;
+ if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
+ v = grpc_dump_slice(GRPC_MDVALUE(elem), GPR_DUMP_HEX);
+ } else {
+ v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
+ }
gpr_log(
GPR_DEBUG,
"Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
@@ -422,64 +492,70 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
gpr_free(k);
gpr_free(v);
}
- if (!GRPC_MDELEM_IS_INTERNED(elem)) {
- emit_lithdr_noidx_v(c, elem, st);
+
+ bool elem_interned = GRPC_MDELEM_IS_INTERNED(elem);
+ bool key_interned = elem_interned || grpc_slice_is_interned(GRPC_MDKEY(elem));
+
+ // Key is not interned, emit literals.
+ if (!key_interned) {
+ emit_lithdr_noidx_v(exec_ctx, c, 0, elem, st);
return;
}
- uint32_t key_hash;
- uint32_t value_hash;
- uint32_t elem_hash;
- size_t decoder_space_usage;
- uint32_t indices_key;
- int should_add_elem;
+ uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
+ uint32_t elem_hash = 0;
- key_hash = grpc_slice_hash(GRPC_MDKEY(elem));
- value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
- elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
+ if (elem_interned) {
+ uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem));
+ elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash);
- inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
+ inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum,
+ c->filter_elems);
- /* is this elem currently in the decoders table? */
+ /* is this elem currently in the decoders table? */
- if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
- c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
- /* HIT: complete element (first cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
- st);
- return;
- }
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
+ c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
+ /* HIT: complete element (first cuckoo hash) */
+ emit_indexed(exec_ctx, c,
+ dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), st);
+ return;
+ }
- if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
- c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
- /* HIT: complete element (second cuckoo hash) */
- emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
- st);
- return;
+ if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
+ c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
+ /* HIT: complete element (second cuckoo hash) */
+ emit_indexed(exec_ctx, c,
+ dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), st);
+ return;
+ }
}
+ uint32_t indices_key;
+
/* should this elem be in the table? */
- decoder_space_usage = grpc_mdelem_get_size_in_hpack_table(elem);
- should_add_elem = decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
- c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
- c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
+ size_t decoder_space_usage =
+ grpc_mdelem_get_size_in_hpack_table(elem, st->use_true_binary_metadata);
+ bool should_add_elem = elem_interned &&
+ decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
+ c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
+ c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
+ void (*maybe_add)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *,
+ grpc_mdelem, size_t) =
+ should_add_elem ? add_elem : add_nothing;
+ void (*emit)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *, uint32_t,
+ grpc_mdelem, framer_state *) =
+ should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;
/* no hits for the elem... maybe there's a key? */
-
indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)],
GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
- if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- add_elem(exec_ctx, c, elem);
- return;
- } else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return;
- }
- GPR_UNREACHABLE_CODE(return );
+ emit(exec_ctx, c, dynidx(c, indices_key), elem, st);
+ maybe_add(exec_ctx, c, elem, decoder_space_usage);
+ return;
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
@@ -487,40 +563,32 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
GRPC_MDKEY(elem)) &&
indices_key > c->tail_remote_index) {
/* HIT: key (first cuckoo hash) */
- if (should_add_elem) {
- emit_lithdr_incidx(c, dynidx(c, indices_key), elem, st);
- add_elem(exec_ctx, c, elem);
- return;
- } else {
- emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
- return;
- }
- GPR_UNREACHABLE_CODE(return );
+ emit(exec_ctx, c, dynidx(c, indices_key), elem, st);
+ maybe_add(exec_ctx, c, elem, decoder_space_usage);
+ return;
}
/* no elem, key in the table... fall back to literal emission */
-
- if (should_add_elem) {
- emit_lithdr_incidx_v(c, elem, st);
- add_elem(exec_ctx, c, elem);
- return;
- } else {
- emit_lithdr_noidx_v(c, elem, st);
- return;
- }
- GPR_UNREACHABLE_CODE(return );
+ bool should_add_key =
+ !elem_interned && decoder_space_usage < MAX_DECODER_SPACE_USAGE;
+ emit = (should_add_elem || should_add_key) ? emit_lithdr_incidx_v
+ : emit_lithdr_noidx_v;
+ maybe_add =
+ should_add_elem ? add_elem : (should_add_key ? add_key : add_nothing);
+ emit(exec_ctx, c, 0, elem, st);
+ maybe_add(exec_ctx, c, elem, decoder_space_usage);
}
#define STRLEN_LIT(x) (sizeof(x) - 1)
#define TIMEOUT_KEY "grpc-timeout"
static void deadline_enc(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
+ grpc_chttp2_hpack_compressor *c, grpc_millis deadline,
framer_state *st) {
char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_mdelem mdelem;
- grpc_http2_encode_timeout(
- gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
+ grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx),
+ timeout_str);
mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT,
grpc_slice_from_copied_string(timeout_str));
hpack_enc(exec_ctx, c, mdelem, st);
@@ -536,7 +604,7 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
c->max_table_elems = c->cap_table_elems;
c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->table_elem_size =
- gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ (uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
@@ -564,7 +632,8 @@ void grpc_chttp2_hpack_compressor_set_max_usable_size(
}
static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
- uint16_t *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
+ uint16_t *table_elem_size =
+ (uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint32_t i;
memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
@@ -639,8 +708,8 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
- gpr_timespec deadline = metadata->deadline;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
+ grpc_millis deadline = metadata->deadline;
+ if (deadline != GRPC_MILLIS_INF_FUTURE) {
deadline_enc(exec_ctx, c, deadline, &st);
}
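The reworked hpack_enc() above replaces the duplicated "emit, then maybe add to the table" branches with a pair of function pointers chosen once per element. A minimal sketch of that dispatch shape with hypothetical emit/add functions (the real signatures also carry the exec_ctx, compressor and framer_state):

  #include <cstddef>
  #include <cstdio>

  static void emit_with_index(int idx) { std::printf("lithdr incidx %d\n", idx); }
  static void emit_literal(int idx) { std::printf("lithdr noidx %d\n", idx); }
  static void add_to_table(size_t sz) { std::printf("add %zu bytes\n", sz); }
  static void add_nothing(size_t) {}

  void sketch_encode_one(bool should_add_elem, int key_index, size_t elem_size) {
    void (*emit)(int) = should_add_elem ? emit_with_index : emit_literal;
    void (*maybe_add)(size_t) = should_add_elem ? add_to_table : add_nothing;
    emit(key_index);       // always emit something for the header
    maybe_add(elem_size);  // touch the dynamic table only when it pays off
  }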
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 271192f894..16316b63f7 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -34,6 +34,10 @@
/* maximum table size we'll actually use */
#define GRPC_CHTTP2_HPACKC_MAX_TABLE_SIZE (1024 * 1024)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
uint32_t filter_elems_sum;
uint32_t max_table_size;
@@ -91,4 +95,8 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
const grpc_encode_header_options *options,
grpc_slice_buffer *outbuf);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */
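Two smaller C++-migration details in the hpack_encoder.cc hunks above are worth noting: the old terminal_slice definition used C99 designated initializers and the old get_wire_value returned compound literals, neither of which is portable C++ (designated initializers only arrived, in restricted form, in C++20; compound literals remain a compiler extension). Both are rewritten as positional initialization plus plain member assignment. A compilable toy illustration with a hypothetical struct:

  #include <cstdint>

  struct sketch_wire_value {
    uint8_t huffman_prefix;
    bool insert_null_before_wire_value;
  };

  sketch_wire_value make_value_cpp_friendly(bool true_binary) {
    // C99 version (a compound literal with designated initializers),
    // rejected by C++ compilers:
    //   return (sketch_wire_value){.huffman_prefix = 0x00,
    //                              .insert_null_before_wire_value = true};
    sketch_wire_value v;
    v.huffman_prefix = true_binary ? 0x00 : 0x80;
    v.insert_null_before_wire_value = true_binary;
    return v;
  }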
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index c21d76ba71..7c17229122 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -30,8 +30,10 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/http2_errors.h"
@@ -649,9 +651,14 @@ static const uint8_t inverse_base64[256] = {
/* emission helpers */
static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
grpc_mdelem md, int add_to_table) {
- if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(md)) {
+ if (GRPC_TRACER_ON(grpc_http_trace)) {
char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char *v = NULL;
+ if (grpc_is_binary_header(GRPC_MDKEY(md))) {
+ v = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX);
+ } else {
+ v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ }
gpr_log(
GPR_DEBUG,
"Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
@@ -777,8 +784,7 @@ static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
return parse_stream_dep1(exec_ctx, p, cur + 1, end);
}
-/* emit an indexed field; for now just logs it to console; jumps to
- begin the next field on completion */
+/* emit an indexed field; jumps to begin the next field on completion */
static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
@@ -792,6 +798,7 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
GRPC_MDELEM_REF(md);
+ GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
grpc_error *err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
return parse_begin(exec_ctx, p, cur, end);
@@ -820,14 +827,14 @@ static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
return parse_value0(exec_ctx, p, cur + 1, end);
}
-/* finish a literal header with incremental indexing: just log, and jump to '
- begin */
+/* finish a literal header with incremental indexing */
static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -842,6 +849,7 @@ static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -898,6 +906,7 @@ static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -912,6 +921,7 @@ static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -968,6 +978,7 @@ static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
const uint8_t *end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
@@ -982,6 +993,7 @@ static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
const uint8_t *cur,
const uint8_t *end) {
+ GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx);
grpc_error *err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
@@ -1284,7 +1296,7 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
str->data.copied.str =
- gpr_realloc(str->data.copied.str, str->data.copied.capacity);
+ (char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
memcpy(str->data.copied.str + str->data.copied.length, data, length);
GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
@@ -1310,9 +1322,11 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
/* 'true-binary' case */
++cur;
p->binary = NOT_BINARY;
+ GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx);
append_bytes(str, cur, (size_t)(end - cur));
return GRPC_ERROR_NONE;
}
+ GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx);
/* fallthrough */
b64_byte0:
case B64_BYTE0:
@@ -1510,6 +1524,7 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser_string *str) {
if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
p->current_slice_refcount != NULL) {
+ GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
str->copied = false;
str->data.referenced.refcount = p->current_slice_refcount;
str->data.referenced.data.refcounted.bytes = (uint8_t *)cur;
@@ -1523,6 +1538,20 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
p->parsing.str = str;
p->huff_state = 0;
p->binary = binary;
+ switch (p->binary) {
+ case NOT_BINARY:
+ if (p->huff) {
+ GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx);
+ } else {
+ GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
+ }
+ break;
+ case BINARY_BEGIN:
+ /* stats incremented later: we don't yet know whether it's true binary */
+ break;
+ default:
+ abort();
+ }
return parse_string(exec_ctx, p, cur, end);
}
@@ -1643,13 +1672,14 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
- grpc_chttp2_stream *s = sp;
+ grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
- grpc_chttp2_initiate_write(exec_ctx, t, "force_rst_stream");
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
@@ -1659,16 +1689,12 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_metadata_batch *initial_metadata) {
- if (initial_metadata->idx.named.content_encoding != NULL) {
- grpc_slice content_encoding =
- GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md);
- if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
- if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
- s->stream_compression_recv_enabled = true;
- s->decompressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
- grpc_slice_buffer_init(s->decompressed_data_buffer);
- }
- }
+ if (initial_metadata->idx.named.content_encoding == NULL ||
+ grpc_stream_compression_method_parse(
+ GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false,
+ &s->stream_decompression_method) == 0) {
+ s->stream_decompression_method =
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
}
}
@@ -1677,7 +1703,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
- grpc_chttp2_hpack_parser *parser = hpack_parser;
+ grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 8fbc6a602b..52014175a0 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -27,6 +27,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
@@ -111,4 +115,8 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.cc
index 944d778011..82c284b36e 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.cc
@@ -28,7 +28,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/support/murmur_hash.h"
-extern grpc_tracer_flag grpc_http_trace;
+extern "C" grpc_tracer_flag grpc_http_trace;
static struct {
const char *key;
@@ -173,7 +173,7 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
tbl->max_entries = tbl->cap_entries =
entries_for_bytes(tbl->current_table_bytes);
- tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
@@ -228,7 +228,7 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
- grpc_mdelem *ents = gpr_malloc(sizeof(*ents) * new_cap);
+ grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.h b/src/core/ext/transport/chttp2/transport/hpack_table.h
index 2cf8f68506..a3ce2730a8 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* HPACK header table */
/* last index in the static table */
@@ -94,4 +98,8 @@ typedef struct {
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
const grpc_chttp2_hptbl *tbl, grpc_mdelem md);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H */
diff --git a/src/core/ext/transport/chttp2/transport/http2_settings.c b/src/core/ext/transport/chttp2/transport/http2_settings.cc
index 46b7c0c49c..46b7c0c49c 100644
--- a/src/core/ext/transport/chttp2/transport/http2_settings.c
+++ b/src/core/ext/transport/chttp2/transport/http2_settings.cc
diff --git a/src/core/ext/transport/chttp2/transport/http2_settings.h b/src/core/ext/transport/chttp2/transport/http2_settings.h
index 706dfc3139..0f76106dce 100644
--- a/src/core/ext/transport/chttp2/transport/http2_settings.h
+++ b/src/core/ext/transport/chttp2/transport/http2_settings.h
@@ -35,6 +35,10 @@ typedef enum {
} grpc_chttp2_setting_id;
#define GRPC_CHTTP2_NUM_SETTINGS 7
+
+#ifdef __cplusplus
+extern "C" {
+#endif
extern const uint16_t grpc_setting_id_to_wire_id[];
bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);
@@ -56,4 +60,8 @@ typedef struct {
extern const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/huffsyms.c b/src/core/ext/transport/chttp2/transport/huffsyms.cc
index f28d8cc30a..f28d8cc30a 100644
--- a/src/core/ext/transport/chttp2/transport/huffsyms.c
+++ b/src/core/ext/transport/chttp2/transport/huffsyms.cc
diff --git a/src/core/ext/transport/chttp2/transport/huffsyms.h b/src/core/ext/transport/chttp2/transport/huffsyms.h
index 2e2a5dacae..4002706bc0 100644
--- a/src/core/ext/transport/chttp2/transport/huffsyms.h
+++ b/src/core/ext/transport/chttp2/transport/huffsyms.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H
#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* HPACK static huffman table */
#define GRPC_CHTTP2_NUM_HUFFSYMS 257
@@ -30,4 +34,8 @@ typedef struct {
extern const grpc_chttp2_huffsym grpc_chttp2_huffsyms[GRPC_CHTTP2_NUM_HUFFSYMS];
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H */
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index cf0a9ca920..187ce0ea87 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -29,7 +29,7 @@ void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
buffer->arena = arena;
grpc_metadata_batch_init(&buffer->batch);
- buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
@@ -42,8 +42,9 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
- exec_ctx, &buffer->batch,
- gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem);
+ exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
+ buffer->arena, sizeof(grpc_linked_mdelem)),
+ elem);
}
grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
@@ -61,7 +62,7 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline) {
buffer->batch.deadline = deadline;
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index a951d8764c..a0e01f2c4d 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -21,6 +21,10 @@
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
gpr_arena *arena;
grpc_metadata_batch batch;
@@ -43,6 +47,10 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
+ grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */
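The deadline plumbing above switches from gpr_timespec to the integer grpc_millis type, and parsing.cc later in this diff makes the matching change for the grpc-timeout header: the absolute deadline becomes grpc_exec_ctx_now(exec_ctx) + timeout, with the parsed relative value cached on interned metadata elements and GRPC_MILLIS_INF_FUTURE used as the "no deadline" sentinel. A rough, self-contained sketch of the arithmetic, assuming a millisecond clock; the helper below is hypothetical, not a gRPC function:

#include <cstdint>
#include <limits>

typedef int64_t grpc_millis;
static const grpc_millis GRPC_MILLIS_INF_FUTURE =
    std::numeric_limits<int64_t>::max();

/* Hypothetical helper: turn a relative grpc-timeout value into an absolute
   deadline, saturating at the sentinel instead of overflowing near it. */
static grpc_millis deadline_from_timeout(grpc_millis now, grpc_millis timeout) {
  if (timeout == GRPC_MILLIS_INF_FUTURE) return GRPC_MILLIS_INF_FUTURE;
  if (now > GRPC_MILLIS_INF_FUTURE - timeout) return GRPC_MILLIS_INF_FUTURE;
  return now + timeout;
}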
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 3c41a8958f..9e0796e820 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -22,6 +22,7 @@
#include <assert.h>
#include <stdbool.h>
+#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame.h"
#include "src/core/ext/transport/chttp2/transport/frame_data.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
@@ -37,11 +38,14 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/transport/bdp_estimator.h"
+#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/transport/connectivity_state.h"
-#include "src/core/lib/transport/pid_controller.h"
#include "src/core/lib/transport/transport_impl.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* streams are kept in various linked lists depending on what things need to
happen to them... this enum labels each list */
typedef enum {
@@ -62,12 +66,6 @@ typedef enum {
} grpc_chttp2_write_state;
typedef enum {
- GRPC_CHTTP2_PING_ON_NEXT_WRITE = 0,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE,
- GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */
-} grpc_chttp2_ping_type;
-
-typedef enum {
GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY,
GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT,
} grpc_chttp2_optimization_target;
@@ -79,27 +77,53 @@ typedef enum {
GRPC_CHTTP2_PCL_COUNT /* must be last */
} grpc_chttp2_ping_closure_list;
+typedef enum {
+ GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE,
+ GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA,
+ GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS,
+ GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT,
+ GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM,
+ GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API,
+ GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL,
+ GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING,
+ GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
+ GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING,
+ GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED,
+ GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE,
+ GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM,
+} grpc_chttp2_initiate_write_reason;
+
+const char *grpc_chttp2_initiate_write_reason_string(
+ grpc_chttp2_initiate_write_reason reason);
+
typedef struct {
grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT];
uint64_t inflight_id;
} grpc_chttp2_ping_queue;
typedef struct {
- gpr_timespec min_time_between_pings;
int max_pings_without_data;
int max_ping_strikes;
- gpr_timespec min_ping_interval_without_data;
+ grpc_millis min_sent_ping_interval_without_data;
+ grpc_millis min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {
- gpr_timespec last_ping_sent_time;
+ grpc_millis last_ping_sent_time;
int pings_before_data_required;
grpc_timer delayed_ping_timer;
bool is_delayed_ping_timer_set;
} grpc_chttp2_repeated_ping_state;
typedef struct {
- gpr_timespec last_ping_recv_time;
+ grpc_millis last_ping_recv_time;
int ping_strikes;
} grpc_chttp2_server_ping_recv_state;
@@ -213,45 +237,6 @@ typedef enum {
GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED,
} grpc_chttp2_keepalive_state;
-typedef struct {
- /** initial window change. This is tracked as we parse settings frames from
- * the remote peer. If there is a positive delta, then we will make all
- * streams readable since they may have become unstalled */
- int64_t initial_window_update;
-
- /** Our bookkeeping for the remote peer's available window */
- int64_t remote_window;
-
- /** calculating what we should give for local window:
- we track the total amount of flow control over initial window size
- across all streams: this is data that we want to receive right now (it
- has an outstanding read)
- and the total amount of flow control under initial window size across all
- streams: this is data we've read early
- we want to adjust incoming_window such that:
- incoming_window = total_over - max(bdp - total_under, 0) */
- int64_t announced_stream_total_over_incoming_window;
- int64_t announced_stream_total_under_incoming_window;
-
- /** This is out window according to what we have sent to our remote peer. The
- * difference between this and target window is what we use to decide when
- * to send WINDOW_UPDATE frames. */
- int64_t announced_window;
-
- /** should we probe bdp? */
- bool enable_bdp_probe;
-
- /* bdp estimation */
- grpc_bdp_estimator bdp_estimator;
-
- /* pid controller */
- grpc_pid_controller pid_controller;
- gpr_timespec last_pid_update;
-
- // pointer back to transport for tracing
- const grpc_chttp2_transport *t;
-} grpc_chttp2_transport_flowctl;
-
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
@@ -262,11 +247,15 @@ struct grpc_chttp2_transport {
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
+ /** is this the first write in a series of writes?
+ set when we initiate writing from idle, cleared when we
+ initiate writing from writing+more */
+ bool is_first_write_in_batch;
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
- uint8_t closed;
+ grpc_error *closed_with_error;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
@@ -308,7 +297,7 @@ struct grpc_chttp2_transport {
/** hpack encoding */
grpc_chttp2_hpack_compressor hpack_compressor;
/** is this a client? */
- uint8_t is_client;
+ bool is_client;
/** data to write next write */
grpc_slice_buffer qbuf;
@@ -318,14 +307,14 @@ struct grpc_chttp2_transport {
uint32_t write_buffer_size;
/** have we seen a goaway */
- uint8_t seen_goaway;
+ bool seen_goaway;
/** have we sent a goaway */
grpc_chttp2_sent_goaway_state sent_goaway_state;
/** are the local settings dirty and need to be sent? */
- uint8_t dirtied_local_settings;
+ bool dirtied_local_settings;
/** have local settings been sent? */
- uint8_t sent_local_settings;
+ bool sent_local_settings;
/** bitmask of setting indexes to send out */
uint32_t force_send_settings;
/** settings values */
@@ -339,7 +328,7 @@ struct grpc_chttp2_transport {
uint32_t last_new_stream_id;
/** ping queues for various ping insertion points */
- grpc_chttp2_ping_queue ping_queues[GRPC_CHTTP2_PING_TYPE_COUNT];
+ grpc_chttp2_ping_queue ping_queue;
grpc_chttp2_repeated_ping_policy ping_policy;
grpc_chttp2_repeated_ping_state ping_state;
uint64_t ping_ctr; /* unique id for pings */
@@ -363,7 +352,12 @@ struct grpc_chttp2_transport {
/** parser for goaway frames */
grpc_chttp2_goaway_parser goaway_parser;
- grpc_chttp2_transport_flowctl flow_control;
+ grpc_core::ManualConstructor<grpc_core::chttp2::TransportFlowControl>
+ flow_control;
+ /** initial window change. This is tracked as we parse settings frames from
+ * the remote peer. If there is a positive delta, then we will make all
+ * streams readable since they may have become unstalled */
+ int64_t initial_window_update = 0;
/* deframing */
grpc_chttp2_deframe_transport_state deframe_state;
@@ -390,6 +384,7 @@ struct grpc_chttp2_transport {
grpc_chttp2_write_cb *write_cb_pool;
/* bdp estimator */
+ grpc_closure next_bdp_ping_timer_expired_locked;
grpc_closure start_bdp_ping_locked;
grpc_closure finish_bdp_ping_locked;
@@ -410,6 +405,10 @@ struct grpc_chttp2_transport {
/** destructive cleanup closure */
grpc_closure destructive_reclaimer_locked;
+ /* next bdp ping timer */
+ bool have_next_bdp_ping_timer;
+ grpc_timer next_bdp_ping_timer;
+
/* keep-alive ping support */
/** Closure to initialize a keepalive ping */
grpc_closure init_keepalive_ping_locked;
@@ -424,9 +423,9 @@ struct grpc_chttp2_transport {
/** watchdog to kill the transport when waiting for the keepalive ping */
grpc_timer keepalive_watchdog_timer;
/** time duration in between pings */
- gpr_timespec keepalive_time;
+ grpc_millis keepalive_time;
/** grace period for a ping to complete before watchdog kicks in */
- gpr_timespec keepalive_timeout;
+ grpc_millis keepalive_timeout;
/** if keepalive pings are allowed when there's no outstanding streams */
bool keepalive_permit_without_calls;
/** keep-alive state machine state */
@@ -440,25 +439,6 @@ typedef enum {
GPRC_METADATA_PUBLISHED_AT_CLOSE
} grpc_published_metadata_method;
-typedef struct {
- /** window available for us to send to peer, over or under the initial window
- * size of the transport... ie:
- * remote_window = remote_window_delta + transport.initial_window_size */
- int64_t remote_window_delta;
-
- /** window available for peer to send to us (as a delta on
- * transport.initial_window_size)
- * local_window = local_window_delta + transport.initial_window_size */
- int64_t local_window_delta;
-
- /** window available for peer to send to us over this stream that we have
- * announced to the peer */
- int64_t announced_window_delta;
-
- // read only pointer back to stream for data
- const grpc_chttp2_stream *s;
-} grpc_chttp2_stream_flowctl;
-
struct grpc_chttp2_stream {
grpc_chttp2_transport *t;
grpc_stream_refcount *refcount;
@@ -483,6 +463,7 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
+ int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@@ -509,6 +490,8 @@ struct grpc_chttp2_stream {
/** Are we buffering writes on this stream? If yes, we won't become writable
until there's enough queued up in the flow_controlled_buffer */
bool write_buffering;
+ /** Has trailing metadata been received. */
+ bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */
grpc_error *read_closed_error;
@@ -532,7 +515,7 @@ struct grpc_chttp2_stream {
grpc_error *byte_stream_error; /* protected by t combiner */
bool received_last_frame; /* protected by t combiner */
- gpr_timespec deadline;
+ grpc_millis deadline;
/** saw some stream level error */
grpc_error *forced_close_error;
@@ -549,33 +532,37 @@ struct grpc_chttp2_stream {
bool sent_initial_metadata;
bool sent_trailing_metadata;
- grpc_chttp2_stream_flowctl flow_control;
+ grpc_core::ManualConstructor<grpc_core::chttp2::StreamFlowControl>
+ flow_control;
grpc_slice_buffer flow_controlled_buffer;
+ grpc_chttp2_write_cb *on_flow_controlled_cbs;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
- /** Whether stream compression send is enabled */
- bool stream_compression_recv_enabled;
- /** Whether stream compression recv is enabled */
- bool stream_compression_send_enabled;
- /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
- */
- bool unprocessed_incoming_frames_decompressed;
+ /* Stream compression method to be used. */
+ grpc_stream_compression_method stream_compression_method;
+ /* Stream decompression method to be used. */
+ grpc_stream_compression_method stream_decompression_method;
/** Stream compression decompress context */
grpc_stream_compression_context *stream_decompression_ctx;
/** Stream compression compress context */
grpc_stream_compression_context *stream_compression_ctx;
/** Buffer storing data that is compressed but not sent */
- grpc_slice_buffer *compressed_data_buffer;
+ grpc_slice_buffer compressed_data_buffer;
/** Amount of uncompressed bytes sent out when compressed_data_buffer is
* emptied */
size_t uncompressed_data_size;
/** Temporary buffer storing decompressed data */
- grpc_slice_buffer *decompressed_data_buffer;
+ grpc_slice_buffer decompressed_data_buffer;
+ /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
+ */
+ bool unprocessed_incoming_frames_decompressed;
+ /** gRPC header bytes that are already decompressed */
+ size_t decompressed_header_bytes;
};
/** Transport writing call flow:
@@ -591,12 +578,16 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason);
+ grpc_chttp2_transport *t,
+ grpc_chttp2_initiate_write_reason reason);
-typedef enum {
- GRPC_CHTTP2_NOTHING_TO_WRITE,
- GRPC_CHTTP2_PARTIAL_WRITE,
- GRPC_CHTTP2_FULL_WRITE,
+typedef struct {
+ /** are we writing? */
+ bool writing;
+ /** if writing: was it a complete flush (false) or a partial flush (true) */
+ bool partial;
+ /** did we queue any completions as part of beginning the write */
+ bool early_results_scheduled;
} grpc_chttp2_begin_write_result;
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
@@ -616,8 +607,8 @@ bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
returns non-zero if there was a stream available */
bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s);
-bool grpc_chttp2_list_remove_writable_stream(
- grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT;
+bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s);
@@ -653,76 +644,10 @@ bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
/********* Flow Control ***************/
-// we have sent data on the wire
-void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- int64_t size);
-
-// we have received data from the wire
-grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- int64_t incoming_frame_size);
-
-// returns an announce if we should send a transport update to our peer,
-// else returns zero
-uint32_t grpc_chttp2_flowctl_maybe_send_transport_update(
- grpc_chttp2_transport_flowctl *tfc);
-
-// returns an announce if we should send a stream update to our peer, else
-// returns zero
-uint32_t grpc_chttp2_flowctl_maybe_send_stream_update(
- grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc);
-
-// we have received a WINDOW_UPDATE frame for a transport
-void grpc_chttp2_flowctl_recv_transport_update(
- grpc_chttp2_transport_flowctl *tfc, uint32_t size);
-
-// we have received a WINDOW_UPDATE frame for a stream
-void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- uint32_t size);
-
-// the application is asking for a certain amount of bytes
-void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc,
- size_t max_size_hint,
- size_t have_already);
-
-void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl *tfc,
- grpc_chttp2_stream_flowctl *sfc);
-
-typedef enum {
- // Nothing to be done.
- GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED = 0,
- // Initiate a write to update the initial window immediately.
- GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY,
- // Push the flow control update into a send buffer, to be sent
- // out the next time a write is initiated.
- GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE,
-} grpc_chttp2_flowctl_urgency;
-
-typedef struct {
- grpc_chttp2_flowctl_urgency send_stream_update;
- grpc_chttp2_flowctl_urgency send_transport_update;
- grpc_chttp2_flowctl_urgency send_setting_update;
- uint32_t initial_window_size;
- uint32_t max_frame_size;
- bool need_ping;
-} grpc_chttp2_flowctl_action;
-
-// Reads the flow control data and returns and actionable struct that will tell
-// chttp2 exactly what it needs to do
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action(
- grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc);
-
-grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action(
- grpc_chttp2_transport_flowctl *tfc);
-
// Takes in a flow control action and performs all the needed operations.
-void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_flowctl_action action,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+void grpc_chttp2_act_on_flowctl_action(
+ grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
+ grpc_chttp2_transport *t, grpc_chttp2_stream *s);
/********* End of Flow Control ***************/
@@ -756,16 +681,6 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
-#ifndef NDEBUG
-#define GRPC_FLOW_CONTROL_IF_TRACING(stmt) \
- if (!(GRPC_TRACER_ON(grpc_flowctl_trace))) \
- ; \
- else \
- stmt
-#else
-#define GRPC_FLOW_CONTROL_IF_TRACING(stmt)
-#endif
-
#define GRPC_CHTTP2_IF_TRACING(stmt) \
if (!(GRPC_TRACER_ON(grpc_http_trace))) \
; \
@@ -838,22 +753,11 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
-typedef enum {
- /* don't initiate a transport write, but piggyback on the next one */
- GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
- /* initiate a covered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
- /* initiate an uncovered write */
- GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED
-} grpc_chttp2_stream_write_type;
-
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
-void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_chttp2_stream_write_type type,
- const char *reason);
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@@ -878,4 +782,8 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
bool is_client);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
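The header above drops the hand-rolled grpc_chttp2_transport_flowctl / grpc_chttp2_stream_flowctl structs and their free-function API in favor of C++ objects (grpc_core::chttp2::TransportFlowControl and StreamFlowControl) embedded through grpc_core::ManualConstructor, so they can live inside structs that are still allocated and zeroed with gpr_malloc/memset rather than operator new. A minimal stand-in for that wrapper, just to show the idea; the real gRPC class may differ in details:

#include <new>
#include <utility>

// Sketch of a ManualConstructor-style wrapper: storage is reserved inline,
// but construction and destruction happen explicitly, so the enclosing
// struct can keep being created and freed by C-style code.
template <typename T>
class ManualConstructorSketch {
 public:
  template <typename... Args>
  void Init(Args&&... args) { new (&space_) T(std::forward<Args>(args)...); }
  void Destroy() { get()->~T(); }
  T* get() { return reinterpret_cast<T*>(&space_); }
  T* operator->() { return get(); }

 private:
  alignas(T) unsigned char space_[sizeof(T)];
};

This is why parsing.cc below can write t->flow_control->RecvData(...) and s->flow_control->MakeAction() even though grpc_chttp2_transport and grpc_chttp2_stream remain plain C structs.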
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.cc
index 18d163ee98..efa5791d3f 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -106,7 +106,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
++cur;
- ++t->deframe_state;
+ t->deframe_state =
+ (grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state);
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -354,13 +355,15 @@ static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
grpc_error *err = GRPC_ERROR_NONE;
- err = grpc_chttp2_flowctl_recv_data(&t->flow_control,
- s == NULL ? NULL : &s->flow_control,
- t->incoming_frame_size);
- grpc_chttp2_act_on_flowctl_action(
- exec_ctx, grpc_chttp2_flowctl_get_action(
- &t->flow_control, s == NULL ? NULL : &s->flow_control),
- t, s);
+ grpc_core::chttp2::FlowControlAction action;
+ if (s == nullptr) {
+ err = t->flow_control->RecvData(t->incoming_frame_size);
+ action = t->flow_control->MakeAction();
+ } else {
+ err = s->flow_control->RecvData(t->incoming_frame_size);
+ action = s->flow_control->MakeAction();
+ }
+ grpc_chttp2_act_on_flowctl_action(exec_ctx, action, t, s);
if (err != GRPC_ERROR_NONE) {
goto error_handler;
}
@@ -382,6 +385,9 @@ error_handler:
/* t->parser = grpc_chttp2_data_parser_parse;*/
t->parser = grpc_chttp2_data_parser_parse;
t->parser_data = &s->data_parser;
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@@ -402,7 +408,7 @@ static void free_timeout(void *p) { gpr_free(p); }
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
@@ -426,25 +432,29 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
- gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
- gpr_timespec timeout;
- if (cached_timeout == NULL) {
- /* not already parsed: parse it now, and store the result away */
- cached_timeout = gpr_malloc(sizeof(gpr_timespec));
- if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
+ grpc_millis *cached_timeout =
+ static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout));
+ grpc_millis timeout;
+ if (cached_timeout != NULL) {
+ timeout = *cached_timeout;
+ } else {
+ if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
gpr_free(val);
- *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
+ timeout = GRPC_MILLIS_INF_FUTURE;
+ }
+ if (GRPC_MDELEM_IS_INTERNED(md)) {
+ /* store the result */
+ cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
+ *cached_timeout = timeout;
+ grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
- timeout = *cached_timeout;
- grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
- } else {
- timeout = *cached_timeout;
}
- grpc_chttp2_incoming_metadata_buffer_set_deadline(
- &s->metadata_buffer[0],
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout));
+ if (timeout != GRPC_MILLIS_INF_FUTURE) {
+ grpc_chttp2_incoming_metadata_buffer_set_deadline(
+ &s->metadata_buffer[0], grpc_exec_ctx_now(exec_ctx) + timeout);
+ }
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
@@ -482,7 +492,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = tp;
+ grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
@@ -557,6 +567,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
+
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == NULL) {
@@ -623,6 +637,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
*s->trailing_metadata_available = true;
}
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
} else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header;
@@ -631,6 +646,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
+ s->received_trailing_metadata = true;
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.cc
index 7cc85dea9c..9f731a397f 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -16,10 +16,32 @@
*
*/
+#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include <grpc/support/log.h>
+static const char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
+ switch (id) {
+ case GRPC_CHTTP2_LIST_WRITABLE:
+ return "writable";
+ case GRPC_CHTTP2_LIST_WRITING:
+ return "writing";
+ case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT:
+ return "stalled_by_transport";
+ case GRPC_CHTTP2_LIST_STALLED_BY_STREAM:
+ return "stalled_by_stream";
+ case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
+ return "waiting_for_concurrency";
+ case STREAM_LIST_COUNT:
+ GPR_UNREACHABLE_CODE(return "unknown");
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
+}
+
+grpc_tracer_flag grpc_trace_http2_stream_state =
+ GRPC_TRACER_INITIALIZER(false, "http2_stream_state");
+
/* core list management */
static bool stream_list_empty(grpc_chttp2_transport *t,
@@ -44,6 +66,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
s->included[id] = 0;
}
*stream = s;
+ if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
return s != 0;
}
@@ -62,6 +88,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
} else {
t->lists[id].tail = s->links[id].prev;
}
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
@@ -90,6 +120,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
}
t->lists[id].tail = s;
s->included[id] = 1;
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@@ -150,17 +184,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
- GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log(
- GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
@@ -170,23 +199,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id));
- return ret;
+ return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
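The flow-control-specific GRPC_FLOW_CONTROL_IF_TRACING logging removed above is superseded by the dedicated http2_stream_state tracer registered at the top of this file, which reports every add, remove, and pop on the stream lists. Assuming the usual tracer plumbing, it can be toggled like any other named tracer; a hedged usage sketch:

#include <grpc/grpc.h>

int main(void) {
  grpc_init();
  /* Equivalent to running with GRPC_TRACE=http2_stream_state in the
     environment; GRPC_VERBOSITY=debug is typically also needed so the
     GPR_DEBUG lines actually reach the log. */
  grpc_tracer_set_enabled("http2_stream_state", 1);
  /* ... create channels/servers; stream list transitions are now logged ... */
  grpc_shutdown();
  return 0;
}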
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.c b/src/core/ext/transport/chttp2/transport/stream_map.cc
index e2f10bc208..d6079a9a33 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.c
+++ b/src/core/ext/transport/chttp2/transport/stream_map.cc
@@ -27,8 +27,8 @@
void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
- map->keys = gpr_malloc(sizeof(uint32_t) * initial_capacity);
- map->values = gpr_malloc(sizeof(void *) * initial_capacity);
+ map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity);
+ map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity);
map->count = 0;
map->free = 0;
map->capacity = initial_capacity;
@@ -72,8 +72,10 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
/* resize when less than 25% of the table is free, because compaction
won't help much */
map->capacity = capacity = 3 * capacity / 2;
- map->keys = keys = gpr_realloc(keys, capacity * sizeof(uint32_t));
- map->values = values = gpr_realloc(values, capacity * sizeof(void *));
+ map->keys = keys =
+ (uint32_t *)gpr_realloc(keys, capacity * sizeof(uint32_t));
+ map->values = values =
+ (void **)gpr_realloc(values, capacity * sizeof(void *));
}
}
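The (uint32_t *) and (void **) casts above, like the (grpc_mdelem *) and (grpc_chttp2_hpack_parser *) casts earlier in this diff, exist because C++ (unlike C) does not implicitly convert void * to other object pointer types, so every gpr_malloc/gpr_realloc/gpr_arena_alloc result and every void * callback argument needs an explicit cast once the file is compiled as C++. A tiny illustration with hypothetical names:

#include <cstdlib>

typedef struct node { int value; struct node *next; } node;

static node *make_node(int value) {
  /* valid C, but a compile error in C++ without the cast:
       node *n = malloc(sizeof(*n));                         */
  node *n = (node *)malloc(sizeof(*n));  /* explicit cast keeps C++ happy */
  n->value = value;
  n->next = NULL;
  return n;
}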
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index 30c50ba32e..7ab6a4f5ed 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -23,6 +23,10 @@
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Data structure to map a uint32_t to a data object (represented by a void*)
Represented as a sorted array of keys, and a corresponding array of values.
@@ -65,4 +69,8 @@ void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
void *value),
void *user_data);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STREAM_MAP_H */
diff --git a/src/core/ext/transport/chttp2/transport/varint.c b/src/core/ext/transport/chttp2/transport/varint.cc
index 0d94ddcbc3..0d94ddcbc3 100644
--- a/src/core/ext/transport/chttp2/transport/varint.c
+++ b/src/core/ext/transport/chttp2/transport/varint.cc
diff --git a/src/core/ext/transport/chttp2/transport/varint.h b/src/core/ext/transport/chttp2/transport/varint.h
index 5a2b670f06..d3a9d902c4 100644
--- a/src/core/ext/transport/chttp2/transport/varint.h
+++ b/src/core/ext/transport/chttp2/transport/varint.h
@@ -21,6 +21,10 @@
#include <grpc/support/port_platform.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Helpers for hpack varint encoding */
/* length of a value that needs varint tail encoding (it's bigger than can be
@@ -57,4 +61,8 @@ void grpc_chttp2_hpack_write_varint_tail(uint32_t tail_value, uint8_t* target,
} \
} while (0)
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_VARINT_H */
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
deleted file mode 100644
index 711938b278..0000000000
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/ext/transport/chttp2/transport/internal.h"
-
-#include <limits.h>
-
-#include <grpc/support/log.h>
-
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/slice/slice_internal.h"
-#include "src/core/lib/transport/http2_errors.h"
-
-static void add_to_write_list(grpc_chttp2_write_cb **list,
- grpc_chttp2_write_cb *cb) {
- cb->next = *list;
- *list = cb;
-}
-
-static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
- grpc_error *error) {
- grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
- "finish_write_cb");
- cb->next = t->write_cb_pool;
- t->write_cb_pool = cb;
-}
-
-static void collapse_pings_from_into(grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type,
- grpc_chttp2_ping_queue *pq) {
- for (size_t i = 0; i < GRPC_CHTTP2_PCL_COUNT; i++) {
- grpc_closure_list_move(&t->ping_queues[ping_type].lists[i], &pq->lists[i]);
- }
-}
-
-static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_ping_type ping_type) {
- grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type];
- if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
- /* no ping needed: wait */
- return;
- }
- if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
- /* ping already in-flight: wait */
- if (GRPC_TRACER_ON(grpc_http_trace) ||
- GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string);
- }
- return;
- }
- if (t->ping_state.pings_before_data_required == 0 &&
- t->ping_policy.max_pings_without_data != 0) {
- /* need to send something of substance before sending a ping again */
- if (GRPC_TRACER_ON(grpc_http_trace) ||
- GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
- t->peer_string, t->ping_state.pings_before_data_required,
- t->ping_policy.max_pings_without_data);
- }
- return;
- }
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec elapsed = gpr_time_sub(now, t->ping_state.last_ping_sent_time);
- /*gpr_log(GPR_DEBUG, "elapsed:%d.%09d min:%d.%09d", (int)elapsed.tv_sec,
- elapsed.tv_nsec, (int)t->ping_policy.min_time_between_pings.tv_sec,
- (int)t->ping_policy.min_time_between_pings.tv_nsec);*/
- if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
- /* not enough elapsed time between successive pings */
- if (GRPC_TRACER_ON(grpc_http_trace) ||
- GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG,
- "Ping delayed [%p]: not enough time elapsed since last ping",
- t->peer_string);
- }
- if (!t->ping_state.is_delayed_ping_timer_set) {
- t->ping_state.is_delayed_ping_timer_set = true;
- grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
- gpr_time_add(t->ping_state.last_ping_sent_time,
- t->ping_policy.min_time_between_pings),
- &t->retry_initiate_ping_locked,
- gpr_now(GPR_CLOCK_MONOTONIC));
- }
- return;
- }
- /* coalesce equivalent pings into this one */
- switch (ping_type) {
- case GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE:
- collapse_pings_from_into(t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, pq);
- break;
- case GRPC_CHTTP2_PING_ON_NEXT_WRITE:
- break;
- case GRPC_CHTTP2_PING_TYPE_COUNT:
- GPR_UNREACHABLE_CODE(break);
- }
- pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type;
- t->ping_ctr++;
- GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
- grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
- &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
- grpc_slice_buffer_add(&t->outbuf,
- grpc_chttp2_ping_create(false, pq->inflight_id));
- t->ping_state.last_ping_sent_time = now;
- t->ping_state.pings_before_data_required -=
- (t->ping_state.pings_before_data_required != 0);
-}
-
-static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, int64_t send_bytes,
- grpc_chttp2_write_cb **list, grpc_error *error) {
- grpc_chttp2_write_cb *cb = *list;
- *list = NULL;
- s->flow_controlled_bytes_written += send_bytes;
- while (cb) {
- grpc_chttp2_write_cb *next = cb->next;
- if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
- finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
- } else {
- add_to_write_list(list, cb);
- }
- cb = next;
- }
- GRPC_ERROR_UNREF(error);
-}
-
-static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
- gpr_atm count;
- do {
- count = gpr_atm_acq_load(&r->count);
- if (count == 0) return false;
- } while (!gpr_atm_rel_cas(&r->count, count, count + 1));
- return true;
-}
-
-/* How many bytes would we like to put on the wire during a single syscall */
-static uint32_t target_write_size(grpc_chttp2_transport *t) {
- return 1024 * 1024;
-}
-
-// Returns true if initial_metadata contains only default headers.
-static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
- return initial_metadata->list.default_count == initial_metadata->list.count;
-}
-
-grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
- grpc_chttp2_stream *s;
-
- GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
-
- if (t->dirtied_local_settings && !t->sent_local_settings) {
- grpc_slice_buffer_add(
- &t->outbuf,
- grpc_chttp2_settings_create(
- t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS],
- t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
- t->force_send_settings = 0;
- t->dirtied_local_settings = 0;
- t->sent_local_settings = 1;
- }
-
- /* simple writes are queued to qbuf, and flushed here */
- grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf);
- GPR_ASSERT(t->qbuf.count == 0);
-
- grpc_chttp2_hpack_compressor_set_max_table_size(
- &t->hpack_compressor,
- t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
-
- if (t->flow_control.remote_window > 0) {
- while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) {
- if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s) &&
- stream_ref_if_not_destroyed(&s->refcount->refs)) {
- grpc_chttp2_initiate_write(exec_ctx, t, "transport.read_flow_control");
- }
- }
- }
-
- bool partial_write = false;
-
- /* for each grpc_chttp2_stream that's become writable, frame it's data
- (according to available window sizes) and add to the output buffer */
- while (true) {
- if (t->outbuf.length > target_write_size(t)) {
- partial_write = true;
- break;
- }
-
- if (!grpc_chttp2_list_pop_writable_stream(t, &s)) {
- break;
- }
-
- bool sent_initial_metadata = s->sent_initial_metadata;
- bool now_writing = false;
-
- GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t,
- t->is_client ? "CLIENT" : "SERVER", s->id,
- sent_initial_metadata, s->send_initial_metadata != NULL,
- (int)(s->flow_control.local_window_delta -
- s->flow_control.announced_window_delta)));
-
- grpc_mdelem *extra_headers_for_trailing_metadata[2];
- size_t num_extra_headers_for_trailing_metadata = 0;
-
- /* send initial metadata if it's available */
- if (!sent_initial_metadata && s->send_initial_metadata != NULL) {
- // We skip this on the server side if there is no custom initial
- // metadata, there are no messages to send, and we are also sending
- // trailing metadata. This results in a Trailers-Only response,
- // which is required for retries, as per:
- // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
- if (t->is_client || s->fetching_send_message != NULL ||
- s->flow_controlled_buffer.length != 0 ||
- s->send_trailing_metadata == NULL ||
- !is_default_initial_metadata(s->send_initial_metadata)) {
- grpc_encode_header_options hopt = {
- .stream_id = s->id,
- .is_eof = false,
- .use_true_binary_metadata =
- t->settings
- [GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0,
- .max_frame_size = t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- .stats = &s->stats.outgoing};
- grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
- s->send_initial_metadata, &hopt, &t->outbuf);
- now_writing = true;
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
- } else {
- GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
- // When sending Trailers-Only, we need to move the :status and
- // content-type headers to the trailers.
- if (s->send_initial_metadata->idx.named.status != NULL) {
- extra_headers_for_trailing_metadata
- [num_extra_headers_for_trailing_metadata++] =
- &s->send_initial_metadata->idx.named.status->md;
- }
- if (s->send_initial_metadata->idx.named.content_type != NULL) {
- extra_headers_for_trailing_metadata
- [num_extra_headers_for_trailing_metadata++] =
- &s->send_initial_metadata->idx.named.content_type->md;
- }
- }
- s->send_initial_metadata = NULL;
- s->sent_initial_metadata = true;
- sent_initial_metadata = true;
- }
- /* send any window updates */
- uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update(
- &t->flow_control, &s->flow_control);
- if (stream_announce > 0) {
- grpc_slice_buffer_add(
- &t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
- &s->stats.outgoing));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
- }
- if (sent_initial_metadata) {
- /* send any body bytes, if allowed by flow control */
- if (s->flow_controlled_buffer.length > 0 ||
- (s->stream_compression_send_enabled &&
- s->compressed_data_buffer->length > 0)) {
- uint32_t stream_remote_window = (uint32_t)GPR_MAX(
- 0,
- s->flow_control.remote_window_delta +
- (int64_t)t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
- uint32_t max_outgoing = (uint32_t)GPR_MIN(
- t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- GPR_MIN(stream_remote_window, t->flow_control.remote_window));
- if (max_outgoing > 0) {
- bool is_last_data_frame = false;
- bool is_last_frame = false;
- if (s->stream_compression_send_enabled) {
- while ((s->flow_controlled_buffer.length > 0 ||
- s->compressed_data_buffer->length > 0) &&
- max_outgoing > 0) {
- if (s->compressed_data_buffer->length > 0) {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, s->compressed_data_buffer->length);
- is_last_data_frame =
- (send_bytes == s->compressed_data_buffer->length &&
- s->flow_controlled_buffer.length == 0 &&
- s->fetching_send_message == NULL);
- is_last_frame =
- is_last_data_frame && s->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(s->send_trailing_metadata);
- grpc_chttp2_encode_data(s->id, s->compressed_data_buffer,
- send_bytes, is_last_frame,
- &s->stats.outgoing, &t->outbuf);
- grpc_chttp2_flowctl_sent_data(&t->flow_control,
- &s->flow_control, send_bytes);
- max_outgoing -= send_bytes;
- if (s->compressed_data_buffer->length == 0) {
- s->sending_bytes += s->uncompressed_data_size;
- }
- } else {
- if (s->stream_compression_ctx == NULL) {
- s->stream_compression_ctx =
- grpc_stream_compression_context_create(
- GRPC_STREAM_COMPRESSION_COMPRESS);
- }
- s->uncompressed_data_size = s->flow_controlled_buffer.length;
- GPR_ASSERT(grpc_stream_compress(
- s->stream_compression_ctx, &s->flow_controlled_buffer,
- s->compressed_data_buffer, NULL, MAX_SIZE_T,
- GRPC_STREAM_COMPRESSION_FLUSH_SYNC));
- }
- }
- } else {
- uint32_t send_bytes = (uint32_t)GPR_MIN(
- max_outgoing, s->flow_controlled_buffer.length);
- is_last_data_frame = s->fetching_send_message == NULL &&
- send_bytes == s->flow_controlled_buffer.length;
- is_last_frame =
- is_last_data_frame && s->send_trailing_metadata != NULL &&
- grpc_metadata_batch_is_empty(s->send_trailing_metadata);
- grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer,
- send_bytes, is_last_frame,
- &s->stats.outgoing, &t->outbuf);
- grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control,
- send_bytes);
- s->sending_bytes += send_bytes;
- }
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
- if (is_last_frame) {
- s->send_trailing_metadata = NULL;
- s->sent_trailing_metadata = true;
- if (!t->is_client && !s->read_closed) {
- grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(
- s->id, GRPC_HTTP2_NO_ERROR,
- &s->stats.outgoing));
- }
- }
- now_writing = true;
- if (s->flow_controlled_buffer.length > 0 ||
- (s->stream_compression_send_enabled &&
- s->compressed_data_buffer->length > 0)) {
- GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
- grpc_chttp2_list_add_writable_stream(t, s);
- }
- } else if (t->flow_control.remote_window == 0) {
- grpc_chttp2_list_add_stalled_by_transport(t, s);
- now_writing = true;
- } else if (stream_remote_window == 0) {
- grpc_chttp2_list_add_stalled_by_stream(t, s);
- now_writing = true;
- }
- }
- if (s->send_trailing_metadata != NULL &&
- s->fetching_send_message == NULL &&
- s->flow_controlled_buffer.length == 0 &&
- (!s->stream_compression_send_enabled ||
- s->compressed_data_buffer->length == 0)) {
- GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
- if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) {
- grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true,
- &s->stats.outgoing, &t->outbuf);
- } else {
- grpc_encode_header_options hopt = {
- .stream_id = s->id,
- .is_eof = true,
- .use_true_binary_metadata =
- t->settings
- [GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
- 0,
- .max_frame_size =
- t->settings[GRPC_PEER_SETTINGS]
- [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
- .stats = &s->stats.outgoing};
- grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor,
- extra_headers_for_trailing_metadata,
- num_extra_headers_for_trailing_metadata,
- s->send_trailing_metadata, &hopt,
- &t->outbuf);
- }
- s->send_trailing_metadata = NULL;
- s->sent_trailing_metadata = true;
- if (!t->is_client && !s->read_closed) {
- grpc_slice_buffer_add(
- &t->outbuf, grpc_chttp2_rst_stream_create(
- s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
- }
- now_writing = true;
- }
- }
-
- if (now_writing) {
- if (!grpc_chttp2_list_add_writing_stream(t, s)) {
- /* already in writing list: drop ref */
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
- }
- } else {
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
- }
- }
-
- uint32_t transport_announce =
- grpc_chttp2_flowctl_maybe_send_transport_update(&t->flow_control);
- if (transport_announce) {
- maybe_initiate_ping(exec_ctx, t,
- GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE);
- grpc_transport_one_way_stats throwaway_stats;
- grpc_slice_buffer_add(
- &t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
- &throwaway_stats));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
- if (!t->is_client) {
- t->ping_recv_state.last_ping_recv_time =
- gpr_inf_past(GPR_CLOCK_MONOTONIC);
- t->ping_recv_state.ping_strikes = 0;
- }
- }
-
- for (size_t i = 0; i < t->ping_ack_count; i++) {
- grpc_slice_buffer_add(&t->outbuf,
- grpc_chttp2_ping_create(1, t->ping_acks[i]));
- }
- t->ping_ack_count = 0;
-
- maybe_initiate_ping(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE);
-
- GPR_TIMER_END("grpc_chttp2_begin_write", 0);
-
- return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
- : GRPC_CHTTP2_FULL_WRITE)
- : GRPC_CHTTP2_NOTHING_TO_WRITE;
-}
-
-void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
- GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
- grpc_chttp2_stream *s;
-
- while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
- if (s->sent_initial_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_REF(error), "send_initial_metadata_finished");
- }
- if (s->sending_bytes != 0) {
- update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
- &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
- s->sending_bytes = 0;
- }
- if (s->sent_trailing_metadata) {
- grpc_chttp2_complete_closure_step(
- exec_ctx, t, s, &s->send_trailing_metadata_finished,
- GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
- grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
- GRPC_ERROR_REF(error));
- }
- GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
- }
- grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf);
- GRPC_ERROR_UNREF(error);
- GPR_TIMER_END("grpc_chttp2_end_write", 0);
-}
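The deleted writing.c above is replaced wholesale by the writing.cc that follows: the per-type ping queues collapse into a single t->ping_queue rate-limited by min_sent_ping_interval_without_data, and grpc_chttp2_begin_write now returns a small struct instead of the old three-value enum. A sketch of how a caller might read the new result type, based on the field comments in internal.h above; the code here is illustrative, not the actual transport code:

// Illustrative only: maps the struct-valued result onto the old
// NOTHING/PARTIAL/FULL enum that writing.c used to return.
struct begin_write_result {
  bool writing;                  // false == nothing to write this round
  bool partial;                  // true  == more queued data remains after this flush
  bool early_results_scheduled;  // completions were queued while framing the write
};

static const char *describe(begin_write_result r) {
  if (!r.writing) return "nothing to write";
  return r.partial ? "partial write (another write will be needed)" : "full write";
}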
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
new file mode 100644
index 0000000000..ff76a5fcdb
--- /dev/null
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -0,0 +1,639 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/ext/transport/chttp2/transport/internal.h"
+
+#include <limits.h>
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/transport/http2_errors.h"
+
+static void add_to_write_list(grpc_chttp2_write_cb **list,
+ grpc_chttp2_write_cb *cb) {
+ cb->next = *list;
+ *list = cb;
+}
+
+static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
+ grpc_error *error) {
+ grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
+ "finish_write_cb");
+ cb->next = t->write_cb_pool;
+ t->write_cb_pool = cb;
+}
+
+static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
+ grpc_chttp2_transport *t) {
+ grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
+ /* no ping needed: wait */
+ return;
+ }
+ if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
+ /* ping already in-flight: wait */
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string);
+ }
+ return;
+ }
+ if (t->ping_state.pings_before_data_required == 0 &&
+ t->ping_policy.max_pings_without_data != 0) {
+ /* need to receive something of substance before sending a ping again */
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string,
+ t->ping_state.pings_before_data_required,
+ t->ping_policy.max_pings_without_data);
+ }
+ return;
+ }
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ grpc_millis next_allowed_ping =
+ t->ping_state.last_ping_sent_time +
+ t->ping_policy.min_sent_ping_interval_without_data;
+ if (t->keepalive_permit_without_calls == 0 &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ next_allowed_ping =
+ t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
+ }
+ if (next_allowed_ping > now) {
+ /* not enough elapsed time between successive pings */
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG,
+ "%s: Ping delayed [%p]: not enough time elapsed since last ping",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string);
+ }
+ if (!t->ping_state.is_delayed_ping_timer_set) {
+ t->ping_state.is_delayed_ping_timer_set = true;
+ grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
+ next_allowed_ping, &t->retry_initiate_ping_locked);
+ }
+ return;
+ }
+ pq->inflight_id = t->ping_ctr;
+ t->ping_ctr++;
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
+ grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
+ &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
+ grpc_slice_buffer_add(&t->outbuf,
+ grpc_chttp2_ping_create(false, pq->inflight_id));
+ GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
+ t->ping_state.last_ping_sent_time = now;
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
+ t->is_client ? "CLIENT" : "SERVER", t->peer_string,
+ t->ping_state.pings_before_data_required,
+ t->ping_policy.max_pings_without_data);
+ }
+ t->ping_state.pings_before_data_required -=
+ (t->ping_state.pings_before_data_required != 0);
+}
+
+static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s, int64_t send_bytes,
+ grpc_chttp2_write_cb **list, int64_t *ctr,
+ grpc_error *error) {
+ bool sched_any = false;
+ grpc_chttp2_write_cb *cb = *list;
+ *list = NULL;
+ *ctr += send_bytes;
+ while (cb) {
+ grpc_chttp2_write_cb *next = cb->next;
+ if (cb->call_at_byte <= *ctr) {
+ sched_any = true;
+ finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
+ } else {
+ add_to_write_list(list, cb);
+ }
+ cb = next;
+ }
+ GRPC_ERROR_UNREF(error);
+ return sched_any;
+}
+
+static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ const char *staller) {
+ gpr_log(
+ GPR_DEBUG,
+ "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
+ ":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
+ t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
+ s->flow_controlled_bytes_flowed,
+ t->settings[GRPC_ACKED_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+ t->flow_control->remote_window(),
+ (uint32_t)GPR_MAX(
+ 0,
+ s->flow_control->remote_window_delta() +
+ (int64_t)t->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
+ s->flow_control->remote_window_delta());
+}
+
+static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
+ gpr_atm count;
+ do {
+ count = gpr_atm_acq_load(&r->count);
+ if (count == 0) return false;
+ } while (!gpr_atm_rel_cas(&r->count, count, count + 1));
+ return true;
+}
+
+/* How many bytes would we like to put on the wire during a single syscall */
+static uint32_t target_write_size(grpc_chttp2_transport *t) {
+ return 1024 * 1024;
+}
+
+// Returns true if initial_metadata contains only default headers.
+static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
+ return initial_metadata->list.default_count == initial_metadata->list.count;
+}
+
+namespace {
+class StreamWriteContext;
+
+class WriteContext {
+ public:
+ WriteContext(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) : t_(t) {
+ GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
+ GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
+ }
+
+ // TODO(ctiller): make this the destructor
+ void FlushStats(grpc_exec_ctx *exec_ctx) {
+ GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
+ exec_ctx, initial_metadata_writes_);
+ GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes_);
+ GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
+ exec_ctx, trailing_metadata_writes_);
+ GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, flow_control_writes_);
+ }
+
+ void FlushSettings(grpc_exec_ctx *exec_ctx) {
+ if (t_->dirtied_local_settings && !t_->sent_local_settings) {
+ grpc_slice_buffer_add(
+ &t_->outbuf, grpc_chttp2_settings_create(
+ t_->settings[GRPC_SENT_SETTINGS],
+ t_->settings[GRPC_LOCAL_SETTINGS],
+ t_->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS));
+ t_->force_send_settings = false;
+ t_->dirtied_local_settings = false;
+ t_->sent_local_settings = true;
+ GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
+ }
+ }
+
+ void FlushQueuedBuffers(grpc_exec_ctx *exec_ctx) {
+ /* simple writes are queued to qbuf, and flushed here */
+ grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
+ GPR_ASSERT(t_->qbuf.count == 0);
+ }
+
+ void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
+ uint32_t transport_announce =
+ t_->flow_control->MaybeSendUpdate(t_->outbuf.count > 0);
+ if (transport_announce) {
+ grpc_transport_one_way_stats throwaway_stats;
+ grpc_slice_buffer_add(
+ &t_->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
+ &throwaway_stats));
+ ResetPingRecvClock();
+ }
+ }
+
+ void FlushPingAcks() {
+ for (size_t i = 0; i < t_->ping_ack_count; i++) {
+ grpc_slice_buffer_add(&t_->outbuf,
+ grpc_chttp2_ping_create(true, t_->ping_acks[i]));
+ }
+ t_->ping_ack_count = 0;
+ }
+
+ void EnactHpackSettings(grpc_exec_ctx *exec_ctx) {
+ grpc_chttp2_hpack_compressor_set_max_table_size(
+ &t_->hpack_compressor,
+ t_->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
+ }
+
+ void UpdateStreamsNoLongerStalled() {
+ grpc_chttp2_stream *s;
+ while (grpc_chttp2_list_pop_stalled_by_transport(t_, &s)) {
+ if (t_->closed_with_error == GRPC_ERROR_NONE &&
+ grpc_chttp2_list_add_writable_stream(t_, s)) {
+ if (!stream_ref_if_not_destroyed(&s->refcount->refs)) {
+ grpc_chttp2_list_remove_writable_stream(t_, s);
+ }
+ }
+ }
+ }
+
+ grpc_chttp2_stream *NextStream() {
+ if (t_->outbuf.length > target_write_size(t_)) {
+ result_.partial = true;
+ return nullptr;
+ }
+
+ grpc_chttp2_stream *s;
+ if (!grpc_chttp2_list_pop_writable_stream(t_, &s)) {
+ return nullptr;
+ }
+
+ return s;
+ }
+
+ void ResetPingRecvClock() {
+ if (!t_->is_client) {
+ t_->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+ t_->ping_recv_state.ping_strikes = 0;
+ }
+ }
+
+ void IncInitialMetadataWrites() { ++initial_metadata_writes_; }
+ void IncWindowUpdateWrites() { ++flow_control_writes_; }
+ void IncMessageWrites() { ++message_writes_; }
+ void IncTrailingMetadataWrites() { ++trailing_metadata_writes_; }
+
+ void NoteScheduledResults() { result_.early_results_scheduled = true; }
+
+ grpc_chttp2_transport *transport() const { return t_; }
+
+ grpc_chttp2_begin_write_result Result() {
+ result_.writing = t_->outbuf.count > 0;
+ return result_;
+ }
+
+ private:
+ grpc_chttp2_transport *const t_;
+
+ /* stats histogram counters: we increment these throughout this function,
+ and at the end publish to the central stats histograms */
+ int flow_control_writes_ = 0;
+ int initial_metadata_writes_ = 0;
+ int trailing_metadata_writes_ = 0;
+ int message_writes_ = 0;
+ grpc_chttp2_begin_write_result result_ = {false, false, false};
+};
+
+class DataSendContext {
+ public:
+ DataSendContext(WriteContext *write_context, grpc_chttp2_transport *t,
+ grpc_chttp2_stream *s)
+ : write_context_(write_context),
+ t_(t),
+ s_(s),
+ sending_bytes_before_(s_->sending_bytes) {}
+
+ uint32_t stream_remote_window() const {
+ return (uint32_t)GPR_MAX(
+ 0, s_->flow_control->remote_window_delta() +
+ (int64_t)t_->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
+ }
+
+ uint32_t max_outgoing() const {
+ return (uint32_t)GPR_MIN(
+ t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ GPR_MIN(stream_remote_window(), t_->flow_control->remote_window()));
+ }
+
+ bool AnyOutgoing() const { return max_outgoing() != 0; }
+
+ void FlushCompressedBytes() {
+ uint32_t send_bytes =
+ (uint32_t)GPR_MIN(max_outgoing(), s_->compressed_data_buffer.length);
+ bool is_last_data_frame =
+ (send_bytes == s_->compressed_data_buffer.length &&
+ s_->flow_controlled_buffer.length == 0 &&
+ s_->fetching_send_message == NULL);
+ if (is_last_data_frame && s_->send_trailing_metadata != NULL &&
+ s_->stream_compression_ctx != NULL) {
+ if (!grpc_stream_compress(s_->stream_compression_ctx,
+ &s_->flow_controlled_buffer,
+ &s_->compressed_data_buffer, NULL, MAX_SIZE_T,
+ GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) {
+ gpr_log(GPR_ERROR, "Stream compression failed.");
+ }
+ grpc_stream_compression_context_destroy(s_->stream_compression_ctx);
+ s_->stream_compression_ctx = NULL;
+ /* After finish, bytes in s->compressed_data_buffer may be
+ * more than max_outgoing. Start another round of the current
+ * while loop so that send_bytes and is_last_data_frame are
+ * recalculated. */
+ return;
+ }
+ is_last_frame_ = is_last_data_frame && s_->send_trailing_metadata != NULL &&
+ grpc_metadata_batch_is_empty(s_->send_trailing_metadata);
+ grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes,
+ is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
+ s_->flow_control->SentData(send_bytes);
+ if (s_->compressed_data_buffer.length == 0) {
+ s_->sending_bytes += s_->uncompressed_data_size;
+ }
+ }
+
+ void CompressMoreBytes() {
+ if (s_->stream_compression_ctx == NULL) {
+ s_->stream_compression_ctx =
+ grpc_stream_compression_context_create(s_->stream_compression_method);
+ }
+ s_->uncompressed_data_size = s_->flow_controlled_buffer.length;
+ if (!grpc_stream_compress(s_->stream_compression_ctx,
+ &s_->flow_controlled_buffer,
+ &s_->compressed_data_buffer, NULL, MAX_SIZE_T,
+ GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) {
+ gpr_log(GPR_ERROR, "Stream compression failed.");
+ }
+ }
+
+ bool is_last_frame() const { return is_last_frame_; }
+
+ void CallCallbacks(grpc_exec_ctx *exec_ctx) {
+ if (update_list(exec_ctx, t_, s_,
+ (int64_t)(s_->sending_bytes - sending_bytes_before_),
+ &s_->on_flow_controlled_cbs,
+ &s_->flow_controlled_bytes_flowed, GRPC_ERROR_NONE)) {
+ write_context_->NoteScheduledResults();
+ }
+ }
+
+ private:
+ WriteContext *write_context_;
+ grpc_chttp2_transport *t_;
+ grpc_chttp2_stream *s_;
+ const size_t sending_bytes_before_;
+ bool is_last_frame_ = false;
+};
+
+class StreamWriteContext {
+ public:
+ StreamWriteContext(WriteContext *write_context, grpc_chttp2_stream *s)
+ : write_context_(write_context), t_(write_context->transport()), s_(s) {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
+ t_->is_client ? "CLIENT" : "SERVER", s->id,
+ s->sent_initial_metadata, s->send_initial_metadata != NULL,
+ (int)(s->flow_control->local_window_delta() -
+ s->flow_control->announced_window_delta())));
+ }
+
+ void FlushInitialMetadata(grpc_exec_ctx *exec_ctx) {
+ /* send initial metadata if it's available */
+ if (s_->sent_initial_metadata) return;
+ if (s_->send_initial_metadata == nullptr) return;
+
+ // We skip this on the server side if there is no custom initial
+ // metadata, there are no messages to send, and we are also sending
+ // trailing metadata. This results in a Trailers-Only response,
+ // which is required for retries, as per:
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid
+ if (!t_->is_client && s_->fetching_send_message == nullptr &&
+ s_->flow_controlled_buffer.length == 0 &&
+ s_->compressed_data_buffer.length == 0 &&
+ s_->send_trailing_metadata != nullptr &&
+ is_default_initial_metadata(s_->send_initial_metadata)) {
+ ConvertInitialMetadataToTrailingMetadata();
+ } else {
+ grpc_encode_header_options hopt = {
+ s_->id, // stream_id
+ false, // is_eof
+ t_->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
+ 0, // use_true_binary_metadata
+ t_->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], // max_frame_size
+ &s_->stats.outgoing // stats
+ };
+ grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor, NULL, 0,
+ s_->send_initial_metadata, &hopt, &t_->outbuf);
+ write_context_->ResetPingRecvClock();
+ write_context_->IncInitialMetadataWrites();
+ }
+
+ s_->send_initial_metadata = NULL;
+ s_->sent_initial_metadata = true;
+ write_context_->NoteScheduledResults();
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t_, s_, &s_->send_initial_metadata_finished, GRPC_ERROR_NONE,
+ "send_initial_metadata_finished");
+ }
+
+ void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
+ /* send any window updates */
+ const uint32_t stream_announce = s_->flow_control->MaybeSendUpdate();
+ if (stream_announce == 0) return;
+
+ grpc_slice_buffer_add(
+ &t_->outbuf, grpc_chttp2_window_update_create(s_->id, stream_announce,
+ &s_->stats.outgoing));
+ write_context_->ResetPingRecvClock();
+ write_context_->IncWindowUpdateWrites();
+ }
+
+ void FlushData(grpc_exec_ctx *exec_ctx) {
+ if (!s_->sent_initial_metadata) return;
+
+ if (s_->flow_controlled_buffer.length == 0 &&
+ s_->compressed_data_buffer.length == 0) {
+ return; // early out: nothing to do
+ }
+
+ DataSendContext data_send_context(write_context_, t_, s_);
+
+ if (!data_send_context.AnyOutgoing()) {
+ if (t_->flow_control->remote_window() <= 0) {
+ report_stall(t_, s_, "transport");
+ grpc_chttp2_list_add_stalled_by_transport(t_, s_);
+ } else if (data_send_context.stream_remote_window() <= 0) {
+ report_stall(t_, s_, "stream");
+ grpc_chttp2_list_add_stalled_by_stream(t_, s_);
+ }
+ return; // early out: nothing to do
+ }
+
+ while ((s_->flow_controlled_buffer.length > 0 ||
+ s_->compressed_data_buffer.length > 0) &&
+ data_send_context.max_outgoing() > 0) {
+ if (s_->compressed_data_buffer.length > 0) {
+ data_send_context.FlushCompressedBytes();
+ } else {
+ data_send_context.CompressMoreBytes();
+ }
+ }
+ write_context_->ResetPingRecvClock();
+ if (data_send_context.is_last_frame()) {
+ SentLastFrame(exec_ctx);
+ }
+ data_send_context.CallCallbacks(exec_ctx);
+ stream_became_writable_ = true;
+ if (s_->flow_controlled_buffer.length > 0 ||
+ s_->compressed_data_buffer.length > 0) {
+ GRPC_CHTTP2_STREAM_REF(s_, "chttp2_writing:fork");
+ grpc_chttp2_list_add_writable_stream(t_, s_);
+ }
+ write_context_->IncMessageWrites();
+ }
+
+ void FlushTrailingMetadata(grpc_exec_ctx *exec_ctx) {
+ if (!s_->sent_initial_metadata) return;
+
+ if (s_->send_trailing_metadata == NULL) return;
+ if (s_->fetching_send_message != NULL) return;
+ if (s_->flow_controlled_buffer.length != 0) return;
+ if (s_->compressed_data_buffer.length != 0) return;
+
+ GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata"));
+ if (grpc_metadata_batch_is_empty(s_->send_trailing_metadata)) {
+ grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true,
+ &s_->stats.outgoing, &t_->outbuf);
+ } else {
+ grpc_encode_header_options hopt = {
+ s_->id, true,
+ t_->settings[GRPC_PEER_SETTINGS]
+ [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] !=
+ 0,
+
+ t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
+ &s_->stats.outgoing};
+ grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor,
+ extra_headers_for_trailing_metadata_,
+ num_extra_headers_for_trailing_metadata_,
+ s_->send_trailing_metadata, &hopt, &t_->outbuf);
+ }
+ write_context_->IncTrailingMetadataWrites();
+ write_context_->ResetPingRecvClock();
+ SentLastFrame(exec_ctx);
+
+ write_context_->NoteScheduledResults();
+ grpc_chttp2_complete_closure_step(
+ exec_ctx, t_, s_, &s_->send_trailing_metadata_finished, GRPC_ERROR_NONE,
+ "send_trailing_metadata_finished");
+ }
+
+ bool stream_became_writable() { return stream_became_writable_; }
+
+ private:
+ void ConvertInitialMetadataToTrailingMetadata() {
+ GRPC_CHTTP2_IF_TRACING(
+ gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
+ // When sending Trailers-Only, we need to move the :status and
+ // content-type headers to the trailers.
+ if (s_->send_initial_metadata->idx.named.status != NULL) {
+ extra_headers_for_trailing_metadata_
+ [num_extra_headers_for_trailing_metadata_++] =
+ &s_->send_initial_metadata->idx.named.status->md;
+ }
+ if (s_->send_initial_metadata->idx.named.content_type != NULL) {
+ extra_headers_for_trailing_metadata_
+ [num_extra_headers_for_trailing_metadata_++] =
+ &s_->send_initial_metadata->idx.named.content_type->md;
+ }
+ }
+
+ void SentLastFrame(grpc_exec_ctx *exec_ctx) {
+ s_->send_trailing_metadata = NULL;
+ s_->sent_trailing_metadata = true;
+
+ if (!t_->is_client && !s_->read_closed) {
+ grpc_slice_buffer_add(
+ &t_->outbuf, grpc_chttp2_rst_stream_create(
+ s_->id, GRPC_HTTP2_NO_ERROR, &s_->stats.outgoing));
+ }
+ grpc_chttp2_mark_stream_closed(exec_ctx, t_, s_, !t_->is_client, true,
+ GRPC_ERROR_NONE);
+ }
+
+ WriteContext *const write_context_;
+ grpc_chttp2_transport *const t_;
+ grpc_chttp2_stream *const s_;
+ bool stream_became_writable_ = false;
+ grpc_mdelem *extra_headers_for_trailing_metadata_[2];
+ size_t num_extra_headers_for_trailing_metadata_ = 0;
+};
+} // namespace
+
+grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ WriteContext ctx(exec_ctx, t);
+ ctx.FlushSettings(exec_ctx);
+ ctx.FlushPingAcks();
+ ctx.FlushQueuedBuffers(exec_ctx);
+ ctx.EnactHpackSettings(exec_ctx);
+
+ if (t->flow_control->remote_window() > 0) {
+ ctx.UpdateStreamsNoLongerStalled();
+ }
+
+  /* for each grpc_chttp2_stream that's become writable, frame its data
+ (according to available window sizes) and add to the output buffer */
+ while (grpc_chttp2_stream *s = ctx.NextStream()) {
+ StreamWriteContext stream_ctx(&ctx, s);
+ stream_ctx.FlushInitialMetadata(exec_ctx);
+ stream_ctx.FlushWindowUpdates(exec_ctx);
+ stream_ctx.FlushData(exec_ctx);
+ stream_ctx.FlushTrailingMetadata(exec_ctx);
+
+ if (stream_ctx.stream_became_writable()) {
+ if (!grpc_chttp2_list_add_writing_stream(t, s)) {
+ /* already in writing list: drop ref */
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
+ } else {
+ /* ref will be dropped at end of write */
+ }
+ } else {
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
+ }
+ }
+
+ ctx.FlushWindowUpdates(exec_ctx);
+
+ maybe_initiate_ping(exec_ctx, t);
+
+ GPR_TIMER_END("grpc_chttp2_begin_write", 0);
+
+ return ctx.Result();
+}
+
+void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+ grpc_error *error) {
+ GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
+ grpc_chttp2_stream *s;
+
+ while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
+ if (s->sending_bytes != 0) {
+ update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
+ &s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
+ GRPC_ERROR_REF(error));
+ s->sending_bytes = 0;
+ }
+ GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
+ }
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf);
+ GRPC_ERROR_UNREF(error);
+ GPR_TIMER_END("grpc_chttp2_end_write", 0);
+}
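/*
 * Illustrative sketch (not part of the patch): a self-contained model of the
 * write-callback bookkeeping used in writing.cc above. Callbacks are queued
 * with a byte offset (call_at_byte); after each write a running counter of
 * flowed bytes is advanced and every callback whose offset is now covered is
 * completed, mirroring the add_to_write_list()/update_list() pattern. All
 * names below are hypothetical and exist only for illustration.
 */
#include <cstdint>
#include <cstdio>
#include <functional>
#include <list>

struct write_cb {
  int64_t call_at_byte;           // fire once this many bytes have flowed
  std::function<void()> closure;  // completion callback
};

struct write_cb_list {
  std::list<write_cb> cbs;
  int64_t flowed_bytes = 0;

  void add(int64_t at_byte, std::function<void()> closure) {
    cbs.push_back({at_byte, std::move(closure)});
  }

  // Advance the flowed-byte counter and fire every satisfied callback;
  // returns true if anything was scheduled, like update_list().
  bool update(int64_t send_bytes) {
    flowed_bytes += send_bytes;
    bool sched_any = false;
    for (auto it = cbs.begin(); it != cbs.end();) {
      if (it->call_at_byte <= flowed_bytes) {
        it->closure();
        sched_any = true;
        it = cbs.erase(it);
      } else {
        ++it;
      }
    }
    return sched_any;
  }
};

int main() {
  write_cb_list list;
  list.add(10, [] { std::puts("first 10 bytes flushed"); });
  list.add(25, [] { std::puts("first 25 bytes flushed"); });
  list.update(12);  // completes only the 10-byte callback
  list.update(20);  // 32 bytes have now flowed; completes the 25-byte callback
  return 0;
}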
diff --git a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
index 83365192e3..b280487ca3 100644
--- a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.c
+++ b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
@@ -21,6 +21,8 @@
#include <stdio.h>
#include <string.h>
+#include <grpc/grpc_cronet.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
diff --git a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c b/src/core/ext/transport/cronet/transport/cronet_api_dummy.cc
index 4f248ad919..4f248ad919 100644
--- a/src/core/ext/transport/cronet/transport/cronet_api_dummy.c
+++ b/src/core/ext/transport/cronet/transport/cronet_api_dummy.cc
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index 765c13c65e..97e4f7d72b 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -27,6 +27,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/incoming_metadata.h"
+#include "src/core/ext/transport/cronet/transport/cronet_transport.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -288,7 +289,7 @@ static void maybe_flush_read(stream_obj *s) {
CRONET_LOG(GPR_DEBUG, "%p: Flush read", s);
s->state.flush_read = true;
null_and_maybe_free_read_buffer(s);
- s->state.rs.read_buffer = gpr_malloc(GRPC_FLUSH_READ_SIZE);
+ s->state.rs.read_buffer = (char *)gpr_malloc(GRPC_FLUSH_READ_SIZE);
if (!s->state.pending_read_from_cronet) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
@@ -313,7 +314,8 @@ static void add_to_storage(struct stream_obj *s,
struct op_storage *storage = &s->storage;
/* add new op at the beginning of the linked list. The memory is freed
in remove_from_storage */
- struct op_and_state *new_op = gpr_malloc(sizeof(struct op_and_state));
+ struct op_and_state *new_op =
+ (struct op_and_state *)gpr_malloc(sizeof(struct op_and_state));
memcpy(&new_op->op, op, sizeof(grpc_transport_stream_op_batch));
memset(&new_op->state, 0, sizeof(new_op->state));
new_op->s = s;
@@ -685,12 +687,12 @@ static void create_grpc_frame(grpc_exec_ctx *exec_ctx,
size_t length = GRPC_SLICE_LENGTH(slice);
*p_write_buffer_size = length + GRPC_HEADER_SIZE_IN_BYTES;
/* This is freed in the on_write_completed callback */
- char *write_buffer = gpr_malloc(length + GRPC_HEADER_SIZE_IN_BYTES);
+ char *write_buffer = (char *)gpr_malloc(length + GRPC_HEADER_SIZE_IN_BYTES);
*pp_write_buffer = write_buffer;
uint8_t *p = (uint8_t *)write_buffer;
/* Append 5 byte header */
/* Compressed flag */
- *p++ = (flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0;
+ *p++ = (uint8_t)((flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0);
/* Message length */
*p++ = (uint8_t)(length >> 24);
*p++ = (uint8_t)(length >> 16);
@@ -1182,7 +1184,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.length_field);
if (stream_state->rs.length_field > 0) {
stream_state->rs.read_buffer =
- gpr_malloc((size_t)stream_state->rs.length_field);
+ (char *)gpr_malloc((size_t)stream_state->rs.length_field);
GPR_ASSERT(stream_state->rs.read_buffer);
stream_state->rs.remaining_bytes = stream_state->rs.length_field;
stream_state->rs.received_bytes = 0;
@@ -1429,10 +1431,6 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- return NULL;
-}
-
static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *gt) {
return NULL;
@@ -1451,19 +1449,19 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
perform_op,
destroy_stream,
destroy_transport,
- get_peer,
get_endpoint};
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
const grpc_channel_args *args,
void *reserved) {
- grpc_cronet_transport *ct = gpr_malloc(sizeof(grpc_cronet_transport));
+ grpc_cronet_transport *ct =
+ (grpc_cronet_transport *)gpr_malloc(sizeof(grpc_cronet_transport));
if (!ct) {
goto error;
}
ct->base.vtable = &grpc_cronet_vtable;
- ct->engine = engine;
- ct->host = gpr_malloc(strlen(target) + 1);
+ ct->engine = (stream_engine *)engine;
+ ct->host = (char *)gpr_malloc(strlen(target) + 1);
if (!ct->host) {
goto error;
}
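/*
 * Illustrative sketch (not part of the patch): the (char *) and
 * (struct ... *) casts added to the gpr_malloc() calls above are required
 * once the file is compiled as C++, because C++ (unlike C) does not
 * implicitly convert void* to other object-pointer types. A minimal
 * stand-alone .cc example using plain malloc() as a stand-in:
 */
#include <stdlib.h>

struct op_and_state {
  int dummy;
};

int main(void) {
  /* Valid C, but a compile error in C++:
   *   struct op_and_state *bad = malloc(sizeof(struct op_and_state)); */
  struct op_and_state *ok =
      (struct op_and_state *)malloc(sizeof(struct op_and_state));
  free(ok);
  return 0;
}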
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.h b/src/core/ext/transport/cronet/transport/cronet_transport.h
index 3bd4249b79..43ff391f79 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.h
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.h
@@ -21,8 +21,16 @@
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
const grpc_channel_args *args,
void *reserved);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H */
diff --git a/src/core/ext/transport/inproc/inproc_plugin.c b/src/core/ext/transport/inproc/inproc_plugin.cc
index 6a796a0b19..5d8a1c74ab 100644
--- a/src/core/ext/transport/inproc/inproc_plugin.c
+++ b/src/core/ext/transport/inproc/inproc_plugin.cc
@@ -21,9 +21,11 @@
grpc_tracer_flag grpc_inproc_trace = GRPC_TRACER_INITIALIZER(false, "inproc");
-void grpc_inproc_plugin_init(void) {
+extern "C" void grpc_inproc_plugin_init(void) {
grpc_register_tracer(&grpc_inproc_trace);
grpc_inproc_transport_init();
}
-void grpc_inproc_plugin_shutdown(void) { grpc_inproc_transport_shutdown(); }
+extern "C" void grpc_inproc_plugin_shutdown(void) {
+ grpc_inproc_transport_shutdown();
+}
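/*
 * Illustrative sketch (not part of the patch): the __cplusplus guards added
 * to cronet_transport.h and the extern "C" added to the inproc plugin
 * definitions above are two halves of the same C-to-C++ migration pattern:
 * declarations in headers shared with C code are wrapped so they keep C
 * linkage when included from a .cc file, and the corresponding definitions
 * carry the same linkage. A minimal stand-alone .cc version with
 * hypothetical names:
 */

/* --- my_transport.h (shared with C callers) --- */
#ifdef __cplusplus
extern "C" {
#endif

void my_plugin_init(void); /* C linkage in both C and C++ translation units */

#ifdef __cplusplus
}
#endif

/* --- my_transport.cc --- */
extern "C" void my_plugin_init(void) { /* register tracers, transports, ... */
}

int main(void) {
  my_plugin_init();
  return 0;
}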
diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.cc
index 6f4b429ee2..1551f5e988 100644
--- a/src/core/ext/transport/inproc/inproc_transport.c
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -37,7 +37,6 @@
if (GRPC_TRACER_ON(grpc_inproc_trace)) gpr_log(__VA_ARGS__); \
} while (0)
-static const grpc_transport_vtable inproc_vtable;
static grpc_slice g_empty_slice;
static grpc_slice g_fake_path_key;
static grpc_slice g_fake_path_value;
@@ -63,96 +62,22 @@ typedef struct inproc_transport {
struct inproc_stream *stream_list;
} inproc_transport;
-typedef struct sb_list_entry {
- grpc_slice_buffer sb;
- struct sb_list_entry *next;
-} sb_list_entry;
-
-// Specialize grpc_byte_stream for our use case
-typedef struct {
- grpc_byte_stream base;
- sb_list_entry *le;
- grpc_error *shutdown_error;
-} inproc_slice_byte_stream;
-
-typedef struct {
- // TODO (vjpai): Add some inlined elements to avoid alloc in simple cases
- sb_list_entry *head;
- sb_list_entry *tail;
-} slice_buffer_list;
-
-static void slice_buffer_list_init(slice_buffer_list *l) {
- l->head = NULL;
- l->tail = NULL;
-}
-
-static void sb_list_entry_destroy(grpc_exec_ctx *exec_ctx, sb_list_entry *le) {
- grpc_slice_buffer_destroy_internal(exec_ctx, &le->sb);
- gpr_free(le);
-}
-
-static void slice_buffer_list_destroy(grpc_exec_ctx *exec_ctx,
- slice_buffer_list *l) {
- sb_list_entry *curr = l->head;
- while (curr != NULL) {
- sb_list_entry *le = curr;
- curr = curr->next;
- sb_list_entry_destroy(exec_ctx, le);
- }
- l->head = NULL;
- l->tail = NULL;
-}
-
-static bool slice_buffer_list_empty(slice_buffer_list *l) {
- return l->head == NULL;
-}
-
-static void slice_buffer_list_append_entry(slice_buffer_list *l,
- sb_list_entry *next) {
- next->next = NULL;
- if (l->tail) {
- l->tail->next = next;
- l->tail = next;
- } else {
- l->head = next;
- l->tail = next;
- }
-}
-
-static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) {
- sb_list_entry *next = gpr_malloc(sizeof(*next));
- grpc_slice_buffer_init(&next->sb);
- slice_buffer_list_append_entry(l, next);
- return &next->sb;
-}
-
-static sb_list_entry *slice_buffer_list_pophead(slice_buffer_list *l) {
- sb_list_entry *ret = l->head;
- l->head = l->head->next;
- if (l->head == NULL) {
- l->tail = NULL;
- }
- return ret;
-}
-
typedef struct inproc_stream {
inproc_transport *t;
grpc_metadata_batch to_read_initial_md;
uint32_t to_read_initial_md_flags;
bool to_read_initial_md_filled;
- slice_buffer_list to_read_message;
grpc_metadata_batch to_read_trailing_md;
bool to_read_trailing_md_filled;
- bool reads_needed;
- bool read_closure_scheduled;
- grpc_closure read_closure;
+ bool ops_needed;
+ bool op_closure_scheduled;
+ grpc_closure op_closure;
// Write buffer used only during gap at init time when client-side
// stream is set up but server side stream is not yet set up
grpc_metadata_batch write_buffer_initial_md;
bool write_buffer_initial_md_filled;
uint32_t write_buffer_initial_md_flags;
- gpr_timespec write_buffer_deadline;
- slice_buffer_list write_buffer_message;
+ grpc_millis write_buffer_deadline;
grpc_metadata_batch write_buffer_trailing_md;
bool write_buffer_trailing_md_filled;
grpc_error *write_buffer_cancel_error;
@@ -165,11 +90,15 @@ typedef struct inproc_stream {
gpr_arena *arena;
+ grpc_transport_stream_op_batch *send_message_op;
+ grpc_transport_stream_op_batch *send_trailing_md_op;
grpc_transport_stream_op_batch *recv_initial_md_op;
grpc_transport_stream_op_batch *recv_message_op;
grpc_transport_stream_op_batch *recv_trailing_md_op;
- inproc_slice_byte_stream recv_message_stream;
+ grpc_slice_buffer recv_message;
+ grpc_slice_buffer_stream recv_stream;
+ bool recv_inited;
bool initial_md_sent;
bool trailing_md_sent;
@@ -181,61 +110,18 @@ typedef struct inproc_stream {
grpc_error *cancel_self_error;
grpc_error *cancel_other_error;
- gpr_timespec deadline;
+ grpc_millis deadline;
bool listed;
struct inproc_stream *stream_list_prev;
struct inproc_stream *stream_list_next;
} inproc_stream;
-static bool inproc_slice_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *bs, size_t max,
- grpc_closure *on_complete) {
- // Because inproc transport always provides the entire message atomically,
- // the byte stream always has data available when this function is called.
- // Thus, this function always returns true (unlike other transports) and
- // there is never any need to schedule a closure
- return true;
-}
-
-static grpc_error *inproc_slice_byte_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *bs,
- grpc_slice *slice) {
- inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
- if (stream->shutdown_error != GRPC_ERROR_NONE) {
- return GRPC_ERROR_REF(stream->shutdown_error);
- }
- *slice = grpc_slice_buffer_take_first(&stream->le->sb);
- return GRPC_ERROR_NONE;
-}
-
-static void inproc_slice_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *bs,
- grpc_error *error) {
- inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
- GRPC_ERROR_UNREF(stream->shutdown_error);
- stream->shutdown_error = error;
-}
-
-static void inproc_slice_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *bs) {
- inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
- sb_list_entry_destroy(exec_ctx, stream->le);
- GRPC_ERROR_UNREF(stream->shutdown_error);
-}
-
-static const grpc_byte_stream_vtable inproc_slice_byte_stream_vtable = {
- inproc_slice_byte_stream_next, inproc_slice_byte_stream_pull,
- inproc_slice_byte_stream_shutdown, inproc_slice_byte_stream_destroy};
-
-void inproc_slice_byte_stream_init(inproc_slice_byte_stream *s,
- sb_list_entry *le) {
- s->base.length = (uint32_t)le->sb.length;
- s->base.flags = 0;
- s->base.vtable = &inproc_slice_byte_stream_vtable;
- s->le = le;
- s->shutdown_error = GRPC_ERROR_NONE;
-}
+static grpc_closure do_nothing_closure;
+static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
+ grpc_error *error);
+static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error);
static void ref_transport(inproc_transport *t) {
INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
@@ -281,12 +167,14 @@ static void unref_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s,
static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
- slice_buffer_list_destroy(exec_ctx, &s->to_read_message);
- slice_buffer_list_destroy(exec_ctx, &s->write_buffer_message);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
GRPC_ERROR_UNREF(s->cancel_self_error);
GRPC_ERROR_UNREF(s->cancel_other_error);
+ if (s->recv_inited) {
+ grpc_slice_buffer_destroy_internal(exec_ctx, &s->recv_message);
+ }
+
unref_transport(exec_ctx, s->t);
if (s->closure_at_destroy) {
@@ -294,9 +182,6 @@ static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
}
}
-static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
static void log_metadata(const grpc_metadata_batch *md_batch, bool is_client,
bool is_initial) {
for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL;
@@ -327,7 +212,8 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem *elem = metadata->list.head;
(elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
- grpc_linked_mdelem *nelem = gpr_arena_alloc(s->arena, sizeof(*nelem));
+ grpc_linked_mdelem *nelem =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem));
nelem->md = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@@ -359,11 +245,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->write_buffer_initial_md_filled = false;
grpc_metadata_batch_init(&s->write_buffer_trailing_md);
s->write_buffer_trailing_md_filled = false;
- slice_buffer_list_init(&s->to_read_message);
- slice_buffer_list_init(&s->write_buffer_message);
- s->reads_needed = false;
- s->read_closure_scheduled = false;
- GRPC_CLOSURE_INIT(&s->read_closure, read_state_machine, s,
+ s->ops_needed = false;
+ s->op_closure_scheduled = false;
+ GRPC_CLOSURE_INIT(&s->op_closure, op_state_machine, s,
grpc_schedule_on_exec_ctx);
s->t = t;
s->closure_at_destroy = NULL;
@@ -377,8 +261,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->cancel_self_error = GRPC_ERROR_NONE;
s->cancel_other_error = GRPC_ERROR_NONE;
s->write_buffer_cancel_error = GRPC_ERROR_NONE;
- s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ s->deadline = GRPC_MILLIS_INF_FUTURE;
+ s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
s->stream_list_prev = NULL;
gpr_mu_lock(&t->mu->mu);
@@ -421,15 +305,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
cs->write_buffer_initial_md_flags,
&s->to_read_initial_md, &s->to_read_initial_md_flags,
&s->to_read_initial_md_filled);
- s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline);
+ s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md);
cs->write_buffer_initial_md_filled = false;
}
- while (!slice_buffer_list_empty(&cs->write_buffer_message)) {
- slice_buffer_list_append_entry(
- &s->to_read_message,
- slice_buffer_list_pophead(&cs->write_buffer_message));
- }
if (cs->write_buffer_trailing_md_filled) {
fill_in_metadata(exec_ctx, s, &cs->write_buffer_trailing_md, 0,
&s->to_read_trailing_md, NULL,
@@ -488,9 +367,39 @@ static void close_other_side_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
}
}
+// Call the on_complete closure associated with this stream_op_batch if
+// this stream_op_batch is only one of the pending operations for this
+// stream. This is called when one of the pending operations for the stream
+// is done and about to be NULLed out
+static void complete_if_batch_end_locked(grpc_exec_ctx *exec_ctx,
+ inproc_stream *s, grpc_error *error,
+ grpc_transport_stream_op_batch *op,
+ const char *msg) {
+ int is_sm = (int)(op == s->send_message_op);
+ int is_stm = (int)(op == s->send_trailing_md_op);
+ int is_rim = (int)(op == s->recv_initial_md_op);
+ int is_rm = (int)(op == s->recv_message_op);
+ int is_rtm = (int)(op == s->recv_trailing_md_op);
+
+ if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
+ INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
+ GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_REF(error));
+ }
+}
+
+static void maybe_schedule_op_closure_locked(grpc_exec_ctx *exec_ctx,
+ inproc_stream *s,
+ grpc_error *error) {
+ if (s && s->ops_needed && !s->op_closure_scheduled) {
+ GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_REF(error));
+ s->op_closure_scheduled = true;
+ s->ops_needed = false;
+ }
+}
+
static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error) {
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p fail_helper", s);
+ INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
if (!s->trailing_md_sent) {
@@ -512,14 +421,7 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (other->cancel_other_error == GRPC_ERROR_NONE) {
other->cancel_other_error = GRPC_ERROR_REF(error);
}
- if (other->reads_needed) {
- if (!other->read_closure_scheduled) {
- GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
- GRPC_ERROR_REF(error));
- other->read_closure_scheduled = true;
- }
- other->reads_needed = false;
- }
+ maybe_schedule_op_closure_locked(exec_ctx, other, error);
} else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
}
@@ -531,12 +433,14 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
- grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
+ grpc_linked_mdelem *path_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md));
path_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
GRPC_ERROR_NONE);
- grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
+ grpc_linked_mdelem *auth_md =
+ (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md));
auth_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
@@ -562,14 +466,9 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
err);
// Last use of err so no need to REF and then UNREF it
- if ((s->recv_initial_md_op != s->recv_message_op) &&
- (s->recv_initial_md_op != s->recv_trailing_md_op)) {
- INPROC_LOG(GPR_DEBUG,
- "fail_helper %p scheduling initial-metadata-on-complete %p",
- error, s);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
- GRPC_ERROR_REF(error));
- }
+ complete_if_batch_end_locked(
+ exec_ctx, s, error, s->recv_initial_md_op,
+ "fail_helper scheduling recv-initial-metadata-on-complete");
s->recv_initial_md_op = NULL;
}
if (s->recv_message_op) {
@@ -578,20 +477,30 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
GRPC_CLOSURE_SCHED(
exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
- if (s->recv_message_op != s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-on-complete %p",
- s, error);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
- GRPC_ERROR_REF(error));
- }
+ complete_if_batch_end_locked(
+ exec_ctx, s, error, s->recv_message_op,
+ "fail_helper scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
+ if (s->send_message_op) {
+ complete_if_batch_end_locked(
+ exec_ctx, s, error, s->send_message_op,
+ "fail_helper scheduling send-message-on-complete");
+ s->send_message_op = NULL;
+ }
+ if (s->send_trailing_md_op) {
+ complete_if_batch_end_locked(
+ exec_ctx, s, error, s->send_trailing_md_op,
+ "fail_helper scheduling send-trailng-md-on-complete");
+ s->send_trailing_md_op = NULL;
+ }
if (s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG,
"fail_helper %p scheduling trailing-md-on-complete %p", s,
error);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
- GRPC_ERROR_REF(error));
+ complete_if_batch_end_locked(
+ exec_ctx, s, error, s->recv_trailing_md_op,
+ "fail_helper scheduling recv-trailing-metadata-on-complete");
s->recv_trailing_md_op = NULL;
}
close_other_side_locked(exec_ctx, s, "fail_helper:other_side");
@@ -600,12 +509,61 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
GRPC_ERROR_UNREF(error);
}
-static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void message_transfer_locked(grpc_exec_ctx *exec_ctx,
+ inproc_stream *sender,
+ inproc_stream *receiver) {
+ size_t remaining =
+ sender->send_message_op->payload->send_message.send_message->length;
+ if (receiver->recv_inited) {
+ grpc_slice_buffer_destroy_internal(exec_ctx, &receiver->recv_message);
+ }
+ grpc_slice_buffer_init(&receiver->recv_message);
+ receiver->recv_inited = true;
+ do {
+ grpc_slice message_slice;
+ grpc_closure unused;
+ GPR_ASSERT(grpc_byte_stream_next(
+ exec_ctx, sender->send_message_op->payload->send_message.send_message,
+ SIZE_MAX, &unused));
+ grpc_error *error = grpc_byte_stream_pull(
+ exec_ctx, sender->send_message_op->payload->send_message.send_message,
+ &message_slice);
+ if (error != GRPC_ERROR_NONE) {
+ cancel_stream_locked(exec_ctx, sender, GRPC_ERROR_REF(error));
+ break;
+ }
+ GPR_ASSERT(error == GRPC_ERROR_NONE);
+ remaining -= GRPC_SLICE_LENGTH(message_slice);
+ grpc_slice_buffer_add(&receiver->recv_message, message_slice);
+ } while (remaining > 0);
+
+ grpc_slice_buffer_stream_init(&receiver->recv_stream, &receiver->recv_message,
+ 0);
+ *receiver->recv_message_op->payload->recv_message.recv_message =
+ &receiver->recv_stream.base;
+ INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
+ receiver);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ receiver->recv_message_op->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_NONE);
+ complete_if_batch_end_locked(
+ exec_ctx, sender, GRPC_ERROR_NONE, sender->send_message_op,
+ "message_transfer scheduling sender on_complete");
+ complete_if_batch_end_locked(
+ exec_ctx, receiver, GRPC_ERROR_NONE, receiver->recv_message_op,
+ "message_transfer scheduling receiver on_complete");
+
+ receiver->recv_message_op = NULL;
+ sender->send_message_op = NULL;
+}
+
+static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
// This function gets called when we have contents in the unprocessed reads
// Get what we want based on our ops wanted
// Schedule our appropriate closures
- // and then return to reads_needed state if still needed
+ // and then return to ops_needed state if still needed
// Since this is a closure directly invoked by the combiner, it should not
// unref the error parameter explicitly; the combiner will do that implicitly
@@ -613,12 +571,14 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
bool needs_close = false;
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p", arg);
+ INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
inproc_stream *s = (inproc_stream *)arg;
gpr_mu *mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu);
- s->read_closure_scheduled = false;
+ s->op_closure_scheduled = false;
// cancellation takes precedence
+ inproc_stream *other = s->other_side;
+
if (s->cancel_self_error != GRPC_ERROR_NONE) {
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error));
goto done;
@@ -630,89 +590,116 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
goto done;
}
- if (s->recv_initial_md_op) {
- if (!s->to_read_initial_md_filled) {
- // We entered the state machine on some other kind of read even though
- // we still haven't satisfied initial md . That's an error.
- new_err =
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected frame sequencing");
- INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling on_complete errors for no "
- "initial md %p",
- s, new_err);
+ if (s->send_message_op && other) {
+ if (other->recv_message_op) {
+ message_transfer_locked(exec_ctx, s, other);
+ maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+ } else if (!s->t->is_client &&
+ (s->trailing_md_sent || other->recv_trailing_md_op)) {
+ // A server send will never be matched if the client is waiting
+ // for trailing metadata already
+ complete_if_batch_end_locked(
+ exec_ctx, s, GRPC_ERROR_NONE, s->send_message_op,
+ "op_state_machine scheduling send-message-on-complete");
+ s->send_message_op = NULL;
+ }
+ }
+ // Pause a send trailing metadata if there is still an outstanding
+ // send message unless we know that the send message will never get
+ // matched to a receive. This happens on the client if the server has
+ // already sent status.
+ if (s->send_trailing_md_op &&
+ (!s->send_message_op ||
+ (s->t->is_client &&
+ (s->trailing_md_recvd || s->to_read_trailing_md_filled)))) {
+ grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
+ : &other->to_read_trailing_md;
+ bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
+ : &other->to_read_trailing_md_filled;
+ if (*destfilled || s->trailing_md_sent) {
+ // The buffer is already in use; that's an error!
+ INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
+ new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
- } else if (s->initial_md_recvd) {
+ } else {
+ if (!other || !other->closed) {
+ fill_in_metadata(exec_ctx, s,
+ s->send_trailing_md_op->payload->send_trailing_metadata
+ .send_trailing_metadata,
+ 0, dest, NULL, destfilled);
+ }
+ s->trailing_md_sent = true;
+ if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
+ INPROC_LOG(GPR_DEBUG,
+ "op_state_machine %p scheduling trailing-md-on-complete", s);
+ GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
+ GRPC_ERROR_NONE);
+ s->recv_trailing_md_op = NULL;
+ needs_close = true;
+ }
+ }
+ maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+ complete_if_batch_end_locked(
+ exec_ctx, s, GRPC_ERROR_NONE, s->send_trailing_md_op,
+ "op_state_machine scheduling send-trailing-metadata-on-complete");
+ s->send_trailing_md_op = NULL;
+ }
+ if (s->recv_initial_md_op) {
+ if (s->initial_md_recvd) {
new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
INPROC_LOG(
GPR_DEBUG,
- "read_state_machine %p scheduling on_complete errors for already "
+ "op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %p",
s, new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
goto done;
}
- s->initial_md_recvd = true;
- new_err = fill_in_metadata(
- exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
- s->recv_initial_md_op->payload->recv_initial_metadata
- .recv_initial_metadata,
- s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, NULL);
- s->recv_initial_md_op->payload->recv_initial_metadata.recv_initial_metadata
- ->deadline = s->deadline;
- grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
- s->to_read_initial_md_filled = false;
- INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling initial-metadata-ready %p", s,
- new_err);
- GRPC_CLOSURE_SCHED(exec_ctx,
- s->recv_initial_md_op->payload->recv_initial_metadata
- .recv_initial_metadata_ready,
- GRPC_ERROR_REF(new_err));
- if ((s->recv_initial_md_op != s->recv_message_op) &&
- (s->recv_initial_md_op != s->recv_trailing_md_op)) {
- INPROC_LOG(
- GPR_DEBUG,
- "read_state_machine %p scheduling initial-metadata-on-complete %p", s,
- new_err);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
- GRPC_ERROR_REF(new_err));
- }
- s->recv_initial_md_op = NULL;
-
- if (new_err != GRPC_ERROR_NONE) {
+ if (s->to_read_initial_md_filled) {
+ s->initial_md_recvd = true;
+ new_err = fill_in_metadata(
+ exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
+ s->recv_initial_md_op->payload->recv_initial_metadata
+ .recv_initial_metadata,
+ s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
+ NULL);
+ s->recv_initial_md_op->payload->recv_initial_metadata
+ .recv_initial_metadata->deadline = s->deadline;
+ grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
+ s->to_read_initial_md_filled = false;
INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling on_complete errors2 %p", s,
+ "op_state_machine %p scheduling initial-metadata-ready %p", s,
new_err);
- fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
- goto done;
+ GRPC_CLOSURE_SCHED(exec_ctx,
+ s->recv_initial_md_op->payload->recv_initial_metadata
+ .recv_initial_metadata_ready,
+ GRPC_ERROR_REF(new_err));
+ complete_if_batch_end_locked(
+ exec_ctx, s, new_err, s->recv_initial_md_op,
+ "op_state_machine scheduling recv-initial-metadata-on-complete");
+ s->recv_initial_md_op = NULL;
+
+ if (new_err != GRPC_ERROR_NONE) {
+ INPROC_LOG(GPR_DEBUG,
+ "op_state_machine %p scheduling on_complete errors2 %p", s,
+ new_err);
+ fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
+ goto done;
+ }
}
}
- if (s->to_read_initial_md_filled) {
- new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected recv frame");
- fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
- goto done;
- }
- if (!slice_buffer_list_empty(&s->to_read_message) && s->recv_message_op) {
- inproc_slice_byte_stream_init(
- &s->recv_message_stream,
- slice_buffer_list_pophead(&s->to_read_message));
- *s->recv_message_op->payload->recv_message.recv_message =
- &s->recv_message_stream.base;
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
- GRPC_CLOSURE_SCHED(
- exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
- GRPC_ERROR_NONE);
- if (s->recv_message_op != s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling message-on-complete %p", s,
- new_err);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
- GRPC_ERROR_REF(new_err));
+ if (s->recv_message_op) {
+ if (other && other->send_message_op) {
+ message_transfer_locked(exec_ctx, other, s);
+ maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
}
- s->recv_message_op = NULL;
+ }
+ if (s->recv_trailing_md_op && s->t->is_client && other &&
+ other->send_message_op) {
+ maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
}
if (s->to_read_trailing_md_filled) {
if (s->trailing_md_recvd) {
@@ -720,7 +707,7 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
INPROC_LOG(
GPR_DEBUG,
- "read_state_machine %p scheduling on_complete errors for already "
+ "op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %p",
s, new_err);
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
@@ -729,21 +716,24 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
if (s->recv_message_op != NULL) {
// This message needs to be wrapped up because it will never be
// satisfied
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready",
- s);
+ INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
exec_ctx,
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
- if (s->recv_message_op != s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling message-on-complete %p", s,
- new_err);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
- GRPC_ERROR_REF(new_err));
- }
+ complete_if_batch_end_locked(
+ exec_ctx, s, new_err, s->recv_message_op,
+ "op_state_machine scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
+ if ((s->trailing_md_sent || s->t->is_client) && s->send_message_op) {
+ // Nothing further will try to receive from this stream, so finish off
+ // any outstanding send_message op
+ complete_if_batch_end_locked(
+ exec_ctx, s, new_err, s->send_message_op,
+ "op_state_machine scheduling send-message-on-complete");
+ s->send_message_op = NULL;
+ }
if (s->recv_trailing_md_op != NULL) {
// We wanted trailing metadata and we got it
s->trailing_md_recvd = true;
@@ -761,61 +751,65 @@ static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
// (If the server hasn't already sent its trailing md, it doesn't have
// a final status, so don't mark this op complete)
if (s->t->is_client || s->trailing_md_sent) {
- INPROC_LOG(
- GPR_DEBUG,
- "read_state_machine %p scheduling trailing-md-on-complete %p", s,
- new_err);
+ INPROC_LOG(GPR_DEBUG,
+ "op_state_machine %p scheduling trailing-md-on-complete %p",
+ s, new_err);
GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
GRPC_ERROR_REF(new_err));
s->recv_trailing_md_op = NULL;
needs_close = true;
} else {
INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p server needs to delay handling "
+ "op_state_machine %p server needs to delay handling "
"trailing-md-on-complete %p",
s, new_err);
}
} else {
INPROC_LOG(
GPR_DEBUG,
- "read_state_machine %p has trailing md but not yet waiting for it",
- s);
+ "op_state_machine %p has trailing md but not yet waiting for it", s);
}
}
if (s->trailing_md_recvd && s->recv_message_op) {
// No further message will come on this stream, so finish off the
// recv_message_op
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
+ INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED(
exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
- if (s->recv_message_op != s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "read_state_machine %p scheduling message-on-complete %p", s,
- new_err);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
- GRPC_ERROR_REF(new_err));
- }
+ complete_if_batch_end_locked(
+ exec_ctx, s, new_err, s->recv_message_op,
+ "op_state_machine scheduling recv-message-on-complete");
s->recv_message_op = NULL;
}
- if (s->recv_message_op || s->recv_trailing_md_op) {
+ if (s->trailing_md_recvd && (s->trailing_md_sent || s->t->is_client) &&
+ s->send_message_op) {
+ // Nothing further will try to receive from this stream, so finish off
+ // any outstanding send_message op
+ complete_if_batch_end_locked(
+ exec_ctx, s, new_err, s->send_message_op,
+ "op_state_machine scheduling send-message-on-complete");
+ s->send_message_op = NULL;
+ }
+ if (s->send_message_op || s->send_trailing_md_op || s->recv_initial_md_op ||
+ s->recv_message_op || s->recv_trailing_md_op) {
// Didn't get the item we wanted so we still need to get
// rescheduled
- INPROC_LOG(GPR_DEBUG, "read_state_machine %p still needs closure %p %p", s,
- s->recv_message_op, s->recv_trailing_md_op);
- s->reads_needed = true;
+ INPROC_LOG(
+ GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s,
+ s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
+ s->recv_message_op, s->recv_trailing_md_op);
+ s->ops_needed = true;
}
done:
if (needs_close) {
- close_other_side_locked(exec_ctx, s, "read_state_machine");
+ close_other_side_locked(exec_ctx, s, "op_state_machine");
close_stream_locked(exec_ctx, s);
}
gpr_mu_unlock(mu);
GRPC_ERROR_UNREF(new_err);
}
-static grpc_closure do_nothing_closure;
-
static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error) {
bool ret = false; // was the cancel accepted
@@ -824,14 +818,7 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (s->cancel_self_error == GRPC_ERROR_NONE) {
ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error);
- if (s->reads_needed) {
- if (!s->read_closure_scheduled) {
- GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure,
- GRPC_ERROR_REF(s->cancel_self_error));
- s->read_closure_scheduled = true;
- }
- s->reads_needed = false;
- }
+ maybe_schedule_op_closure_locked(exec_ctx, s, s->cancel_self_error);
// Send trailing md to the other side indicating cancellation, even if we
// already have
s->trailing_md_sent = true;
@@ -851,14 +838,8 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (other->cancel_other_error == GRPC_ERROR_NONE) {
other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error);
}
- if (other->reads_needed) {
- if (!other->read_closure_scheduled) {
- GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
- GRPC_ERROR_REF(other->cancel_other_error));
- other->read_closure_scheduled = true;
- }
- other->reads_needed = false;
- }
+ maybe_schedule_op_closure_locked(exec_ctx, other,
+ other->cancel_other_error);
} else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error);
}
@@ -867,11 +848,9 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// couldn't complete that because we hadn't yet sent out trailing
// md, now's the chance
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "cancel_stream %p scheduling trailing-md-on-complete %p", s,
- s->cancel_self_error);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
- GRPC_ERROR_REF(s->cancel_self_error));
+ complete_if_batch_end_locked(
+ exec_ctx, s, s->cancel_self_error, s->recv_trailing_md_op,
+ "cancel_stream scheduling trailing-md-on-complete");
s->recv_trailing_md_op = NULL;
}
}
@@ -916,7 +895,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error);
} else {
- INPROC_LOG(GPR_DEBUG, "perform_stream_op %p%s%s%s%s%s%s", s,
+ INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s,
+ s->t->is_client ? "client" : "server",
op->send_initial_metadata ? " send_initial_metadata" : "",
op->send_message ? " send_message" : "",
op->send_trailing_metadata ? " send_trailing_metadata" : "",
@@ -927,10 +907,9 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
bool needs_close = false;
+ inproc_stream *other = s->other_side;
if (error == GRPC_ERROR_NONE &&
- (op->send_initial_metadata || op->send_message ||
- op->send_trailing_metadata)) {
- inproc_stream *other = s->other_side;
+ (op->send_initial_metadata || op->send_trailing_metadata)) {
if (s->t->is_closed) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
}
@@ -946,7 +925,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
} else {
- if (!other->closed) {
+ if (!other || !other->closed) {
fill_in_metadata(
exec_ctx, s,
op->payload->send_initial_metadata.send_initial_metadata,
@@ -954,79 +933,28 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
dest, destflags, destfilled);
}
if (s->t->is_client) {
- gpr_timespec *dl =
+ grpc_millis *dl =
(other == NULL) ? &s->write_buffer_deadline : &other->deadline;
- *dl = gpr_time_min(*dl, op->payload->send_initial_metadata
- .send_initial_metadata->deadline);
+ *dl = GPR_MIN(*dl, op->payload->send_initial_metadata
+ .send_initial_metadata->deadline);
s->initial_md_sent = true;
}
}
- }
- if (error == GRPC_ERROR_NONE && op->send_message) {
- size_t remaining = op->payload->send_message.send_message->length;
- grpc_slice_buffer *dest = slice_buffer_list_append(
- (other == NULL) ? &s->write_buffer_message : &other->to_read_message);
- do {
- grpc_slice message_slice;
- grpc_closure unused;
- GPR_ASSERT(grpc_byte_stream_next(exec_ctx,
- op->payload->send_message.send_message,
- SIZE_MAX, &unused));
- error = grpc_byte_stream_pull(
- exec_ctx, op->payload->send_message.send_message, &message_slice);
- if (error != GRPC_ERROR_NONE) {
- cancel_stream_locked(exec_ctx, s, GRPC_ERROR_REF(error));
- break;
- }
- GPR_ASSERT(error == GRPC_ERROR_NONE);
- remaining -= GRPC_SLICE_LENGTH(message_slice);
- grpc_slice_buffer_add(dest, message_slice);
- } while (remaining != 0);
- grpc_byte_stream_destroy(exec_ctx,
- op->payload->send_message.send_message);
- }
- if (error == GRPC_ERROR_NONE && op->send_trailing_metadata) {
- grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
- : &other->to_read_trailing_md;
- bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
- : &other->to_read_trailing_md_filled;
- if (*destfilled || s->trailing_md_sent) {
- // The buffer is already in use; that's an error!
- INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
- } else {
- if (!other->closed) {
- fill_in_metadata(
- exec_ctx, s,
- op->payload->send_trailing_metadata.send_trailing_metadata, 0,
- dest, NULL, destfilled);
- }
- s->trailing_md_sent = true;
- if (!s->t->is_client && s->trailing_md_recvd &&
- s->recv_trailing_md_op) {
- INPROC_LOG(GPR_DEBUG,
- "perform_stream_op %p scheduling trailing-md-on-complete",
- s);
- GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
- GRPC_ERROR_NONE);
- s->recv_trailing_md_op = NULL;
- needs_close = true;
- }
- }
- }
- if (other != NULL && other->reads_needed) {
- if (!other->read_closure_scheduled) {
- GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure, error);
- other->read_closure_scheduled = true;
- }
- other->reads_needed = false;
+ maybe_schedule_op_closure_locked(exec_ctx, other, error);
}
}
+
if (error == GRPC_ERROR_NONE &&
- (op->recv_initial_metadata || op->recv_message ||
+ (op->send_message || op->send_trailing_metadata ||
+ op->recv_initial_metadata || op->recv_message ||
op->recv_trailing_metadata)) {
- // If there are any reads, mark it so that the read closure will react to
- // them
+ // Mark ops that need to be processed by the closure
+ if (op->send_message) {
+ s->send_message_op = op;
+ }
+ if (op->send_trailing_metadata) {
+ s->send_trailing_md_op = op;
+ }
if (op->recv_initial_metadata) {
s->recv_initial_md_op = op;
}
@@ -1038,25 +966,28 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
// We want to initiate the closure if:
- // 1. There is initial metadata and something ready to take that
- // 2. There is a message and something ready to take it
- // 3. There is trailing metadata, even if nothing specifically wants
- // that because that can shut down the message as well
- if ((s->to_read_initial_md_filled && op->recv_initial_metadata) ||
- ((!slice_buffer_list_empty(&s->to_read_message) ||
- s->trailing_md_recvd) &&
- op->recv_message) ||
- (s->to_read_trailing_md_filled)) {
- if (!s->read_closure_scheduled) {
- GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure, GRPC_ERROR_NONE);
- s->read_closure_scheduled = true;
+ // 1. We want to send a message and the other side wants to receive or end
+ // 2. We want to send trailing metadata and there isn't an unmatched send
+ // 3. We want initial metadata and the other side has sent it
+ // 4. We want to receive a message and there is a message ready
+ // 5. There is trailing metadata, even if nothing specifically wants
+ // that because that can shut down the receive message as well
+ if ((op->send_message && other && ((other->recv_message_op != NULL) ||
+ (other->recv_trailing_md_op != NULL))) ||
+ (op->send_trailing_metadata && !op->send_message) ||
+ (op->recv_initial_metadata && s->to_read_initial_md_filled) ||
+ (op->recv_message && other && (other->send_message_op != NULL)) ||
+ (s->to_read_trailing_md_filled || s->trailing_md_recvd)) {
+ if (!s->op_closure_scheduled) {
+ GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_NONE);
+ s->op_closure_scheduled = true;
}
} else {
- s->reads_needed = true;
+ s->ops_needed = true;
}
} else {
if (error != GRPC_ERROR_NONE) {
- // Schedule op's read closures that we didn't push to read state machine
+ // Schedule op's closures that we didn't push to op state machine
if (op->recv_initial_metadata) {
INPROC_LOG(
GPR_DEBUG,
@@ -1164,6 +1095,55 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
}
/*******************************************************************************
+ * INTEGRATION GLUE
+ */
+
+static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset *pollset) {
+ // Nothing to do here
+}
+
+static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
+ grpc_stream *gs, grpc_pollset_set *pollset_set) {
+ // Nothing to do here
+}
+
+static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+ return NULL;
+}
+
+/*******************************************************************************
+ * GLOBAL INIT AND DESTROY
+ */
+static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+
+void grpc_inproc_transport_init(void) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
+ grpc_schedule_on_exec_ctx);
+ g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
+
+ grpc_slice key_tmp = grpc_slice_from_static_string(":path");
+ g_fake_path_key = grpc_slice_intern(key_tmp);
+ grpc_slice_unref_internal(&exec_ctx, key_tmp);
+
+ g_fake_path_value = grpc_slice_from_static_string("/");
+
+ grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
+ g_fake_auth_key = grpc_slice_intern(auth_tmp);
+ grpc_slice_unref_internal(&exec_ctx, auth_tmp);
+
+ g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static const grpc_transport_vtable inproc_vtable = {
+ sizeof(inproc_stream), "inproc", init_stream,
+ set_pollset, set_pollset_set, perform_stream_op,
+ perform_transport_op, destroy_stream, destroy_transport,
+ get_endpoint};
+
+/*******************************************************************************
* Main inproc transport functions
*/
static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
@@ -1172,10 +1152,10 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
grpc_transport **client_transport,
const grpc_channel_args *client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
- inproc_transport *st = gpr_zalloc(sizeof(*st));
- inproc_transport *ct = gpr_zalloc(sizeof(*ct));
+ inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
+ inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
- st->mu = ct->mu = gpr_malloc(sizeof(*st->mu));
+ st->mu = ct->mu = (shared_mu *)gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);
gpr_ref_init(&st->mu->refs, 2);
st->base.vtable = &inproc_vtable;
@@ -1212,8 +1192,8 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
- default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
- default_authority_arg.value.string = "inproc.authority";
+ default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY;
+ default_authority_arg.value.string = (char *)"inproc.authority";
grpc_channel_args *client_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
@@ -1237,61 +1217,6 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server,
return channel;
}
-/*******************************************************************************
- * INTEGRATION GLUE
- */
-
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset *pollset) {
- // Nothing to do here
-}
-
-static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset_set *pollset_set) {
- // Nothing to do here
-}
-
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return gpr_strdup("inproc");
-}
-
-static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
- return NULL;
-}
-
-static const grpc_transport_vtable inproc_vtable = {
- sizeof(inproc_stream), "inproc",
- init_stream, set_pollset,
- set_pollset_set, perform_stream_op,
- perform_transport_op, destroy_stream,
- destroy_transport, get_peer,
- get_endpoint};
-
-/*******************************************************************************
- * GLOBAL INIT AND DESTROY
- */
-static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
-
-void grpc_inproc_transport_init(void) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
- grpc_schedule_on_exec_ctx);
- g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);
-
- grpc_slice key_tmp = grpc_slice_from_static_string(":path");
- g_fake_path_key = grpc_slice_intern(key_tmp);
- grpc_slice_unref_internal(&exec_ctx, key_tmp);
-
- g_fake_path_value = grpc_slice_from_static_string("/");
-
- grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
- g_fake_auth_key = grpc_slice_intern(auth_tmp);
- grpc_slice_unref_internal(&exec_ctx, auth_tmp);
-
- g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
void grpc_inproc_transport_shutdown(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_unref_internal(&exec_ctx, g_empty_slice);
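
The repeated reads_needed / read_closure_scheduled blocks removed above are folded into maybe_schedule_op_closure_locked, whose definition falls outside these hunks. A plausible sketch of that helper, inferred only from the removed inline code and the renamed fields (op_closure, op_closure_scheduled, ops_needed); it is not the verbatim implementation:

    /* Sketch only: inproc_stream is the transport-internal stream type used
       throughout the hunks above; the real body is not part of this diff. */
    static void maybe_schedule_op_closure_locked(grpc_exec_ctx *exec_ctx,
                                                 inproc_stream *s,
                                                 grpc_error *error) {
      if (s != NULL && s->ops_needed && !s->op_closure_scheduled) {
        GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_REF(error));
        s->op_closure_scheduled = true;
        s->ops_needed = false;
      }
    }
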
diff --git a/src/core/lib/support/backoff.c b/src/core/lib/backoff/backoff.cc
index 6dc0df473b..fe0a751817 100644
--- a/src/core/lib/support/backoff.c
+++ b/src/core/lib/backoff/backoff.cc
@@ -16,13 +16,14 @@
*
*/
-#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/backoff/backoff.h"
#include <grpc/support/useful.h>
-void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
- double multiplier, double jitter,
- int64_t min_timeout_millis, int64_t max_timeout_millis) {
+void grpc_backoff_init(grpc_backoff *backoff,
+ grpc_millis initial_connect_timeout, double multiplier,
+ double jitter, grpc_millis min_timeout_millis,
+ grpc_millis max_timeout_millis) {
backoff->initial_connect_timeout = initial_connect_timeout;
backoff->multiplier = multiplier;
backoff->jitter = jitter;
@@ -31,11 +32,11 @@ void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
-gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) {
+grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
- const int64_t first_timeout =
+ const grpc_millis first_timeout =
GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis);
- return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN));
+ return grpc_exec_ctx_now(exec_ctx) + first_timeout;
}
/* Generate a random number between 0 and 1. */
@@ -44,11 +45,11 @@ static double generate_uniform_random_number(uint32_t *rng_state) {
return *rng_state / (double)((uint32_t)1 << 31);
}
-gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
+grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
const double new_timeout_millis =
backoff->multiplier * (double)backoff->current_timeout_millis;
backoff->current_timeout_millis =
- GPR_MIN((int64_t)new_timeout_millis, backoff->max_timeout_millis);
+ GPR_MIN((grpc_millis)new_timeout_millis, backoff->max_timeout_millis);
const double jitter_range_width = backoff->jitter * new_timeout_millis;
const double jitter =
@@ -56,17 +57,17 @@ gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) {
jitter_range_width;
backoff->current_timeout_millis =
- (int64_t)((double)(backoff->current_timeout_millis) + jitter);
+ (grpc_millis)((double)(backoff->current_timeout_millis) + jitter);
- const gpr_timespec current_deadline = gpr_time_add(
- now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN));
+ const grpc_millis current_deadline =
+ grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis;
- const gpr_timespec min_deadline = gpr_time_add(
- now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN));
+ const grpc_millis min_deadline =
+ grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis;
- return gpr_time_max(current_deadline, min_deadline);
+ return GPR_MAX(current_deadline, min_deadline);
}
-void gpr_backoff_reset(gpr_backoff *backoff) {
+void grpc_backoff_reset(grpc_backoff *backoff) {
backoff->current_timeout_millis = backoff->initial_connect_timeout;
}
diff --git a/src/core/lib/support/backoff.h b/src/core/lib/backoff/backoff.h
index 6e0cc3a4b6..80e49ea52a 100644
--- a/src/core/lib/support/backoff.h
+++ b/src/core/lib/backoff/backoff.h
@@ -16,41 +16,51 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_BACKOFF_H
-#define GRPC_CORE_LIB_SUPPORT_BACKOFF_H
+#ifndef GRPC_CORE_LIB_BACKOFF_BACKOFF_H
+#define GRPC_CORE_LIB_BACKOFF_BACKOFF_H
-#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
typedef struct {
/// const: how long to wait after the first failure before retrying
- int64_t initial_connect_timeout;
+ grpc_millis initial_connect_timeout;
/// const: factor with which to multiply backoff after a failed retry
double multiplier;
/// const: amount to randomize backoffs
double jitter;
/// const: minimum time between retries in milliseconds
- int64_t min_timeout_millis;
+ grpc_millis min_timeout_millis;
/// const: maximum time between retries in milliseconds
- int64_t max_timeout_millis;
+ grpc_millis max_timeout_millis;
/// random number generator
uint32_t rng_state;
/// current retry timeout in milliseconds
- int64_t current_timeout_millis;
-} gpr_backoff;
+ grpc_millis current_timeout_millis;
+} grpc_backoff;
/// Initialize backoff machinery - does not need to be destroyed
-void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout,
- double multiplier, double jitter,
- int64_t min_timeout_millis, int64_t max_timeout_millis);
+void grpc_backoff_init(grpc_backoff *backoff,
+ grpc_millis initial_connect_timeout, double multiplier,
+ double jitter, grpc_millis min_timeout_millis,
+ grpc_millis max_timeout_millis);
/// Begin retry loop: returns a timespec for the NEXT retry
-gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now);
+grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
/// Step a retry loop: returns a timespec for the NEXT retry
-gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now);
-/// Reset the backoff, so the next gpr_backoff_step will be a gpr_backoff_begin
+grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
+/// Reset the backoff, so the next grpc_backoff_step will be a
+/// grpc_backoff_begin
/// instead
-void gpr_backoff_reset(gpr_backoff *backoff);
+void grpc_backoff_reset(grpc_backoff *backoff);
+
+#ifdef __cplusplus
+}
+#endif
-#endif /* GRPC_CORE_LIB_SUPPORT_BACKOFF_H */
+#endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */
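
For reference, a minimal sketch of driving the renamed backoff API, based only on the declarations above; the timeout constants are illustrative:

    #include "src/core/lib/backoff/backoff.h"

    /* Sketch only: the numeric values are arbitrary; the calls match the
       declarations in backoff.h above. */
    static void backoff_example(void) {
      grpc_backoff backoff;
      grpc_backoff_init(&backoff, 1000 /* initial connect timeout (ms) */,
                        1.6 /* multiplier */, 0.2 /* jitter */,
                        100 /* min timeout (ms) */, 120000 /* max timeout (ms) */);
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_millis deadline = grpc_backoff_begin(&exec_ctx, &backoff);
      /* ...after each failed attempt... */
      deadline = grpc_backoff_step(&exec_ctx, &backoff);
      (void)deadline; /* would feed a grpc_timer_init() deadline */
      grpc_backoff_reset(&backoff); /* after a successful connection */
      grpc_exec_ctx_finish(&exec_ctx);
    }
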
diff --git a/src/core/lib/channel/channel_args.c b/src/core/lib/channel/channel_args.cc
index 02db798b5c..30248b3c60 100644
--- a/src/core/lib/channel/channel_args.c
+++ b/src/core/lib/channel/channel_args.cc
@@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
- grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *dst =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
+ dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
- grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
+ grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
- const grpc_arg *const *a = ap;
- const grpc_arg *const *b = bp;
+ const grpc_arg *const *a = (const grpc_arg *const *)ap;
+ const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
- grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+ grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
- grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args *b =
+ (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
- b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
@@ -210,7 +212,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
- if (a == NULL) return 0;
+ if (a == NULL) return GRPC_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, a->args[i].key)) {
@@ -224,7 +226,7 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
grpc_stream_compression_algorithm
grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
size_t i;
- if (a == NULL) return 0;
+ if (a == NULL) return GRPC_STREAM_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
@@ -241,7 +243,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -251,7 +253,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -306,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
state == 0) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
gpr_log(GPR_ERROR,
"Tried to disable default compression algorithm '%s'. The "
@@ -322,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
@@ -347,7 +349,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
state == 0) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
0);
gpr_log(GPR_ERROR,
@@ -364,7 +366,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 0599e189c3..1896d35cf4 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -23,6 +23,10 @@
#include <grpc/grpc.h>
#include "src/core/lib/iomgr/socket_mutator.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Channel args are intentionally immutable, to avoid the need for locking.
/** Copy the arguments in \a src into a new instance */
@@ -149,4 +153,8 @@ grpc_arg grpc_channel_arg_integer_create(char *name, int value);
grpc_arg grpc_channel_arg_pointer_create(char *name, void *value,
const grpc_arg_pointer_vtable *vtable);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
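
The (char *) casts added throughout these hunks exist because grpc_arg keys (and helpers such as grpc_channel_arg_integer_create above) take a non-const char *, while under C++ compilation the GRPC_* key macros are const string literals. A small sketch of the pattern, with an arbitrary value:

    #include <grpc/grpc.h>
    #include "src/core/lib/channel/channel_args.h"

    /* Sketch only: illustrates the cast pattern; the chosen algorithm is
       arbitrary. */
    static grpc_channel_args *with_default_compression(
        const grpc_channel_args *base) {
      grpc_arg arg = grpc_channel_arg_integer_create(
          (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, GRPC_COMPRESS_NONE);
      return grpc_channel_args_copy_and_add(base, &arg, 1);
    }
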
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.cc
index 0f8e33c4be..775c8bc667 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.cc
@@ -233,15 +233,10 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
grpc_call_element *next_elem = elem + 1;
+ GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- grpc_call_element *next_elem = elem + 1;
- return next_elem->filter->get_peer(exec_ctx, next_elem);
-}
-
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *channel_info) {
@@ -265,12 +260,3 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
-
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL);
- op->cancel_stream = true;
- op->payload->cancel_stream.cancel_error = error;
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
-}
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index a80f8aa826..5c00c09889 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -40,6 +40,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h"
@@ -69,8 +70,9 @@ typedef struct {
grpc_call_context_element *context;
grpc_slice path;
gpr_timespec start_time;
- gpr_timespec deadline;
+ grpc_millis deadline;
gpr_arena *arena;
+ grpc_call_combiner *call_combiner;
} grpc_call_element_args;
typedef struct {
@@ -150,9 +152,6 @@ typedef struct {
void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem);
- /* Implement grpc_call_get_peer() */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
-
/* Implement grpc_channel_get_info() */
void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info);
@@ -271,8 +270,6 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
stack */
void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_transport_op *op);
-/* Pass through a request to get_peer to the next child element */
-char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
/* Pass through a request to get_channel_info() to the next child element */
void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -284,14 +281,10 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
/* Given the top element of a call stack, get the call stack itself */
grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op);
-void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx,
- grpc_call_element *cur_elem,
- grpc_error *error);
-
extern grpc_tracer_flag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.cc
index c369e33073..b663ebfb52 100644
--- a/src/core/lib/channel/channel_stack_builder.c
+++ b/src/core/lib/channel/channel_stack_builder.cc
@@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
+ grpc_channel_stack_builder *b =
+ (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
- grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
+ grpc_channel_stack_builder_iterator *it =
+ (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@@ -124,6 +126,20 @@ bool grpc_channel_stack_builder_move_prev(
return true;
}
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ GPR_ASSERT(filter_name != NULL);
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_create_iterator_at_first(builder);
+ while (grpc_channel_stack_builder_move_next(it)) {
+ if (grpc_channel_stack_builder_iterator_is_end(it)) break;
+ const char *filter_name_at_it =
+ grpc_channel_stack_builder_iterator_filter_name(it);
+ if (strcmp(filter_name, filter_name_at_it) == 0) break;
+ }
+ return it;
+}
+
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
@@ -169,6 +185,21 @@ bool grpc_channel_stack_builder_append_filter(
return ok;
}
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name) {
+ grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator_find(builder, filter_name);
+ if (grpc_channel_stack_builder_iterator_is_end(it)) {
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return false;
+ }
+ it->node->prev->next = it->node->next;
+ it->node->next->prev = it->node->prev;
+ gpr_free(it->node);
+ grpc_channel_stack_builder_iterator_destroy(it);
+ return true;
+}
+
bool grpc_channel_stack_builder_prepend_filter(
grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func, void *user_data) {
@@ -183,13 +214,13 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
- filter_node *new = gpr_malloc(sizeof(*new));
- new->next = before->next;
- new->prev = before;
- new->next->prev = new->prev->next = new;
- new->filter = filter;
- new->init = post_init_func;
- new->init_arg = user_data;
+ filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node));
+ new_node->next = before->next;
+ new_node->prev = before;
+ new_node->next->prev = new_node->prev->next = new_node;
+ new_node->filter = filter;
+ new_node->init = post_init_func;
+ new_node->init_arg = user_data;
}
bool grpc_channel_stack_builder_add_filter_before(
@@ -237,7 +268,7 @@ grpc_error *grpc_channel_stack_builder_finish(
// create an array of filters
const grpc_channel_filter **filters =
- gpr_malloc(sizeof(*filters) * num_filters);
+ (const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters);
size_t i = 0;
for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
filters[i++] = p->filter;
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index d43e427962..fdff2a2b6d 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -95,6 +95,11 @@ bool grpc_channel_stack_builder_move_next(
bool grpc_channel_stack_builder_move_prev(
grpc_channel_stack_builder_iterator *iterator);
+/// Return an iterator at \a filter_name, or at the end of the list if not
+/// found.
+grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
typedef void (*grpc_post_filter_create_init_func)(
grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg);
@@ -132,6 +137,11 @@ bool grpc_channel_stack_builder_append_filter(
grpc_post_filter_create_init_func post_init_func,
void *user_data) GRPC_MUST_USE_RESULT;
+/// Remove any filter whose name is \a filter_name from \a builder. Returns
+/// true if the filter was found and removed, false otherwise.
+bool grpc_channel_stack_builder_remove_filter(
+ grpc_channel_stack_builder *builder, const char *filter_name);
+
/// Terminate iteration and destroy \a iterator
void grpc_channel_stack_builder_iterator_destroy(
grpc_channel_stack_builder_iterator *iterator);
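
A minimal sketch of how the new find/remove helpers declared above might be used from a channel-init hook; the filter name is a placeholder:

    #include <stdbool.h>
    #include "src/core/lib/channel/channel_stack_builder.h"

    /* Sketch only: "some_filter" is a placeholder filter name. */
    static bool builder_has_filter(grpc_channel_stack_builder *builder,
                                   const char *name) {
      grpc_channel_stack_builder_iterator *it =
          grpc_channel_stack_builder_iterator_find(builder, name);
      bool found = !grpc_channel_stack_builder_iterator_is_end(it);
      grpc_channel_stack_builder_iterator_destroy(it);
      return found;
    }

    static void drop_filter_if_present(grpc_channel_stack_builder *builder) {
      if (!grpc_channel_stack_builder_remove_filter(builder, "some_filter")) {
        /* Filter was not present; nothing was removed. */
      }
    }
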
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.cc
index af06ca802e..4f37908958 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.cc
@@ -36,7 +36,57 @@ typedef struct connected_channel_channel_data {
grpc_transport *transport;
} channel_data;
-typedef struct connected_channel_call_data { void *unused; } call_data;
+typedef struct {
+ grpc_closure closure;
+ grpc_closure *original_closure;
+ grpc_call_combiner *call_combiner;
+ const char *reason;
+} callback_state;
+
+typedef struct connected_channel_call_data {
+ grpc_call_combiner *call_combiner;
+ // Closures used for returning results on the call combiner.
+ callback_state on_complete[6]; // Max number of pending batches.
+ callback_state recv_initial_metadata_ready;
+ callback_state recv_message_ready;
+} call_data;
+
+static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ callback_state *state = (callback_state *)arg;
+ GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
+ state->original_closure, GRPC_ERROR_REF(error),
+ state->reason);
+}
+
+static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ run_in_call_combiner(exec_ctx, arg, error);
+ gpr_free(arg);
+}
+
+static void intercept_callback(call_data *calld, callback_state *state,
+ bool free_when_done, const char *reason,
+ grpc_closure **original_closure) {
+ state->original_closure = *original_closure;
+ state->call_combiner = calld->call_combiner;
+ state->reason = reason;
+ *original_closure = GRPC_CLOSURE_INIT(
+ &state->closure,
+ free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner,
+ state, grpc_schedule_on_exec_ctx);
+}
+
+static callback_state *get_state_for_batch(
+ call_data *calld, grpc_transport_stream_op_batch *batch) {
+ if (batch->send_initial_metadata) return &calld->on_complete[0];
+ if (batch->send_message) return &calld->on_complete[1];
+ if (batch->send_trailing_metadata) return &calld->on_complete[2];
+ if (batch->recv_initial_metadata) return &calld->on_complete[3];
+ if (batch->recv_message) return &calld->on_complete[4];
+ if (batch->recv_trailing_metadata) return &calld->on_complete[5];
+ GPR_UNREACHABLE_CODE(return NULL);
+}
/* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal
@@ -49,19 +99,44 @@ typedef struct connected_channel_call_data { void *unused; } call_data;
into transport stream operations */
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-
+ grpc_transport_stream_op_batch *batch) {
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ if (batch->recv_initial_metadata) {
+ callback_state *state = &calld->recv_initial_metadata_ready;
+ intercept_callback(
+ calld, state, false, "recv_initial_metadata_ready",
+ &batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
+ }
+ if (batch->recv_message) {
+ callback_state *state = &calld->recv_message_ready;
+ intercept_callback(calld, state, false, "recv_message_ready",
+ &batch->payload->recv_message.recv_message_ready);
+ }
+ if (batch->cancel_stream) {
+ // There can be more than one cancellation batch in flight at any
+ // given time, so we can't just pick out a fixed index into
+ // calld->on_complete like we can for the other ops. However,
+ // cancellation isn't in the fast path, so we just allocate a new
+ // closure for each one.
+ callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
+ intercept_callback(calld, state, true, "on_complete (cancel_stream)",
+ &batch->on_complete);
+ } else {
+ callback_state *state = get_state_for_batch(calld, batch);
+ intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
+ }
grpc_transport_perform_stream_op(exec_ctx, chand->transport,
- TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
+ TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+ batch);
+ GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+ "passed batch to transport");
}
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@@ -69,8 +144,9 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena);
@@ -82,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@@ -92,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@@ -118,11 +194,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
}
-static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- return grpc_transport_get_peer(exec_ctx, chand->transport);
-}
-
/* No-op. */
static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
@@ -138,7 +209,6 @@ const grpc_channel_filter grpc_connected_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- con_get_peer,
con_get_channel_info,
"connected",
};
@@ -148,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = t;
+ cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -156,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
- channel_stack->call_stack_size += grpc_transport_stream_size(t);
+ channel_stack->call_stack_size +=
+ grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -170,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
diff --git a/src/core/lib/channel/connected_channel.h b/src/core/lib/channel/connected_channel.h
index 10c98cce54..4615727baa 100644
--- a/src/core/lib/channel/connected_channel.h
+++ b/src/core/lib/channel/connected_channel.h
@@ -21,6 +21,10 @@
#include "src/core/lib/channel/channel_stack_builder.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_connected_filter;
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -30,4 +34,8 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
/* Debug helper to dig the transport stream out of a call element */
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H */
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.cc
index 2cb83f4114..b27ee37e5b 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.cc
@@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
- grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
+ grpc_handshake_manager* mgr =
+ (grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
- mgr->handshakers =
- gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
+ mgr->handshakers = (grpc_handshaker**)gpr_realloc(
+ mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
- grpc_handshake_manager* mgr = arg;
+ grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@@ -231,7 +232,7 @@ static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
- gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
+ grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data) {
gpr_mu_lock(&mgr->mu);
GPR_ASSERT(mgr->index == 0);
@@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
- mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
+ mgr->args.read_buffer =
+ (grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;
@@ -253,9 +255,7 @@ void grpc_handshake_manager_do_handshake(
gpr_ref(&mgr->refs);
GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &mgr->deadline_timer,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &mgr->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout);
// Start first handshaker, which also owns a ref.
gpr_ref(&mgr->refs);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE);
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index eb9a59bd08..8ed38c15ba 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -26,6 +26,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/tcp_server.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Handshakers are used to perform initial handshakes on a connection
/// before the client sends the initial request. Some examples of what
/// a handshaker can be used for includes support for HTTP CONNECT on
@@ -145,7 +149,7 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
void grpc_handshake_manager_do_handshake(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
- gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
+ grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
grpc_iomgr_cb_func on_handshake_done, void* user_data);
/// Add \a mgr to the server side list of all pending handshake managers, the
@@ -164,4 +168,8 @@ void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head,
void grpc_handshake_manager_pending_list_shutdown_all(
grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
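
Callers of grpc_handshake_manager_do_handshake now pass a grpc_millis deadline instead of a gpr_timespec. A hedged sketch of computing one from the exec_ctx clock, mirroring the grpc_exec_ctx_now() usage elsewhere in this change; the 30-second timeout is illustrative:

    #include <grpc/support/time.h>
    #include "src/core/lib/channel/handshaker.h"

    /* Sketch only: the timeout is arbitrary; the call matches the updated
       declaration above. */
    static void start_handshake(grpc_exec_ctx* exec_ctx,
                                grpc_handshake_manager* mgr,
                                grpc_endpoint* endpoint,
                                const grpc_channel_args* channel_args,
                                grpc_tcp_server_acceptor* acceptor,
                                grpc_iomgr_cb_func on_handshake_done,
                                void* user_data) {
      grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 30 * GPR_MS_PER_SEC;
      grpc_handshake_manager_do_handshake(exec_ctx, mgr, endpoint, channel_args,
                                          deadline, acceptor, on_handshake_done,
                                          user_data);
    }
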
diff --git a/src/core/lib/channel/handshaker_factory.c b/src/core/lib/channel/handshaker_factory.cc
index 4deb280c60..4deb280c60 100644
--- a/src/core/lib/channel/handshaker_factory.c
+++ b/src/core/lib/channel/handshaker_factory.cc
diff --git a/src/core/lib/channel/handshaker_factory.h b/src/core/lib/channel/handshaker_factory.h
index 6238e735dc..59008adf05 100644
--- a/src/core/lib/channel/handshaker_factory.h
+++ b/src/core/lib/channel/handshaker_factory.h
@@ -24,6 +24,10 @@
#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// A handshaker factory is used to create handshakers.
typedef struct grpc_handshaker_factory grpc_handshaker_factory;
@@ -48,4 +52,8 @@ void grpc_handshaker_factory_add_handshakers(
void grpc_handshaker_factory_destroy(
grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H */
diff --git a/src/core/lib/channel/handshaker_registry.c b/src/core/lib/channel/handshaker_registry.cc
index 8c4bc3aa00..c6bc87d704 100644
--- a/src/core/lib/channel/handshaker_registry.c
+++ b/src/core/lib/channel/handshaker_registry.cc
@@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
- list->list = gpr_realloc(
+ list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,
diff --git a/src/core/lib/channel/handshaker_registry.h b/src/core/lib/channel/handshaker_registry.h
index a3b2ac1dc7..ddd280bea8 100644
--- a/src/core/lib/channel/handshaker_registry.h
+++ b/src/core/lib/channel/handshaker_registry.h
@@ -24,6 +24,10 @@
#include "src/core/lib/channel/handshaker_factory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
HANDSHAKER_CLIENT = 0,
HANDSHAKER_SERVER,
@@ -45,4 +49,8 @@ void grpc_handshakers_add(grpc_exec_ctx* exec_ctx,
const grpc_channel_args* args,
grpc_handshake_manager* handshake_mgr);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H */
diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h
index 08feafc1bb..17caf58f69 100644
--- a/src/core/lib/compression/algorithm_metadata.h
+++ b/src/core/lib/compression/algorithm_metadata.h
@@ -22,6 +22,10 @@
#include <grpc/compression.h>
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Return compression algorithm based metadata value */
grpc_slice grpc_compression_algorithm_slice(
grpc_compression_algorithm algorithm);
@@ -49,4 +53,8 @@ grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
grpc_slice str);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.cc
index ec84c01811..1cfac23129 100644
--- a/src/core/lib/compression/compression.c
+++ b/src/core/lib/compression/compression.cc
@@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse(
}
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
- char **name) {
+ const char **name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
switch (algorithm) {
@@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
}
int grpc_stream_compression_algorithm_name(
- grpc_stream_compression_algorithm algorithm, char **name) {
+ grpc_stream_compression_algorithm algorithm, const char **name) {
GRPC_API_TRACE(
"grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
diff --git a/src/core/lib/compression/message_compress.c b/src/core/lib/compression/message_compress.cc
index c051e28864..c051e28864 100644
--- a/src/core/lib/compression/message_compress.c
+++ b/src/core/lib/compression/message_compress.cc
diff --git a/src/core/lib/compression/message_compress.h b/src/core/lib/compression/message_compress.h
index ca8ca37f8e..fffe175fd2 100644
--- a/src/core/lib/compression/message_compress.h
+++ b/src/core/lib/compression/message_compress.h
@@ -22,6 +22,10 @@
#include <grpc/compression.h>
#include <grpc/slice_buffer.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* compress 'input' to 'output' using 'algorithm'.
On success, appends compressed slices to output and returns 1.
On failure, appends uncompressed slices to output and returns 0. */
@@ -36,4 +40,8 @@ int grpc_msg_decompress(grpc_exec_ctx* exec_ctx,
grpc_compression_algorithm algorithm,
grpc_slice_buffer* input, grpc_slice_buffer* output);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */
diff --git a/src/core/lib/compression/stream_compression.cc b/src/core/lib/compression/stream_compression.cc
new file mode 100644
index 0000000000..7faeb0d34f
--- /dev/null
+++ b/src/core/lib/compression/stream_compression.cc
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression.h"
+#include "src/core/lib/compression/stream_compression_gzip.h"
+
+extern "C" const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
+
+bool grpc_stream_compress(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
+ flush);
+}
+
+bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size,
+ bool *end_of_context) {
+ return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
+ end_of_context);
+}
+
+grpc_stream_compression_context *grpc_stream_compression_context_create(
+ grpc_stream_compression_method method) {
+ switch (method) {
+ case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS:
+ return grpc_stream_compression_identity_vtable.context_create(method);
+ case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS:
+ case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS:
+ return grpc_stream_compression_gzip_vtable.context_create(method);
+ default:
+ gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method);
+ return NULL;
+ }
+}
+
+void grpc_stream_compression_context_destroy(
+ grpc_stream_compression_context *ctx) {
+ ctx->vtable->context_destroy(ctx);
+}
+
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress,
+ grpc_stream_compression_method *method) {
+ if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
+ : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
+ return 1;
+ } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) {
+ *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS
+ : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS;
+ return 1;
+ } else {
+ return 0;
+ }
+}
diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h
index 844dff81a3..6ee3ac1966 100644
--- a/src/core/lib/compression/stream_compression.h
+++ b/src/core/lib/compression/stream_compression.h
@@ -24,15 +24,24 @@
#include <grpc/slice_buffer.h>
#include <zlib.h>
+#include "src/core/lib/transport/static_metadata.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
+
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
- z_stream zs;
- int (*flate)(z_stream *zs, int flush);
+ const grpc_stream_compression_vtable *vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
- GRPC_STREAM_COMPRESSION_COMPRESS = 0,
- GRPC_STREAM_COMPRESSION_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0,
+ GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_COMPRESS,
+ GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS,
GRPC_STREAM_COMPRESSION_METHOD_COUNT
} grpc_stream_compression_method;
@@ -43,6 +52,19 @@ typedef enum grpc_stream_compression_flush {
GRPC_STREAM_COMPRESSION_FLUSH_COUNT
} grpc_stream_compression_flush;
+struct grpc_stream_compression_vtable {
+ bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size,
+ size_t max_output_size, grpc_stream_compression_flush flush);
+ bool (*decompress)(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in, grpc_slice_buffer *out,
+ size_t *output_size, size_t max_output_size,
+ bool *end_of_context);
+ grpc_stream_compression_context *(*context_create)(
+ grpc_stream_compression_method method);
+ void (*context_destroy)(grpc_stream_compression_context *ctx);
+};
+
/**
* Compress bytes provided in \a in with a given context, with an optional flush
* at the end of compression. Emits at most \a max_output_size compressed bytes
@@ -87,4 +109,14 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
void grpc_stream_compression_context_destroy(
grpc_stream_compression_context *ctx);
+/**
+ * Parse stream compression method based on algorithm name
+ */
+int grpc_stream_compression_method_parse(
+ grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif
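
A minimal sketch of driving the reworked, vtable-dispatched API through the wrapper functions declared above; the flush choice and buffers are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <grpc/slice_buffer.h>
    #include "src/core/lib/compression/stream_compression.h"

    /* Sketch only: assumes the caller owns the in/out slice buffers; the SYNC
       flush and the unlimited output cap are arbitrary choices. */
    static bool gzip_compress_buffer(grpc_slice_buffer *in,
                                     grpc_slice_buffer *out,
                                     size_t *compressed_size) {
      grpc_stream_compression_context *ctx =
          grpc_stream_compression_context_create(
              GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
      if (ctx == NULL) return false;
      bool ok = grpc_stream_compress(ctx, in, out, compressed_size,
                                     SIZE_MAX /* max_output_size */,
                                     GRPC_STREAM_COMPRESSION_FLUSH_SYNC);
      grpc_stream_compression_context_destroy(ctx);
      return ok;
    }
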
diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression_gzip.cc
index df13d53e06..087b018be5 100644
--- a/src/core/lib/compression/stream_compression.c
+++ b/src/core/lib/compression/stream_compression_gzip.cc
@@ -19,13 +19,20 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
-#include "src/core/lib/compression/stream_compression.h"
+#include "src/core/lib/compression/stream_compression_gzip.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#define OUTPUT_BLOCK_SIZE (1024)
-static bool gzip_flate(grpc_stream_compression_context *ctx,
+typedef struct grpc_stream_compression_context_gzip {
+ grpc_stream_compression_context base;
+
+ z_stream zs;
+ int (*flate)(z_stream *zs, int flush);
+} grpc_stream_compression_context_gzip;
+
+static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
grpc_slice_buffer *in, grpc_slice_buffer *out,
size_t *output_size, size_t max_output_size, int flush,
bool *end_of_context) {
@@ -124,11 +131,18 @@ static bool gzip_flate(grpc_stream_compression_context *ctx,
return true;
}
-bool grpc_stream_compress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
- grpc_stream_compression_flush flush) {
- GPR_ASSERT(ctx->flate == deflate);
+static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == deflate);
int gzip_flush;
switch (flush) {
case GRPC_STREAM_COMPRESSION_FLUSH_NONE:
@@ -143,49 +157,71 @@ bool grpc_stream_compress(grpc_stream_compression_context *ctx,
default:
gzip_flush = 0;
}
- return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush,
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush,
NULL);
}
-bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
- bool *end_of_context) {
- GPR_ASSERT(ctx->flate == inflate);
- return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH,
- end_of_context);
+static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ GPR_ASSERT(gzip_ctx->flate == inflate);
+ return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
+ Z_SYNC_FLUSH, end_of_context);
}
-grpc_stream_compression_context *grpc_stream_compression_context_create(
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_gzip(
grpc_stream_compression_method method) {
- grpc_stream_compression_context *ctx =
- gpr_zalloc(sizeof(grpc_stream_compression_context));
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)gpr_zalloc(
+ sizeof(grpc_stream_compression_context_gzip));
int r;
- if (ctx == NULL) {
+ if (gzip_ctx == NULL) {
return NULL;
}
- if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) {
- r = inflateInit2(&ctx->zs, 0x1F);
- ctx->flate = inflate;
+ if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) {
+ r = inflateInit2(&gzip_ctx->zs, 0x1F);
+ gzip_ctx->flate = inflate;
} else {
- r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
+ r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8,
Z_DEFAULT_STRATEGY);
- ctx->flate = deflate;
+ gzip_ctx->flate = deflate;
}
if (r != Z_OK) {
- gpr_free(ctx);
+ gpr_free(gzip_ctx);
return NULL;
}
- return ctx;
+ gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
+ return (grpc_stream_compression_context *)gzip_ctx;
}
-void grpc_stream_compression_context_destroy(
+static void grpc_stream_compression_context_destroy_gzip(
grpc_stream_compression_context *ctx) {
- if (ctx->flate == inflate) {
- inflateEnd(&ctx->zs);
+ if (ctx == NULL) {
+ return;
+ }
+ grpc_stream_compression_context_gzip *gzip_ctx =
+ (grpc_stream_compression_context_gzip *)ctx;
+ if (gzip_ctx->flate == inflate) {
+ inflateEnd(&gzip_ctx->zs);
} else {
- deflateEnd(&ctx->zs);
+ deflateEnd(&gzip_ctx->zs);
}
gpr_free(ctx);
}
+
+const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = {
+ grpc_stream_compress_gzip, grpc_stream_decompress_gzip,
+ grpc_stream_compression_context_create_gzip,
+ grpc_stream_compression_context_destroy_gzip};
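Taken together with the header change above, an illustrative round trip through the gzip path could look like the sketch below. The helper name is made up, error handling is omitted, and GRPC_STREAM_COMPRESSION_FLUSH_FINISH is assumed to be one of the flush values in the (only partially visible) grpc_stream_compression_flush enum.

/* Hypothetical helper: compress `in` into `out`, then inflate it back into
   `roundtrip`, driving the gzip vtable through the public entry points. */
static void gzip_round_trip(grpc_slice_buffer *in, grpc_slice_buffer *out,
                            grpc_slice_buffer *roundtrip) {
  grpc_stream_compression_context *cctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
  grpc_stream_compression_context *dctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
  size_t out_size;
  bool end_of_context;
  grpc_stream_compress(cctx, in, out, &out_size, ~(size_t)0,
                       GRPC_STREAM_COMPRESSION_FLUSH_FINISH);
  grpc_stream_decompress(dctx, out, roundtrip, &out_size, ~(size_t)0,
                         &end_of_context);
  grpc_stream_compression_context_destroy(cctx);
  grpc_stream_compression_context_destroy(dctx);
}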
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h b/src/core/lib/compression/stream_compression_gzip.h
index 2ca68cc9d1..a3f1b0406f 100644
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h
+++ b/src/core/lib/compression/stream_compression_gzip.h
@@ -16,13 +16,19 @@
*
*/
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
+#include "src/core/lib/compression/stream_compression.h"
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly);
+#ifdef __cplusplus
+extern "C" {
+#endif
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */
+extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/core/lib/compression/stream_compression_identity.cc b/src/core/lib/compression/stream_compression_identity.cc
new file mode 100644
index 0000000000..9b2e6062e1
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_identity.cc
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/compression/stream_compression_identity.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define OUTPUT_BLOCK_SIZE (1024)
+
+/* Singleton context used for all identity streams. */
+static grpc_stream_compression_context identity_ctx = {
+ &grpc_stream_compression_identity_vtable};
+
+static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size) {
+ if (max_output_size >= in->length) {
+ if (output_size) {
+ *output_size = in->length;
+ }
+ grpc_slice_buffer_move_into(in, out);
+ } else {
+ if (output_size) {
+ *output_size = max_output_size;
+ }
+ grpc_slice_buffer_move_first(in, max_output_size, out);
+ }
+}
+
+static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
+ grpc_slice_buffer *in,
+ grpc_slice_buffer *out,
+ size_t *output_size,
+ size_t max_output_size,
+ grpc_stream_compression_flush flush) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ return true;
+}
+
+static bool grpc_stream_decompress_identity(
+ grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
+ grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
+ bool *end_of_context) {
+ if (ctx == NULL) {
+ return false;
+ }
+ grpc_stream_compression_pass_through(in, out, output_size, max_output_size);
+ if (end_of_context) {
+ *end_of_context = false;
+ }
+ return true;
+}
+
+static grpc_stream_compression_context *
+grpc_stream_compression_context_create_identity(
+ grpc_stream_compression_method method) {
+ GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
+ method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
+ /* No context needed in this case. Use fake context instead. */
+ return (grpc_stream_compression_context *)&identity_ctx;
+}
+
+static void grpc_stream_compression_context_destroy_identity(
+ grpc_stream_compression_context *ctx) {
+ return;
+}
+
+const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = {
+ grpc_stream_compress_identity, grpc_stream_decompress_identity,
+ grpc_stream_compression_context_create_identity,
+ grpc_stream_compression_context_destroy_identity};
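The identity implementation is deliberately allocation-free: every create call returns the address of the static identity_ctx and destroy does nothing. A small sketch of what that implies for callers, assuming the public create/destroy entry points route the IDENTITY_* methods to this vtable:

static void identity_contexts_are_shared(void) {
  grpc_stream_compression_context *a = grpc_stream_compression_context_create(
      GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS);
  grpc_stream_compression_context *b = grpc_stream_compression_context_create(
      GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
  GPR_ASSERT(a == b);                          /* both are &identity_ctx */
  grpc_stream_compression_context_destroy(a);  /* no-op by design */
  grpc_stream_compression_context_destroy(b);  /* no-op by design */
}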
diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h
new file mode 100644
index 0000000000..3a729fafad
--- /dev/null
+++ b/src/core/lib/compression/stream_compression_identity.h
@@ -0,0 +1,35 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
+#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H
+
+#include "src/core/lib/compression/stream_compression.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const grpc_stream_compression_vtable
+ grpc_stream_compression_identity_vtable;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c
deleted file mode 100644
index 4dbd94c724..0000000000
--- a/src/core/lib/debug/stats.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/debug/stats.h"
-
-#include <inttypes.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/support/string.h"
-
-grpc_stats_data *grpc_stats_per_cpu_storage = NULL;
-static size_t g_num_cores;
-
-void grpc_stats_init(void) {
- g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
- grpc_stats_per_cpu_storage =
- gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
-}
-
-void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
-
-void grpc_stats_collect(grpc_stats_data *output) {
- memset(output, 0, sizeof(*output));
- for (size_t core = 0; core < g_num_cores; core++) {
- for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
- output->counters[i] += gpr_atm_no_barrier_load(
- &grpc_stats_per_cpu_storage[core].counters[i]);
- }
- }
-}
-
-char *grpc_stats_data_as_json(const grpc_stats_data *data) {
- gpr_strvec v;
- char *tmp;
- bool is_first = true;
- gpr_strvec_init(&v);
- gpr_strvec_add(&v, gpr_strdup("{"));
- for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
- gpr_asprintf(&tmp, "%s\"%s\": %" PRIdPTR, is_first ? "" : ", ",
- grpc_stats_counter_name[i], data->counters[i]);
- gpr_strvec_add(&v, tmp);
- is_first = false;
- }
- gpr_strvec_add(&v, gpr_strdup("}"));
- tmp = gpr_strvec_flatten(&v, NULL);
- gpr_strvec_destroy(&v);
- return tmp;
-}
diff --git a/src/core/lib/debug/stats.cc b/src/core/lib/debug/stats.cc
new file mode 100644
index 0000000000..4096384dd9
--- /dev/null
+++ b/src/core/lib/debug/stats.cc
@@ -0,0 +1,174 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/debug/stats.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/support/string.h"
+
+grpc_stats_data *grpc_stats_per_cpu_storage = NULL;
+static size_t g_num_cores;
+
+void grpc_stats_init(void) {
+ g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
+ grpc_stats_per_cpu_storage =
+ (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
+}
+
+void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
+
+void grpc_stats_collect(grpc_stats_data *output) {
+ memset(output, 0, sizeof(*output));
+ for (size_t core = 0; core < g_num_cores; core++) {
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ output->counters[i] += gpr_atm_no_barrier_load(
+ &grpc_stats_per_cpu_storage[core].counters[i]);
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+ output->histograms[i] += gpr_atm_no_barrier_load(
+ &grpc_stats_per_cpu_storage[core].histograms[i]);
+ }
+ }
+}
+
+void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
+ grpc_stats_data *c) {
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ c->counters[i] = b->counters[i] - a->counters[i];
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
+ c->histograms[i] = b->histograms[i] - a->histograms[i];
+ }
+}
+
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
+ const int *table, int table_size) {
+ GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
+ const int *const start = table;
+ while (table_size > 0) {
+ int step = table_size / 2;
+ const int *it = table + step;
+ if (value >= *it) {
+ table = it + 1;
+ table_size -= step + 1;
+ } else {
+ table_size = step;
+ }
+ }
+ return (int)(table - start) - 1;
+}
+
+size_t grpc_stats_histo_count(const grpc_stats_data *stats,
+ grpc_stats_histograms histogram) {
+ size_t sum = 0;
+ for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) {
+ sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i];
+ }
+ return sum;
+}
+
+static double threshold_for_count_below(const gpr_atm *bucket_counts,
+ const int *bucket_boundaries,
+ int num_buckets, double count_below) {
+ double count_so_far;
+ double lower_bound;
+ double upper_bound;
+ int lower_idx;
+ int upper_idx;
+
+ /* find the lowest bucket that gets us above count_below */
+ count_so_far = 0.0;
+ for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) {
+ count_so_far += (double)bucket_counts[lower_idx];
+ if (count_so_far >= count_below) {
+ break;
+ }
+ }
+ if (count_so_far == count_below) {
+ /* this bucket hits the threshold exactly... we should be midway through
+ any run of zero values following the bucket */
+ for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
+ if (bucket_counts[upper_idx]) {
+ break;
+ }
+ }
+ return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
+ } else {
+ /* treat values as uniform throughout the bucket, and find where this value
+ should lie */
+ lower_bound = bucket_boundaries[lower_idx];
+ upper_bound = bucket_boundaries[lower_idx + 1];
+ return upper_bound -
+ (upper_bound - lower_bound) * (count_so_far - count_below) /
+ (double)bucket_counts[lower_idx];
+ }
+}
+
+double grpc_stats_histo_percentile(const grpc_stats_data *stats,
+ grpc_stats_histograms histogram,
+ double percentile) {
+ size_t count = grpc_stats_histo_count(stats, histogram);
+ if (count == 0) return 0.0;
+ return threshold_for_count_below(
+ stats->histograms + grpc_stats_histo_start[histogram],
+ grpc_stats_histo_bucket_boundaries[histogram],
+ grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0);
+}
+
+char *grpc_stats_data_as_json(const grpc_stats_data *data) {
+ gpr_strvec v;
+ char *tmp;
+ bool is_first = true;
+ gpr_strvec_init(&v);
+ gpr_strvec_add(&v, gpr_strdup("{"));
+ for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ gpr_asprintf(&tmp, "%s\"%s\": %" PRIdPTR, is_first ? "" : ", ",
+ grpc_stats_counter_name[i], data->counters[i]);
+ gpr_strvec_add(&v, tmp);
+ is_first = false;
+ }
+ for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
+ gpr_asprintf(&tmp, "%s\"%s\": [", is_first ? "" : ", ",
+ grpc_stats_histogram_name[i]);
+ gpr_strvec_add(&v, tmp);
+ for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+ gpr_asprintf(&tmp, "%s%" PRIdPTR, j == 0 ? "" : ",",
+ data->histograms[grpc_stats_histo_start[i] + j]);
+ gpr_strvec_add(&v, tmp);
+ }
+ gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]);
+ gpr_strvec_add(&v, tmp);
+ for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+ gpr_asprintf(&tmp, "%s%d", j == 0 ? "" : ",",
+ grpc_stats_histo_bucket_boundaries[i][j]);
+ gpr_strvec_add(&v, tmp);
+ }
+ gpr_strvec_add(&v, gpr_strdup("]"));
+ is_first = false;
+ }
+ gpr_strvec_add(&v, gpr_strdup("}"));
+ tmp = gpr_strvec_flatten(&v, NULL);
+ gpr_strvec_destroy(&v);
+ return tmp;
+}
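grpc_stats_histo_find_bucket_slow above is an upper-bound binary search over a histogram's bucket-boundary table: it returns the index of the last boundary that is less than or equal to the value, and additionally bumps the HISTOGRAM_SLOW_LOOKUPS counter. A minimal sketch of just the search, with a worked example against one of the generated tables that appears later in this diff:

/* Sketch: the same lower-bound search, minus the slow-lookup counter bump,
   so the arithmetic can be checked in isolation. */
static int find_bucket(int value, const int *table, int table_size) {
  const int *const start = table;
  while (table_size > 0) {
    int step = table_size / 2;
    const int *it = table + step;
    if (value >= *it) {
      /* value is at or past this boundary; keep searching to the right */
      table = it + 1;
      table_size -= step + 1;
    } else {
      table_size = step;
    }
  }
  /* `table` now points just past the last boundary <= value */
  return (int)(table - start) - 1;
}
/* With the 9-entry table {0, 1, 2, 4, 7, 13, 23, 39, 64} (grpc_stats_table_8
   below): find_bucket(10, t, 9) == 4, since 7 <= 10 < 13, and
   find_bucket(64, t, 9) == 8. */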
diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h
index 563b108dff..fec1d651e6 100644
--- a/src/core/lib/debug/stats.h
+++ b/src/core/lib/debug/stats.h
@@ -23,8 +23,13 @@
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_stats_data {
gpr_atm counters[GRPC_STATS_COUNTER_COUNT];
+ gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS];
} grpc_stats_data;
extern grpc_stats_data *grpc_stats_per_cpu_storage;
@@ -36,9 +41,29 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage;
(gpr_atm_no_barrier_fetch_add( \
&GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1))
+#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \
+ (gpr_atm_no_barrier_fetch_add( \
+ &GRPC_THREAD_STATS_DATA((exec_ctx)) \
+ ->histograms[histogram##_FIRST_SLOT + (index)], \
+ 1))
+
void grpc_stats_init(void);
void grpc_stats_shutdown(void);
void grpc_stats_collect(grpc_stats_data *output);
+// c = b-a
+void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
+ grpc_stats_data *c);
char *grpc_stats_data_as_json(const grpc_stats_data *data);
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
+ const int *table, int table_size);
+double grpc_stats_histo_percentile(const grpc_stats_data *data,
+ grpc_stats_histograms histogram,
+ double percentile);
+size_t grpc_stats_histo_count(const grpc_stats_data *data,
+ grpc_stats_histograms histogram);
+
+#ifdef __cplusplus
+}
+#endif
#endif
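With histograms stored alongside the counters, a consumer of this header can snapshot and query process-wide stats roughly as follows. This is a sketch only: dump_stats_example and its log message are illustrative, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE is one of the generated enum values from stats_data.h shown later in this diff, and grpc/support/log.h plus grpc/support/alloc.h are assumed for gpr_log and gpr_free.

static void dump_stats_example(void) {
  grpc_stats_data snapshot;
  grpc_stats_collect(&snapshot); /* sums every per-CPU shard */
  double p99 = grpc_stats_histo_percentile(
      &snapshot, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 99.0);
  char *json = grpc_stats_data_as_json(&snapshot);
  gpr_log(GPR_INFO, "tcp write p99=%f stats=%s", p99, json);
  gpr_free(json);
}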
diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc
new file mode 100644
index 0000000000..5d737c56cb
--- /dev/null
+++ b/src/core/lib/debug/stats_data.cc
@@ -0,0 +1,707 @@
+/*
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Automatically generated by tools/codegen/core/gen_stats_data.py
+ */
+
+#include "src/core/lib/debug/stats_data.h"
+#include <grpc/support/useful.h>
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
+ "client_calls_created",
+ "server_calls_created",
+ "cqs_created",
+ "client_channels_created",
+ "client_subchannels_created",
+ "server_channels_created",
+ "syscall_poll",
+ "syscall_wait",
+ "pollset_kick",
+ "pollset_kicked_without_poller",
+ "pollset_kicked_again",
+ "pollset_kick_wakeup_fd",
+ "pollset_kick_wakeup_cv",
+ "pollset_kick_own_thread",
+ "histogram_slow_lookups",
+ "syscall_write",
+ "syscall_read",
+ "tcp_backup_pollers_created",
+ "tcp_backup_poller_polls",
+ "http2_op_batches",
+ "http2_op_cancel",
+ "http2_op_send_initial_metadata",
+ "http2_op_send_message",
+ "http2_op_send_trailing_metadata",
+ "http2_op_recv_initial_metadata",
+ "http2_op_recv_message",
+ "http2_op_recv_trailing_metadata",
+ "http2_settings_writes",
+ "http2_pings_sent",
+ "http2_writes_begun",
+ "http2_writes_offloaded",
+ "http2_writes_continued",
+ "http2_partial_writes",
+ "http2_initiate_write_due_to_initial_write",
+ "http2_initiate_write_due_to_start_new_stream",
+ "http2_initiate_write_due_to_send_message",
+ "http2_initiate_write_due_to_send_initial_metadata",
+ "http2_initiate_write_due_to_send_trailing_metadata",
+ "http2_initiate_write_due_to_retry_send_ping",
+ "http2_initiate_write_due_to_continue_pings",
+ "http2_initiate_write_due_to_goaway_sent",
+ "http2_initiate_write_due_to_rst_stream",
+ "http2_initiate_write_due_to_close_from_api",
+ "http2_initiate_write_due_to_stream_flow_control",
+ "http2_initiate_write_due_to_transport_flow_control",
+ "http2_initiate_write_due_to_send_settings",
+ "http2_initiate_write_due_to_bdp_estimator_ping",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update",
+ "http2_initiate_write_due_to_application_ping",
+ "http2_initiate_write_due_to_keepalive_ping",
+ "http2_initiate_write_due_to_transport_flow_control_unstalled",
+ "http2_initiate_write_due_to_ping_response",
+ "http2_initiate_write_due_to_force_rst_stream",
+ "http2_spurious_writes_begun",
+ "hpack_recv_indexed",
+ "hpack_recv_lithdr_incidx",
+ "hpack_recv_lithdr_incidx_v",
+ "hpack_recv_lithdr_notidx",
+ "hpack_recv_lithdr_notidx_v",
+ "hpack_recv_lithdr_nvridx",
+ "hpack_recv_lithdr_nvridx_v",
+ "hpack_recv_uncompressed",
+ "hpack_recv_huffman",
+ "hpack_recv_binary",
+ "hpack_recv_binary_base64",
+ "hpack_send_indexed",
+ "hpack_send_lithdr_incidx",
+ "hpack_send_lithdr_incidx_v",
+ "hpack_send_lithdr_notidx",
+ "hpack_send_lithdr_notidx_v",
+ "hpack_send_lithdr_nvridx",
+ "hpack_send_lithdr_nvridx_v",
+ "hpack_send_uncompressed",
+ "hpack_send_huffman",
+ "hpack_send_binary",
+ "hpack_send_binary_base64",
+ "combiner_locks_initiated",
+ "combiner_locks_scheduled_items",
+ "combiner_locks_scheduled_final_items",
+ "combiner_locks_offloaded",
+ "call_combiner_locks_initiated",
+ "call_combiner_locks_scheduled_items",
+ "call_combiner_set_notify_on_cancel",
+ "call_combiner_cancelled",
+ "executor_scheduled_short_items",
+ "executor_scheduled_long_items",
+ "executor_scheduled_to_self",
+ "executor_wakeup_initiated",
+ "executor_queue_drained",
+ "executor_push_retries",
+ "server_requested_calls",
+ "server_slowpath_requests_queued",
+ "cq_ev_queue_trylock_failures",
+ "cq_ev_queue_trylock_successes",
+ "cq_ev_queue_transient_pop_failures",
+};
+const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
+ "Number of client side calls created by this process",
+ "Number of server side calls created by this process",
+ "Number of completion queues created", "Number of client channels created",
+ "Number of client subchannels created", "Number of server channels created",
+ "Number of polling syscalls (epoll_wait, poll, etc) made by this process",
+ "Number of sleeping syscalls made by this process",
+ "How many polling wakeups were performed by the process (only valid for "
+ "epoll1 right now)",
+ "How many times was a polling wakeup requested without an active poller "
+ "(only valid for epoll1 right now)",
+ "How many times was the same polling worker awoken repeatedly before "
+ "waking up (only valid for epoll1 right now)",
+ "How many times was an eventfd used as the wakeup vector for a polling "
+ "wakeup (only valid for epoll1 right now)",
+ "How many times was a condition variable used as the wakeup vector for a "
+ "polling wakeup (only valid for epoll1 right now)",
+ "How many times could a polling wakeup be satisfied by keeping the waking "
+ "thread awake? (only valid for epoll1 right now)",
+ "Number of times histogram increments went through the slow (binary "
+ "search) path",
+ "Number of write syscalls (or equivalent - eg sendmsg) made by this "
+ "process",
+ "Number of read syscalls (or equivalent - eg recvmsg) made by this process",
+ "Number of times a backup poller has been created (this can be expensive)",
+ "Number of polls performed on the backup poller",
+ "Number of batches received by HTTP2 transport",
+ "Number of cancelations received by HTTP2 transport",
+ "Number of batches containing send initial metadata",
+ "Number of batches containing send message",
+ "Number of batches containing send trailing metadata",
+ "Number of batches containing receive initial metadata",
+ "Number of batches containing receive message",
+ "Number of batches containing receive trailing metadata",
+ "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+ "Number of HTTP2 writes initiated",
+ "Number of HTTP2 writes offloaded to the executor from application threads",
+ "Number of HTTP2 writes that finished seeing more data needed to be "
+ "written",
+ "Number of HTTP2 writes that were made knowing there was still more data "
+ "to be written (we cap maximum write size to syscall_write)",
+ "Number of HTTP2 writes initiated due to 'initial_write'",
+ "Number of HTTP2 writes initiated due to 'start_new_stream'",
+ "Number of HTTP2 writes initiated due to 'send_message'",
+ "Number of HTTP2 writes initiated due to 'send_initial_metadata'",
+ "Number of HTTP2 writes initiated due to 'send_trailing_metadata'",
+ "Number of HTTP2 writes initiated due to 'retry_send_ping'",
+ "Number of HTTP2 writes initiated due to 'continue_pings'",
+ "Number of HTTP2 writes initiated due to 'goaway_sent'",
+ "Number of HTTP2 writes initiated due to 'rst_stream'",
+ "Number of HTTP2 writes initiated due to 'close_from_api'",
+ "Number of HTTP2 writes initiated due to 'stream_flow_control'",
+ "Number of HTTP2 writes initiated due to 'transport_flow_control'",
+ "Number of HTTP2 writes initiated due to 'send_settings'",
+ "Number of HTTP2 writes initiated due to 'bdp_estimator_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_setting'",
+ "Number of HTTP2 writes initiated due to "
+ "'flow_control_unstalled_by_update'",
+ "Number of HTTP2 writes initiated due to 'application_ping'",
+ "Number of HTTP2 writes initiated due to 'keepalive_ping'",
+ "Number of HTTP2 writes initiated due to "
+ "'transport_flow_control_unstalled'",
+ "Number of HTTP2 writes initiated due to 'ping_response'",
+ "Number of HTTP2 writes initiated due to 'force_rst_stream'",
+ "Number of HTTP2 writes initiated with nothing to write",
+ "Number of HPACK indexed fields received",
+ "Number of HPACK literal headers received with incremental indexing",
+ "Number of HPACK literal headers received with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers received with no indexing",
+ "Number of HPACK literal headers received with no indexing and literal "
+ "keys",
+ "Number of HPACK literal headers received with never-indexing",
+ "Number of HPACK literal headers received with never-indexing and literal "
+ "keys",
+ "Number of uncompressed strings received in metadata",
+ "Number of huffman encoded strings received in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
+ "Number of HPACK indexed fields sent",
+ "Number of HPACK literal headers sent with incremental indexing",
+ "Number of HPACK literal headers sent with incremental indexing and "
+ "literal keys",
+ "Number of HPACK literal headers sent with no indexing",
+ "Number of HPACK literal headers sent with no indexing and literal keys",
+ "Number of HPACK literal headers sent with never-indexing",
+ "Number of HPACK literal headers sent with never-indexing and literal keys",
+ "Number of uncompressed strings sent in metadata",
+ "Number of huffman encoded strings sent in metadata",
+ "Number of binary strings received in metadata",
+ "Number of binary strings received encoded in base64 in metadata",
+ "Number of combiner lock entries by process (first items queued to a "
+ "combiner)",
+ "Number of items scheduled against combiner locks",
+ "Number of final items scheduled against combiner locks",
+ "Number of combiner locks offloaded to different threads",
+ "Number of call combiner lock entries by process (first items queued to a "
+ "call combiner)",
+ "Number of items scheduled against call combiner locks",
+ "Number of times a cancellation callback was set on a call combiner",
+ "Number of times a call combiner was cancelled",
+ "Number of finite runtime closures scheduled against the executor (gRPC "
+ "thread pool)",
+ "Number of potentially infinite runtime closures scheduled against the "
+ "executor (gRPC thread pool)",
+ "Number of closures scheduled by the executor to the executor",
+ "Number of thread wakeups initiated within the executor",
+ "Number of times an executor queue was drained",
+ "Number of times we raced and were forced to retry pushing a closure to "
+ "the executor",
+ "How many calls were requested (not necessarily received) by the server",
+ "How many times was the server slow path taken (indicates too few "
+ "outstanding requests)",
+ "Number of lock (trylock) acquisition failures on completion queue event "
+ "queue. High value here indicates high contention on completion queues",
+ "Number of lock (trylock) acquisition successes on completion queue event "
+ "queue.",
+ "Number of times NULL was popped out of completion queue's event queue "
+ "even though the event queue was not empty",
+};
+const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
+ "call_initial_size",
+ "poll_events_returned",
+ "tcp_write_size",
+ "tcp_write_iov_size",
+ "tcp_read_size",
+ "tcp_read_offer",
+ "tcp_read_offer_iov_size",
+ "http2_send_message_size",
+ "http2_send_initial_metadata_per_write",
+ "http2_send_message_per_write",
+ "http2_send_trailing_metadata_per_write",
+ "http2_send_flowctl_per_write",
+ "server_cqs_checked",
+};
+const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
+ "Initial size of the grpc_call arena created at call start",
+ "How many events are called for each syscall_poll",
+ "Number of bytes offered to each syscall_write",
+ "Number of byte segments offered to each syscall_write",
+ "Number of bytes received by each syscall_read",
+ "Number of bytes offered to each syscall_read",
+ "Number of byte segments offered to each syscall_read",
+ "Size of messages received by HTTP2 transport",
+ "Number of streams initiated written per TCP write",
+ "Number of streams whose payload was written per TCP write",
+ "Number of streams terminated per TCP write",
+ "Number of flow control updates written per TCP write",
+ "How many completion queues were checked looking for a CQ that had "
+ "requested the incoming call",
+};
+const int grpc_stats_table_0[65] = {
+ 0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
+ 17, 21, 26, 32, 39, 47, 57, 68, 82, 98,
+ 117, 140, 167, 199, 238, 284, 339, 404, 482, 575,
+ 685, 816, 972, 1158, 1380, 1644, 1959, 2334, 2780, 3312,
+ 3945, 4699, 5597, 6667, 7941, 9459, 11267, 13420, 15984, 19038,
+ 22676, 27009, 32169, 38315, 45635, 54353, 64737, 77104, 91834, 109378,
+ 130273, 155159, 184799, 220100, 262144};
+const uint8_t grpc_stats_table_1[124] = {
+ 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6,
+ 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
+ 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 22, 23, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 29, 29, 30, 30, 30, 31, 31, 32, 33,
+ 33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
+ 51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
+const int grpc_stats_table_2[129] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
+ 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
+ 114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
+ 192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
+ 317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
+ 510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
+ 809, 834, 859, 885, 912, 939, 967, 996, 1024};
+const uint8_t grpc_stats_table_3[166] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
+ 17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
+ 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
+ 40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
+ 65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
+ 76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
+const int grpc_stats_table_4[65] = {
+ 0, 1, 2, 3, 4, 6, 8, 11,
+ 15, 20, 26, 34, 44, 57, 73, 94,
+ 121, 155, 199, 255, 327, 419, 537, 688,
+ 881, 1128, 1444, 1848, 2365, 3026, 3872, 4954,
+ 6338, 8108, 10373, 13270, 16976, 21717, 27782, 35541,
+ 45467, 58165, 74409, 95189, 121772, 155778, 199281, 254933,
+ 326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
+ 2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
+ 16777216};
+const uint8_t grpc_stats_table_5[87] = {
+ 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
+ 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
+ 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
+ 49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
+const int grpc_stats_table_6[65] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
+ 51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
+ 151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
+ 418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
+const uint8_t grpc_stats_table_7[102] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
+ 23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
+ 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 262144);
+ if (value < 6) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4651092515166879744ull) {
+ int bucket =
+ grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
+ _bkt.dbl = grpc_stats_table_0[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_0, 64));
+}
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 29) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4642789003353915392ull) {
+ int bucket =
+ grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
+ _bkt.dbl = grpc_stats_table_2[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_2, 128));
+}
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 16777216);
+ if (value < 5) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4683743612465315840ull) {
+ int bucket =
+ grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+ _bkt.dbl = grpc_stats_table_4[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
+ 64));
+}
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int value) {
+ value = GPR_CLAMP(value, 0, 1024);
+ if (value < 13) {
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4637863191261478912ull) {
+ int bucket =
+ grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+ _bkt.dbl = grpc_stats_table_6[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM(
+ (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+ value = GPR_CLAMP(value, 0, 64);
+ if (value < 3) {
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
+ return;
+ }
+ union {
+ double dbl;
+ uint64_t uint;
+ } _val, _bkt;
+ _val.dbl = value;
+ if (_val.uint < 4625196817309499392ull) {
+ int bucket =
+ grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
+ _bkt.dbl = grpc_stats_table_8[bucket];
+ bucket -= (_val.uint < _bkt.uint);
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
+ return;
+ }
+ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+ grpc_stats_histo_find_bucket_slow(
+ (exec_ctx), value, grpc_stats_table_8, 8));
+}
+const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
+ 512, 576, 640, 704, 768, 832};
+const int *const grpc_stats_histo_bucket_boundaries[13] = {
+ grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
+ grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
+ grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
+ grpc_stats_table_8};
+void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = {
+ grpc_stats_inc_call_initial_size,
+ grpc_stats_inc_poll_events_returned,
+ grpc_stats_inc_tcp_write_size,
+ grpc_stats_inc_tcp_write_iov_size,
+ grpc_stats_inc_tcp_read_size,
+ grpc_stats_inc_tcp_read_offer,
+ grpc_stats_inc_tcp_read_offer_iov_size,
+ grpc_stats_inc_http2_send_message_size,
+ grpc_stats_inc_http2_send_initial_metadata_per_write,
+ grpc_stats_inc_http2_send_message_per_write,
+ grpc_stats_inc_http2_send_trailing_metadata_per_write,
+ grpc_stats_inc_http2_send_flowctl_per_write,
+ grpc_stats_inc_server_cqs_checked};
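The generated grpc_stats_inc_* functions above keep the common case off the binary search: each reinterprets the clamped value as an IEEE-754 double and uses the difference between its bit pattern and that of the lowest table-covered value, shifted right, as an index into a small uint8_t map; only values past the covered range fall back to grpc_stats_histo_find_bucket_slow. The sketch below isolates that trick for the server_cqs_checked histogram. It relies on grpc_stats_table_8 and grpc_stats_table_9 as defined above, and on the fact that 4613937818241073152ull and 4625196817309499392ull are the bit patterns of the doubles 3.0 and 16.0, the bounds of the range the fast path covers.

#include <stdint.h>

/* Sketch: the fast bucket lookup from grpc_stats_inc_server_cqs_checked,
   with the stats-counter side effect and slow-path fallback stripped out.
   Valid only for 3 <= value < 16; other values take the slow path above. */
static int cqs_checked_fast_bucket(int value) {
  union {
    double dbl;
    uint64_t uint;
  } val, bkt;
  val.dbl = value;
  /* The difference between value's bit pattern and 3.0's bit pattern,
     shifted, indexes the precomputed exponent-to-bucket map. */
  int bucket =
      grpc_stats_table_9[(val.uint - 4613937818241073152ull) >> 51] + 3;
  bkt.dbl = grpc_stats_table_8[bucket];
  /* The map can overshoot by one bucket; compare against the boundary. */
  bucket -= (val.uint < bkt.uint);
  return bucket; /* e.g. value 10 -> bucket 4 (boundary 7), value 4 -> bucket 3 */
}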
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index c9c2f65c30..031942df5c 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -21,27 +21,489 @@
#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H
#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H
+#include <inttypes.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
- GRPC_STATS_COUNTER_SYSCALL_WRITE,
- GRPC_STATS_COUNTER_SYSCALL_READ,
+ GRPC_STATS_COUNTER_CQS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
+ GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT,
+ GRPC_STATS_COUNTER_POLLSET_KICK,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
+ GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
+ GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
+ GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
+ GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
+ GRPC_STATS_COUNTER_SYSCALL_WRITE,
+ GRPC_STATS_COUNTER_SYSCALL_READ,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
+ GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
+ GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
+ GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
+ GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
+ GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
+ GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN,
+ GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY,
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64,
+ GRPC_STATS_COUNTER_HPACK_SEND_INDEXED,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX,
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V,
+ GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED,
+ GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY,
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
+ GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED,
+ GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS,
+ GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL,
+ GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
+ GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
+ GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
+ GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
+ GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
+ GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES,
+ GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES,
+ GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
+extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
+typedef enum {
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+ GRPC_STATS_HISTOGRAM_COUNT
+} grpc_stats_histograms;
+extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
+extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
+typedef enum {
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
+ GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
+ GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
+ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
+ GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
+ GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
+ GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832,
+ GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
+ GRPC_STATS_HISTOGRAM_BUCKETS = 840
+} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
-#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
-#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
- GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED)
+#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
+#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
+#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
-extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
+#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
+#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
+#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
+#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
+#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
+#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
+#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
+#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
+#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
+#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
+#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \
+ exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
+#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
+#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
+#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
+#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL)
+#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
+#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
+#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
+#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER((exec_ctx), \
+ GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(exec_ctx) \
+ GRPC_STATS_INC_COUNTER( \
+ (exec_ctx), GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES)
+#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
+ grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
+ grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \
+ grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value))
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_initial_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
+ (int)(value))
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(
+ grpc_exec_ctx *exec_ctx, int x);
+#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
+ grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+ int x);
+#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
+ grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
+extern const int grpc_stats_histo_buckets[13];
+extern const int grpc_stats_histo_start[13];
+extern const int *const grpc_stats_histo_bucket_boundaries[13];
+extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx,
+ int x);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
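
As a hedged, illustrative sketch (not code from this patch): a call site that already has a grpc_exec_ctx in scope would exercise the generated macros above roughly as follows; the function and variable names below are invented for illustration only.

/* Illustrative sketch only: record one TCP write against the generated
   stats. Assumes an initialized grpc_exec_ctx supplied by the caller. */
#include "src/core/lib/debug/stats.h"

static void record_tcp_write_stats(grpc_exec_ctx *exec_ctx, size_t bytes,
                                   size_t iov_count) {
  /* plain counter increment */
  GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx);
  /* histogram macros cast the value to int and dispatch to the
     grpc_stats_inc_* bucketing helpers declared above */
  GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, bytes);
  GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_count);
}
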
diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml
index 8afe48f5cd..af4553028e 100644
--- a/src/core/lib/debug/stats_data.yaml
+++ b/src/core/lib/debug/stats_data.yaml
@@ -1,9 +1,294 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Stats data declaration
-# use tools/codegen/core/gen_stats_data.py to turn this into stats_data.h
+# use tools/codegen/core/gen_stats_data.py to turn this into stats_data.h
+# overall
- counter: client_calls_created
+ doc: Number of client side calls created by this process
- counter: server_calls_created
-- counter: syscall_write
-- counter: syscall_read
+ doc: Number of server side calls created by this process
+- histogram: call_initial_size
+ max: 262144
+ buckets: 64
+ doc: Initial size of the grpc_call arena created at call start
+- counter: cqs_created
+ doc: Number of completion queues created
+- counter: client_channels_created
+ doc: Number of client channels created
+- counter: client_subchannels_created
+ doc: Number of client subchannels created
+- counter: server_channels_created
+ doc: Number of server channels created
+# polling
- counter: syscall_poll
+ doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
- counter: syscall_wait
+ doc: Number of sleeping syscalls made by this process
+- histogram: poll_events_returned
+ max: 1024
+ buckets: 128
+  doc: How many events are returned by each syscall_poll
+- counter: pollset_kick
+ doc: How many polling wakeups were performed by the process
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_without_poller
+ doc: How many times was a polling wakeup requested without an active poller
+ (only valid for epoll1 right now)
+- counter: pollset_kicked_again
+  doc: How many times was the same polling worker kicked again before it had
+       woken up
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_fd
+ doc: How many times was an eventfd used as the wakeup vector for a polling
+ wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_wakeup_cv
+ doc: How many times was a condition variable used as the wakeup vector for a
+ polling wakeup
+ (only valid for epoll1 right now)
+- counter: pollset_kick_own_thread
+ doc: How many times could a polling wakeup be satisfied by keeping the waking
+ thread awake?
+ (only valid for epoll1 right now)
+# stats system
+- counter: histogram_slow_lookups
+ doc: Number of times histogram increments went through the slow
+ (binary search) path
+# tcp
+- counter: syscall_write
+ doc: Number of write syscalls (or equivalent - eg sendmsg) made by this process
+- counter: syscall_read
+ doc: Number of read syscalls (or equivalent - eg recvmsg) made by this process
+- histogram: tcp_write_size
+ max: 16777216 # 16 meg max write tracked
+ buckets: 64
+ doc: Number of bytes offered to each syscall_write
+- histogram: tcp_write_iov_size
+ max: 1024
+ buckets: 64
+ doc: Number of byte segments offered to each syscall_write
+- histogram: tcp_read_size
+ max: 16777216
+ buckets: 64
+ doc: Number of bytes received by each syscall_read
+- histogram: tcp_read_offer
+ max: 16777216
+ buckets: 64
+ doc: Number of bytes offered to each syscall_read
+- histogram: tcp_read_offer_iov_size
+ max: 1024
+ buckets: 64
+ doc: Number of byte segments offered to each syscall_read
+- counter: tcp_backup_pollers_created
+ doc: Number of times a backup poller has been created (this can be expensive)
+- counter: tcp_backup_poller_polls
+ doc: Number of polls performed on the backup poller
+# chttp2
+- counter: http2_op_batches
+ doc: Number of batches received by HTTP2 transport
+- counter: http2_op_cancel
+  doc: Number of cancellations received by HTTP2 transport
+- counter: http2_op_send_initial_metadata
+ doc: Number of batches containing send initial metadata
+- counter: http2_op_send_message
+ doc: Number of batches containing send message
+- counter: http2_op_send_trailing_metadata
+ doc: Number of batches containing send trailing metadata
+- counter: http2_op_recv_initial_metadata
+ doc: Number of batches containing receive initial metadata
+- counter: http2_op_recv_message
+ doc: Number of batches containing receive message
+- counter: http2_op_recv_trailing_metadata
+ doc: Number of batches containing receive trailing metadata
+- histogram: http2_send_message_size
+ max: 16777216
+ buckets: 64
+  doc: Size of messages sent by HTTP2 transport
+- histogram: http2_send_initial_metadata_per_write
+ max: 1024
+ buckets: 64
+  doc: Number of streams whose initial metadata was written per TCP write
+- histogram: http2_send_message_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams whose payload was written per TCP write
+- histogram: http2_send_trailing_metadata_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of streams terminated per TCP write
+- histogram: http2_send_flowctl_per_write
+ max: 1024
+ buckets: 64
+ doc: Number of flow control updates written per TCP write
+- counter: http2_settings_writes
+ doc: Number of settings frames sent
+- counter: http2_pings_sent
+ doc: Number of HTTP2 pings sent by process
+- counter: http2_writes_begun
+ doc: Number of HTTP2 writes initiated
+- counter: http2_writes_offloaded
+ doc: Number of HTTP2 writes offloaded to the executor from application threads
+- counter: http2_writes_continued
+  doc: Number of HTTP2 writes that, on finishing, found that more data still
+       needed to be written
+- counter: http2_partial_writes
+  doc: Number of HTTP2 writes that were made knowing there was still more data
+       to be written (we cap the maximum size handed to each syscall_write)
+- counter: http2_initiate_write_due_to_initial_write
+ doc: Number of HTTP2 writes initiated due to 'initial_write'
+- counter: http2_initiate_write_due_to_start_new_stream
+ doc: Number of HTTP2 writes initiated due to 'start_new_stream'
+- counter: http2_initiate_write_due_to_send_message
+ doc: Number of HTTP2 writes initiated due to 'send_message'
+- counter: http2_initiate_write_due_to_send_initial_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_initial_metadata'
+- counter: http2_initiate_write_due_to_send_trailing_metadata
+ doc: Number of HTTP2 writes initiated due to 'send_trailing_metadata'
+- counter: http2_initiate_write_due_to_retry_send_ping
+ doc: Number of HTTP2 writes initiated due to 'retry_send_ping'
+- counter: http2_initiate_write_due_to_continue_pings
+ doc: Number of HTTP2 writes initiated due to 'continue_pings'
+- counter: http2_initiate_write_due_to_goaway_sent
+ doc: Number of HTTP2 writes initiated due to 'goaway_sent'
+- counter: http2_initiate_write_due_to_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'rst_stream'
+- counter: http2_initiate_write_due_to_close_from_api
+ doc: Number of HTTP2 writes initiated due to 'close_from_api'
+- counter: http2_initiate_write_due_to_stream_flow_control
+ doc: Number of HTTP2 writes initiated due to 'stream_flow_control'
+- counter: http2_initiate_write_due_to_transport_flow_control
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control'
+- counter: http2_initiate_write_due_to_send_settings
+ doc: Number of HTTP2 writes initiated due to 'send_settings'
+- counter: http2_initiate_write_due_to_bdp_estimator_ping
+ doc: Number of HTTP2 writes initiated due to 'bdp_estimator_ping'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_setting
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_setting'
+- counter: http2_initiate_write_due_to_flow_control_unstalled_by_update
+ doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_update'
+- counter: http2_initiate_write_due_to_application_ping
+ doc: Number of HTTP2 writes initiated due to 'application_ping'
+- counter: http2_initiate_write_due_to_keepalive_ping
+ doc: Number of HTTP2 writes initiated due to 'keepalive_ping'
+- counter: http2_initiate_write_due_to_transport_flow_control_unstalled
+ doc: Number of HTTP2 writes initiated due to 'transport_flow_control_unstalled'
+- counter: http2_initiate_write_due_to_ping_response
+ doc: Number of HTTP2 writes initiated due to 'ping_response'
+- counter: http2_initiate_write_due_to_force_rst_stream
+ doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
+- counter: http2_spurious_writes_begun
+ doc: Number of HTTP2 writes initiated with nothing to write
+- counter: hpack_recv_indexed
+ doc: Number of HPACK indexed fields received
+- counter: hpack_recv_lithdr_incidx
+ doc: Number of HPACK literal headers received with incremental indexing
+- counter: hpack_recv_lithdr_incidx_v
+ doc: Number of HPACK literal headers received with incremental indexing and literal keys
+- counter: hpack_recv_lithdr_notidx
+ doc: Number of HPACK literal headers received with no indexing
+- counter: hpack_recv_lithdr_notidx_v
+ doc: Number of HPACK literal headers received with no indexing and literal keys
+- counter: hpack_recv_lithdr_nvridx
+ doc: Number of HPACK literal headers received with never-indexing
+- counter: hpack_recv_lithdr_nvridx_v
+ doc: Number of HPACK literal headers received with never-indexing and literal keys
+- counter: hpack_recv_uncompressed
+ doc: Number of uncompressed strings received in metadata
+- counter: hpack_recv_huffman
+  doc: Number of Huffman encoded strings received in metadata
+- counter: hpack_recv_binary
+ doc: Number of binary strings received in metadata
+- counter: hpack_recv_binary_base64
+ doc: Number of binary strings received encoded in base64 in metadata
+- counter: hpack_send_indexed
+ doc: Number of HPACK indexed fields sent
+- counter: hpack_send_lithdr_incidx
+ doc: Number of HPACK literal headers sent with incremental indexing
+- counter: hpack_send_lithdr_incidx_v
+ doc: Number of HPACK literal headers sent with incremental indexing and literal keys
+- counter: hpack_send_lithdr_notidx
+ doc: Number of HPACK literal headers sent with no indexing
+- counter: hpack_send_lithdr_notidx_v
+ doc: Number of HPACK literal headers sent with no indexing and literal keys
+- counter: hpack_send_lithdr_nvridx
+ doc: Number of HPACK literal headers sent with never-indexing
+- counter: hpack_send_lithdr_nvridx_v
+ doc: Number of HPACK literal headers sent with never-indexing and literal keys
+- counter: hpack_send_uncompressed
+ doc: Number of uncompressed strings sent in metadata
+- counter: hpack_send_huffman
+  doc: Number of Huffman encoded strings sent in metadata
+- counter: hpack_send_binary
+  doc: Number of binary strings sent in metadata
+- counter: hpack_send_binary_base64
+  doc: Number of binary strings sent encoded in base64 in metadata
+# combiner locks
+- counter: combiner_locks_initiated
+ doc: Number of combiner lock entries by process
+ (first items queued to a combiner)
+- counter: combiner_locks_scheduled_items
+ doc: Number of items scheduled against combiner locks
+- counter: combiner_locks_scheduled_final_items
+ doc: Number of final items scheduled against combiner locks
+- counter: combiner_locks_offloaded
+ doc: Number of combiner locks offloaded to different threads
+# call combiner locks
+- counter: call_combiner_locks_initiated
+ doc: Number of call combiner lock entries by process
+ (first items queued to a call combiner)
+- counter: call_combiner_locks_scheduled_items
+ doc: Number of items scheduled against call combiner locks
+- counter: call_combiner_set_notify_on_cancel
+ doc: Number of times a cancellation callback was set on a call combiner
+- counter: call_combiner_cancelled
+ doc: Number of times a call combiner was cancelled
+# executor
+- counter: executor_scheduled_short_items
+ doc: Number of finite runtime closures scheduled against the executor
+ (gRPC thread pool)
+- counter: executor_scheduled_long_items
+ doc: Number of potentially infinite runtime closures scheduled against the
+ executor (gRPC thread pool)
+- counter: executor_scheduled_to_self
+ doc: Number of closures scheduled by the executor to the executor
+- counter: executor_wakeup_initiated
+ doc: Number of thread wakeups initiated within the executor
+- counter: executor_queue_drained
+ doc: Number of times an executor queue was drained
+- counter: executor_push_retries
+ doc: Number of times we raced and were forced to retry pushing a closure to
+ the executor
+# server
+- counter: server_requested_calls
+ doc: How many calls were requested (not necessarily received) by the server
+- histogram: server_cqs_checked
+ buckets: 8
+ max: 64
+ doc: How many completion queues were checked looking for a CQ that had
+ requested the incoming call
+- counter: server_slowpath_requests_queued
+ doc: How many times was the server slow path taken (indicates too few
+ outstanding requests)
+# cq
+- counter: cq_ev_queue_trylock_failures
+ doc: Number of lock (trylock) acquisition failures on completion queue event
+ queue. High value here indicates high contention on completion queues
+- counter: cq_ev_queue_trylock_successes
+ doc: Number of lock (trylock) acquisition successes on completion queue event
+ queue.
+- counter: cq_ev_queue_transient_pop_failures
+ doc: Number of times NULL was popped out of completion queue's event queue
+ even though the event queue was not empty
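
Each entry above is turned by tools/codegen/core/gen_stats_data.py into the macros, _FIRST_SLOT/_BUCKETS constants, and metadata arrays seen in stats_data.h earlier in this diff. A hedged sketch (illustrative function name only) of walking that generated histogram metadata:

/* Illustrative sketch only: dump the layout generated from the histogram
   declarations above (13 histograms at this revision). */
#include <stdio.h>
#include "src/core/lib/debug/stats.h"

static void dump_histogram_layout(void) {
  for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
    printf("%s: first_slot=%d buckets=%d\n", grpc_stats_histogram_name[i],
           grpc_stats_histo_start[i], grpc_stats_histo_buckets[i]);
  }
}
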
diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql
new file mode 100644
index 0000000000..04b6d471f6
--- /dev/null
+++ b/src/core/lib/debug/stats_data_bq_schema.sql
@@ -0,0 +1,96 @@
+client_calls_created_per_iteration:FLOAT,
+server_calls_created_per_iteration:FLOAT,
+cqs_created_per_iteration:FLOAT,
+client_channels_created_per_iteration:FLOAT,
+client_subchannels_created_per_iteration:FLOAT,
+server_channels_created_per_iteration:FLOAT,
+syscall_poll_per_iteration:FLOAT,
+syscall_wait_per_iteration:FLOAT,
+pollset_kick_per_iteration:FLOAT,
+pollset_kicked_without_poller_per_iteration:FLOAT,
+pollset_kicked_again_per_iteration:FLOAT,
+pollset_kick_wakeup_fd_per_iteration:FLOAT,
+pollset_kick_wakeup_cv_per_iteration:FLOAT,
+pollset_kick_own_thread_per_iteration:FLOAT,
+histogram_slow_lookups_per_iteration:FLOAT,
+syscall_write_per_iteration:FLOAT,
+syscall_read_per_iteration:FLOAT,
+tcp_backup_pollers_created_per_iteration:FLOAT,
+tcp_backup_poller_polls_per_iteration:FLOAT,
+http2_op_batches_per_iteration:FLOAT,
+http2_op_cancel_per_iteration:FLOAT,
+http2_op_send_initial_metadata_per_iteration:FLOAT,
+http2_op_send_message_per_iteration:FLOAT,
+http2_op_send_trailing_metadata_per_iteration:FLOAT,
+http2_op_recv_initial_metadata_per_iteration:FLOAT,
+http2_op_recv_message_per_iteration:FLOAT,
+http2_op_recv_trailing_metadata_per_iteration:FLOAT,
+http2_settings_writes_per_iteration:FLOAT,
+http2_pings_sent_per_iteration:FLOAT,
+http2_writes_begun_per_iteration:FLOAT,
+http2_writes_offloaded_per_iteration:FLOAT,
+http2_writes_continued_per_iteration:FLOAT,
+http2_partial_writes_per_iteration:FLOAT,
+http2_initiate_write_due_to_initial_write_per_iteration:FLOAT,
+http2_initiate_write_due_to_start_new_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_message_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_initial_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_trailing_metadata_per_iteration:FLOAT,
+http2_initiate_write_due_to_retry_send_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_continue_pings_per_iteration:FLOAT,
+http2_initiate_write_due_to_goaway_sent_per_iteration:FLOAT,
+http2_initiate_write_due_to_rst_stream_per_iteration:FLOAT,
+http2_initiate_write_due_to_close_from_api_per_iteration:FLOAT,
+http2_initiate_write_due_to_stream_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_per_iteration:FLOAT,
+http2_initiate_write_due_to_send_settings_per_iteration:FLOAT,
+http2_initiate_write_due_to_bdp_estimator_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_setting_per_iteration:FLOAT,
+http2_initiate_write_due_to_flow_control_unstalled_by_update_per_iteration:FLOAT,
+http2_initiate_write_due_to_application_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
+http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
+http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
+http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
+http2_spurious_writes_begun_per_iteration:FLOAT,
+hpack_recv_indexed_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_per_iteration:FLOAT,
+hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_per_iteration:FLOAT,
+hpack_recv_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_per_iteration:FLOAT,
+hpack_recv_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_recv_uncompressed_per_iteration:FLOAT,
+hpack_recv_huffman_per_iteration:FLOAT,
+hpack_recv_binary_per_iteration:FLOAT,
+hpack_recv_binary_base64_per_iteration:FLOAT,
+hpack_send_indexed_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_per_iteration:FLOAT,
+hpack_send_lithdr_incidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_per_iteration:FLOAT,
+hpack_send_lithdr_notidx_v_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_per_iteration:FLOAT,
+hpack_send_lithdr_nvridx_v_per_iteration:FLOAT,
+hpack_send_uncompressed_per_iteration:FLOAT,
+hpack_send_huffman_per_iteration:FLOAT,
+hpack_send_binary_per_iteration:FLOAT,
+hpack_send_binary_base64_per_iteration:FLOAT,
+combiner_locks_initiated_per_iteration:FLOAT,
+combiner_locks_scheduled_items_per_iteration:FLOAT,
+combiner_locks_scheduled_final_items_per_iteration:FLOAT,
+combiner_locks_offloaded_per_iteration:FLOAT,
+call_combiner_locks_initiated_per_iteration:FLOAT,
+call_combiner_locks_scheduled_items_per_iteration:FLOAT,
+call_combiner_set_notify_on_cancel_per_iteration:FLOAT,
+call_combiner_cancelled_per_iteration:FLOAT,
+executor_scheduled_short_items_per_iteration:FLOAT,
+executor_scheduled_long_items_per_iteration:FLOAT,
+executor_scheduled_to_self_per_iteration:FLOAT,
+executor_wakeup_initiated_per_iteration:FLOAT,
+executor_queue_drained_per_iteration:FLOAT,
+executor_push_retries_per_iteration:FLOAT,
+server_requested_calls_per_iteration:FLOAT,
+server_slowpath_requests_queued_per_iteration:FLOAT,
+cq_ev_queue_trylock_failures_per_iteration:FLOAT,
+cq_ev_queue_trylock_successes_per_iteration:FLOAT,
+cq_ev_queue_transient_pop_failures_per_iteration:FLOAT
diff --git a/src/core/lib/debug/trace.c b/src/core/lib/debug/trace.cc
index c6c1853e20..21b0d8c3a6 100644
--- a/src/core/lib/debug/trace.c
+++ b/src/core/lib/debug/trace.cc
@@ -20,6 +20,7 @@
#include <string.h>
+#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/support/env.h"
@@ -39,7 +40,7 @@ static tracer *tracers;
#endif
void grpc_register_tracer(grpc_tracer_flag *flag) {
- tracer *t = gpr_malloc(sizeof(*t));
+ tracer *t = (tracer *)gpr_malloc(sizeof(*t));
t->flag = flag;
t->next = tracers;
TRACER_SET(*flag, false);
@@ -53,10 +54,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index dd9e6a30fe..558ba942bb 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -23,6 +23,10 @@
#include <grpc/support/port_platform.h>
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define GRPC_THREADSAFE_TRACER
@@ -35,7 +39,7 @@ typedef struct {
#else
bool value;
#endif
- char *name;
+ const char *name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
@@ -52,4 +56,8 @@ void grpc_register_tracer(grpc_tracer_flag *flag);
void grpc_tracer_init(const char *env_var_name);
void grpc_tracer_shutdown(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_DEBUG_TRACE_H */
diff --git a/src/core/lib/http/format_request.c b/src/core/lib/http/format_request.cc
index f887726eea..88fb0ab0b6 100644
--- a/src/core/lib/http/format_request.c
+++ b/src/core/lib/http/format_request.cc
@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
- tmp = gpr_realloc(tmp, out_len + body_size);
+ tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}
diff --git a/src/core/lib/http/format_request.h b/src/core/lib/http/format_request.h
index 12b42e42fa..2e77e8661a 100644
--- a/src/core/lib/http/format_request.h
+++ b/src/core/lib/http/format_request.h
@@ -22,6 +22,10 @@
#include <grpc/slice.h>
#include "src/core/lib/http/httpcli.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request);
grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
const char *body_bytes,
@@ -29,4 +33,8 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
grpc_slice grpc_httpcli_format_connect_request(
const grpc_httpcli_request *request);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.cc
index 77af7b7c08..c96800b85c 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.cc
@@ -44,7 +44,7 @@ typedef struct {
grpc_endpoint *ep;
char *host;
char *ssl_host_override;
- gpr_timespec deadline;
+ grpc_millis deadline;
int have_read_byte;
const grpc_httpcli_handshaker *handshaker;
grpc_closure *on_done;
@@ -65,7 +65,7 @@ static grpc_httpcli_post_override g_post_override = NULL;
static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint, const char *host,
- gpr_timespec deadline,
+ grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_endpoint *endpoint)) {
@@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
- internal_request *req = user_data;
+ internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -217,7 +217,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
- GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
+ (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
@@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = arg;
+ internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -240,10 +240,11 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
- internal_request *req = gpr_malloc(sizeof(internal_request));
+ internal_request *req =
+ (internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
@@ -277,9 +278,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_done,
- grpc_httpcli_response *response) {
+ const grpc_httpcli_request *request, grpc_millis deadline,
+ grpc_closure *on_done, grpc_httpcli_response *response) {
char *name;
if (g_get_override &&
g_get_override(exec_ctx, request, deadline, on_done, response)) {
@@ -297,7 +297,7 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
- gpr_timespec deadline, grpc_closure *on_done,
+ grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response) {
char *name;
if (g_post_override &&
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 809618695e..76b790fa8a 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -32,6 +32,10 @@
/* User agent this library reports */
#define GRPC_HTTPCLI_USER_AGENT "grpc-httpcli/0.0"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Tracks in-progress http requests
TODO(ctiller): allow caching and capturing multiple requests for the
same content and combining them */
@@ -42,7 +46,7 @@ typedef struct grpc_httpcli_context {
typedef struct {
const char *default_port;
void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
- const char *host, gpr_timespec deadline,
+ const char *host, grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint));
} grpc_httpcli_handshaker;
@@ -83,8 +87,8 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_polling_entity *pollent,
grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- gpr_timespec deadline, grpc_closure *on_complete,
+ const grpc_httpcli_request *request, grpc_millis deadline,
+ grpc_closure *on_complete,
grpc_httpcli_response *response);
/* Asynchronously perform a HTTP POST.
@@ -106,21 +110,25 @@ void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
grpc_resource_quota *resource_quota,
const grpc_httpcli_request *request,
const char *body_bytes, size_t body_size,
- gpr_timespec deadline, grpc_closure *on_complete,
+ grpc_millis deadline, grpc_closure *on_complete,
grpc_httpcli_response *response);
/* override functions return 1 if they handled the request, 0 otherwise */
typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx,
const grpc_httpcli_request *request,
- gpr_timespec deadline,
+ grpc_millis deadline,
grpc_closure *on_complete,
grpc_httpcli_response *response);
typedef int (*grpc_httpcli_post_override)(
grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size, gpr_timespec deadline,
+ const char *body_bytes, size_t body_size, grpc_millis deadline,
grpc_closure *on_complete, grpc_httpcli_response *response);
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
grpc_httpcli_post_override post);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_HTTP_HTTPCLI_H */
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.cc
index 97c2886525..d832dacb69 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.cc
@@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
+ c->handshaker_factory = NULL;
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);
@@ -90,8 +91,17 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
tsi_peer_destruct(&peer);
}
+static int httpcli_ssl_cmp(grpc_security_connector *sc1,
+ grpc_security_connector *sc2) {
+ grpc_httpcli_ssl_channel_security_connector *c1 =
+ (grpc_httpcli_ssl_channel_security_connector *)sc1;
+ grpc_httpcli_ssl_channel_security_connector *c2 =
+ (grpc_httpcli_ssl_channel_security_connector *)sc2;
+ return strcmp(c1->secure_peer_name, c2->secure_peer_name);
+}
+
static grpc_security_connector_vtable httpcli_ssl_vtable = {
- httpcli_ssl_destroy, httpcli_ssl_check_peer};
+ httpcli_ssl_destroy, httpcli_ssl_check_peer, httpcli_ssl_cmp};
static grpc_security_status httpcli_ssl_channel_security_connector_create(
grpc_exec_ctx *exec_ctx, const char *pem_root_certs,
@@ -105,7 +115,8 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
return GRPC_SECURITY_ERROR;
}
- c = gpr_zalloc(sizeof(grpc_httpcli_ssl_channel_security_connector));
+ c = (grpc_httpcli_ssl_channel_security_connector *)gpr_zalloc(
+ sizeof(grpc_httpcli_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.vtable = &httpcli_ssl_vtable;
@@ -121,6 +132,10 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
*sc = NULL;
return GRPC_SECURITY_ERROR;
}
+ // We don't actually need a channel credentials object in this case,
+ // but we set it to a non-NULL address so that we don't trigger
+ // assertions in grpc_channel_security_connector_cmp().
+ c->base.channel_creds = (grpc_channel_credentials *)1;
c->base.add_handshakers = httpcli_ssl_add_handshakers;
*sc = &c->base;
return GRPC_SECURITY_OK;
@@ -136,8 +151,8 @@ typedef struct {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_handshaker_args *args = arg;
- on_done_closure *c = args->user_data;
+ grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
+ on_done_closure *c = (on_done_closure *)args->user_data;
if (error != GRPC_ERROR_NONE) {
const char *msg = grpc_error_string(error);
gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg);
@@ -155,10 +170,10 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *tcp, const char *host,
- gpr_timespec deadline,
+ grpc_millis deadline,
void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *endpoint)) {
- on_done_closure *c = gpr_malloc(sizeof(*c));
+ on_done_closure *c = (on_done_closure *)gpr_malloc(sizeof(*c));
const char *pem_root_certs = grpc_get_default_ssl_roots();
if (pem_root_certs == NULL) {
gpr_log(GPR_ERROR, "Could not get default pem root certs.");
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.cc
index 9c5e93f4e5..0950bd655e 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.cc
@@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
- char *out = gpr_malloc(length + 1);
+ char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
- *hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
+ *hdrs = (grpc_http_header *)gpr_realloc(
+ *hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
- *body = gpr_realloc((void *)*body, parser->body_capacity);
+ *body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;
diff --git a/src/core/lib/http/parser.h b/src/core/lib/http/parser.h
index c8dced3901..d2bda6ae0e 100644
--- a/src/core/lib/http/parser.h
+++ b/src/core/lib/http/parser.h
@@ -27,6 +27,10 @@
/* Maximum length of a header string of the form 'Key: Value\r\n' */
#define GRPC_HTTP_PARSER_MAX_HEADER_LENGTH 4096
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A single header to be passed in a request */
typedef struct grpc_http_header {
char *key;
@@ -109,4 +113,8 @@ void grpc_http_response_destroy(grpc_http_response *response);
extern grpc_tracer_flag grpc_http1_trace;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_HTTP_PARSER_H */
diff --git a/src/core/lib/support/block_annotate.h b/src/core/lib/iomgr/block_annotate.h
index 8e3ef7df65..fcbfe9eb1a 100644
--- a/src/core/lib/support/block_annotate.h
+++ b/src/core/lib/iomgr/block_annotate.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
-#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H
+#ifndef GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
+#define GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
#ifdef __cplusplus
extern "C" {
@@ -39,17 +39,26 @@ void gpr_thd_end_blocking_region();
do { \
gpr_thd_start_blocking_region(); \
} while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION \
- do { \
- gpr_thd_end_blocking_region(); \
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
+ do { \
+ gpr_thd_end_blocking_region(); \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
+ do { \
+ gpr_thd_end_blocking_region(); \
+ grpc_exec_ctx_invalidate_now((ec)); \
} while (0)
#else
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION \
- do { \
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
+ do { \
+ } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
+ do { \
+ grpc_exec_ctx_invalidate_now((ec)); \
} while (0)
#endif
-#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */
+#endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */
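
A hedged usage sketch (not part of this patch) of how these annotation macros bracket a blocking syscall; the exec_ctx-aware variant additionally refreshes the cached clock via grpc_exec_ctx_invalidate_now(). The poll() wrapper and names below are illustrative assumptions.

/* Illustrative sketch only: annotate a blocking poll() so the scheduler
   knows the thread is parked, then refresh the exec_ctx clock on exit. */
#include <poll.h>
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static int blocking_poll(grpc_exec_ctx *exec_ctx, struct pollfd *pfds,
                         nfds_t nfds, int timeout_ms) {
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  int r = poll(pfds, nfds, timeout_ms);
  GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
  return r;
}
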
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
new file mode 100644
index 0000000000..d45719608b
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/call_combiner.h"
+
+#include <inttypes.h>
+
+#include <grpc/support/log.h>
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/profiling/timers.h"
+
+grpc_tracer_flag grpc_call_combiner_trace =
+ GRPC_TRACER_INITIALIZER(false, "call_combiner");
+
+static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) {
+ if (cancel_state & 1) {
+ return (grpc_error*)(cancel_state & ~(gpr_atm)1);
+ }
+ return GRPC_ERROR_NONE;
+}
+
+static gpr_atm encode_cancel_state_error(grpc_error* error) {
+ return (gpr_atm)1 | (gpr_atm)error;
+}
+
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_init(&call_combiner->queue);
+}
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) {
+ gpr_mpscq_destroy(&call_combiner->queue);
+ GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state));
+}
+
+#ifndef NDEBUG
+#define DEBUG_ARGS , const char *file, int line
+#define DEBUG_FMT_STR "%s:%d: "
+#define DEBUG_FMT_ARGS , file, line
+#else
+#define DEBUG_ARGS
+#define DEBUG_FMT_STR
+#define DEBUG_FMT_ARGS
+#endif
+
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure,
+ grpc_error* error DEBUG_ARGS,
+ const char* reason) {
+ GPR_TIMER_BEGIN("call_combiner_start", 0);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
+ "%s] error=%s",
+ call_combiner, closure DEBUG_FMT_ARGS, reason,
+ grpc_error_string(error));
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size + 1);
+ }
+ GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
+ if (prev_size == 0) {
+ GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx);
+ GPR_TIMER_MARK("call_combiner_initiate", 0);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY");
+ }
+ // Queue was empty, so execute this closure immediately.
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+ } else {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+      gpr_log(GPR_DEBUG, "  QUEUING");
+ }
+ // Queue was not empty, so add closure to queue.
+ closure->error_data.error = error;
+ gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure);
+ }
+ GPR_TIMER_END("call_combiner_start", 0);
+}
+
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner DEBUG_ARGS,
+ const char* reason) {
+ GPR_TIMER_BEGIN("call_combiner_stop", 0);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
+ call_combiner DEBUG_FMT_ARGS, reason);
+ }
+ size_t prev_size =
+ (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1);
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
+ prev_size - 1);
+ }
+ GPR_ASSERT(prev_size >= 1);
+ if (prev_size > 1) {
+ while (true) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " checking queue");
+ }
+ bool empty;
+ grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end(
+ &call_combiner->queue, &empty);
+ if (closure == NULL) {
+ // This can happen either due to a race condition within the mpscq
+ // code or because of a race with grpc_call_combiner_start().
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue returned no result; checking again");
+ }
+ continue;
+ }
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s",
+ closure, grpc_error_string(closure->error_data.error));
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
+ break;
+ }
+ } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, " queue empty");
+ }
+ GPR_TIMER_END("call_combiner_stop", 0);
+}
+
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure) {
+ GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx);
+ while (true) {
+ // Decode original state.
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ // If error is set, invoke the cancellation closure immediately.
+ // Otherwise, store the new closure.
+ if (original_error != GRPC_ERROR_NONE) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p "
+ "for pre-existing cancellation",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
+ break;
+ } else {
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ (gpr_atm)closure)) {
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p",
+ call_combiner, closure);
+ }
+ // If we replaced an earlier closure, invoke the original
+ // closure with GRPC_ERROR_NONE. This allows callers to clean
+ // up any resources they may be holding for the callback.
+ if (original_state != 0) {
+ closure = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling old cancel callback=%p",
+ call_combiner, closure);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+ }
+ break;
+ }
+ }
+ // cas failed, try again.
+ }
+}
+
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error) {
+ GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx);
+ while (true) {
+ gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
+ grpc_error* original_error = decode_cancel_state_error(original_state);
+ if (original_error != GRPC_ERROR_NONE) {
+ GRPC_ERROR_UNREF(error);
+ break;
+ }
+ if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
+ encode_cancel_state_error(error))) {
+ if (original_state != 0) {
+ grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) {
+ gpr_log(GPR_DEBUG,
+ "call_combiner=%p: scheduling notify_on_cancel callback=%p",
+ call_combiner, notify_on_cancel);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
+ }
+ break;
+ }
+ // cas failed, try again.
+ }
+}
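
For readers following the new call_combiner.cc above: cancel_state is a single atomic word that holds either a registered cancellation closure pointer (low bit clear) or a cancellation error pointer (low bit set), relying on pointer alignment to keep bit 0 free. The standalone sketch below restates that tagging trick with placeholder types; closure_t and error_t are illustrative stand-ins, not gRPC types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int dummy; } closure_t; /* stand-in for grpc_closure */
typedef struct { int dummy; } error_t;   /* stand-in for grpc_error */

/* Tag an error pointer by setting bit 0; aligned pointers never use it. */
static uintptr_t encode_error(error_t *e) { return (uintptr_t)e | (uintptr_t)1; }

/* Return the error if bit 0 is set, otherwise NULL (closure or empty state). */
static error_t *decode_error(uintptr_t state) {
  return (state & 1) ? (error_t *)(state & ~(uintptr_t)1) : NULL;
}

int main(void) {
  static error_t cancel_error;
  static closure_t notify_closure;
  uintptr_t state = (uintptr_t)&notify_closure; /* closure registered */
  assert(decode_error(state) == NULL);          /* no cancellation recorded */
  state = encode_error(&cancel_error);          /* cancellation recorded */
  assert(decode_error(state) == &cancel_error);
  printf("tagged-pointer round trip ok\n");
  return 0;
}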
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
new file mode 100644
index 0000000000..527f84fce0
--- /dev/null
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H
+
+#include <stddef.h>
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/mpscq.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A simple, lock-free mechanism for serializing activity related to a
+// single call. This is similar to a combiner but is more lightweight.
+//
+// It requires the callback (or, in the common case where the callback
+// actually kicks off a chain of callbacks, the last callback in that
+// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP())
+// when it is done with the action that was kicked off by the original
+// callback.
+
+extern grpc_tracer_flag grpc_call_combiner_trace;
+
+typedef struct {
+ gpr_atm size; // size_t, num closures in queue or currently executing
+ gpr_mpscq queue;
+ // Either 0 (if not cancelled and no cancellation closure set),
+ // a grpc_closure* (if the lowest bit is 0),
+ // or a grpc_error* (if the lowest bit is 1).
+ gpr_atm cancel_state;
+} grpc_call_combiner;
+
+// Assumes memory was initialized to zero.
+void grpc_call_combiner_init(grpc_call_combiner* call_combiner);
+
+void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
+
+#ifndef NDEBUG
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ __FILE__, __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
+ (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* file, int line, const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* file, int line, const char* reason);
+#else
+#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \
+ reason) \
+ grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
+ (reason))
+#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
+ grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
+/// Starts processing \a closure on \a call_combiner.
+void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure, grpc_error* error,
+ const char* reason);
+/// Yields the call combiner to the next closure in the queue, if any.
+void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ const char* reason);
+#endif
+
+/// Registers \a closure to be invoked by \a call_combiner when
+/// grpc_call_combiner_cancel() is called.
+///
+/// Once a closure is registered, it will always be scheduled exactly
+/// once; this allows the closure to hold references that will be freed
+/// regardless of whether or not the call was cancelled. If a cancellation
+/// does occur, the closure will be scheduled with the cancellation error;
+/// otherwise, it will be scheduled with GRPC_ERROR_NONE.
+///
+/// The closure will be scheduled in the following cases:
+/// - If grpc_call_combiner_cancel() was called prior to registering the
+/// closure, it will be scheduled immediately with the cancellation error.
+/// - If grpc_call_combiner_cancel() is called after registering the
+/// closure, the closure will be scheduled with the cancellation error.
+/// - If grpc_call_combiner_set_notify_on_cancel() is called again to
+/// register a new cancellation closure, the previous cancellation
+/// closure will be scheduled with GRPC_ERROR_NONE.
+///
+/// If \a closure is NULL, then no closure will be invoked on
+/// cancellation; this effectively unregisters the previously set closure.
+/// However, most filters will not need to explicitly unregister their
+/// callbacks, as this is done automatically when the call is destroyed.
+void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_closure* closure);
+
+/// Indicates that the call has been cancelled.
+void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
+ grpc_call_combiner* call_combiner,
+ grpc_error* error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
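
Taken together, the header above expects a filter to start work under the combiner, register at most one cancellation closure, and eventually yield the combiner back. The fragment below is a rough usage sketch only: my_call_data, on_cancel, do_work, and start_work are hypothetical names, and the surrounding channel-filter plumbing is omitted.

#include "src/core/lib/iomgr/call_combiner.h"

typedef struct {
  grpc_call_combiner call_combiner;
  grpc_closure on_cancel;
  grpc_closure do_work;
} my_call_data;

/* Scheduled exactly once: with the cancellation error if the call is
   cancelled, or with GRPC_ERROR_NONE if the closure is later replaced. */
static void on_cancel(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}

static void do_work(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_call_data *calld = (my_call_data *)arg;
  /* ... serialized per-call work happens here ... */
  /* The callback chain must eventually give the combiner back: */
  GRPC_CALL_COMBINER_STOP(exec_ctx, &calld->call_combiner, "work done");
}

static void start_work(grpc_exec_ctx *exec_ctx, my_call_data *calld) {
  grpc_call_combiner_init(&calld->call_combiner);
  grpc_call_combiner_set_notify_on_cancel(
      exec_ctx, &calld->call_combiner,
      GRPC_CLOSURE_INIT(&calld->on_cancel, on_cancel, calld,
                        grpc_schedule_on_exec_ctx));
  GRPC_CALL_COMBINER_START(
      exec_ctx, &calld->call_combiner,
      GRPC_CLOSURE_INIT(&calld->do_work, do_work, calld,
                        grpc_schedule_on_exec_ctx),
      GRPC_ERROR_NONE, "starting work");
}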
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.cc
index 26f9cbe0fa..00edefc6ae 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.cc
@@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- wrapped_closure *wc = arg;
+ wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
- wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+ wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.cc
index 9b66987b68..53f4b7eaa7 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.cc
@@ -19,11 +19,13 @@
#include "src/core/lib/iomgr/combiner.h"
#include <assert.h>
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
@@ -73,14 +75,15 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_combiner *grpc_combiner_create(void) {
- grpc_combiner *lock = gpr_zalloc(sizeof(*lock));
+ grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
- GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
+ GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -153,6 +156,7 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
grpc_error *error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
GPR_TIMER_BEGIN("combiner.execute", 0);
grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -160,6 +164,8 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
if (last == 1) {
+ GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
+ GPR_TIMER_MARK("combiner.initiated", 0);
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
(gpr_atm)exec_ctx);
// first element on this list: add it to the list of combiner locks
@@ -190,11 +196,12 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
}
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_combiner *lock = arg;
+ grpc_combiner *lock = (grpc_combiner *)arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+ GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
@@ -325,6 +332,7 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_error *error) {
+ GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
grpc_combiner *lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
@@ -350,7 +358,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
grpc_error *error) {
- combiner_finally_exec(exec_ctx, closure, GRPC_ERROR_REF(error));
+ combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
+ GRPC_ERROR_REF(error));
}
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 8e0434369d..10e5fb480d 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -26,6 +26,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/mpscq.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Provides serialized access to some resource.
// Each action queued on a combiner is executed serially in a borrowed thread.
// The actual thread executing actions may change over time (but there will only
@@ -63,4 +67,8 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
extern grpc_tracer_flag grpc_combiner_trace;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_COMBINER_H */
diff --git a/src/core/lib/iomgr/endpoint.c b/src/core/lib/iomgr/endpoint.cc
index 37cce335ca..5eab1d3158 100644
--- a/src/core/lib/iomgr/endpoint.c
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -39,6 +39,12 @@ void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
}
+void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ ep->vtable->delete_from_pollset_set(exec_ctx, ep, pollset_set);
+}
+
void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
grpc_error* why) {
ep->vtable->shutdown(exec_ctx, ep, why);
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 8f0523a981..92964e0f2d 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -26,6 +26,10 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resource_quota.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* An endpoint caps a streaming channel between two communicating processes.
Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
@@ -41,6 +45,8 @@ struct grpc_endpoint_vtable {
grpc_pollset *pollset);
void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset_set *pollset);
+ void (*delete_from_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
+ grpc_pollset_set *pollset);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
@@ -81,18 +87,27 @@ void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *why);
void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
-/* Add an endpoint to a pollset, so that when the pollset is polled, events from
- this endpoint are considered */
+/* Add an endpoint to a pollset or pollset_set, so that when the pollset is
+ polled, events from this endpoint are considered */
void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset *pollset);
void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_endpoint *ep,
grpc_pollset_set *pollset_set);
+/* Delete an endpoint from a pollset_set */
+void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
+ grpc_pollset_set *pollset_set);
+
grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
struct grpc_endpoint {
const grpc_endpoint_vtable *vtable;
};
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_H */
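
Because delete_from_pollset_set is a new grpc_endpoint_vtable member (slotted between add_to_pollset_set and shutdown above), every concrete endpoint now has to supply an implementation. The following is only a hedged sketch of what that might look like for a hypothetical fd-backed endpoint; my_endpoint and me_delete_from_pollset_set are illustrative names, not code from this patch.

#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"

typedef struct {
  grpc_endpoint base; /* must be first so casts from grpc_endpoint* work */
  grpc_fd *fd;
} my_endpoint;

/* Hypothetical endpoint method: undo a prior add_to_pollset_set by removing
   the endpoint's fd from the given pollset_set. */
static void me_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
                                       grpc_endpoint *ep,
                                       grpc_pollset_set *pollset_set) {
  my_endpoint *m = (my_endpoint *)ep;
  grpc_pollset_set_del_fd(exec_ctx, pollset_set, m->fd);
}

/* The endpoint's vtable then gains the matching slot, right after its
   add_to_pollset_set entry and before shutdown. */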
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index b60e62fc3e..ee91795749 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -21,6 +21,10 @@
#include "src/core/lib/iomgr/endpoint.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
grpc_endpoint *client;
grpc_endpoint *server;
@@ -29,4 +33,8 @@ typedef struct {
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
grpc_channel_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.c b/src/core/lib/iomgr/endpoint_pair_posix.cc
index 3ade2148ba..3ade2148ba 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.c
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
diff --git a/src/core/lib/iomgr/endpoint_pair_uv.c b/src/core/lib/iomgr/endpoint_pair_uv.cc
index ff72fe0492..ff72fe0492 100644
--- a/src/core/lib/iomgr/endpoint_pair_uv.c
+++ b/src/core/lib/iomgr/endpoint_pair_uv.cc
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.c b/src/core/lib/iomgr/endpoint_pair_windows.cc
index 782fa2fd69..782fa2fd69 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.c
+++ b/src/core/lib/iomgr/endpoint_pair_windows.cc
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.cc
index 3759dda992..2ea6cf1301 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.cc
@@ -15,9 +15,11 @@
* limitations under the License.
*
*/
+#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/error.h"
+#include <inttypes.h>
#include <string.h>
#include <grpc/status.h>
@@ -211,7 +213,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
- *err = gpr_realloc(
+ *err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -278,13 +280,13 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
-static void internal_add_error(grpc_error **err, grpc_error *new) {
- grpc_linked_error new_last = {new, UINT8_MAX};
+static void internal_add_error(grpc_error **err, grpc_error *new_err) {
+ grpc_linked_error new_last = {new_err, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
if (slot == UINT8_MAX) {
- gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err, new,
- grpc_error_string(new));
- GRPC_ERROR_UNREF(new);
+ gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
+ new_err, grpc_error_string(new_err));
+ GRPC_ERROR_UNREF(new_err);
return;
}
if ((*err)->first_err == UINT8_MAX) {
@@ -321,8 +323,8 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
uint8_t initial_arena_capacity = (uint8_t)(
DEFAULT_ERROR_CAPACITY +
(uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
- grpc_error *err =
- gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
+ grpc_error *err = (grpc_error *)gpr_malloc(
+ sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
}
@@ -406,7 +408,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
- out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
+ out = (grpc_error *)gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -431,10 +434,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
intptr_t value) {
GPR_TIMER_BEGIN("grpc_error_set_int", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_int(&new, which, value);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_int(&new_err, which, value);
GPR_TIMER_END("grpc_error_set_int", 0);
- return new;
+ return new_err;
}
typedef struct {
@@ -476,10 +479,10 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_set_str(&new, which, str);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_set_str(&new_err, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
- return new;
+ return new_err;
}
bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
@@ -506,10 +509,10 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
GPR_TIMER_BEGIN("grpc_error_add_child", 0);
- grpc_error *new = copy_error_and_unref(src);
- internal_add_error(&new, child);
+ grpc_error *new_err = copy_error_and_unref(src);
+ internal_add_error(&new_err, child);
GPR_TIMER_END("grpc_error_add_child", 0);
- return new;
+ return new_err;
}
static const char *no_error_string = "\"No Error\"";
@@ -530,7 +533,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = gpr_realloc(*s, *cap);
+ *s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@@ -582,7 +585,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
- kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ kvs->kvs =
+ (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -639,7 +643,7 @@ static char *key_time(grpc_error_times which) {
static char *fmt_time(gpr_timespec tm) {
char *out;
- char *pfx = "!!";
+ const char *pfx = "!!";
switch (tm.clock_type) {
case GPR_CLOCK_MONOTONIC:
pfx = "@monotonic:";
@@ -695,8 +699,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
- const kv_pair *ka = a;
- const kv_pair *kb = b;
+ const kv_pair *ka = (const kv_pair *)a;
+ const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}
@@ -731,7 +735,7 @@ const char *grpc_error_string(grpc_error *err) {
void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
- return p;
+ return (const char *)p;
}
kv_pairs kvs;
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index b362948691..b36330a7ab 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -19,8 +19,8 @@
#ifndef GRPC_CORE_LIB_IOMGR_ERROR_H
#define GRPC_CORE_LIB_IOMGR_ERROR_H
+#include <inttypes.h>
#include <stdbool.h>
-#include <stdint.h>
#include <grpc/slice.h>
#include <grpc/status.h>
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
index 7507484557..8746d5d353 100644
--- a/src/core/lib/iomgr/error_internal.h
+++ b/src/core/lib/iomgr/error_internal.h
@@ -24,6 +24,10 @@
#include <grpc/support/sync.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_linked_error grpc_linked_error;
struct grpc_linked_error {
@@ -57,4 +61,8 @@ struct grpc_error {
bool grpc_error_is_special(grpc_error *err);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 7f053fa728..6126e2771c 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.c
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -20,11 +20,11 @@
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
-
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
@@ -40,12 +40,12 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
@@ -145,7 +145,7 @@ static const char *kick_state_string(kick_state st) {
}
struct grpc_pollset_worker {
- kick_state kick_state;
+ kick_state state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
@@ -154,24 +154,24 @@ struct grpc_pollset_worker {
grpc_closure_list schedule_on_end_work;
};
-#define SET_KICK_STATE(worker, state) \
+#define SET_KICK_STATE(worker, kick_state) \
do { \
- (worker)->kick_state = (state); \
+ (worker)->state = (kick_state); \
(worker)->kick_state_mutator = __LINE__; \
} while (false)
-#define MAX_NEIGHBOURHOODS 1024
+#define MAX_NEIGHBORHOODS 1024
-typedef struct pollset_neighbourhood {
+typedef struct pollset_neighborhood {
gpr_mu mu;
grpc_pollset *active_root;
char pad[GPR_CACHELINE_SIZE];
-} pollset_neighbourhood;
+} pollset_neighborhood;
struct grpc_pollset {
gpr_mu mu;
- pollset_neighbourhood *neighbourhood;
- bool reassigning_neighbourhood;
+ pollset_neighborhood *neighborhood;
+ bool reassigning_neighborhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
@@ -260,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -280,8 +280,9 @@ static grpc_fd *fd_create(int fd, const char *name) {
#endif
gpr_free(fd_name);
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
- .data.ptr = new_fd};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
+ ev.data.ptr = new_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
@@ -384,8 +385,8 @@ GPR_TLS_DECL(g_current_thread_worker);
/* The designated poller */
static gpr_atm g_active_poller;
-static pollset_neighbourhood *g_neighbourhoods;
-static size_t g_num_neighbourhoods;
+static pollset_neighborhood *g_neighborhoods;
+static size_t g_num_neighborhoods;
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
@@ -424,8 +425,8 @@ static worker_remove_result worker_remove(grpc_pollset *pollset,
}
}
-static size_t choose_neighbourhood(void) {
- return (size_t)gpr_cpu_current_cpu() % g_num_neighbourhoods;
+static size_t choose_neighborhood(void) {
+ return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
static grpc_error *pollset_global_init(void) {
@@ -435,17 +436,18 @@ static grpc_error *pollset_global_init(void) {
global_wakeup_fd.read_fd = -1;
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = &global_wakeup_fd};
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
+ ev.data.ptr = &global_wakeup_fd;
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
&ev) != 0) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
- g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
- g_neighbourhoods =
- gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_init(&g_neighbourhoods[i].mu);
+ g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
+ g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
+ sizeof(*g_neighborhoods) * g_num_neighborhoods);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_init(&g_neighborhoods[i].mu);
}
return GRPC_ERROR_NONE;
}
@@ -454,17 +456,17 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_pollset);
gpr_tls_destroy(&g_current_thread_worker);
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
- for (size_t i = 0; i < g_num_neighbourhoods; i++) {
- gpr_mu_destroy(&g_neighbourhoods[i].mu);
+ for (size_t i = 0; i < g_num_neighborhoods; i++) {
+ gpr_mu_destroy(&g_neighborhoods[i].mu);
}
- gpr_free(g_neighbourhoods);
+ gpr_free(g_neighborhoods);
}
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
- pollset->reassigning_neighbourhood = false;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
+ pollset->reassigning_neighborhood = false;
pollset->root_worker = NULL;
pollset->kicked_without_poller = false;
pollset->seen_inactive = true;
@@ -477,47 +479,52 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- if (pollset->neighbourhood != neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (pollset->neighborhood != neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
pollset->prev->next = pollset->next;
pollset->next->prev = pollset->prev;
- if (pollset == pollset->neighbourhood->active_root) {
- pollset->neighbourhood->active_root =
+ if (pollset == pollset->neighborhood->active_root) {
+ pollset->neighborhood->active_root =
pollset->next == pollset ? NULL : pollset->next;
}
}
- gpr_mu_unlock(&pollset->neighbourhood->mu);
+ gpr_mu_unlock(&pollset->neighborhood->mu);
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_destroy(&pollset->mu);
}
-static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
+static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
GPR_TIMER_BEGIN("pollset_kick_all", 0);
grpc_error *error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
- switch (worker->kick_state) {
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ switch (worker->state) {
case KICKED:
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@@ -550,27 +557,22 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
- GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
GPR_TIMER_END("pollset_shutdown", 0);
}
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, now) <= 0) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX) {
+ return INT_MAX;
+ } else if (delta < 0) {
return 0;
+ } else {
+ return (int)delta;
}
-
- static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
- return millis >= 1 ? millis : 1;
}
/* Process the epoll events found by do_epoll_wait() function.
@@ -627,11 +629,11 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
(i.e. the designated poller thread) will be calling this function. So there is
no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("do_epoll_wait", 0);
int r;
- int timeout = poll_deadline_to_millis_timeout(deadline, now);
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (timeout != 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
}
@@ -641,11 +643,13 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
timeout);
} while (r < 0 && errno == EINTR);
if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
}
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
+ GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
+
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
}
@@ -657,9 +661,10 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return GRPC_ERROR_NONE;
}
-static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl, gpr_timespec *now,
- gpr_timespec deadline) {
+static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ grpc_pollset_worker **worker_hdl,
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("begin_worker", 0);
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
@@ -675,87 +680,96 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
// pollset has been observed to be inactive, we need to move back to the
// active list
bool is_reassigning = false;
- if (!pollset->reassigning_neighbourhood) {
+ if (!pollset->reassigning_neighborhood) {
is_reassigning = true;
- pollset->reassigning_neighbourhood = true;
- pollset->neighbourhood = &g_neighbourhoods[choose_neighbourhood()];
+ pollset->reassigning_neighborhood = true;
+ pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
}
- pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
+ pollset_neighborhood *neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
- retry_lock_neighbourhood:
- gpr_mu_lock(&neighbourhood->mu);
+ retry_lock_neighborhood:
+ gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
if (pollset->seen_inactive) {
- if (neighbourhood != pollset->neighbourhood) {
- gpr_mu_unlock(&neighbourhood->mu);
- neighbourhood = pollset->neighbourhood;
+ if (neighborhood != pollset->neighborhood) {
+ gpr_mu_unlock(&neighborhood->mu);
+ neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
- goto retry_lock_neighbourhood;
+ goto retry_lock_neighborhood;
}
- pollset->seen_inactive = false;
- if (neighbourhood->active_root == NULL) {
- neighbourhood->active_root = pollset->next = pollset->prev = pollset;
- /* TODO: sreek. Why would this worker state be other than UNKICKED
- * here ? (since the worker isn't added to the pollset yet, there is no
- * way it can be "found" by other threads to get kicked). */
-
- /* If there is no designated poller, make this the designated poller */
- if (worker->kick_state == UNKICKED &&
- gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
- SET_KICK_STATE(worker, DESIGNATED_POLLER);
+
+ /* In the brief time we released the pollset locks above, the worker MAY
+ have been kicked. In this case, the worker should get out of this
+ pollset ASAP and hence this should neither add the pollset to the
+ neighborhood nor mark the pollset as active.
+
+ On a side note, the only way a worker's kick state could have changed
+ at this point is if it were "kicked specifically". Since the worker has
+ not added itself to the pollset yet (by calling worker_insert()), it is
+ not visible in the "kick any" path yet */
+ if (worker->state == UNKICKED) {
+ pollset->seen_inactive = false;
+ if (neighborhood->active_root == NULL) {
+ neighborhood->active_root = pollset->next = pollset->prev = pollset;
+ /* Make this the designated poller if there isn't one already */
+ if (worker->state == UNKICKED &&
+ gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
+ SET_KICK_STATE(worker, DESIGNATED_POLLER);
+ }
+ } else {
+ pollset->next = neighborhood->active_root;
+ pollset->prev = pollset->next->prev;
+ pollset->next->prev = pollset->prev->next = pollset;
}
- } else {
- pollset->next = neighbourhood->active_root;
- pollset->prev = pollset->next->prev;
- pollset->next->prev = pollset->prev->next = pollset;
}
}
if (is_reassigning) {
- GPR_ASSERT(pollset->reassigning_neighbourhood);
- pollset->reassigning_neighbourhood = false;
+ GPR_ASSERT(pollset->reassigning_neighborhood);
+ pollset->reassigning_neighborhood = false;
}
- gpr_mu_unlock(&neighbourhood->mu);
+ gpr_mu_unlock(&neighborhood->mu);
}
worker_insert(pollset, worker);
pollset->begin_refs--;
- if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
+ if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
- while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
+ while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
- if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
- worker->kick_state == UNKICKED) {
+ if (gpr_cv_wait(&worker->cv, &pollset->mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
+ worker->state == UNKICKED) {
/* If gpr_cv_wait returns true (i.e. a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
}
}
- *now = gpr_now(now->clock_type);
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
- pollset, worker, kick_state_string(worker->kick_state),
+ pollset, worker, kick_state_string(worker->state),
pollset->shutting_down, pollset->kicked_without_poller);
}
/* We release pollset lock in this function at a couple of places:
- * 1. Briefly when assigning pollset to a neighbourhood
+ * 1. Briefly when assigning pollset to a neighborhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
@@ -770,15 +784,15 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
}
GPR_TIMER_END("begin_worker", 0);
- return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
+ return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
-static bool check_neighbourhood_for_available_poller(
- pollset_neighbourhood *neighbourhood) {
- GPR_TIMER_BEGIN("check_neighbourhood_for_available_poller", 0);
+static bool check_neighborhood_for_available_poller(
+ grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
+ GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
- grpc_pollset *inspect = neighbourhood->active_root;
+ grpc_pollset *inspect = neighborhood->active_root;
if (inspect == NULL) {
break;
}
@@ -787,7 +801,7 @@ static bool check_neighbourhood_for_available_poller(
grpc_pollset_worker *inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
- switch (inspect_worker->kick_state) {
+ switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
@@ -798,6 +812,7 @@ static bool check_neighbourhood_for_available_poller(
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@@ -823,8 +838,8 @@ static bool check_neighbourhood_for_available_poller(
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
- if (inspect == neighbourhood->active_root) {
- neighbourhood->active_root =
+ if (inspect == neighborhood->active_root) {
+ neighborhood->active_root =
inspect->next == inspect ? NULL : inspect->next;
}
inspect->next->prev = inspect->prev;
@@ -833,7 +848,7 @@ static bool check_neighbourhood_for_available_poller(
}
gpr_mu_unlock(&inspect->mu);
} while (!found_worker);
- GPR_TIMER_END("check_neighbourhood_for_available_poller", 0);
+ GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
return found_worker;
}
@@ -850,13 +865,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
- if (worker->next != worker && worker->next->kick_state == UNKICKED) {
+ if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
@@ -865,32 +881,33 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
- size_t poller_neighbourhood_idx =
- (size_t)(pollset->neighbourhood - g_neighbourhoods);
+ size_t poller_neighborhood_idx =
+ (size_t)(pollset->neighborhood - g_neighborhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
- bool scan_state[MAX_NEIGHBOURHOODS];
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- if (gpr_mu_trylock(&neighbourhood->mu)) {
+ bool scan_state[MAX_NEIGHBORHOODS];
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ if (gpr_mu_trylock(&neighborhood->mu)) {
found_worker =
- check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
scan_state[i] = true;
} else {
scan_state[i] = false;
}
}
- for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
+ for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
if (scan_state[i]) continue;
- pollset_neighbourhood *neighbourhood =
- &g_neighbourhoods[(poller_neighbourhood_idx + i) %
- g_num_neighbourhoods];
- gpr_mu_lock(&neighbourhood->mu);
- found_worker = check_neighbourhood_for_available_poller(neighbourhood);
- gpr_mu_unlock(&neighbourhood->mu);
+ pollset_neighborhood *neighborhood =
+ &g_neighborhoods[(poller_neighborhood_idx + i) %
+ g_num_neighborhoods];
+ gpr_mu_lock(&neighborhood->mu);
+ found_worker =
+ check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+ gpr_mu_unlock(&neighborhood->mu);
}
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
@@ -919,7 +936,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
grpc_error *error = GRPC_ERROR_NONE;
static const char *err_desc = "pollset_work";
@@ -930,7 +947,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return GRPC_ERROR_NONE;
}
- if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) {
+ if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!ps->shutting_down);
@@ -953,8 +970,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
designated poller */
if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
gpr_atm_acq_load(&g_epoll_set.num_events)) {
- append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline),
- err_desc);
+ append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
}
append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
@@ -971,9 +987,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return error;
}
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
grpc_error *ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
@@ -986,14 +1003,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
- kick_state_string(pollset->root_worker->kick_state),
+ kick_state_string(pollset->root_worker->state),
pollset->root_worker->next,
- kick_state_string(pollset->root_worker->next->kick_state));
+ kick_state_string(pollset->root_worker->next->state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
- kick_state_string(specific_worker->kick_state));
+ kick_state_string(specific_worker->state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
@@ -1001,10 +1018,12 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_log(GPR_ERROR, "%s", tmp);
gpr_free(tmp);
}
+
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1012,13 +1031,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
grpc_pollset_worker *next_worker = root_worker->next;
- if (root_worker->kick_state == KICKED) {
+ if (root_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
- } else if (next_worker->kick_state == KICKED) {
+ } else if (next_worker->state == KICKED) {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
@@ -1029,13 +1050,15 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
- } else if (next_worker->kick_state == UNKICKED) {
+ } else if (next_worker->state == UNKICKED) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
@@ -1043,8 +1066,8 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
- } else if (next_worker->kick_state == DESIGNATED_POLLER) {
- if (root_worker->kick_state != DESIGNATED_POLLER) {
+ } else if (next_worker->state == DESIGNATED_POLLER) {
+ if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
@@ -1053,10 +1076,12 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
gpr_cv_signal(&root_worker->cv);
}
goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
@@ -1066,23 +1091,30 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
}
} else {
- GPR_ASSERT(next_worker->kick_state == KICKED);
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+ GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
goto done;
}
} else {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
goto done;
}
- } else if (specific_worker->kick_state == KICKED) {
+
+ GPR_UNREACHABLE_CODE(goto done);
+ }
+
+ if (specific_worker->state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
@@ -1090,6 +1122,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
goto done;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
@@ -1097,6 +1130,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
@@ -1104,6 +1138,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
@@ -1160,34 +1195,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
@@ -1215,7 +1250,7 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/ev_epoll1_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.h b/src/core/lib/iomgr/ev_epoll1_linux.h
index 0696e0df40..b437032b36 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.h
+++ b/src/core/lib/iomgr/ev_epoll1_linux.h
@@ -22,8 +22,16 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// a polling engine that utilizes a singleton epoll set and turnstile polling
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H */
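
One behavioral thread running through the ev_epoll1 changes above is the switch from gpr_timespec deadlines to grpc_millis: poll_deadline_to_millis_timeout now subtracts the cached exec_ctx clock from the deadline and clamps the result into the int range that epoll_wait() accepts. The standalone restatement below uses plain integers instead of grpc_exec_ctx; INF_FUTURE here is a stand-in sentinel, not the real GRPC_MILLIS_INF_FUTURE constant.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define INF_FUTURE INT64_MAX /* stand-in for GRPC_MILLIS_INF_FUTURE */

static int deadline_to_epoll_timeout(int64_t now_ms, int64_t deadline_ms) {
  if (deadline_ms == INF_FUTURE) return -1; /* block indefinitely */
  int64_t delta = deadline_ms - now_ms;
  if (delta > INT_MAX) return INT_MAX;      /* epoll_wait takes an int */
  if (delta < 0) return 0;                  /* already expired */
  return (int)delta;
}

int main(void) {
  printf("%d\n", deadline_to_epoll_timeout(1000, 1250));       /* 250 */
  printf("%d\n", deadline_to_epoll_timeout(1000, 900));        /* 0   */
  printf("%d\n", deadline_to_epoll_timeout(1000, INF_FUTURE)); /* -1  */
  return 0;
}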
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
deleted file mode 100644
index e2e3cd9003..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c
+++ /dev/null
@@ -1,1961 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <signal.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-#include "src/core/lib/support/env.h"
-
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/* The maximum number of polling threads per polling island. By default no
- limit */
-static int g_max_pollers_per_pi = INT_MAX;
-
-static int grpc_wakeup_signal = -1;
-static bool is_grpc_wakeup_signal_initialized = false;
-
-/* Implements the function defined in grpc_posix.h. This function might be
- * called even before grpc_init(), either to set a different signal to use or,
- * with signum == -1, to disable the use of signals entirely */
-static void grpc_use_signal(int signum) {
- grpc_wakeup_signal = signum;
- is_grpc_wakeup_signal_initialized = true;
-
- if (grpc_wakeup_signal < 0) {
- gpr_log(GPR_INFO,
- "Use of signals is disabled. Epoll engine will not be used");
- } else {
- gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
- grpc_wakeup_signal);
- }
-}
-
-struct polling_island;
-
-typedef enum {
- POLL_OBJ_FD,
- POLL_OBJ_POLLSET,
- POLL_OBJ_POLLSET_SET
-} poll_obj_type;
-
-typedef struct poll_obj {
-#ifndef NDEBUG
- poll_obj_type obj_type;
-#endif
- gpr_mu mu;
- struct polling_island *pi;
-} poll_obj;
-
-static const char *poll_obj_string(poll_obj_type po_type) {
- switch (po_type) {
- case POLL_OBJ_FD:
- return "fd";
- case POLL_OBJ_POLLSET:
- return "pollset";
- case POLL_OBJ_POLLSET_SET:
- return "pollset_set";
- }
-
- GPR_UNREACHABLE_CODE(return "UNKNOWN");
-}
-
-/*******************************************************************************
- * Fd Declarations
- */
-
-#define FD_FROM_PO(po) ((grpc_fd *)(po))
-
-struct grpc_fd {
- poll_obj po;
-
- int fd;
- /* refst format:
- bit 0 : 1=Active / 0=Orphaned
- bits 1-n : refcount
- Ref/Unref by two to avoid altering the orphaned bit */
- gpr_atm refst;
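-
-  /* Illustrative reading of refst (a sketch, not normative): refst == 7
-     (binary 111) means bit 0 is set, so the fd is still active, and the
-     remaining bits encode 3 outstanding references. Because refs are taken
-     and dropped in units of two (e.g. 7 + 2 == 9, binary 1001), ref/unref
-     traffic never disturbs the orphaned bit in bit 0. */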
-
- /* The fd is either closed or we relinquished control of it. In either
- cases, this indicates that the 'fd' on this structure is no longer
- valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- /* The pollset that last noticed that the fd is readable. The actual type
- * stored in this is (grpc_pollset *) */
- gpr_atm read_notifier_pollset;
-
- grpc_iomgr_object iomgr_object;
-};
-
-/* Reference counting for fds */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
-#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
-#define GRPC_FD_REF(fd, reason) fd_ref(fd)
-#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
-#endif
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * Polling island Declarations
- */
-
-#ifndef NDEBUG
-
-#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(exec_ctx, p, r) \
- pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct worker_node {
- struct worker_node *next;
- struct worker_node *prev;
-} worker_node;
-
-/* This is also used as grpc_workqueue (by directly casting it) */
-typedef struct polling_island {
- gpr_mu mu;
- /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
- the refcount.
- Once the ref count becomes zero, this structure is destroyed which means
- we should ensure that there is never a scenario where a PI_ADD_REF() is
- racing with a PI_UNREF() that just made the ref_count zero. */
- gpr_atm ref_count;
-
- /* Pointer to the polling_island this merged into.
- * merged_to value is only set once in polling_island's lifetime (and that too
- * only if the island is merged with another island). Because of this, we can
- * use gpr_atm type here so that we can do atomic access on this and reduce
- * lock contention on 'mu' mutex.
- *
- * Note that if this field is not NULL (i.e not 0), all the remaining fields
- * (except mu and ref_count) are invalid and must be ignored. */
- gpr_atm merged_to;
-
- /* Number of threads currently polling on this island */
- gpr_atm poller_count;
-
- /* The list of workers waiting to do polling on this polling island */
- gpr_mu worker_list_mu;
- worker_node worker_list_head;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-
- /* The file descriptors in the epoll set */
- size_t fd_cnt;
- size_t fd_capacity;
- grpc_fd **fds;
-} polling_island;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-#define WORKER_FROM_WORKER_LIST_NODE(p) \
- (struct grpc_pollset_worker *)(((char *)(p)) - \
- offsetof(grpc_pollset_worker, pi_list_link))
-struct grpc_pollset_worker {
- /* Thread id of this worker */
- pthread_t pt_id;
-
- /* Used to prevent a worker from getting kicked multiple times */
- gpr_atm is_kicked;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-
- /* Indicates if it is this worker's turn to do epoll */
- gpr_atm is_polling_turn;
-
- /* Node in the polling island's worker list. */
- worker_node pi_list_link;
-};
-
-struct grpc_pollset {
- poll_obj po;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
-  bool shutting_down;          /* Is the pollset shutting down? */
-  bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
-  grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- poll_obj po;
-};
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
-
-/*******************************************************************************
- * Polling island Definitions
- */
-
-/* The wakeup fd that is used to wake up all threads in a Polling island. This
- is useful in the polling island merge operation where we need to wakeup all
- the threads currently polling the smaller polling island (so that they can
- start polling the new/merged polling island)
-
- NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the
- threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd polling_island_wakeup_fd;
-
-/* The polling island being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread polling_island *g_current_thread_polling_island;
-
-/* Forward declaration */
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
- To work-around this race, we establish a happens-before relation between
- the code just-before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
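-
-/* Illustrative note on how that happens-before edge is formed: the
-   gpr_atm_rel_store(&g_epoll_sync, 0) issued just before the epoll_ctl()
-   calls in polling_island_add_fds_locked() pairs with the
-   gpr_atm_acq_load(&g_epoll_sync) performed right after epoll_pwait()
-   returns, giving TSAN an explicit synchronizes-with edge covering the
-   grpc_fd pointers handed through the epoll set. */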
-
-static void pi_add_ref(polling_island *pi);
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
-
-#ifndef NDEBUG
-static void pi_add_ref_dbg(polling_island *pi, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, old_cnt + 1, reason, file, line);
- }
- pi_add_ref(pi);
-}
-
-static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- pi, old_cnt, (old_cnt - 1), reason, file, line);
- }
- pi_unref(exec_ctx, pi);
-}
-#endif
-
-static void pi_add_ref(polling_island *pi) {
- gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
-}
-
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- /* If ref count went to zero, delete the polling island.
-     Note that this deletion need not be done under a lock. Once the ref count goes
- to zero, we are guaranteed that no one else holds a reference to the
- polling island (and that there is no racing pi_add_ref() call either).
-
- Also, if we are deleting the polling island and the merged_to field is
- non-empty, we should remove a ref to the merged_to polling island
- */
- if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- polling_island_delete(exec_ctx, pi);
- if (next != NULL) {
- PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
- }
- }
-}
-
-static void worker_node_init(worker_node *node) {
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void push_back_worker_node(worker_node *head, worker_node *node) {
- node->next = head;
- node->prev = head->prev;
- head->prev->next = node;
- head->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static void remove_worker_node(worker_node *node) {
- node->next->prev = node->prev;
- node->prev->next = node->next;
- /* If node's next and prev point to itself, the node is considered detached
- * from the list*/
- node->next = node->prev = node;
-}
-
-/* Not thread safe. Do under a list-level lock */
-static worker_node *pop_front_worker_node(worker_node *head) {
- worker_node *node = head->next;
- if (node != head) {
- remove_worker_node(node);
- } else {
- node = NULL;
- }
-
- return node;
-}
-
-/* Returns true if the node's next and prev are pointing to itself (which
-   indicates that the node is not in the list) */
-static bool is_worker_node_detached(worker_node *node) {
- return (node->next == node->prev && node->next == node);
-}
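-
-/* Sketch of the list shape these helpers maintain (illustrative only):
-   worker_list_head acts as a sentinel, so an empty list is simply
-   head.next == head.prev == &head. After push_back_worker_node(&head, &a)
-   and push_back_worker_node(&head, &b) the ring is head -> a -> b -> head
-   (and the reverse via prev). pop_front_worker_node(&head) then returns &a
-   and leaves it detached, i.e. a.next == a.prev == &a, which is exactly the
-   condition is_worker_node_detached() tests. */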
-
-/* The caller is expected to hold pi->mu lock before calling this function
- */
-static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
- size_t fd_count, bool add_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "polling_island_add_fds";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- for (i = 0; i < fd_count; i++) {
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fds[i];
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);
-
- if (err < 0) {
- if (errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- continue;
- }
-
- if (pi->fd_cnt == pi->fd_capacity) {
- pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
- }
-
- pi->fds[pi->fd_cnt++] = fds[i];
- if (add_fd_refs) {
- GRPC_FD_REF(fds[i], "polling_island");
- }
- }
-}
-
-/* The caller is expected to hold pi->mu before calling this */
-static void polling_island_add_wakeup_fd_locked(polling_island *pi,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "polling_island_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_all_fds_locked(polling_island *pi,
- bool remove_fd_refs,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fds";
-
- for (i = 0; i < pi->fd_cnt; i++) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
- "error: %d (%s)",
- pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-
- if (remove_fd_refs) {
- GRPC_FD_UNREF(pi->fds[i], "polling_island");
- }
- }
-
- pi->fd_cnt = 0;
-}
-
-/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
- bool is_fd_closed,
- grpc_error **error) {
- int err;
- size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fd";
-
-  /* If fd is already closed, then it would have been automatically removed
- from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- pi->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-
- for (i = 0; i < pi->fd_cnt; i++) {
- if (pi->fds[i] == fd) {
- pi->fds[i] = pi->fds[--pi->fd_cnt];
- GRPC_FD_UNREF(fd, "polling_island");
- break;
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
- grpc_fd *initial_fd,
- grpc_error **error) {
- polling_island *pi = NULL;
- const char *err_desc = "polling_island_create";
-
- *error = GRPC_ERROR_NONE;
-
- pi = gpr_malloc(sizeof(*pi));
- gpr_mu_init(&pi->mu);
- pi->fd_cnt = 0;
- pi->fd_capacity = 0;
- pi->fds = NULL;
- pi->epoll_fd = -1;
-
- gpr_atm_rel_store(&pi->ref_count, 0);
- gpr_atm_rel_store(&pi->poller_count, 0);
- gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
-
- gpr_mu_init(&pi->worker_list_mu);
- worker_node_init(&pi->worker_list_head);
-
- pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (pi->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
- if (initial_fd != NULL) {
- polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- polling_island_delete(exec_ctx, pi);
- pi = NULL;
- }
- return pi;
-}
-
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
- GPR_ASSERT(pi->fd_cnt == 0);
-
- if (pi->epoll_fd >= 0) {
- close(pi->epoll_fd);
- }
- gpr_mu_destroy(&pi->mu);
- gpr_mu_destroy(&pi->worker_list_mu);
- GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head));
-
- gpr_free(pi->fds);
- gpr_free(pi);
-}
-
-/* Attempts to get the last polling island in the linked list (linked by the
- * 'merged_to' field). Since this does not lock the polling island, there are no
- * guarantees that the island returned is the last island */
-static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- while (next != NULL) {
- pi = next;
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling island i.e the last polling island in
- the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
- returned polling island's mu.
- Usage: To lock/unlock polling island "pi", do the following:
- polling_island *pi_latest = polling_island_lock(pi);
- ...
- ... critical section ..
- ...
- gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
-static polling_island *polling_island_lock(polling_island *pi) {
- polling_island *next = NULL;
-
- while (true) {
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
- /* Looks like 'pi' is the last node in the linked list but unless we check
- this by holding the pi->mu lock, we cannot be sure (i.e without the
- pi->mu lock, we don't prevent island merges).
- To be absolutely sure, check once more by holding the pi->mu lock */
- gpr_mu_lock(&pi->mu);
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
- if (next == NULL) {
-        /* pi is in fact the last node and we have the pi->mu lock. We're done */
- break;
- }
-
- /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu
- * isn't the lock we are interested in. Continue traversing the list */
- gpr_mu_unlock(&pi->mu);
- }
-
- pi = next;
- }
-
- return pi;
-}
-
-/* Gets the lock on the *latest* polling islands in the linked lists pointed by
- *p and *q (and also updates *p and *q to point to the latest polling islands)
-
- This function is needed because calling the following block of code to obtain
- locks on polling islands (*p and *q) is prone to deadlocks.
- {
- polling_island_lock(*p, true);
- polling_island_lock(*q, true);
- }
-
- Usage/example:
- polling_island *p1;
- polling_island *p2;
- ..
- polling_island_lock_pair(&p1, &p2);
- ..
- .. Critical section with both p1 and p2 locked
- ..
- // Release locks: Always call polling_island_unlock_pair() to release locks
- polling_island_unlock_pair(p1, p2);
-*/
-static void polling_island_lock_pair(polling_island **p, polling_island **q) {
- polling_island *pi_1 = *p;
- polling_island *pi_2 = *q;
- polling_island *next_1 = NULL;
- polling_island *next_2 = NULL;
-
- /* The algorithm is simple:
- - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
- keep updating pi_1 and pi_2)
- - Then obtain locks on the islands by following a lock order rule of
- locking polling_island with lower address first
- Special case: Before obtaining the locks, check if pi_1 and pi_2 are
- pointing to the same island. If that is the case, we can just call
- polling_island_lock()
- - After obtaining both the locks, double check that the polling islands
- are still the last polling islands in their respective linked lists
- (this is because there might have been polling island merges before
- we got the lock)
- - If the polling islands are the last islands, we are done. If not,
- release the locks and continue the process from the first step */
- while (true) {
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- while (next_1 != NULL) {
- pi_1 = next_1;
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- }
-
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- while (next_2 != NULL) {
- pi_2 = next_2;
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- }
-
- if (pi_1 == pi_2) {
- pi_1 = pi_2 = polling_island_lock(pi_1);
- break;
- }
-
- if (pi_1 < pi_2) {
- gpr_mu_lock(&pi_1->mu);
- gpr_mu_lock(&pi_2->mu);
- } else {
- gpr_mu_lock(&pi_2->mu);
- gpr_mu_lock(&pi_1->mu);
- }
-
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
- if (next_1 == NULL && next_2 == NULL) {
- break;
- }
-
- gpr_mu_unlock(&pi_1->mu);
- gpr_mu_unlock(&pi_2->mu);
- }
-
- *p = pi_1;
- *q = pi_2;
-}
-
-static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
- if (p == q) {
- gpr_mu_unlock(&p->mu);
- } else {
- gpr_mu_unlock(&p->mu);
- gpr_mu_unlock(&q->mu);
- }
-}
-
-static polling_island *polling_island_merge(polling_island *p,
- polling_island *q,
- grpc_error **error) {
- /* Get locks on both the polling islands */
- polling_island_lock_pair(&p, &q);
-
- if (p != q) {
- /* Make sure that p points to the polling island with fewer fds than q */
- if (p->fd_cnt > q->fd_cnt) {
- GPR_SWAP(polling_island *, p, q);
- }
-
- /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q
- Note that the refcounts on the fds being moved will not change here.
- This is why the last param in the following two functions is 'false') */
- polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
- polling_island_remove_all_fds_locked(p, false, error);
-
- /* Wakeup all the pollers (if any) on p so that they pickup this change */
- polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);
-
- /* Add the 'merged_to' link from p --> q */
- gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
- PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
- }
- /* else if p == q, nothing needs to be done */
-
- polling_island_unlock_pair(p, q);
-
- /* Return the merged polling island (Note that no merge would have happened
- if p == q which is ok) */
- return q;
-}
-
-static grpc_error *polling_island_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
- }
-
- return error;
-}
-
-static void polling_island_global_shutdown() {
- grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait can deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-/* The alarm system needs to be able to wakeup 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-#ifndef NDEBUG
-#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
- }
-#else
-#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
-#endif
- GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
-}
-
-#ifndef NDEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
- }
-#else
-static void unref_by(grpc_fd *fd, int n) {
-#endif
- gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
- if (old == n) {
- /* Add the fd to the freelist */
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
- } else {
- GPR_ASSERT(old > n);
- }
-}
-
-/* Increment refcount by two to avoid changing the orphan bit */
-#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- ref_by(fd, 2, reason, file, line);
-}
-
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
- int line) {
- unref_by(fd, 2, reason, file, line);
-}
-#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
-#endif
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->po.mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
-
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->po.mu);
- }
-
- /* Note: It is not really needed to get the new_fd->po.mu lock here. If this
- * is a newly created fd (or an fd we got from the freelist), no one else
- * would be holding a lock to it anyway. */
- gpr_mu_lock(&new_fd->po.mu);
- new_fd->po.pi = NULL;
-#ifndef NDEBUG
- new_fd->po.obj_type = POLL_OBJ_FD;
-#endif
-
- gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
- gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->po.mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
- }
-#endif
- gpr_free(fd_name);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->po.mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->po.mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *unref_pi = NULL;
-
- gpr_mu_lock(&fd->po.mu);
- fd->on_done_closure = on_done;
-
- /* Remove the active status but keep referenced. We want this grpc_fd struct
- to be alive (and not added to freelist) until the end of this function */
- REF_BY(fd, 1, reason);
-
- /* Remove the fd from the polling island:
- - Get a lock on the latest polling island (i.e the last island in the
- linked list pointed by fd->po.pi). This is the island that
- would actually contain the fd
- - Remove the fd from the latest polling island
- - Unlock the latest polling island
- - Set fd->po.pi to NULL (but remove the ref on the polling island
- before doing this.) */
- if (fd->po.pi != NULL) {
- polling_island *pi_latest = polling_island_lock(fd->po.pi);
- polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
- gpr_mu_unlock(&pi_latest->mu);
-
- unref_pi = fd->po.pi;
- fd->po.pi = NULL;
- }
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else {
- close(fd->fd);
- }
-
- fd->orphaned = true;
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->po.mu);
- UNREF_BY(fd, 2, reason); /* Drop the reference */
- if (unref_pi != NULL) {
- /* Unref stale polling island here, outside the fd lock above.
- The polling island owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-static __thread bool g_initialized_sigmask;
-static __thread sigset_t g_orig_sigmask;
-static __thread sigset_t g_wakeup_sig_set;
-
-static void sig_handler(int sig_num) {
-#ifdef GRPC_EPOLL_DEBUG
- gpr_log(GPR_INFO, "Received signal %d", sig_num);
-#endif
-}
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->pt_id = pthread_self();
- worker->next = worker->prev = NULL;
- gpr_atm_no_barrier_store(&worker->is_kicked, (gpr_atm)0);
- gpr_atm_no_barrier_store(&worker->is_polling_turn, (gpr_atm)0);
- worker_node_init(&worker->pi_list_link);
-}
-
-static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- poller_kick_init();
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *worker_kick(grpc_pollset_worker *worker,
- gpr_atm *is_kicked) {
- grpc_error *err = GRPC_ERROR_NONE;
-
- /* Kick the worker only if it was not already kicked */
- if (gpr_atm_no_barrier_cas(is_kicked, (gpr_atm)0, (gpr_atm)1)) {
- GRPC_POLLING_TRACE(
- "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
- (void *)worker, (long int)worker->pt_id);
- int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
- if (err_num != 0) {
- err = GRPC_OS_ERROR(err_num, "pthread_kill");
- }
- }
- return err;
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_kicked);
-}
-
-static grpc_error *poller_kick(grpc_pollset_worker *worker) {
- return worker_kick(worker, &worker->is_polling_turn);
-}
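-
-/* Illustrative summary of how a kick actually lands (a sketch, not
-   normative): the compare-and-swap on is_kicked / is_polling_turn ensures a
-   worker is signalled at most once per kick, and pthread_kill() delivers
-   grpc_wakeup_signal to that specific thread. A thread blocked in
-   epoll_pwait() with the signal unblocked in its wait mask then returns -1
-   with errno == EINTR, and a thread parked in sigwaitinfo()/sigtimedwait()
-   wakes up, so either way the worker re-examines its state instead of
-   continuing to wait. */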
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, it means that we can kick "any" worker on this
- pollset 'p'. If 'p' happens to be the same pollset this thread is
- currently polling (i.e in pollset_work() function), then there is no need
- to kick any other worker since the current thread can just absorb the
- kick. This is the reason why we enter this case only when
- g_current_thread_pollset is != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->po.mu);
- *mu = &pollset->po.mu;
- pollset->po.pi = NULL;
-#ifndef NDEBUG
- pollset->po.obj_type = POLL_OBJ_POLLSET;
-#endif
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-/* Convert millis to timespec (clock-type is assumed to be GPR_TIMESPAN) */
-static struct timespec millis_to_timespec(int millis) {
- struct timespec linux_ts;
- gpr_timespec gpr_ts;
-
- if (millis == -1) {
- gpr_ts = gpr_inf_future(GPR_TIMESPAN);
- } else {
- gpr_ts = gpr_time_from_millis(millis, GPR_TIMESPAN);
- }
-
- linux_ts.tv_sec = (time_t)gpr_ts.tv_sec;
- linux_ts.tv_nsec = gpr_ts.tv_nsec;
- return linux_ts;
-}
-
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
-   - Polls longer than a millisecond are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
- return millis >= 1 ? millis : 1;
-}
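-
-/* Worked examples of the conversion above (illustrative only):
-   - deadline == gpr_inf_future            -> -1  (block indefinitely)
-   - deadline <= now + 10us                ->  0  (non-blocking / spin poll)
-   - deadline == now + 0.2ms               ->  1  (rounded up to 1ms)
-   - deadline == now + 2.4ms               ->  3  (2.4ms + 0.999...ms,
-                                                   truncated to whole millis) */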
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-
- /* Note, it is possible that fd_become_readable might be called twice with
- different 'notifier's when an fd becomes readable and it is in two epoll
- sets (This can happen briefly during polling island merges). In such cases
-     it does not really matter which notifier is set as the read_notifier_pollset
- (They would both point to the same polling island anyway) */
- /* Use release store to match with acquire load in fd_get_read_notifier */
- gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
- if (ps->po.pi != NULL) {
- PI_UNREF(exec_ctx, ps->po.pi, reason);
- }
- ps->po.pi = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->po.pi to NULL */
- pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying polling island. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutexes, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->po.mu);
-}
-
-/* NOTE: This function may modify 'now' */
-static bool acquire_polling_lease(grpc_exec_ctx *exec_ctx,
- grpc_pollset_worker *worker,
- polling_island *pi, gpr_timespec deadline,
- gpr_timespec *now) {
- bool is_lease_acquired = false;
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- long num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
-
- if (num_pollers >= g_max_pollers_per_pi) {
- push_back_worker_node(&pi->worker_list_head, &worker->pi_list_link);
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
-
- bool is_timeout = false;
- int ret;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, *now);
- if (timeout_ms == -1) {
- ret = sigwaitinfo(&g_wakeup_sig_set, NULL);
- } else {
- struct timespec sigwait_timeout = millis_to_timespec(timeout_ms);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx);
- ret = sigtimedwait(&g_wakeup_sig_set, NULL, &sigwait_timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- }
-
- if (ret == -1) {
- if (errno == EAGAIN) {
- is_timeout = true;
- } else {
- /* NOTE: This should not happen. If we see these log messages, it means
-           we are most likely doing something incorrect in the setup needed
- for sigwaitinfo/sigtimedwait */
- gpr_log(GPR_ERROR,
- "sigtimedwait failed with retcode: %d (timeout_ms: %d)", errno,
- timeout_ms);
- }
- }
-
-    /* Did the worker come out of sigtimedwait because a thread that just
-       exited epoll kicked it (in the release_polling_lease function)? */
- bool is_polling_turn = gpr_atm_acq_load(&worker->is_polling_turn);
-
- /* Did the worker come out of sigtimedwait due to a thread alerting it that
- some completion event was (likely) available in the completion queue */
- bool is_kicked = gpr_atm_no_barrier_load(&worker->is_kicked);
-
- if (is_kicked || is_timeout) {
- *now = deadline; /* Essentially make the epoll timeout = 0 */
- } else if (is_polling_turn) {
- *now = gpr_now(GPR_CLOCK_MONOTONIC); /* Reduce the epoll timeout */
- }
-
- gpr_mu_lock(&pi->worker_list_mu); // LOCK
- /* The node might have already been removed from the list by the poller
- that kicked this. However it is safe to call 'remove_worker_node' on
- an already detached node */
- remove_worker_node(&worker->pi_list_link);
- /* It is important to read the num_pollers again under the lock so that we
- * have the latest num_pollers value that doesn't change while we are doing
- * the "(num_pollers < g_max_pollers_per_pi)" a a few lines below */
- num_pollers = gpr_atm_no_barrier_load(&pi->poller_count);
- }
-
- if (num_pollers < g_max_pollers_per_pi) {
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1);
- is_lease_acquired = true;
- }
-
- gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK
- return is_lease_acquired;
-}
-
-static void release_polling_lease(polling_island *pi, grpc_error **error) {
- gpr_mu_lock(&pi->worker_list_mu);
-
- gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1);
- worker_node *node = pop_front_worker_node(&pi->worker_list_head);
- if (node != NULL) {
- grpc_pollset_worker *next_worker = WORKER_FROM_WORKER_LIST_NODE(node);
- append_error(error, poller_kick(next_worker), "poller kick error");
- }
-
- gpr_mu_unlock(&pi->worker_list_mu);
-}
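-
-/* Sketch of the lease hand-off between acquire_polling_lease() and
-   release_polling_lease() (illustrative, assuming g_max_pollers_per_pi == 1):
-   worker A takes the lease and enters epoll_pwait(); worker B sees
-   poller_count at the limit, queues its worker_node on the island and parks
-   in sigtimedwait(). When A returns, release_polling_lease() decrements
-   poller_count, pops B's node and poller_kick()s it; B wakes, removes its
-   (possibly already detached) node and re-checks poller_count under the
-   lock before taking the lease itself. */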
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd,
- grpc_pollset *pollset, polling_island *pi,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- /* Only g_max_pollers_per_pi threads can be doing polling in parallel.
- If we cannot get a lease, we cannot continue to do epoll_pwait() */
- if (!acquire_polling_lease(exec_ctx, worker, pi, deadline, &now)) {
- return;
- }
-
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "pollset_work_and_unlock";
-
- /* timeout_ms is the time between 'now' and 'deadline' */
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- ep_rv =
- epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- /* Give back the lease right away so that some other thread can enter */
- release_polling_lease(pi, error);
-
- if (ep_rv < 0) {
- if (errno != EINTR) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- } else {
-      /* We were interrupted. Save an iteration by doing a zero timeout
- epoll_wait to see if there are any other events of interest */
- GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
- (void *)pollset, (void *)worker);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
- }
- }
-
-#ifdef GRPC_TSAN
-  /* See the definition of g_epoll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &polling_island_wakeup_fd) {
- GRPC_POLLING_TRACE(
- "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
- "%d) got merged",
- (void *)pollset, (void *)worker, epoll_fd);
- /* This means that our polling island is merged with a different
- island. We do not have to do anything here since the subsequent call
- to the function pollset_work_and_unlock() will pick up the correct
- epoll_fd */
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
-
-/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- gpr_timespec now, gpr_timespec deadline,
- sigset_t *sig_mask, grpc_error **error) {
- int epoll_fd = -1;
- polling_island *pi = NULL;
- GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
-
-  /* We need to get the epoll_fd to wait on. The epoll_fd is inside the
-     latest polling island pointed to by pollset->po.pi
-
- Since epoll_fd is immutable, it is safe to read it without a lock on the
- polling island. There is however a possibility that the polling island from
- which we got the epoll_fd, got merged with another island in the meantime.
- This is okay because in such a case, we will wakeup right-away from
- epoll_pwait() (because any merge will poison the old polling island's epoll
- set 'polling_island_wakeup_fd') and then pick up the latest polling_island
-     the next time this function (pollset_work_and_unlock()) is called */
-
- if (pollset->po.pi == NULL) {
- pollset->po.pi = polling_island_create(exec_ctx, NULL, error);
- if (pollset->po.pi == NULL) {
- GPR_TIMER_END("pollset_work_and_unlock", 0);
- return; /* Fatal error. Cannot continue */
- }
-
- PI_ADD_REF(pollset->po.pi, "ps");
- GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
- (void *)pollset, (void *)pollset->po.pi);
- }
-
- pi = polling_island_maybe_get_latest(pollset->po.pi);
- epoll_fd = pi->epoll_fd;
-
- /* Update the pollset->po.pi since the island being pointed by
- pollset->po.pi maybe older than the one pointed by pi) */
- if (pollset->po.pi != pi) {
- /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
- polling island to be deleted */
- PI_ADD_REF(pi, "ps");
- PI_UNREF(exec_ctx, pollset->po.pi, "ps");
- pollset->po.pi = pi;
- }
-
- /* Add an extra ref so that the island does not get destroyed (which means
-     the epoll_fd won't be closed) while we are doing an epoll_wait() on the
- epoll_fd */
- PI_ADD_REF(pi, "ps_work");
- gpr_mu_unlock(&pollset->po.mu);
-
- g_current_thread_polling_island = pi;
- pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline,
- sig_mask, error);
- g_current_thread_polling_island = NULL;
-
- GPR_ASSERT(pi != NULL);
-
- /* Before leaving, release the extra ref we added to the polling island. It
- is important to use "pi" here (i.e our old copy of pollset->po.pi
- that we got before releasing the polling island lock). This is because
-     pollset->po.pi pointer might get updated in other parts of the
- code when there is an island merge while we are doing epoll_wait() above */
- PI_UNREF(exec_ctx, pi, "ps_work");
-
- GPR_TIMER_END("pollset_work_and_unlock", 0);
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->po.mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is some
- work that needs attention like an event on the completion queue or an
- alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- } else if (!pollset->shutting_down) {
- /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up
- (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
- worker that there is some pending work that needs immediate attention
- (like an event on the completion queue, or a polling island merge that
- results in a new epoll-fd to wait on) and that the worker should not
- spend time waiting in epoll_pwait().
-
- A worker can be kicked anytime from the point it is added to the pollset
- via push_front_worker() (or push_back_worker()) to the point it is
- removed via remove_worker().
- If the worker is kicked before/during it calls epoll_pwait(), it should
- immediately exit from epoll_wait(). If the worker is kicked after it
- returns from epoll_wait(), then nothing really needs to be done.
-
- To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
- times *except* when it is in epoll_pwait(). This way, the worker never
- misses acting on a kick */
-
- if (!g_initialized_sigmask) {
- sigemptyset(&g_wakeup_sig_set);
- sigaddset(&g_wakeup_sig_set, grpc_wakeup_signal);
- pthread_sigmask(SIG_BLOCK, &g_wakeup_sig_set, &g_orig_sigmask);
- sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
- g_initialized_sigmask = true;
-      /* new_mask: The new thread mask which blocks 'grpc_wakeup_signal'.
-                   This is the mask used at all times *except during
-                   epoll_wait()*
-         g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and
-                   this is the mask to use *during epoll_wait()*
-
-         The new_mask is set on the worker before it is added to the pollset
-         (i.e before it can be kicked) */
- }
-
- push_front_worker(pollset, &worker); /* Add worker to pollset */
-
- pollset_work_and_unlock(exec_ctx, pollset, &worker, now, deadline,
- &g_orig_sigmask, &error);
- grpc_exec_ctx_flush(exec_ctx);
-
- gpr_mu_lock(&pollset->po.mu);
-
- /* Note: There is no need to reset worker.is_kicked to 0 since we are no
- longer going to use this worker */
- remove_worker(pollset, &worker);
- }
-
- /* If we are the last worker on the pollset (i.e pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->po.mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->po.mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
- poll_obj_type bag_type, poll_obj *item,
- poll_obj_type item_type) {
- GPR_TIMER_BEGIN("add_poll_object", 0);
-
-#ifndef NDEBUG
- GPR_ASSERT(item->obj_type == item_type);
- GPR_ASSERT(bag->obj_type == bag_type);
-#endif
-
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *pi_new = NULL;
-
- gpr_mu_lock(&bag->mu);
- gpr_mu_lock(&item->mu);
-
-retry:
- /*
- * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing
- * 2) If item->pi and bag->pi are both NULL, create a new polling island (with
- * a refcount of 2) and point item->pi and bag->pi to the new island
- * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to
- * the other's non-NULL pi
-   * 4) Finally if item->pi and bag->pi are non-NULL and not-equal, merge the
- * polling islands and update item->pi and bag->pi to point to the new
- * island
- */
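-
-  /* Concrete instance of the cases above (a sketch): pollset_add_fd() calls
-     add_poll_object() with item = &fd->po and bag = &pollset->po. The first
-     fd added to a fresh pollset hits case 2 (both pi fields NULL), so a new
-     polling island is created holding that fd, and item->pi and bag->pi both
-     end up pointing at it with one ref each. Later additions typically hit
-     case 1 or 3, and case 4 only when two already-populated islands meet. */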
-
- /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already
- * orphaned */
- if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) {
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
- return;
- }
-
- if (item->pi == bag->pi) {
- pi_new = item->pi;
- if (pi_new == NULL) {
- /* GPR_ASSERT(item->pi == bag->pi == NULL) */
-
- /* If we are adding an fd to a bag (i.e pollset or pollset_set), then
- * we need to do some extra work to make TSAN happy */
- if (item_type == POLL_OBJ_FD) {
- /* Unlock before creating a new polling island: the polling island will
- create a workqueue which creates a file descriptor, and holding an fd
- lock here can eventually cause a loop to appear to TSAN (making it
- unhappy). We don't think it's a real loop (there's an epoch point
- where that loop possibility disappears), but the advantages of
- keeping TSAN happy outweigh any performance advantage we might have
- by keeping the lock held. */
- gpr_mu_unlock(&item->mu);
- pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
- gpr_mu_lock(&item->mu);
-
- /* Need to reverify any assumptions made between the initial lock and
- getting to this branch: if they've changed, we need to throw away our
- work and figure things out again. */
- if (item->pi != NULL) {
- GRPC_POLLING_TRACE(
- "add_poll_object: Raced creating new polling island. pi_new: %p "
- "(fd: %d, %s: %p)",
- (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
- (void *)bag);
- /* No need to lock 'pi_new' here since this is a new polling island
- and no one has a reference to it yet */
- polling_island_remove_all_fds_locked(pi_new, true, &error);
-
- /* Ref and unref so that the polling island gets deleted during unref
- */
- PI_ADD_REF(pi_new, "dance_of_destruction");
- PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
- goto retry;
- }
- } else {
- pi_new = polling_island_create(exec_ctx, NULL, &error);
- }
-
- GRPC_POLLING_TRACE(
- "add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
- "%s: %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- GRPC_POLLING_TRACE(
- "add_poll_object: Same polling island. pi: %p (%s, %s)",
- (void *)pi_new, poll_obj_string(item_type),
- poll_obj_string(bag_type));
- }
- } else if (item->pi == NULL) {
- /* GPR_ASSERT(bag->pi != NULL) */
- /* Make pi_new point to latest pi*/
- pi_new = polling_island_lock(bag->pi);
-
- if (item_type == POLL_OBJ_FD) {
- grpc_fd *fd = FD_FROM_PO(item);
- polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
- }
-
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else if (bag->pi == NULL) {
- /* GPR_ASSERT(item->pi != NULL) */
- /* Make pi_new to point to latest pi */
- pi_new = polling_island_lock(item->pi);
- gpr_mu_unlock(&pi_new->mu);
- GRPC_POLLING_TRACE(
- "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- } else {
- pi_new = polling_island_merge(item->pi, bag->pi, &error);
- GRPC_POLLING_TRACE(
- "add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
- "bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
- }
-
- /* At this point, pi_new is the polling island that both item->pi and bag->pi
- MUST be pointing to */
-
- if (item->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(item_type));
- if (item->pi != NULL) {
- PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
- }
- item->pi = pi_new;
- }
-
- if (bag->pi != pi_new) {
- PI_ADD_REF(pi_new, poll_obj_string(bag_type));
- if (bag->pi != NULL) {
- PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
- }
- bag->pi = pi_new;
- }
-
- gpr_mu_unlock(&item->mu);
- gpr_mu_unlock(&bag->mu);
-
- GRPC_LOG_IF_ERROR("add_poll_object", error);
- GPR_TIMER_END("add_poll_object", 0);
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
- POLL_OBJ_FD);
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
- gpr_mu_init(&pss->po.mu);
- pss->po.pi = NULL;
-#ifndef NDEBUG
- pss->po.obj_type = POLL_OBJ_POLLSET_SET;
-#endif
- return pss;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- gpr_mu_destroy(&pss->po.mu);
-
- if (pss->po.pi != NULL) {
- PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
- }
-
- gpr_free(pss);
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
- POLL_OBJ_FD);
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
- POLL_OBJ_POLLSET);
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
- POLL_OBJ_POLLSET_SET);
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- fd_global_shutdown();
- pollset_global_shutdown();
- polling_island_global_shutdown();
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(
- GPR_ERROR,
- "epoll_create1 failed with error: %d. Not using epoll polling engine",
- fd);
- return false;
- }
- close(fd);
- return true;
-}
-
-/* This is mainly for testing purposes. If the environment variable
- * GRPC_MAX_POLLERS_PER_PI is set, assigns its value to g_max_pollers_per_pi
- * (any negative value is treated as INT_MAX) */
-static void set_max_pollers_per_island() {
- char *s = gpr_getenv("GRPC_MAX_POLLERS_PER_PI");
- if (s) {
- g_max_pollers_per_pi = (int)strtol(s, NULL, 10);
- if (g_max_pollers_per_pi < 0) {
- g_max_pollers_per_pi = INT_MAX;
- }
- } else {
- g_max_pollers_per_pi = INT_MAX;
- }
-
- gpr_log(GPR_INFO, "Max number of pollers per polling island: %d",
- g_max_pollers_per_pi);
-}
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- if (!explicitly_requested) {
- return NULL;
- }
-
-  /* If use of signals is disabled, we cannot use the epoll engine */
- if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
- return NULL;
- }
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- if (!is_grpc_wakeup_signal_initialized) {
- grpc_use_signal(SIGRTMIN + 6);
- }
-
- set_max_pollers_per_island();
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
- polling_island_global_init())) {
- return NULL;
- }
-
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
deleted file mode 100644
index 1d6af5f52c..0000000000
--- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H
-
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/port.h"
-
-const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux(
- bool explicitly_requested);
-
-#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
deleted file mode 100644
index ddb8c74d2d..0000000000
--- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <poll.h>
-#include <pthread.h>
-#include <string.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-
-/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
-#define GRPC_POLLING_TRACE(fmt, ...) \
- if (GRPC_TRACER_ON(grpc_polling_trace)) { \
- gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \
- }
-
-/* The alarm system needs to be able to wake up 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-struct epoll_set;
-
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
-
-/*******************************************************************************
- * Fd Declarations
- */
-struct grpc_fd {
- gpr_mu mu;
- struct epoll_set *eps;
-
- int fd;
-
-  /* The fd is either closed or we relinquished control of it. In either case,
- this indicates that the 'fd' on this structure is no longer valid */
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- grpc_iomgr_object iomgr_object;
-};
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * epoll set Declarations
- */
-
-#ifndef NDEBUG
-
-#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define EPS_UNREF(exec_ctx, p, r) \
- eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
-
-#else
-
-#define EPS_ADD_REF(p, r) eps_add_ref((p))
-#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p))
-
-#endif
-
-typedef struct epoll_set {
-  /* Mutex that a poller should acquire in order to poll this epoll set. This
-   * enforces that only one poller can be polling on the epoll_set at any time */
- gpr_mu mu;
-
- /* Ref count. Use EPS_ADD_REF() and EPS_UNREF() macros to increment/decrement
-     the refcount. Once the ref count becomes zero, this structure is
-     destroyed, which means we must ensure that there is never a scenario
-     where an EPS_ADD_REF() races with an EPS_UNREF() that has just made the
-     ref_count zero. */
- gpr_atm ref_count;
-
-  /* Number of threads currently polling on this epoll set */
- gpr_atm poller_count;
-
- /* Is the epoll set shutdown */
- gpr_atm is_shutdown;
-
- /* The fd of the underlying epoll set */
- int epoll_fd;
-} epoll_set;
-
-/*******************************************************************************
- * Pollset Declarations
- */
-struct grpc_pollset_worker {
- gpr_cv kick_cv;
-
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
-};
-
-struct grpc_pollset {
- gpr_mu mu;
- struct epoll_set *eps;
-
- grpc_pollset_worker root_worker;
- bool kicked_without_pollers;
-
- bool shutting_down; /* Is the pollset shutting down ? */
- bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
-  grpc_closure *shutdown_done; /* Called after shutdown is complete */
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- char unused;
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Declarations
- */
-
-size_t g_num_eps = 1;
-struct epoll_set **g_epoll_sets = NULL;
-gpr_atm g_next_eps;
-size_t g_num_threads_per_eps = 1;
-gpr_thd_id *g_poller_threads = NULL;
-
-/* Used as the read-notifier pollset for fds. We won't be using read-notifier
- * pollsets with this polling engine, so it does not matter what pollset we
- * return */
-grpc_pollset g_read_notifier;
-
-static void add_fd_to_eps(grpc_fd *fd);
-static bool init_epoll_sets();
-static void shutdown_epoll_sets();
-static void poller_thread_loop(void *arg);
-static void start_poller_threads();
-static void shutdown_poller_threads();
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
-
-/*******************************************************************************
- * epoll set Definitions
- */
-
-/* The wakeup fd that is used to wake up all threads in an epoll_set,
-   informing them that the epoll set is shut down. This wakeup fd is
-   initialized to be readable and MUST NOT be consumed, i.e. the threads that
-   woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
-static grpc_wakeup_fd epoll_set_wakeup_fd;
-
-/* The epoll set being polled right now.
- See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread epoll_set *g_current_thread_epoll_set;
-
-/* Forward declaration */
-static void epoll_set_delete(epoll_set *eps);
-
-#ifdef GRPC_TSAN
-/* Currently TSAN may incorrectly flag data races between epoll_ctl and
- epoll_wait for any grpc_fd structs that are added to the epoll set via
- epoll_ctl and are returned (within a very short window) via epoll_wait().
-
-   To work around this race, we establish a happens-before relation between
- the code just-before epoll_ctl() and the code after epoll_wait() by using
- this atomic */
-gpr_atm g_epoll_sync;
-#endif /* defined(GRPC_TSAN) */
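
/* Editorial sketch (not part of the deleted file): a condensed illustration of
   the happens-before pairing described above. The real code uses gpr_atm, with
   gpr_atm_rel_store() just before epoll_ctl() and gpr_atm_acq_load() just
   after epoll_wait() (see epoll_set_add_fd_locked() and do_epoll_wait()
   below); this standalone version uses C11 atomics purely to show the
   ordering idea and is not a definitive rendering of the gRPC code. */

#include <stdatomic.h>

static atomic_long g_sync_sketch;   /* stand-in for g_epoll_sync */
static long g_shared_state_sketch;  /* state TSAN would otherwise flag */

static void sketch_before_epoll_ctl(void) {
  g_shared_state_sketch = 42; /* plain write made before registering the fd */
  atomic_store_explicit(&g_sync_sketch, 0, memory_order_release);
  /* ... epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) would go here ... */
}

static long sketch_after_epoll_wait(void) {
  /* ... epoll_wait(epfd, events, max_events, timeout) would go here ... */
  atomic_load_explicit(&g_sync_sketch, memory_order_acquire);
  return g_shared_state_sketch; /* ordered after the writer's release store */
}

int main(void) {
  sketch_before_epoll_ctl();
  return (int)(sketch_after_epoll_wait() - 42); /* returns 0 */
}
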
-
-static void eps_add_ref(epoll_set *eps);
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps);
-
-#ifndef NDEBUG
-static void eps_add_ref_dbg(epoll_set *eps, const char *reason,
- const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, old_cnt + 1, reason, file, line);
- }
- eps_add_ref(eps);
-}
-
-static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count);
- gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
- eps, old_cnt, (old_cnt - 1), reason, file, line);
- }
- eps_unref(exec_ctx, eps);
-}
-#endif
-
-static void eps_add_ref(epoll_set *eps) {
- gpr_atm_no_barrier_fetch_add(&eps->ref_count, 1);
-}
-
-static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps) {
- /* If ref count went to zero, delete the epoll set. This deletion is
- not done under a lock since once the ref count goes to zero, we are
- guaranteed that no one else holds a reference to the epoll set (and
- that there is no racing eps_add_ref() call either).*/
- if (1 == gpr_atm_full_fetch_add(&eps->ref_count, -1)) {
- epoll_set_delete(eps);
- }
-}
-
-static void epoll_set_add_fd_locked(epoll_set *eps, grpc_fd *fd,
- grpc_error **error) {
- int err;
- struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "epoll_set_add_fd_locked";
-
-#ifdef GRPC_TSAN
- /* See the definition of g_epoll_sync for more context */
- gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
-#endif /* defined(GRPC_TSAN) */
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
- ev.data.ptr = fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_add_wakeup_fd_locked(epoll_set *eps,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
- struct epoll_event ev;
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_add_wakeup_fd";
-
- ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = wakeup_fd;
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD,
- GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
- if (err < 0 && errno != EEXIST) {
- gpr_asprintf(&err_msg,
- "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
- "error: %d (%s)",
- eps->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno,
- strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
-}
-
-static void epoll_set_remove_fd(epoll_set *eps, grpc_fd *fd, bool is_fd_closed,
- grpc_error **error) {
- int err;
- char *err_msg;
- const char *err_desc = "epoll_set_remove_fd";
-
-  /* If the fd is already closed, then it would have automatically been removed
- from the epoll set */
- if (!is_fd_closed) {
- err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
- if (err < 0 && errno != ENOENT) {
- gpr_asprintf(
- &err_msg,
- "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
- eps->epoll_fd, fd->fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- gpr_free(err_msg);
- }
- }
-}
-
-/* Might return NULL in case of an error */
-static epoll_set *epoll_set_create(grpc_error **error) {
- epoll_set *eps = NULL;
- const char *err_desc = "epoll_set_create";
-
- *error = GRPC_ERROR_NONE;
-
- eps = gpr_malloc(sizeof(*eps));
- eps->epoll_fd = -1;
-
- gpr_mu_init(&eps->mu);
-
- gpr_atm_rel_store(&eps->ref_count, 0);
- gpr_atm_rel_store(&eps->poller_count, 0);
-
- gpr_atm_rel_store(&eps->is_shutdown, false);
-
- eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-
- if (eps->epoll_fd < 0) {
- append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
- goto done;
- }
-
-done:
- if (*error != GRPC_ERROR_NONE) {
- epoll_set_delete(eps);
- eps = NULL;
- }
- return eps;
-}
-
-static void epoll_set_delete(epoll_set *eps) {
- if (eps->epoll_fd >= 0) {
- close(eps->epoll_fd);
- }
-
- gpr_mu_destroy(&eps->mu);
-
- gpr_free(eps);
-}
-
-static grpc_error *epoll_set_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
-
- error = grpc_wakeup_fd_init(&epoll_set_wakeup_fd);
- if (error == GRPC_ERROR_NONE) {
- error = grpc_wakeup_fd_wakeup(&epoll_set_wakeup_fd);
- }
-
- return error;
-}
-
-static void epoll_set_global_shutdown() {
- grpc_wakeup_fd_destroy(&epoll_set_wakeup_fd);
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-static grpc_fd *get_fd_from_freelist() {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
- return new_fd;
-}
-
-static void add_fd_to_freelist(grpc_fd *fd) {
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
- grpc_iomgr_unregister_object(&fd->iomgr_object);
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
-}
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_mu_destroy(&fd->mu);
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = get_fd_from_freelist();
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- gpr_mu_init(&new_fd->mu);
- }
-
-  /* Note: Strictly speaking, it is not necessary to take the new_fd->mu lock
-   * here. If this is a newly created fd (or an fd we got from the freelist),
-   * no one else could be holding its lock anyway. */
- gpr_mu_lock(&new_fd->mu);
- new_fd->eps = NULL;
-
- new_fd->fd = fd;
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- gpr_mu_unlock(&new_fd->mu);
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
- gpr_free(fd_name);
-
- /* Associate the fd with one of the eps */
- add_fd_to_eps(new_fd);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- bool is_fd_closed = already_closed;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *unref_eps = NULL;
-
- gpr_mu_lock(&fd->mu);
- fd->on_done_closure = on_done;
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else if (!is_fd_closed) {
- close(fd->fd);
- is_fd_closed = true;
- }
-
- fd->orphaned = true;
-
- /* Remove the fd from the epoll set */
- if (fd->eps != NULL) {
- epoll_set_remove_fd(fd->eps, fd, is_fd_closed, &error);
- unref_eps = fd->eps;
- fd->eps = NULL;
- }
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->mu);
-
-  /* We are done with this fd. Release it (i.e. add it back to the freelist) */
- add_fd_to_freelist(fd);
-
- if (unref_eps != NULL) {
- /* Unref stale epoll set here, outside the fd lock above.
- The epoll set owns a workqueue which owns an fd, and unreffing
- inside the lock can cause an eventual lock loop that makes TSAN very
- unhappy. */
- EPS_UNREF(exec_ctx, unref_eps, "fd_orphan");
- }
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-/* This polling engine doesn't really need the read notifier functionality. So
- * it just returns a dummy read notifier pollset */
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- return &g_read_notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-/* TODO: sreek - Not needed anymore */
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-
-static void pollset_worker_init(grpc_pollset_worker *worker) {
- worker->next = worker->prev = NULL;
- gpr_cv_init(&worker->kick_cv);
-}
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- gpr_cv_signal(&worker->kick_cv);
- return GRPC_ERROR_NONE;
-}
-
-/* Return 1 if the pollset has active threads in pollset_work (pollset must
- * be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
- return p->root_worker.next != &p->root_worker;
-}
-
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev->next = worker->next;
- worker->next->prev = worker->prev;
-}
-
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
- if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
- remove_worker(p, w);
- return w;
- } else {
- return NULL;
- }
-}
-
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->next = &p->root_worker;
- worker->prev = worker->next->prev;
- worker->prev->next = worker->next->prev = worker;
-}
-
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
- worker->prev = &p->root_worker;
- worker->next = worker->prev->next;
- worker->prev->next = worker->next->prev = worker;
-}
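
/* Editorial sketch (not part of the deleted file): the worker helpers above
   implement an intrusive, circular, doubly-linked list in which
   p->root_worker acts as a sentinel, which is why pollset_has_workers()
   reduces to a single pointer comparison. A minimal standalone version of the
   same structure, for illustration only: */

#include <assert.h>

typedef struct node { struct node *next, *prev; } node;

static void list_init(node *root) { root->next = root->prev = root; }
static int list_has_items(node *root) { return root->next != root; }
static void list_push_front(node *root, node *n) {
  n->prev = root;                    /* mirrors push_front_worker() */
  n->next = root->next;
  n->prev->next = n->next->prev = n;
}
static void list_remove(node *n) {   /* mirrors remove_worker() */
  n->prev->next = n->next;
  n->next->prev = n->prev;
}

int main(void) {
  node root, w;
  list_init(&root);
  assert(!list_has_items(&root)); /* empty: the sentinel points at itself */
  list_push_front(&root, &w);
  assert(list_has_items(&root));
  list_remove(&w);
  assert(!list_has_items(&root));
  return 0;
}
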
-
-/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
- GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
- if (worker != NULL) {
- if (worker == GRPC_POLLSET_KICK_BROADCAST) {
- if (pollset_has_workers(p)) {
- GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
- for (worker = p->root_worker.next; worker != &p->root_worker;
- worker = worker->next) {
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- GPR_TIMER_END("pollset_kick.broadcast", 0);
- } else {
- p->kicked_without_pollers = true;
- }
- } else {
- GPR_TIMER_MARK("kicked_specifically", 0);
- if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
- append_error(&error, pollset_worker_kick(worker), err_desc);
- }
- }
- } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
- /* Since worker == NULL, it means that we can kick "any" worker on this
- pollset 'p'. If 'p' happens to be the same pollset this thread is
-     currently polling (i.e. in the pollset_work() function), then there is no
-     need
- to kick any other worker since the current thread can just absorb the
- kick. This is the reason why we enter this case only when
-     g_current_thread_pollset != p */
-
- GPR_TIMER_MARK("kick_anonymous", 0);
- worker = pop_front_worker(p);
- if (worker != NULL) {
- GPR_TIMER_MARK("finally_kick", 0);
- push_back_worker(p, worker);
- append_error(&error, pollset_worker_kick(worker), err_desc);
- } else {
- GPR_TIMER_MARK("kicked_no_pollers", 0);
- p->kicked_without_pollers = true;
- }
- }
-
- GPR_TIMER_END("pollset_kick", 0);
- GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- gpr_mu_init(&pollset->mu);
- *mu = &pollset->mu;
- pollset->eps = NULL;
-
- pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
- pollset->kicked_without_pollers = false;
-
- pollset->shutting_down = false;
- pollset->finish_shutdown_called = false;
- pollset->shutdown_done = NULL;
-}
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- char *reason) {
- if (ps->eps != NULL) {
- EPS_UNREF(exec_ctx, ps->eps, reason);
- }
- ps->eps = NULL;
-}
-
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- /* The pollset cannot have any workers if we are at this stage */
- GPR_ASSERT(!pollset_has_workers(pollset));
-
- pollset->finish_shutdown_called = true;
-
- /* Release the ref and set pollset->eps to NULL */
- pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown");
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
-}
-
-/* pollset->mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_TIMER_BEGIN("pollset_shutdown", 0);
- GPR_ASSERT(!pollset->shutting_down);
- pollset->shutting_down = true;
- pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
-
- /* If the pollset has any workers, we cannot call finish_shutdown_locked()
- because it would release the underlying epoll set. In such a case, we
- let the last worker call finish_shutdown_locked() from pollset_work() */
- if (!pollset_has_workers(pollset)) {
- GPR_ASSERT(!pollset->finish_shutdown_called);
- GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
- }
- GPR_TIMER_END("pollset_shutdown", 0);
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
- * than destroying the mutexes, there is nothing special that needs to be done
- * here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- GPR_ASSERT(!pollset_has_workers(pollset));
- gpr_mu_destroy(&pollset->mu);
-}
-
-/* Blocking call */
-static void acquire_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_lock(&eps->mu);
- }
-}
-
-static void release_epoll_lease(epoll_set *eps) {
- if (g_num_threads_per_eps > 1) {
- gpr_mu_unlock(&eps->mu);
- }
-}
-
-#define GRPC_EPOLL_MAX_EVENTS 100
-static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps,
- grpc_error **error) {
- struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
- int ep_rv;
- char *err_msg;
- const char *err_desc = "do_epoll_wait";
-
- int timeout_ms = -1;
-
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- acquire_epoll_lease(eps);
- GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
- release_epoll_lease(eps);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
-
- if (ep_rv < 0) {
- gpr_asprintf(&err_msg,
- "epoll_wait() epoll fd: %d failed with error: %d (%s)",
- epoll_fd, errno, strerror(errno));
- append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
- }
-
-#ifdef GRPC_TSAN
- /* See the definition of g_poll_sync for more details */
- gpr_atm_acq_load(&g_epoll_sync);
-#endif /* defined(GRPC_TSAN) */
-
- for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
- if (data_ptr == &epoll_set_wakeup_fd) {
- gpr_atm_rel_store(&eps->is_shutdown, 1);
- gpr_log(GPR_INFO, "pollset poller: shutdown set");
- } else {
- grpc_fd *fd = data_ptr;
- int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
- int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
- int write_ev = ep_ev[i].events & EPOLLOUT;
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-}
-
-static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps,
- grpc_error **error) {
- int epoll_fd = -1;
- GPR_TIMER_BEGIN("epoll_set_work", 0);
-
- /* Since epoll_fd is immutable, it is safe to read it without a lock on the
- epoll set. */
- epoll_fd = eps->epoll_fd;
-
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1);
- g_current_thread_epoll_set = eps;
-
- do_epoll_wait(exec_ctx, epoll_fd, eps, error);
-
- g_current_thread_epoll_set = NULL;
- gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1);
-
- GPR_TIMER_END("epoll_set_work", 0);
-}
-
-/* pollset->mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
-
- grpc_pollset_worker worker;
- pollset_worker_init(&worker);
-
- if (worker_hdl) *worker_hdl = &worker;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-
- if (pollset->kicked_without_pollers) {
- /* If the pollset was kicked without pollers, pretend that the current
- worker got the kick and skip polling. A kick indicates that there is some
- work that needs attention like an event on the completion queue or an
- alarm */
- GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
- pollset->kicked_without_pollers = 0;
- } else if (!pollset->shutting_down) {
- push_front_worker(pollset, &worker);
-
- gpr_cv_wait(&worker.kick_cv, &pollset->mu,
- gpr_convert_clock_type(deadline, GPR_CLOCK_REALTIME));
- /* pollset->mu locked here */
-
- remove_worker(pollset, &worker);
- }
-
-  /* If we are the last worker on the pollset (i.e. pollset_has_workers() is
- false at this point) and the pollset is shutting down, we may have to
- finish the shutdown process by calling finish_shutdown_locked().
- See pollset_shutdown() for more details.
-
- Note: Continuing to access pollset here is safe; it is the caller's
- responsibility to not destroy a pollset when it has outstanding calls to
- pollset_work() */
- if (pollset->shutting_down && !pollset_has_workers(pollset) &&
- !pollset->finish_shutdown_called) {
- GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
- finish_shutdown_locked(exec_ctx, pollset);
-
- gpr_mu_unlock(&pollset->mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->mu);
- }
-
- if (worker_hdl) *worker_hdl = NULL;
-
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)0);
-
- GPR_TIMER_END("pollset_work", 0);
-
- GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
- return error;
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-grpc_pollset_set g_dummy_pollset_set;
-static grpc_pollset_set *pollset_set_create(void) {
- return &g_dummy_pollset_set;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- /* Nothing to do */
-}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- /* Nothing to do */
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- shutdown_poller_threads();
- shutdown_epoll_sets();
- fd_global_shutdown();
- pollset_global_shutdown();
- epoll_set_global_shutdown();
- gpr_log(GPR_INFO, "ev-epoll-threadpool engine shutdown complete");
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-/*****************************************************************************
- * Dedicated polling threads and pollsets - Definitions
- */
-static void add_fd_to_eps(grpc_fd *fd) {
- GPR_ASSERT(fd->eps == NULL);
- GPR_TIMER_BEGIN("add_fd_to_eps", 0);
-
- grpc_error *error = GRPC_ERROR_NONE;
- size_t idx = (size_t)gpr_atm_no_barrier_fetch_add(&g_next_eps, 1) % g_num_eps;
- epoll_set *eps = g_epoll_sets[idx];
-
- gpr_mu_lock(&fd->mu);
-
- if (fd->orphaned) {
- gpr_mu_unlock(&fd->mu);
- return; /* Early out */
- }
-
- epoll_set_add_fd_locked(eps, fd, &error);
- EPS_ADD_REF(eps, "fd");
- fd->eps = eps;
-
- GRPC_POLLING_TRACE("add_fd_to_eps (fd: %d, eps idx = %" PRIdPTR ")", fd->fd,
- idx);
- gpr_mu_unlock(&fd->mu);
-
- GRPC_LOG_IF_ERROR("add_fd_to_eps", error);
- GPR_TIMER_END("add_fd_to_eps", 0);
-}
-
-static bool init_epoll_sets() {
- grpc_error *error = GRPC_ERROR_NONE;
- bool is_success = true;
-
- g_epoll_sets = (epoll_set **)malloc(g_num_eps * sizeof(epoll_set *));
-
- for (size_t i = 0; i < g_num_eps; i++) {
- g_epoll_sets[i] = epoll_set_create(&error);
- if (g_epoll_sets[i] == NULL) {
- gpr_log(GPR_ERROR, "Error in creating a epoll set");
- g_num_eps = i; /* Helps cleanup */
- shutdown_epoll_sets();
- is_success = false;
- goto done;
- }
-
- EPS_ADD_REF(g_epoll_sets[i], "init_epoll_sets");
- }
-
- gpr_atm_no_barrier_store(&g_next_eps, 0);
- gpr_mu *mu;
- pollset_init(&g_read_notifier, &mu);
-
-done:
- GRPC_LOG_IF_ERROR("init_epoll_sets", error);
- return is_success;
-}
-
-static void shutdown_epoll_sets() {
- if (!g_epoll_sets) {
- return;
- }
-
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- for (size_t i = 0; i < g_num_eps; i++) {
- EPS_UNREF(&exec_ctx, g_epoll_sets[i], "shutdown_epoll_sets");
- }
- grpc_exec_ctx_flush(&exec_ctx);
-
- gpr_free(g_epoll_sets);
- g_epoll_sets = NULL;
- pollset_destroy(&exec_ctx, &g_read_notifier);
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void poller_thread_loop(void *arg) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *error = GRPC_ERROR_NONE;
- epoll_set *eps = (epoll_set *)arg;
-
- while (!gpr_atm_acq_load(&eps->is_shutdown)) {
- epoll_set_work(&exec_ctx, eps, &error);
- grpc_exec_ctx_flush(&exec_ctx);
- }
-
- grpc_exec_ctx_finish(&exec_ctx);
- GRPC_LOG_IF_ERROR("poller_thread_loop", error);
-}
-
-/* g_epoll_sets MUST be initialized before calling this */
-static void start_poller_threads() {
- GPR_ASSERT(g_epoll_sets);
-
- gpr_log(GPR_INFO, "Starting poller threads");
-
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- g_poller_threads = (gpr_thd_id *)malloc(num_threads * sizeof(gpr_thd_id));
- gpr_thd_options options = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&options);
-
- for (size_t i = 0; i < num_threads; i++) {
- gpr_thd_new(&g_poller_threads[i], poller_thread_loop,
- (void *)g_epoll_sets[i % g_num_eps], &options);
- }
-}
-
-static void shutdown_poller_threads() {
- GPR_ASSERT(g_poller_threads);
- GPR_ASSERT(g_epoll_sets);
- grpc_error *error = GRPC_ERROR_NONE;
-
- gpr_log(GPR_INFO, "Shutting down pollers");
-
- epoll_set *eps = NULL;
- size_t num_threads = g_num_eps * g_num_threads_per_eps;
- for (size_t i = 0; i < num_threads; i++) {
- eps = g_epoll_sets[i];
- epoll_set_add_wakeup_fd_locked(eps, &epoll_set_wakeup_fd, &error);
- }
-
- for (size_t i = 0; i < g_num_eps; i++) {
- gpr_thd_join(g_poller_threads[i]);
- }
-
- GRPC_LOG_IF_ERROR("shutdown_poller_threads", error);
- gpr_free(g_poller_threads);
- g_poller_threads = NULL;
-}
-
-/****************************************************************************/
-
-/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create a dummy epoll_fd to make sure epoll support is available */
-static bool is_epoll_available() {
- int fd = epoll_create1(EPOLL_CLOEXEC);
- if (fd < 0) {
- gpr_log(
- GPR_ERROR,
- "epoll_create1 failed with error: %d. Not using epoll polling engine",
- fd);
- return false;
- }
- close(fd);
- return true;
-}
-
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- if (!requested_explicitly) return NULL;
-
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!is_epoll_available()) {
- return NULL;
- }
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- return NULL;
- }
-
- if (!GRPC_LOG_IF_ERROR("epoll_set_global_init", epoll_set_global_init())) {
- return NULL;
- }
-
- if (!init_epoll_sets()) {
- return NULL;
- }
-
-  /* TODO (sreek): May not be a good idea to start threads here (especially if
-   * this engine doesn't get picked). Consider introducing an engine_init
-   * function in the vtable */
- start_poller_threads();
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
- bool requested_explicitly) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c
deleted file mode 100644
index 0e42f76af3..0000000000
--- a/src/core/lib/iomgr/ev_epollex_linux.c
+++ /dev/null
@@ -1,1450 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-
-/* This polling engine is only relevant on linux kernels supporting epoll() */
-#ifdef GRPC_LINUX_EPOLL
-
-#include "src/core/lib/iomgr/ev_epollex_linux.h"
-
-#include <assert.h>
-#include <errno.h>
-#include <poll.h>
-#include <pthread.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/debug/stats.h"
-#include "src/core/lib/iomgr/ev_posix.h"
-#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/is_epollexclusive_available.h"
-#include "src/core/lib/iomgr/lockfree_event.h"
-#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
-#include "src/core/lib/iomgr/timer.h"
-#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
-#include "src/core/lib/support/spinlock.h"
-
-/*******************************************************************************
- * Polling object
- */
-
-typedef enum {
- PO_POLLING_GROUP,
- PO_POLLSET_SET,
- PO_POLLSET,
- PO_FD, /* ordering is important: we always want to lock pollsets before fds:
- this guarantees that using an fd as a pollable is safe */
- PO_EMPTY_POLLABLE,
- PO_COUNT
-} polling_obj_type;
-
-typedef struct polling_obj polling_obj;
-typedef struct polling_group polling_group;
-
-struct polling_obj {
- gpr_mu mu;
- polling_obj_type type;
- polling_group *group;
- struct polling_obj *next;
- struct polling_obj *prev;
-};
-
-struct polling_group {
- polling_obj po;
- gpr_refcount refs;
-};
-
-static void po_init(polling_obj *po, polling_obj_type type);
-static void po_destroy(polling_obj *po);
-static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b);
-static int po_cmp(polling_obj *a, polling_obj *b);
-
-static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
- size_t initial_po_count);
-static polling_group *pg_ref(polling_group *pg);
-static void pg_unref(polling_group *pg);
-static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
- polling_group *b);
-static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
- polling_obj *po);
-
-/*******************************************************************************
- * pollable Declarations
- */
-
-typedef struct pollable {
- polling_obj po;
- int epfd;
- grpc_wakeup_fd wakeup;
- grpc_pollset_worker *root_worker;
-} pollable;
-
-static const char *polling_obj_type_string(polling_obj_type t) {
- switch (t) {
- case PO_POLLING_GROUP:
- return "polling_group";
- case PO_POLLSET_SET:
- return "pollset_set";
- case PO_POLLSET:
- return "pollset";
- case PO_FD:
- return "fd";
- case PO_EMPTY_POLLABLE:
- return "empty_pollable";
- case PO_COUNT:
- return "<invalid:count>";
- }
- return "<invalid>";
-}
-
-static char *pollable_desc(pollable *p) {
- char *out;
- gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d",
- polling_obj_type_string(p->po.type), p->po.group, p->epfd,
- p->wakeup.read_fd);
- return out;
-}
-
-static pollable g_empty_pollable;
-
-static void pollable_init(pollable *p, polling_obj_type type);
-static void pollable_destroy(pollable *p);
-/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
-static grpc_error *pollable_materialize(pollable *p);
-
-/*******************************************************************************
- * Fd Declarations
- */
-
-struct grpc_fd {
- pollable pollable;
- int fd;
- /* refst format:
- bit 0 : 1=Active / 0=Orphaned
- bits 1-n : refcount
- Ref/Unref by two to avoid altering the orphaned bit */
- gpr_atm refst;
-
- /* The fd is either closed or we relinquished control of it. In either
-     case, this indicates that the 'fd' on this structure is no longer
- valid */
- gpr_mu orphaned_mu;
- bool orphaned;
-
- gpr_atm read_closure;
- gpr_atm write_closure;
-
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
-
- /* The pollset that last noticed that the fd is readable. The actual type
- * stored in this is (grpc_pollset *) */
- gpr_atm read_notifier_pollset;
-
- grpc_iomgr_object iomgr_object;
-};
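
/* Editorial sketch (not part of the deleted file): a worked example of the
   refst encoding documented in the struct above. fd_create() below stores 1
   (active bit set, no extra refs); REF_BY()/UNREF_BY() always move the value
   by multiples of 2 so bit 0 is untouched, and fd_orphan()'s REF_BY(fd, 1)
   followed by UNREF_BY(fd, 2) is a net -1 that clears only the active bit. */

#include <assert.h>
#include <stdint.h>

int main(void) {
  intptr_t refst = 1;       /* freshly created fd: active bit set, no refs   */
  refst += 2;               /* REF_BY(fd, 2): a user takes a reference       */
  assert((refst & 1) == 1); /* even deltas never touch the active bit        */
  refst -= 2;               /* UNREF_BY(fd, 2): that user releases it        */
  refst += 1;               /* fd_orphan(): REF_BY(fd, 1) ...                */
  refst -= 2;               /* ... then UNREF_BY(fd, 2): net -1, clears the  */
  assert(refst == 0);       /* active bit; count hits 0 -> fd_destroy() runs */
  return 0;
}
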
-
-static void fd_global_init(void);
-static void fd_global_shutdown(void);
-
-/*******************************************************************************
- * Pollset Declarations
- */
-
-typedef struct pollset_worker_link {
- grpc_pollset_worker *next;
- grpc_pollset_worker *prev;
-} pollset_worker_link;
-
-typedef enum {
- PWL_POLLSET,
- PWL_POLLABLE,
- POLLSET_WORKER_LINK_COUNT
-} pollset_worker_links;
-
-struct grpc_pollset_worker {
- bool kicked;
- bool initialized_cv;
- pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
- gpr_cv cv;
- grpc_pollset *pollset;
- pollable *pollable;
-};
-
-#define MAX_EPOLL_EVENTS 100
-#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
-
-struct grpc_pollset {
- pollable pollable;
- pollable *current_pollable;
- int kick_alls_pending;
- bool kicked_without_poller;
- grpc_closure *shutdown_closure;
- grpc_pollset_worker *root_worker;
-
- int event_cursor;
- int event_count;
- struct epoll_event events[MAX_EPOLL_EVENTS];
-};
-
-/*******************************************************************************
- * Pollset-set Declarations
- */
-struct grpc_pollset_set {
- polling_obj po;
-};
-
-/*******************************************************************************
- * Common helpers
- */
-
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
- if (error == GRPC_ERROR_NONE) return true;
- if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
- }
- *composite = grpc_error_add_child(*composite, error);
- return false;
-}
-
-/*******************************************************************************
- * Fd Definitions
- */
-
-/* We need to keep a freelist not because of any concerns of malloc performance
- * but instead so that implementations with multiple threads in (for example)
- * epoll_wait deal with the race between pollset removal and incoming poll
- * notifications.
- *
- * The problem is that the poller ultimately holds a reference to this
- * object, so it is very difficult to know when it is safe to free it, at least
- * without some expensive synchronization.
- *
- * If we keep the object freelisted, in the worst case losing this race just
- * becomes a spurious read notification on a reused fd.
- */
-
-/* The alarm system needs to be able to wake up 'some poller' sometimes
- * (specifically when a new alarm needs to be triggered earlier than the next
- * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
- * case occurs. */
-
-static grpc_fd *fd_freelist = NULL;
-static gpr_mu fd_freelist_mu;
-
-#ifndef NDEBUG
-#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(ec, fd, n, reason) \
- unref_by(ec, fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
- int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
- }
-#else
-#define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
-static void ref_by(grpc_fd *fd, int n) {
-#endif
- GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
-}
-
-static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_fd *fd = arg;
- /* Add the fd to the freelist */
- grpc_iomgr_unregister_object(&fd->iomgr_object);
- pollable_destroy(&fd->pollable);
- gpr_mu_destroy(&fd->orphaned_mu);
- gpr_mu_lock(&fd_freelist_mu);
- fd->freelist_next = fd_freelist;
- fd_freelist = fd;
-
- grpc_lfev_destroy(&fd->read_closure);
- grpc_lfev_destroy(&fd->write_closure);
-
- gpr_mu_unlock(&fd_freelist_mu);
-}
-
-#ifndef NDEBUG
-static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
- const char *reason, const char *file, int line) {
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG,
- "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
- fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
- gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
- }
-#else
-static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
-#endif
- gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
- if (old == n) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
- } else {
- GPR_ASSERT(old > n);
- }
-}
-
-static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
-
-static void fd_global_shutdown(void) {
- gpr_mu_lock(&fd_freelist_mu);
- gpr_mu_unlock(&fd_freelist_mu);
- while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- gpr_free(fd);
- }
- gpr_mu_destroy(&fd_freelist_mu);
-}
-
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
-
- gpr_mu_lock(&fd_freelist_mu);
- if (fd_freelist != NULL) {
- new_fd = fd_freelist;
- fd_freelist = fd_freelist->freelist_next;
- }
- gpr_mu_unlock(&fd_freelist_mu);
-
- if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
- }
-
- pollable_init(&new_fd->pollable, PO_FD);
-
- gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
- new_fd->fd = fd;
- gpr_mu_init(&new_fd->orphaned_mu);
- new_fd->orphaned = false;
- grpc_lfev_init(&new_fd->read_closure);
- grpc_lfev_init(&new_fd->write_closure);
- gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
-
- new_fd->freelist_next = NULL;
- new_fd->on_done_closure = NULL;
-
- char *fd_name;
- gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
- grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
- if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
- gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
- }
-#endif
- gpr_free(fd_name);
- return new_fd;
-}
-
-static int fd_wrapped_fd(grpc_fd *fd) {
- int ret_fd = -1;
- gpr_mu_lock(&fd->orphaned_mu);
- if (!fd->orphaned) {
- ret_fd = fd->fd;
- }
- gpr_mu_unlock(&fd->orphaned_mu);
-
- return ret_fd;
-}
-
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- bool is_fd_closed = already_closed;
- grpc_error *error = GRPC_ERROR_NONE;
-
- gpr_mu_lock(&fd->pollable.po.mu);
- gpr_mu_lock(&fd->orphaned_mu);
- fd->on_done_closure = on_done;
-
- /* If release_fd is not NULL, we should be relinquishing control of the file
- descriptor fd->fd (but we still own the grpc_fd structure). */
- if (release_fd != NULL) {
- *release_fd = fd->fd;
- } else if (!is_fd_closed) {
- close(fd->fd);
- is_fd_closed = true;
- }
-
- fd->orphaned = true;
-
- if (!is_fd_closed) {
- gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
- }
-
-  /* Remove the active status but keep it referenced. We want this grpc_fd
-     struct to be alive (and not added to the freelist) until the end of this
-     function */
- REF_BY(fd, 1, reason);
-
- GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
-
- gpr_mu_unlock(&fd->orphaned_mu);
- gpr_mu_unlock(&fd->pollable.po.mu);
- UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
- GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
- GRPC_ERROR_UNREF(error);
-}
-
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
-}
-
-static bool fd_is_shutdown(grpc_fd *fd) {
- return grpc_lfev_is_shutdown(&fd->read_closure);
-}
-
-/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
- if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
- GRPC_ERROR_REF(why))) {
- shutdown(fd->fd, SHUT_RDWR);
- grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
- }
- GRPC_ERROR_UNREF(why);
-}
-
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
-}
-
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
- grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
-}
-
-/*******************************************************************************
- * Pollable Definitions
- */
-
-static void pollable_init(pollable *p, polling_obj_type type) {
- po_init(&p->po, type);
- p->root_worker = NULL;
- p->epfd = -1;
-}
-
-static void pollable_destroy(pollable *p) {
- po_destroy(&p->po);
- if (p->epfd != -1) {
- close(p->epfd);
- grpc_wakeup_fd_destroy(&p->wakeup);
- }
-}
-
-/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
-static grpc_error *pollable_materialize(pollable *p) {
- if (p->epfd == -1) {
- int new_epfd = epoll_create1(EPOLL_CLOEXEC);
- if (new_epfd < 0) {
- return GRPC_OS_ERROR(errno, "epoll_create1");
- }
- grpc_error *err = grpc_wakeup_fd_init(&p->wakeup);
- if (err != GRPC_ERROR_NONE) {
- close(new_epfd);
- return err;
- }
- struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
- .data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
- if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
- err = GRPC_OS_ERROR(errno, "epoll_ctl");
- close(new_epfd);
- grpc_wakeup_fd_destroy(&p->wakeup);
- return err;
- }
-
- p->epfd = new_epfd;
- }
- return GRPC_ERROR_NONE;
-}
-
-/* pollable must be materialized */
-static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollable_add_fd";
- const int epfd = p->epfd;
- GPR_ASSERT(epfd != -1);
-
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
- }
-
- gpr_mu_lock(&fd->orphaned_mu);
- if (fd->orphaned) {
- gpr_mu_unlock(&fd->orphaned_mu);
- return GRPC_ERROR_NONE;
- }
- struct epoll_event ev_fd = {
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
- .data.ptr = fd};
- if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
- switch (errno) {
- case EEXIST:
- break;
- default:
- append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
- }
- }
- gpr_mu_unlock(&fd->orphaned_mu);
-
- return error;
-}
-
-/*******************************************************************************
- * Pollset Definitions
- */
-
-GPR_TLS_DECL(g_current_thread_pollset);
-GPR_TLS_DECL(g_current_thread_worker);
-
-/* Global state management */
-static grpc_error *pollset_global_init(void) {
- gpr_tls_init(&g_current_thread_pollset);
- gpr_tls_init(&g_current_thread_worker);
- pollable_init(&g_empty_pollable, PO_EMPTY_POLLABLE);
- return GRPC_ERROR_NONE;
-}
-
-static void pollset_global_shutdown(void) {
- pollable_destroy(&g_empty_pollable);
- gpr_tls_destroy(&g_current_thread_pollset);
- gpr_tls_destroy(&g_current_thread_worker);
-}
-
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
- pollset->kick_alls_pending == 0) {
- GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
- pollset->shutdown_closure = NULL;
- }
-}
-
-static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_unused) {
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_pollset *pollset = arg;
- gpr_mu_lock(&pollset->pollable.po.mu);
- if (pollset->root_worker != NULL) {
- grpc_pollset_worker *worker = pollset->root_worker;
- do {
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_lock(&worker->pollable->po.mu);
- }
- if (worker->initialized_cv && worker != pollset->root_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
- }
- worker->kicked = true;
- gpr_cv_signal(&worker->cv);
- } else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)",
- pollset, worker, &pollset->pollable, worker->pollable);
- }
- append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
- "pollset_shutdown");
- }
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
- }
-
- worker = worker->links[PWL_POLLSET].next;
- } while (worker != pollset->root_worker);
- }
- pollset->kick_alls_pending--;
- pollset_maybe_finish_shutdown(exec_ctx, pollset);
- gpr_mu_unlock(&pollset->pollable.po.mu);
- GRPC_LOG_IF_ERROR("kick_all", error);
-}
-
-static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- pollset->kick_alls_pending++;
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(do_kick_all, pollset,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
-}
-
-static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
- grpc_pollset_worker *specific_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG,
- "PS:%p kick %p tls_pollset=%p tls_worker=%p "
- "root_worker=(pollset:%p pollable:%p)",
- p, specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset),
- (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker,
- p->root_worker);
- }
- if (specific_worker == NULL) {
- if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
- if (pollset->root_worker == NULL) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", p);
- }
- pollset->kicked_without_poller = true;
- return GRPC_ERROR_NONE;
- } else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_any_via_wakeup_fd", p);
- }
- grpc_error *err = pollable_materialize(p);
- if (err != GRPC_ERROR_NONE) return err;
- return grpc_wakeup_fd_wakeup(&p->wakeup);
- }
- } else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", p);
- }
- return GRPC_ERROR_NONE;
- }
- } else if (specific_worker->kicked) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
- }
- return GRPC_ERROR_NONE;
- } else if (gpr_tls_get(&g_current_thread_worker) ==
- (intptr_t)specific_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
- }
- specific_worker->kicked = true;
- return GRPC_ERROR_NONE;
- } else if (specific_worker == p->root_worker) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
- }
- grpc_error *err = pollable_materialize(p);
- if (err != GRPC_ERROR_NONE) return err;
- specific_worker->kicked = true;
- return grpc_wakeup_fd_wakeup(&p->wakeup);
- } else {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
- }
- specific_worker->kicked = true;
- gpr_cv_signal(&specific_worker->cv);
- return GRPC_ERROR_NONE;
- }
-}
-
-/* p->po.mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
- pollable *p = pollset->current_pollable;
- if (p != &pollset->pollable) {
- gpr_mu_lock(&p->po.mu);
- }
- grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
- if (p != &pollset->pollable) {
- gpr_mu_unlock(&p->po.mu);
- }
- return error;
-}
-
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
- pollable_init(&pollset->pollable, PO_POLLSET);
- pollset->current_pollable = &g_empty_pollable;
- pollset->kicked_without_poller = false;
- pollset->shutdown_closure = NULL;
- pollset->root_worker = NULL;
- *mu = &pollset->pollable.po.mu;
-}
-
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
- - Longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, now) <= 0) {
- return 0;
- }
-
- static const gpr_timespec round_up = {
- .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
- return millis >= 1 ? millis : 1;
-}
-
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
- grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
-
- /* Note, it is possible that fd_become_readable might be called twice with
- different 'notifier's when an fd becomes readable and it is in two epoll
- sets (This can happen briefly during polling island merges). In such cases
-     it does not really matter which notifier is set as the read_notifier_pollset
- (They would both point to the same polling island anyway) */
- /* Use release store to match with acquire load in fd_get_read_notifier */
- gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
-}
-
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
-}
-
-static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "fd_become_pollable";
- if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) {
- append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc);
- }
- return error;
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
- GPR_ASSERT(pollset->shutdown_closure == NULL);
- pollset->shutdown_closure = closure;
- pollset_kick_all(exec_ctx, pollset);
- pollset_maybe_finish_shutdown(exec_ctx, pollset);
-}
-
-static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
- return p != &g_empty_pollable && p != &pollset->pollable;
-}
-
-static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset, bool drain) {
- static const char *err_desc = "pollset_process_events";
- grpc_error *error = GRPC_ERROR_NONE;
- for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
- pollset->event_cursor != pollset->event_count;
- i++) {
- int n = pollset->event_cursor++;
- struct epoll_event *ev = &pollset->events[n];
- void *data_ptr = ev->data.ptr;
- if (1 & (intptr_t)data_ptr) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
- }
- append_error(&error, grpc_wakeup_fd_consume_wakeup(
- (void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
- err_desc);
- } else {
- grpc_fd *fd = (grpc_fd *)data_ptr;
- bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
- bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
- bool write_ev = (ev->events & EPOLLOUT) != 0;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG,
- "PS:%p got fd %p: cancel=%d read=%d "
- "write=%d",
- pollset, fd, cancel, read_ev, write_ev);
- }
- if (read_ev || cancel) {
- fd_become_readable(exec_ctx, fd, pollset);
- }
- if (write_ev || cancel) {
- fd_become_writable(exec_ctx, fd);
- }
- }
- }
-
- return error;
-}
-
-/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
- pollable_destroy(&pollset->pollable);
- if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
- "pollset_pollable");
- }
- GRPC_LOG_IF_ERROR("pollset_process_events",
- pollset_process_events(exec_ctx, pollset, true));
-}
-
-static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- pollable *p, gpr_timespec now,
- gpr_timespec deadline) {
- int timeout = poll_deadline_to_millis_timeout(deadline, now);
-
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- char *desc = pollable_desc(p);
- gpr_log(GPR_DEBUG, "PS:%p poll %p[%s] for %dms", pollset, p, desc, timeout);
- gpr_free(desc);
- }
-
- if (timeout != 0) {
- GRPC_SCHEDULING_START_BLOCKING_REGION;
- }
- int r;
- do {
- GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout);
- } while (r < 0 && errno == EINTR);
- if (timeout != 0) {
- GRPC_SCHEDULING_END_BLOCKING_REGION;
- }
-
- if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
-
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p poll %p got %d events", pollset, p, r);
- }
-
- pollset->event_cursor = 0;
- pollset->event_count = r;
-
- return GRPC_ERROR_NONE;
-}
-
-/* Return true if first in list */
-static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link,
- grpc_pollset_worker *worker) {
- if (*root == NULL) {
- *root = worker;
- worker->links[link].next = worker->links[link].prev = worker;
- return true;
- } else {
- worker->links[link].next = *root;
- worker->links[link].prev = worker->links[link].next->links[link].prev;
- worker->links[link].next->links[link].prev = worker;
- worker->links[link].prev->links[link].next = worker;
- return false;
- }
-}
-
-/* Return true if last in list */
-typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
-
-static worker_remove_result worker_remove(grpc_pollset_worker **root,
- pollset_worker_links link,
- grpc_pollset_worker *worker) {
- if (worker == *root) {
- if (worker == worker->links[link].next) {
- *root = NULL;
- return EMPTIED;
- } else {
- *root = worker->links[link].next;
- worker->links[link].prev->links[link].next = worker->links[link].next;
- worker->links[link].next->links[link].prev = worker->links[link].prev;
- return NEW_ROOT;
- }
- } else {
- worker->links[link].prev->links[link].next = worker->links[link].next;
- worker->links[link].next->links[link].prev = worker->links[link].prev;
- return REMOVED;
- }
-}
-
-/* Return true if this thread should poll */
-static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl, gpr_timespec *now,
- gpr_timespec deadline) {
- bool do_poll = true;
- if (worker_hdl != NULL) *worker_hdl = worker;
- worker->initialized_cv = false;
- worker->kicked = false;
- worker->pollset = pollset;
- worker->pollable = pollset->current_pollable;
-
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
- }
-
- worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
- if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
- worker->initialized_cv = true;
- gpr_cv_init(&worker->cv);
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
- }
- if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
- gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
- worker->pollable, worker,
- poll_deadline_to_millis_timeout(deadline, *now));
- }
- while (do_poll && worker->pollable->root_worker != worker) {
- if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
- worker->pollable, worker);
- }
- do_poll = false;
- } else if (worker->kicked) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
- worker);
- }
- do_poll = false;
- } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
- worker->pollable->root_worker != worker) {
- gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
- worker->pollable, worker);
- }
- }
- if (worker->pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker->pollable->po.mu);
- gpr_mu_lock(&pollset->pollable.po.mu);
- gpr_mu_lock(&worker->pollable->po.mu);
- }
- *now = gpr_now(now->clock_type);
- }
-
- return do_poll && pollset->shutdown_closure == NULL &&
- pollset->current_pollable == worker->pollable;
-}
-
-static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl) {
- if (NEW_ROOT ==
- worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
- gpr_cv_signal(&worker->pollable->root_worker->cv);
- }
- if (worker->initialized_cv) {
- gpr_cv_destroy(&worker->cv);
- }
- if (pollset_is_pollable_fd(pollset, worker->pollable)) {
- UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
- }
- if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
- pollset_maybe_finish_shutdown(exec_ctx, pollset);
- }
-}
-
-/* pollset->po.mu lock must be held by the caller before calling this.
- The function pollset_work() may temporarily release the lock (pollset->po.mu)
- during the course of its execution but it will always re-acquire the lock and
- ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
- grpc_pollset_worker worker;
- if (0 && GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRId64
- ".%09d deadline=%" PRId64 ".%09d kwp=%d root_worker=%p",
- pollset, worker_hdl, &worker, now.tv_sec, now.tv_nsec,
- deadline.tv_sec, deadline.tv_nsec, pollset->kicked_without_poller,
- pollset->root_worker);
- }
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollset_work";
- if (pollset->kicked_without_poller) {
- pollset->kicked_without_poller = false;
- return GRPC_ERROR_NONE;
- }
- if (pollset->current_pollable != &pollset->pollable) {
- gpr_mu_lock(&pollset->current_pollable->po.mu);
- }
- if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
- gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
- gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
- GPR_ASSERT(!pollset->shutdown_closure);
- append_error(&error, pollable_materialize(worker.pollable), err_desc);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
- }
- gpr_mu_unlock(&pollset->pollable.po.mu);
- if (pollset->event_cursor == pollset->event_count) {
- append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable,
- now, deadline),
- err_desc);
- }
- append_error(&error, pollset_process_events(exec_ctx, pollset, false),
- err_desc);
- gpr_mu_lock(&pollset->pollable.po.mu);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_lock(&worker.pollable->po.mu);
- }
- gpr_tls_set(&g_current_thread_pollset, 0);
- gpr_tls_set(&g_current_thread_worker, 0);
- pollset_maybe_finish_shutdown(exec_ctx, pollset);
- }
- end_worker(exec_ctx, pollset, &worker, worker_hdl);
- if (worker.pollable != &pollset->pollable) {
- gpr_mu_unlock(&worker.pollable->po.mu);
- }
- if (grpc_exec_ctx_has_work(exec_ctx)) {
- gpr_mu_unlock(&pollset->pollable.po.mu);
- grpc_exec_ctx_flush(exec_ctx);
- gpr_mu_lock(&pollset->pollable.po.mu);
- }
- return error;
-}
-
-static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_fd *fd = arg;
- UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
-}
-
-/* expects pollsets locked, flag whether fd is locked or not */
-static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset, grpc_fd *fd,
- bool fd_locked) {
- static const char *err_desc = "pollset_add_fd";
- grpc_error *error = GRPC_ERROR_NONE;
- if (pollset->current_pollable == &g_empty_pollable) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG,
- "PS:%p add fd %p; transition pollable from empty to fd", pollset,
- fd);
- }
- /* empty pollable --> single fd pollable */
- pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &fd->pollable;
- if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
- append_error(&error, fd_become_pollable_locked(fd), err_desc);
- if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
- REF_BY(fd, 2, "pollset_pollable");
- } else if (pollset->current_pollable == &pollset->pollable) {
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
- }
- append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
- err_desc);
- } else if (pollset->current_pollable != &fd->pollable) {
- grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
- if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG,
- "PS:%p add fd %p; transition pollable from fd %p to multipoller",
- pollset, fd, had_fd);
- }
- /* Introduce a spurious completion.
- If we do not, then it may be that the fd-specific epoll set consumed
- a completion without being polled, leading to a missed edge going up. */
- grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
- grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
- pollset_kick_all(exec_ctx, pollset);
- pollset->current_pollable = &pollset->pollable;
- if (append_error(&error, pollable_materialize(&pollset->pollable),
- err_desc)) {
- pollable_add_fd(&pollset->pollable, had_fd);
- pollable_add_fd(&pollset->pollable, fd);
- }
- GRPC_CLOSURE_SCHED(exec_ctx,
- GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
- }
- return error;
-}
-
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
- gpr_mu_lock(&pollset->pollable.po.mu);
- grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
- gpr_mu_unlock(&pollset->pollable.po.mu);
- GRPC_LOG_IF_ERROR("pollset_add_fd", error);
-}
-
-/*******************************************************************************
- * Pollset-set Definitions
- */
-
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_zalloc(sizeof(*pss));
- po_init(&pss->po, PO_POLLSET_SET);
- return pss;
-}
-
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
- po_destroy(&pss->po);
- gpr_free(pss);
-}
-
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
- po_join(exec_ctx, &pss->po, &fd->pollable.po);
-}
-
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {}
-
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
- po_join(exec_ctx, &pss->po, &ps->pollable.po);
-}
-
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {}
-
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
- po_join(exec_ctx, &bag->po, &item->po);
-}
-
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {}
-
-static void po_init(polling_obj *po, polling_obj_type type) {
- gpr_mu_init(&po->mu);
- po->type = type;
- po->group = NULL;
- po->next = po;
- po->prev = po;
-}
-
-static polling_group *pg_lock_latest(polling_group *pg) {
- /* assumes pg unlocked; consumes ref, returns ref */
- gpr_mu_lock(&pg->po.mu);
- while (pg->po.group != NULL) {
- polling_group *new_pg = pg_ref(pg->po.group);
- gpr_mu_unlock(&pg->po.mu);
- pg_unref(pg);
- pg = new_pg;
- gpr_mu_lock(&pg->po.mu);
- }
- return pg;
-}
-
-static void po_destroy(polling_obj *po) {
- if (po->group != NULL) {
- polling_group *pg = pg_lock_latest(po->group);
- po->prev->next = po->next;
- po->next->prev = po->prev;
- gpr_mu_unlock(&pg->po.mu);
- pg_unref(pg);
- }
- gpr_mu_destroy(&po->mu);
-}
-
-static polling_group *pg_ref(polling_group *pg) {
- gpr_ref(&pg->refs);
- return pg;
-}
-
-static void pg_unref(polling_group *pg) {
- if (gpr_unref(&pg->refs)) {
- po_destroy(&pg->po);
- gpr_free(pg);
- }
-}
-
-static int po_cmp(polling_obj *a, polling_obj *b) {
- if (a == b) return 0;
- if (a->type < b->type) return -1;
- if (a->type > b->type) return 1;
- if (a < b) return -1;
- assert(a > b);
- return 1;
-}
-
-static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
- switch (po_cmp(a, b)) {
- case 0:
- return;
- case 1:
- GPR_SWAP(polling_obj *, a, b);
- /* fall through */
- case -1:
- gpr_mu_lock(&a->mu);
- gpr_mu_lock(&b->mu);
-
- if (a->group == NULL) {
- if (b->group == NULL) {
- polling_obj *initial_po[] = {a, b};
- pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po));
- gpr_mu_unlock(&a->mu);
- gpr_mu_unlock(&b->mu);
- } else {
- polling_group *b_group = pg_ref(b->group);
- gpr_mu_unlock(&b->mu);
- gpr_mu_unlock(&a->mu);
- pg_join(exec_ctx, b_group, a);
- }
- } else if (b->group == NULL) {
- polling_group *a_group = pg_ref(a->group);
- gpr_mu_unlock(&a->mu);
- gpr_mu_unlock(&b->mu);
- pg_join(exec_ctx, a_group, b);
- } else if (a->group == b->group) {
- /* nothing to do */
- gpr_mu_unlock(&a->mu);
- gpr_mu_unlock(&b->mu);
- } else {
- polling_group *a_group = pg_ref(a->group);
- polling_group *b_group = pg_ref(b->group);
- gpr_mu_unlock(&a->mu);
- gpr_mu_unlock(&b->mu);
- pg_merge(exec_ctx, a_group, b_group);
- }
- }
-}
-
-static void pg_notify(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
- if (a->type == PO_FD && b->type == PO_POLLSET) {
- pollset_add_fd_locked(exec_ctx, (grpc_pollset *)b, (grpc_fd *)a, true);
- } else if (a->type == PO_POLLSET && b->type == PO_FD) {
- pollset_add_fd_locked(exec_ctx, (grpc_pollset *)a, (grpc_fd *)b, true);
- }
-}
-
-static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from,
- polling_group *to) {
- for (polling_obj *a = from->po.next; a != &from->po; a = a->next) {
- for (polling_obj *b = to->po.next; b != &to->po; b = b->next) {
- if (po_cmp(a, b) < 0) {
- gpr_mu_lock(&a->mu);
- gpr_mu_lock(&b->mu);
- } else {
- GPR_ASSERT(po_cmp(a, b) != 0);
- gpr_mu_lock(&b->mu);
- gpr_mu_lock(&a->mu);
- }
- pg_notify(exec_ctx, a, b);
- gpr_mu_unlock(&a->mu);
- gpr_mu_unlock(&b->mu);
- }
- }
-}
-
-static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
- size_t initial_po_count) {
- /* assumes all polling objects in initial_po are locked */
- polling_group *pg = gpr_malloc(sizeof(*pg));
- po_init(&pg->po, PO_POLLING_GROUP);
- gpr_ref_init(&pg->refs, (int)initial_po_count);
- for (size_t i = 0; i < initial_po_count; i++) {
- GPR_ASSERT(initial_po[i]->group == NULL);
- initial_po[i]->group = pg;
- }
- for (size_t i = 1; i < initial_po_count; i++) {
- initial_po[i]->prev = initial_po[i - 1];
- }
- for (size_t i = 0; i < initial_po_count - 1; i++) {
- initial_po[i]->next = initial_po[i + 1];
- }
- initial_po[0]->prev = &pg->po;
- initial_po[initial_po_count - 1]->next = &pg->po;
- pg->po.next = initial_po[0];
- pg->po.prev = initial_po[initial_po_count - 1];
- for (size_t i = 1; i < initial_po_count; i++) {
- for (size_t j = 0; j < i; j++) {
- pg_notify(exec_ctx, initial_po[i], initial_po[j]);
- }
- }
-}
-
-static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
- polling_obj *po) {
- /* assumes neither pg nor po are locked; consumes one ref to pg */
- pg = pg_lock_latest(pg);
- /* pg locked */
- for (polling_obj *existing = pg->po.next /* skip pg - it's just a stub */;
- existing != &pg->po; existing = existing->next) {
- if (po_cmp(po, existing) < 0) {
- gpr_mu_lock(&po->mu);
- gpr_mu_lock(&existing->mu);
- } else {
- GPR_ASSERT(po_cmp(po, existing) != 0);
- gpr_mu_lock(&existing->mu);
- gpr_mu_lock(&po->mu);
- }
- /* pg, po, existing locked */
- if (po->group != NULL) {
- gpr_mu_unlock(&pg->po.mu);
- polling_group *po_group = pg_ref(po->group);
- gpr_mu_unlock(&po->mu);
- gpr_mu_unlock(&existing->mu);
- pg_merge(exec_ctx, pg, po_group);
- /* early exit: polling obj picked up a group during joining: we needed
- to do a full merge */
- return;
- }
- pg_notify(exec_ctx, po, existing);
- gpr_mu_unlock(&po->mu);
- gpr_mu_unlock(&existing->mu);
- }
- gpr_mu_lock(&po->mu);
- if (po->group != NULL) {
- gpr_mu_unlock(&pg->po.mu);
- polling_group *po_group = pg_ref(po->group);
- gpr_mu_unlock(&po->mu);
- pg_merge(exec_ctx, pg, po_group);
- /* early exit: polling obj picked up a group during joining: we needed
- to do a full merge */
- return;
- }
- po->group = pg;
- po->next = &pg->po;
- po->prev = pg->po.prev;
- po->prev->next = po->next->prev = po;
- gpr_mu_unlock(&pg->po.mu);
- gpr_mu_unlock(&po->mu);
-}
-
-static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
- polling_group *b) {
- for (;;) {
- if (a == b) {
- pg_unref(a);
- pg_unref(b);
- return;
- }
- if (a > b) GPR_SWAP(polling_group *, a, b);
- gpr_mu_lock(&a->po.mu);
- gpr_mu_lock(&b->po.mu);
- if (a->po.group != NULL) {
- polling_group *m2 = pg_ref(a->po.group);
- gpr_mu_unlock(&a->po.mu);
- gpr_mu_unlock(&b->po.mu);
- pg_unref(a);
- a = m2;
- } else if (b->po.group != NULL) {
- polling_group *m2 = pg_ref(b->po.group);
- gpr_mu_unlock(&a->po.mu);
- gpr_mu_unlock(&b->po.mu);
- pg_unref(b);
- b = m2;
- } else {
- break;
- }
- }
- polling_group **unref = NULL;
- size_t unref_count = 0;
- size_t unref_cap = 0;
- b->po.group = a;
- pg_broadcast(exec_ctx, a, b);
- pg_broadcast(exec_ctx, b, a);
- while (b->po.next != &b->po) {
- polling_obj *po = b->po.next;
- gpr_mu_lock(&po->mu);
- if (unref_count == unref_cap) {
- unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
- unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
- }
- unref[unref_count++] = po->group;
- po->group = pg_ref(a);
- // unlink from b
- po->prev->next = po->next;
- po->next->prev = po->prev;
- // link to a
- po->next = &a->po;
- po->prev = a->po.prev;
- po->next->prev = po->prev->next = po;
- gpr_mu_unlock(&po->mu);
- }
- gpr_mu_unlock(&a->po.mu);
- gpr_mu_unlock(&b->po.mu);
- for (size_t i = 0; i < unref_count; i++) {
- pg_unref(unref[i]);
- }
- gpr_free(unref);
- pg_unref(b);
-}
-
-/*******************************************************************************
- * Event engine binding
- */
-
-static void shutdown_engine(void) {
- fd_global_shutdown();
- pollset_global_shutdown();
-}
-
-static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
-};
-
-const grpc_event_engine_vtable *grpc_init_epollex_linux(
- bool explicitly_requested) {
- if (!grpc_has_wakeup_fd()) {
- return NULL;
- }
-
- if (!grpc_is_epollexclusive_available()) {
- return NULL;
- }
-
- fd_global_init();
-
- if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
- pollset_global_shutdown();
- fd_global_shutdown();
- return NULL;
- }
-
- return &vtable;
-}
-
-#else /* defined(GRPC_LINUX_EPOLL) */
-#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
-/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
- * NULL */
-const grpc_event_engine_vtable *grpc_init_epollex_linux(
- bool explicitly_requested) {
- return NULL;
-}
-#endif /* defined(GRPC_POSIX_SOCKET) */
-
-#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
new file mode 100644
index 0000000000..0809d574a9
--- /dev/null
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -0,0 +1,1483 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+/* This polling engine is only relevant on linux kernels supporting epoll() */
+#ifdef GRPC_LINUX_EPOLL
+
+#include "src/core/lib/iomgr/ev_epollex_linux.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <poll.h>
+#include <pthread.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/tls.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/is_epollexclusive_available.h"
+#include "src/core/lib/iomgr/lockfree_event.h"
+#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/spinlock.h"
+
+// debug aid: create workers on the heap (allows asan to spot
+// use-after-destruction)
+//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1
+
+#define MAX_EPOLL_EVENTS 100
+#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5
+
+#ifndef NDEBUG
+grpc_tracer_flag grpc_trace_pollable_refcount =
+ GRPC_TRACER_INITIALIZER(false, "pollable_refcount");
+#endif
+
+/*******************************************************************************
+ * pollable Declarations
+ */
+
+typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;
+
+typedef struct pollable pollable;
+
+/// A pollable is something that can be polled: it has an epoll set to poll on,
+/// and a wakeup fd for kicks
+/// There are three broad types:
+/// - PO_EMPTY - the empty pollable, used before file descriptors are added to
+/// a pollset
+/// - PO_FD - a pollable containing only one FD - used to optimize single-fd
+/// pollsets (which are common with synchronous api usage)
+/// - PO_MULTI - a pollable containing many fds
+struct pollable {
+ pollable_type type; // immutable
+ gpr_refcount refs;
+
+ int epfd;
+ grpc_wakeup_fd wakeup;
+
+ // only for type fd... one ref to the owner fd
+ grpc_fd *owner_fd;
+
+ grpc_pollset_set *pollset_set;
+ pollable *next;
+ pollable *prev;
+
+ gpr_mu mu;
+ grpc_pollset_worker *root_worker;
+
+ int event_cursor;
+ int event_count;
+ struct epoll_event events[MAX_EPOLL_EVENTS];
+};
+
+static const char *pollable_type_string(pollable_type t) {
+ switch (t) {
+ case PO_MULTI:
+ return "pollset";
+ case PO_FD:
+ return "fd";
+ case PO_EMPTY:
+ return "empty";
+ }
+ return "<invalid>";
+}
+
+static char *pollable_desc(pollable *p) {
+ char *out;
+ gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type),
+ p->epfd, p->wakeup.read_fd);
+ return out;
+}
+
+/// Shared empty pollable - used by pollset to poll on until the first fd is
+/// added
+static pollable *g_empty_pollable;
+
+static grpc_error *pollable_create(pollable_type type, pollable **p);
+#ifdef NDEBUG
+static pollable *pollable_ref(pollable *p);
+static void pollable_unref(pollable *p);
+#define POLLABLE_REF(p, r) pollable_ref(p)
+#define POLLABLE_UNREF(p, r) pollable_unref(p)
+#else
+static pollable *pollable_ref(pollable *p, int line, const char *reason);
+static void pollable_unref(pollable *p, int line, const char *reason);
+#define POLLABLE_REF(p, r) pollable_ref((p), __LINE__, (r))
+#define POLLABLE_UNREF(p, r) pollable_unref((p), __LINE__, (r))
+#endif
+
+/*******************************************************************************
+ * Fd Declarations
+ */
+
+struct grpc_fd {
+ int fd;
+ /* refst format:
+ bit 0 : 1=Active / 0=Orphaned
+ bits 1-n : refcount
+ Ref/Unref by two to avoid altering the orphaned bit */
+ gpr_atm refst;
+
+ gpr_mu orphan_mu;
+
+ gpr_mu pollable_mu;
+ pollable *pollable_obj;
+
+ gpr_atm read_closure;
+ gpr_atm write_closure;
+
+ struct grpc_fd *freelist_next;
+ grpc_closure *on_done_closure;
+
+ /* The pollset that last noticed that the fd is readable. The actual type
+ * stored in this is (grpc_pollset *) */
+ gpr_atm read_notifier_pollset;
+
+ grpc_iomgr_object iomgr_object;
+};
+
+static void fd_global_init(void);
+static void fd_global_shutdown(void);
+
+/*******************************************************************************
+ * Pollset Declarations
+ */
+
+typedef struct {
+ grpc_pollset_worker *next;
+ grpc_pollset_worker *prev;
+} pwlink;
+
+typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;
+
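+/* A worker is linked onto two intrusive doubly-linked lists at once: the list
+   rooted at the pollable it polls and the list rooted at its pollset. The
+   links[] array below is indexed by which of the two lists is being walked. */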
+struct grpc_pollset_worker {
+ bool kicked;
+ bool initialized_cv;
+#ifndef NDEBUG
+ // debug aid: which thread started this worker
+ pid_t originator;
+#endif
+ gpr_cv cv;
+ grpc_pollset *pollset;
+ pollable *pollable_obj;
+
+ pwlink links[PWLINK_COUNT];
+};
+
+struct grpc_pollset {
+ gpr_mu mu;
+ pollable *active_pollable;
+ bool kicked_without_poller;
+ grpc_closure *shutdown_closure;
+ grpc_pollset_worker *root_worker;
+ int containing_pollset_set_count;
+};
+
+/*******************************************************************************
+ * Pollset-set Declarations
+ */
+
+struct grpc_pollset_set {
+ gpr_refcount refs;
+ gpr_mu mu;
+ grpc_pollset_set *parent;
+
+ size_t pollset_count;
+ size_t pollset_capacity;
+ grpc_pollset **pollsets;
+
+ size_t fd_count;
+ size_t fd_capacity;
+ grpc_fd **fds;
+};
+
+/*******************************************************************************
+ * Common helpers
+ */
+
+static bool append_error(grpc_error **composite, grpc_error *error,
+ const char *desc) {
+ if (error == GRPC_ERROR_NONE) return true;
+ if (*composite == GRPC_ERROR_NONE) {
+ *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
+ }
+ *composite = grpc_error_add_child(*composite, error);
+ return false;
+}
+
+/*******************************************************************************
+ * Fd Definitions
+ */
+
+/* We need to keep a freelist not because of any concerns of malloc performance
+ * but instead so that implementations with multiple threads in (for example)
+ * epoll_wait deal with the race between pollset removal and incoming poll
+ * notifications.
+ *
+ * The problem is that the poller ultimately holds a reference to this
+ * object, so it is very difficult to know when is safe to free it, at least
+ * without some expensive synchronization.
+ *
+ * If we keep the object freelisted, in the worst case losing this race just
+ * becomes a spurious read notification on a reused fd.
+ */
+
+static grpc_fd *fd_freelist = NULL;
+static gpr_mu fd_freelist_mu;
+
+#ifndef NDEBUG
+#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
+#define UNREF_BY(ec, fd, n, reason) \
+ unref_by(ec, fd, n, reason, __FILE__, __LINE__)
+static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+ int line) {
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
+ }
+#else
+#define REF_BY(fd, n, reason) ref_by(fd, n)
+#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
+static void ref_by(grpc_fd *fd, int n) {
+#endif
+ GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
+}
+
+static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_fd *fd = (grpc_fd *)arg;
+ /* Add the fd to the freelist */
+ grpc_iomgr_unregister_object(&fd->iomgr_object);
+ POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
+ gpr_mu_destroy(&fd->pollable_mu);
+ gpr_mu_destroy(&fd->orphan_mu);
+ gpr_mu_lock(&fd_freelist_mu);
+ fd->freelist_next = fd_freelist;
+ fd_freelist = fd;
+
+ grpc_lfev_destroy(&fd->read_closure);
+ grpc_lfev_destroy(&fd->write_closure);
+
+ gpr_mu_unlock(&fd_freelist_mu);
+}
+
+#ifndef NDEBUG
+static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
+ const char *reason, const char *file, int line) {
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG,
+ "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
+ fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
+ gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
+ }
+#else
+static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
+#endif
+ gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
+ if (old == n) {
+ GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
+ grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
+ } else {
+ GPR_ASSERT(old > n);
+ }
+}
+
+static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
+
+static void fd_global_shutdown(void) {
+ gpr_mu_lock(&fd_freelist_mu);
+ gpr_mu_unlock(&fd_freelist_mu);
+ while (fd_freelist != NULL) {
+ grpc_fd *fd = fd_freelist;
+ fd_freelist = fd_freelist->freelist_next;
+ gpr_free(fd);
+ }
+ gpr_mu_destroy(&fd_freelist_mu);
+}
+
+static grpc_fd *fd_create(int fd, const char *name) {
+ grpc_fd *new_fd = NULL;
+
+ gpr_mu_lock(&fd_freelist_mu);
+ if (fd_freelist != NULL) {
+ new_fd = fd_freelist;
+ fd_freelist = fd_freelist->freelist_next;
+ }
+ gpr_mu_unlock(&fd_freelist_mu);
+
+ if (new_fd == NULL) {
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
+ }
+
+ gpr_mu_init(&new_fd->pollable_mu);
+ gpr_mu_init(&new_fd->orphan_mu);
+ new_fd->pollable_obj = NULL;
+ gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
+ new_fd->fd = fd;
+ grpc_lfev_init(&new_fd->read_closure);
+ grpc_lfev_init(&new_fd->write_closure);
+ gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
+
+ new_fd->freelist_next = NULL;
+ new_fd->on_done_closure = NULL;
+
+ char *fd_name;
+ gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
+ grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
+#ifndef NDEBUG
+ if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
+ gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
+ }
+#endif
+ gpr_free(fd_name);
+ return new_fd;
+}
+
+static int fd_wrapped_fd(grpc_fd *fd) {
+ int ret_fd = fd->fd;
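+  /* Bit 0 of refst is the active/orphaned flag (see the refst format note in
+     struct grpc_fd): once the fd is orphaned we stop exposing the wrapped fd
+     and report -1 instead. */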
+ return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
+}
+
+static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *on_done, int *release_fd,
+ bool already_closed, const char *reason) {
+ bool is_fd_closed = already_closed;
+
+ gpr_mu_lock(&fd->orphan_mu);
+
+ fd->on_done_closure = on_done;
+
+ /* If release_fd is not NULL, we should be relinquishing control of the file
+ descriptor fd->fd (but we still own the grpc_fd structure). */
+ if (release_fd != NULL) {
+ *release_fd = fd->fd;
+ } else if (!is_fd_closed) {
+ close(fd->fd);
+ is_fd_closed = true;
+ }
+
+ if (!is_fd_closed) {
+ gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
+ }
+
+ /* Remove the active status but keep referenced. We want this grpc_fd struct
+ to be alive (and not added to freelist) until the end of this function */
+ REF_BY(fd, 1, reason);
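+  /* (The +1 here combined with the UNREF_BY(..., 2, ...) below nets to -1 on
+     refst, clearing the active bit while temporarily holding a reference.) */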
+
+ GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
+
+ gpr_mu_unlock(&fd->orphan_mu);
+
+ UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
+}
+
+static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
+ gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
+ return (grpc_pollset *)notifier;
+}
+
+static bool fd_is_shutdown(grpc_fd *fd) {
+ return grpc_lfev_is_shutdown(&fd->read_closure);
+}
+
+/* Might be called multiple times */
+static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+ if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
+ GRPC_ERROR_REF(why))) {
+ shutdown(fd->fd, SHUT_RDWR);
+ grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
+ }
+ GRPC_ERROR_UNREF(why);
+}
+
+static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
+}
+
+static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_closure *closure) {
+ grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
+}
+
+/*******************************************************************************
+ * Pollable Definitions
+ */
+
+static grpc_error *pollable_create(pollable_type type, pollable **p) {
+ *p = NULL;
+
+ int epfd = epoll_create1(EPOLL_CLOEXEC);
+ if (epfd == -1) {
+ return GRPC_OS_ERROR(errno, "epoll_create1");
+ }
+ *p = (pollable *)gpr_malloc(sizeof(**p));
+ grpc_error *err = grpc_wakeup_fd_init(&(*p)->wakeup);
+ if (err != GRPC_ERROR_NONE) {
+ close(epfd);
+ gpr_free(*p);
+ *p = NULL;
+ return err;
+ }
+ struct epoll_event ev;
+ ev.events = (uint32_t)(EPOLLIN | EPOLLET);
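+  /* Tag the wakeup-fd pointer by setting its low bit: event processing uses
+     this to tell wakeup events apart from grpc_fd events, whose pointers are
+     stored untagged by pollable_add_fd. */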
+ ev.data.ptr = (void *)(1 | (intptr_t) & (*p)->wakeup);
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
+ err = GRPC_OS_ERROR(errno, "epoll_ctl");
+ close(epfd);
+ grpc_wakeup_fd_destroy(&(*p)->wakeup);
+ gpr_free(*p);
+ *p = NULL;
+ return err;
+ }
+
+ (*p)->type = type;
+ gpr_ref_init(&(*p)->refs, 1);
+ gpr_mu_init(&(*p)->mu);
+ (*p)->epfd = epfd;
+ (*p)->owner_fd = NULL;
+ (*p)->pollset_set = NULL;
+ (*p)->next = (*p)->prev = *p;
+ (*p)->root_worker = NULL;
+ (*p)->event_cursor = 0;
+ (*p)->event_count = 0;
+ return GRPC_ERROR_NONE;
+}
+
+#ifdef NDEBUG
+static pollable *pollable_ref(pollable *p) {
+#else
+static pollable *pollable_ref(pollable *p, int line, const char *reason) {
+ if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
+ int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
+ gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
+ "POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason);
+ }
+#endif
+ gpr_ref(&p->refs);
+ return p;
+}
+
+#ifdef NDEBUG
+static void pollable_unref(pollable *p) {
+#else
+static void pollable_unref(pollable *p, int line, const char *reason) {
+ if (p == NULL) return;
+ if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
+ int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
+ gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
+ "POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason);
+ }
+#endif
+ if (p != NULL && gpr_unref(&p->refs)) {
+ close(p->epfd);
+ grpc_wakeup_fd_destroy(&p->wakeup);
+ gpr_free(p);
+ }
+}
+
+static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *err_desc = "pollable_add_fd";
+ const int epfd = p->epfd;
+
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
+ }
+
+ struct epoll_event ev_fd;
+ ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
+ ev_fd.data.ptr = fd;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
+ switch (errno) {
+ case EEXIST:
+ break;
+ default:
+ append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
+ }
+ }
+
+ return error;
+}
+
+/*******************************************************************************
+ * Pollset Definitions
+ */
+
+GPR_TLS_DECL(g_current_thread_pollset);
+GPR_TLS_DECL(g_current_thread_worker);
+
+/* Global state management */
+static grpc_error *pollset_global_init(void) {
+ gpr_tls_init(&g_current_thread_pollset);
+ gpr_tls_init(&g_current_thread_worker);
+ return pollable_create(PO_EMPTY, &g_empty_pollable);
+}
+
+static void pollset_global_shutdown(void) {
+ POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
+ gpr_tls_destroy(&g_current_thread_pollset);
+ gpr_tls_destroy(&g_current_thread_worker);
+}
+
+/* pollset->mu must be held while calling this function */
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG,
+ "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
+ "rw=%p (target:NULL) cpsc=%d (target:0)",
+ pollset, pollset->active_pollable, pollset->shutdown_closure,
+ pollset->root_worker, pollset->containing_pollset_set_count);
+ }
+ if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
+ pollset->containing_pollset_set_count == 0) {
+ GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+ pollset->shutdown_closure = NULL;
+ }
+}
+
+/* pollset->mu must be held before calling this function,
+ * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
+ * held */
+static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_worker *specific_worker) {
+  GPR_ASSERT(specific_worker != NULL);
+  pollable *p = specific_worker->pollable_obj;
+  grpc_core::mu_guard lock(&p->mu);
+ if (specific_worker->kicked) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
+ }
+ GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+ return GRPC_ERROR_NONE;
+ }
+ if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
+ }
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+ specific_worker->kicked = true;
+ return GRPC_ERROR_NONE;
+ }
+ if (specific_worker == p->root_worker) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
+ }
+ specific_worker->kicked = true;
+ grpc_error *error = grpc_wakeup_fd_wakeup(&p->wakeup);
+ return error;
+ }
+ if (specific_worker->initialized_cv) {
+ GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
+ }
+ specific_worker->kicked = true;
+ gpr_cv_signal(&specific_worker->cv);
+ return GRPC_ERROR_NONE;
+ }
+ // we can get here during end_worker after removing specific_worker from the
+ // pollable list but before removing it from the pollset list
+ return GRPC_ERROR_NONE;
+}
+
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG,
+ "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
+ pollset, specific_worker,
+ (void *)gpr_tls_get(&g_current_thread_pollset),
+ (void *)gpr_tls_get(&g_current_thread_worker),
+ pollset->root_worker);
+ }
+ if (specific_worker == NULL) {
+ if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
+ if (pollset->root_worker == NULL) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
+ }
+ GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
+ pollset->kicked_without_poller = true;
+ return GRPC_ERROR_NONE;
+ } else {
+ // We've been asked to kick a poller, but we haven't been told which one
+ // ... any will do
+ // We look at the pollset worker list because:
+ // 1. the pollable list may include workers from other pollers, so we'd
+ // need to do an O(N) search
+ // 2. we'd additionally need to take the pollable lock, which we've so
+ // far avoided
+ // Now, we would prefer to wake a poller in cv_wait, and not in
+ // epoll_wait (since the latter would imply the need to do an additional
+ // wakeup)
+ // We know that if a worker is at the root of a pollable, it's (likely)
+ // also the root of a pollset, and we know that if a worker is NOT at
+ // the root of a pollset, it's (likely) not at the root of a pollable,
+ // so we take our chances and choose the SECOND worker enqueued against
+ // the pollset as a worker that's likely to be in cv_wait
+ return kick_one_worker(
+ exec_ctx, pollset->root_worker->links[PWLINK_POLLSET].next);
+ }
+ } else {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
+ }
+ GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+ return GRPC_ERROR_NONE;
+ }
+ } else {
+ return kick_one_worker(exec_ctx, specific_worker);
+ }
+}
+
+static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ const char *err_desc = "pollset_kick_all";
+ grpc_pollset_worker *w = pollset->root_worker;
+ if (w != NULL) {
+ do {
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+ append_error(&error, kick_one_worker(exec_ctx, w), err_desc);
+ w = w->links[PWLINK_POLLSET].next;
+ } while (w != pollset->root_worker);
+ }
+ return error;
+}
+
+static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+ gpr_mu_init(&pollset->mu);
+ pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
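+  /* New pollsets poll the shared empty pollable until file descriptors are
+     added, at which point a PO_FD or PO_MULTI pollable takes its place (see
+     the pollable type notes above). */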
+ *mu = &pollset->mu;
+}
+
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX)
+ return INT_MAX;
+ else if (delta < 0)
+ return 0;
+ else
+ return (int)delta;
+}
+
+static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
+ grpc_pollset *notifier) {
+ grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
+
+ /* Note, it is possible that fd_become_readable might be called twice with
+ different 'notifier's when an fd becomes readable and it is in two epoll
+ sets (This can happen briefly during polling island merges). In such cases
+     it does not really matter which notifier is set as the read_notifier_pollset
+ (They would both point to the same polling island anyway) */
+ /* Use release store to match with acquire load in fd_get_read_notifier */
+ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
+}
+
+static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+ grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
+}
+
+static grpc_error *fd_get_or_become_pollable(grpc_fd *fd, pollable **p) {
+ gpr_mu_lock(&fd->pollable_mu);
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *err_desc = "fd_get_or_become_pollable";
+ if (fd->pollable_obj == NULL) {
+ if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
+ err_desc)) {
+ fd->pollable_obj->owner_fd = fd;
+ if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
+ err_desc)) {
+ POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
+ fd->pollable_obj = NULL;
+ }
+ }
+ }
+ if (error == GRPC_ERROR_NONE) {
+ GPR_ASSERT(fd->pollable_obj != NULL);
+ *p = POLLABLE_REF(fd->pollable_obj, "pollset");
+ } else {
+ GPR_ASSERT(fd->pollable_obj == NULL);
+ *p = NULL;
+ }
+ gpr_mu_unlock(&fd->pollable_mu);
+ return error;
+}
+
+/* pollset->po.mu lock must be held by the caller before calling this */
+static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_closure *closure) {
+ GPR_ASSERT(pollset->shutdown_closure == NULL);
+ pollset->shutdown_closure = closure;
+ GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
+}
+
+static grpc_error *pollable_process_events(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ pollable *pollable_obj, bool drain) {
+ static const char *err_desc = "pollset_process_events";
+ grpc_error *error = GRPC_ERROR_NONE;
+ for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
+ pollable_obj->event_cursor != pollable_obj->event_count;
+ i++) {
+ int n = pollable_obj->event_cursor++;
+ struct epoll_event *ev = &pollable_obj->events[n];
+ void *data_ptr = ev->data.ptr;
+ if (1 & (intptr_t)data_ptr) {
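+      /* Low bit set: this is the tagged wakeup-fd pointer registered when the
+         pollable was created; mask the bit off to recover the grpc_wakeup_fd. */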
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
+ }
+ append_error(&error,
+ grpc_wakeup_fd_consume_wakeup(
+ (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ err_desc);
+ } else {
+ grpc_fd *fd = (grpc_fd *)data_ptr;
+ bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
+ bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
+ bool write_ev = (ev->events & EPOLLOUT) != 0;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG,
+ "PS:%p got fd %p: cancel=%d read=%d "
+ "write=%d",
+ pollset, fd, cancel, read_ev, write_ev);
+ }
+ if (read_ev || cancel) {
+ fd_become_readable(exec_ctx, fd, pollset);
+ }
+ if (write_ev || cancel) {
+ fd_become_writable(exec_ctx, fd);
+ }
+ }
+ }
+
+ return error;
+}
+
+/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
+static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ pollset->active_pollable = NULL;
+}
+
+static grpc_error *pollable_epoll(grpc_exec_ctx *exec_ctx, pollable *p,
+ grpc_millis deadline) {
+ int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ char *desc = pollable_desc(p);
+ gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
+ gpr_free(desc);
+ }
+
+ if (timeout != 0) {
+ GRPC_SCHEDULING_START_BLOCKING_REGION;
+ }
+ int r;
+ do {
+ GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+ r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
+ } while (r < 0 && errno == EINTR);
+ if (timeout != 0) {
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+ }
+
+ if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
+
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "POLLABLE:%p got %d events", p, r);
+ }
+
+ p->event_cursor = 0;
+ p->event_count = r;
+
+ return GRPC_ERROR_NONE;
+}
+
+/* Return true if first in list */
+static bool worker_insert(grpc_pollset_worker **root_worker,
+ grpc_pollset_worker *worker, pwlinks link) {
+ if (*root_worker == NULL) {
+ *root_worker = worker;
+ worker->links[link].next = worker->links[link].prev = worker;
+ return true;
+ } else {
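+ /* not first: splice the worker in just before the root, i.e. at the tail
+ of the circular doubly-linked list */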
+ worker->links[link].next = *root_worker;
+ worker->links[link].prev = worker->links[link].next->links[link].prev;
+ worker->links[link].next->links[link].prev = worker;
+ worker->links[link].prev->links[link].next = worker;
+ return false;
+ }
+}
+
+/* Result of worker_remove: the root changed (WRR_NEW_ROOT), the list became
+   empty (WRR_EMPTIED), or a non-root worker was removed (WRR_REMOVED) */
+typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;
+
+static worker_remove_result worker_remove(grpc_pollset_worker **root_worker,
+ grpc_pollset_worker *worker,
+ pwlinks link) {
+ if (worker == *root_worker) {
+ if (worker == worker->links[link].next) {
+ *root_worker = NULL;
+ return WRR_EMPTIED;
+ } else {
+ *root_worker = worker->links[link].next;
+ worker->links[link].prev->links[link].next = worker->links[link].next;
+ worker->links[link].next->links[link].prev = worker->links[link].prev;
+ return WRR_NEW_ROOT;
+ }
+ } else {
+ worker->links[link].prev->links[link].next = worker->links[link].next;
+ worker->links[link].next->links[link].prev = worker->links[link].prev;
+ return WRR_REMOVED;
+ }
+}
+
+/* Return true if this thread should poll */
+static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ grpc_pollset_worker **worker_hdl,
+ grpc_millis deadline) {
+ bool do_poll = (pollset->shutdown_closure == nullptr);
+ if (worker_hdl != NULL) *worker_hdl = worker;
+ worker->initialized_cv = false;
+ worker->kicked = false;
+ worker->pollset = pollset;
+ worker->pollable_obj =
+ POLLABLE_REF(pollset->active_pollable, "pollset_worker");
+ worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
+ gpr_mu_lock(&worker->pollable_obj->mu);
+ if (!worker_insert(&worker->pollable_obj->root_worker, worker,
+ PWLINK_POLLABLE)) {
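+ /* Not the first worker on this pollable: wait on a condvar until promoted
+ to root, kicked, or timed out. */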
+ worker->initialized_cv = true;
+ gpr_cv_init(&worker->cv);
+ gpr_mu_unlock(&pollset->mu);
+ if (GRPC_TRACER_ON(grpc_polling_trace) &&
+ worker->pollable_obj->root_worker != worker) {
+ gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
+ worker->pollable_obj, worker,
+ poll_deadline_to_millis_timeout(exec_ctx, deadline));
+ }
+ while (do_poll && worker->pollable_obj->root_worker != worker) {
+ if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
+ worker->pollable_obj, worker);
+ }
+ do_poll = false;
+ } else if (worker->kicked) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
+ worker->pollable_obj, worker);
+ }
+ do_poll = false;
+ } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
+ worker->pollable_obj->root_worker != worker) {
+ gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
+ worker->pollable_obj, worker);
+ }
+ }
+ grpc_exec_ctx_invalidate_now(exec_ctx);
+ } else {
+ gpr_mu_unlock(&pollset->mu);
+ }
+ gpr_mu_unlock(&worker->pollable_obj->mu);
+
+ return do_poll;
+}
+
+static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *worker,
+ grpc_pollset_worker **worker_hdl) {
+ gpr_mu_lock(&pollset->mu);
+ gpr_mu_lock(&worker->pollable_obj->mu);
+ switch (worker_remove(&worker->pollable_obj->root_worker, worker,
+ PWLINK_POLLABLE)) {
+ case WRR_NEW_ROOT: {
+ // wakeup new poller
+ grpc_pollset_worker *new_root = worker->pollable_obj->root_worker;
+ GPR_ASSERT(new_root->initialized_cv);
+ gpr_cv_signal(&new_root->cv);
+ break;
+ }
+ case WRR_EMPTIED:
+ if (pollset->active_pollable != worker->pollable_obj) {
+ // pollable no longer being polled: flush events
+ pollable_process_events(exec_ctx, pollset, worker->pollable_obj, true);
+ }
+ break;
+ case WRR_REMOVED:
+ break;
+ }
+ gpr_mu_unlock(&worker->pollable_obj->mu);
+ POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
+ if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
+ WRR_EMPTIED) {
+ pollset_maybe_finish_shutdown(exec_ctx, pollset);
+ }
+ if (worker->initialized_cv) {
+ gpr_cv_destroy(&worker->cv);
+ }
+}
+
+#ifndef NDEBUG
+static long gettid(void) { return syscall(__NR_gettid); }
+#endif
+
+/* pollset->mu must be held by the caller before calling this.
+   pollset_work() may temporarily release pollset->mu during the course of its
+   execution, but it always re-acquires the lock and ensures that it is held
+   by the time the function returns */
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker **worker_hdl,
+ grpc_millis deadline) {
+#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
+ grpc_pollset_worker *worker =
+ (grpc_pollset_worker *)gpr_malloc(sizeof(*worker));
+#define WORKER_PTR (worker)
+#else
+ grpc_pollset_worker worker;
+#define WORKER_PTR (&worker)
+#endif
+#ifndef NDEBUG
+ WORKER_PTR->originator = gettid();
+#endif
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
+ " deadline=%" PRIdPTR " kwp=%d pollable=%p",
+ pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
+ deadline, pollset->kicked_without_poller, pollset->active_pollable);
+ }
+ static const char *err_desc = "pollset_work";
+ grpc_error *error = GRPC_ERROR_NONE;
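+ /* A kick that arrived while no worker was polling is consumed here without
+ polling at all. */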
+ if (pollset->kicked_without_poller) {
+ pollset->kicked_without_poller = false;
+ } else {
+ if (begin_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl, deadline)) {
+ gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
+ gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
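+ /* Only call epoll_wait once all previously fetched events have been
+ processed. */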
+ if (WORKER_PTR->pollable_obj->event_cursor ==
+ WORKER_PTR->pollable_obj->event_count) {
+ append_error(&error, pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj,
+ deadline),
+ err_desc);
+ }
+ append_error(&error,
+ pollable_process_events(exec_ctx, pollset,
+ WORKER_PTR->pollable_obj, false),
+ err_desc);
+ grpc_exec_ctx_flush(exec_ctx);
+ gpr_tls_set(&g_current_thread_pollset, 0);
+ gpr_tls_set(&g_current_thread_worker, 0);
+ }
+ end_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl);
+ }
+#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
+ gpr_free(worker);
+#endif
+#undef WORKER_PTR
+ return error;
+}
+
+static grpc_error *pollset_transition_pollable_from_empty_to_fd_locked(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *fd) {
+ static const char *err_desc = "pollset_transition_pollable_from_empty_to_fd";
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG,
+ "PS:%p add fd %p (%d); transition pollable from empty to fd",
+ pollset, fd, fd->fd);
+ }
+ append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ append_error(&error, fd_get_or_become_pollable(fd, &pollset->active_pollable),
+ err_desc);
+ return error;
+}
+
+static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *and_add_fd) {
+ static const char *err_desc = "pollset_transition_pollable_from_fd_to_multi";
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(
+ GPR_DEBUG,
+ "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
+ pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
+ pollset->active_pollable->owner_fd);
+ }
+ append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
+ grpc_fd *initial_fd = pollset->active_pollable->owner_fd;
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ pollset->active_pollable = NULL;
+ if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
+ err_desc)) {
+ append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
+ err_desc);
+ if (and_add_fd != NULL) {
+ append_error(&error,
+ pollable_add_fd(pollset->active_pollable, and_add_fd),
+ err_desc);
+ }
+ }
+ return error;
+}
+
+/* expects the pollset lock to be held by the caller */
+static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset, grpc_fd *fd) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ pollable *po_at_start =
+ POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
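+ /* Keep a ref to the starting pollable so the transition can be rolled back
+ on error. */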
+ switch (pollset->active_pollable->type) {
+ case PO_EMPTY:
+ /* empty pollable --> single fd pollable */
+ error = pollset_transition_pollable_from_empty_to_fd_locked(exec_ctx,
+ pollset, fd);
+ break;
+ case PO_FD:
+ gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
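+ /* Low bit of refst clear ==> the owner fd has been orphaned; treat the
+ pollable as if it were empty. */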
+ if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
+ 1) == 0) {
+ error = pollset_transition_pollable_from_empty_to_fd_locked(
+ exec_ctx, pollset, fd);
+ } else {
+ /* fd --> multipoller */
+ error = pollset_transition_pollable_from_fd_to_multi_locked(
+ exec_ctx, pollset, fd);
+ }
+ gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
+ break;
+ case PO_MULTI:
+ error = pollable_add_fd(pollset->active_pollable, fd);
+ break;
+ }
+ if (error != GRPC_ERROR_NONE) {
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ pollset->active_pollable = po_at_start;
+ } else {
+ POLLABLE_UNREF(po_at_start, "pollset_add_fd");
+ }
+ return error;
+}
+
+static grpc_error *pollset_as_multipollable_locked(grpc_exec_ctx *exec_ctx,
+ grpc_pollset *pollset,
+ pollable **pollable_obj) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ pollable *po_at_start =
+ POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
+ switch (pollset->active_pollable->type) {
+ case PO_EMPTY:
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ error = pollable_create(PO_MULTI, &pollset->active_pollable);
+ break;
+ case PO_FD:
+ gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
+ if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
+ 1) == 0) {
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ error = pollable_create(PO_MULTI, &pollset->active_pollable);
+ } else {
+ error = pollset_transition_pollable_from_fd_to_multi_locked(
+ exec_ctx, pollset, NULL);
+ }
+ gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
+ break;
+ case PO_MULTI:
+ break;
+ }
+ if (error != GRPC_ERROR_NONE) {
+ POLLABLE_UNREF(pollset->active_pollable, "pollset");
+ pollset->active_pollable = po_at_start;
+ *pollable_obj = NULL;
+ } else {
+ *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
+ POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
+ }
+ return error;
+}
+
+static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_fd *fd) {
+ gpr_mu_lock(&pollset->mu);
+ grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd);
+ gpr_mu_unlock(&pollset->mu);
+ GRPC_LOG_IF_ERROR("pollset_add_fd", error);
+}
+
+/*******************************************************************************
+ * Pollset-set Definitions
+ */
+
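+/* Walk to the root ("adam") of the pollset_set parent chain and return it
+   locked. */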
+static grpc_pollset_set *pss_lock_adam(grpc_pollset_set *pss) {
+ gpr_mu_lock(&pss->mu);
+ while (pss->parent != NULL) {
+ gpr_mu_unlock(&pss->mu);
+ pss = pss->parent;
+ gpr_mu_lock(&pss->mu);
+ }
+ return pss;
+}
+
+static grpc_pollset_set *pollset_set_create(void) {
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
+ gpr_mu_init(&pss->mu);
+ gpr_ref_init(&pss->refs, 1);
+ return pss;
+}
+
+static void pollset_set_unref(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss) {
+ if (pss == NULL) return;
+ if (!gpr_unref(&pss->refs)) return;
+ pollset_set_unref(exec_ctx, pss->parent);
+ gpr_mu_destroy(&pss->mu);
+ for (size_t i = 0; i < pss->pollset_count; i++) {
+ gpr_mu_lock(&pss->pollsets[i]->mu);
+ if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
+ pollset_maybe_finish_shutdown(exec_ctx, pss->pollsets[i]);
+ }
+ gpr_mu_unlock(&pss->pollsets[i]->mu);
+ }
+ for (size_t i = 0; i < pss->fd_count; i++) {
+ UNREF_BY(exec_ctx, pss->fds[i], 2, "pollset_set");
+ }
+ gpr_free(pss->pollsets);
+ gpr_free(pss->fds);
+ gpr_free(pss);
+}
+
+static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
+ grpc_fd *fd) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
+ }
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *err_desc = "pollset_set_add_fd";
+ pss = pss_lock_adam(pss);
+ for (size_t i = 0; i < pss->pollset_count; i++) {
+ append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
+ err_desc);
+ }
+ if (pss->fd_count == pss->fd_capacity) {
+ pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
+ pss->fds =
+ (grpc_fd **)gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds));
+ }
+ REF_BY(fd, 2, "pollset_set");
+ pss->fds[pss->fd_count++] = fd;
+ gpr_mu_unlock(&pss->mu);
+
+ GRPC_LOG_IF_ERROR(err_desc, error);
+}
+
+static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
+ grpc_fd *fd) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
+ }
+ pss = pss_lock_adam(pss);
+ size_t i;
+ for (i = 0; i < pss->fd_count; i++) {
+ if (pss->fds[i] == fd) {
+ UNREF_BY(exec_ctx, fd, 2, "pollset_set");
+ break;
+ }
+ }
+ GPR_ASSERT(i != pss->fd_count);
+ for (; i < pss->fd_count - 1; i++) {
+ pss->fds[i] = pss->fds[i + 1];
+ }
+ pss->fd_count--;
+ gpr_mu_unlock(&pss->mu);
+}
+
+static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pss, grpc_pollset *ps) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
+ }
+ pss = pss_lock_adam(pss);
+ size_t i;
+ for (i = 0; i < pss->pollset_count; i++) {
+ if (pss->pollsets[i] == ps) {
+ break;
+ }
+ }
+ GPR_ASSERT(i != pss->pollset_count);
+ for (; i < pss->pollset_count - 1; i++) {
+ pss->pollsets[i] = pss->pollsets[i + 1];
+ }
+ pss->pollset_count--;
+ gpr_mu_unlock(&pss->mu);
+ gpr_mu_lock(&ps->mu);
+ if (0 == --ps->containing_pollset_set_count) {
+ pollset_maybe_finish_shutdown(exec_ctx, ps);
+ }
+ gpr_mu_unlock(&ps->mu);
+}
+
+// Add each fd to the active pollable of every pollset, and write the fds that
+// are still unorphaned into out_fds. Assumes the pollsets are multipollable.
+static grpc_error *add_fds_to_pollsets(grpc_exec_ctx *exec_ctx, grpc_fd **fds,
+ size_t fd_count, grpc_pollset **pollsets,
+ size_t pollset_count,
+ const char *err_desc, grpc_fd **out_fds,
+ size_t *out_fd_count) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ for (size_t i = 0; i < fd_count; i++) {
+ gpr_mu_lock(&fds[i]->orphan_mu);
+ if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
+ gpr_mu_unlock(&fds[i]->orphan_mu);
+ UNREF_BY(exec_ctx, fds[i], 2, "pollset_set");
+ } else {
+ for (size_t j = 0; j < pollset_count; j++) {
+ append_error(&error,
+ pollable_add_fd(pollsets[j]->active_pollable, fds[i]),
+ err_desc);
+ }
+ gpr_mu_unlock(&fds[i]->orphan_mu);
+ out_fds[(*out_fd_count)++] = fds[i];
+ }
+ }
+ return error;
+}
+
+static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *pss, grpc_pollset *ps) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
+ }
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *err_desc = "pollset_set_add_pollset";
+ pollable *pollable_obj = NULL;
+ gpr_mu_lock(&ps->mu);
+ if (!GRPC_LOG_IF_ERROR(err_desc, pollset_as_multipollable_locked(
+ exec_ctx, ps, &pollable_obj))) {
+ GPR_ASSERT(pollable_obj == NULL);
+ gpr_mu_unlock(&ps->mu);
+ return;
+ }
+ ps->containing_pollset_set_count++;
+ gpr_mu_unlock(&ps->mu);
+ pss = pss_lock_adam(pss);
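+ /* Re-add the tracked fds to the new pollset; add_fds_to_pollsets also
+ compacts out any orphaned fds. */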
+ size_t initial_fd_count = pss->fd_count;
+ pss->fd_count = 0;
+ append_error(&error,
+ add_fds_to_pollsets(exec_ctx, pss->fds, initial_fd_count, &ps, 1,
+ err_desc, pss->fds, &pss->fd_count),
+ err_desc);
+ if (pss->pollset_count == pss->pollset_capacity) {
+ pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
+ pss->pollsets = (grpc_pollset **)gpr_realloc(
+ pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets));
+ }
+ pss->pollsets[pss->pollset_count++] = ps;
+ gpr_mu_unlock(&pss->mu);
+ POLLABLE_UNREF(pollable_obj, "pollset_set");
+
+ GRPC_LOG_IF_ERROR(err_desc, error);
+}
+
+static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *a,
+ grpc_pollset_set *b) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
+ }
+ grpc_error *error = GRPC_ERROR_NONE;
+ static const char *err_desc = "pollset_set_add_pollset_set";
+ for (;;) {
+ if (a == b) {
+ // pollset_set ancestors are the same: nothing to do
+ return;
+ }
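+ /* Lock the two sets in increasing address order to avoid lock-ordering
+ deadlocks. */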
+ if (a > b) {
+ GPR_SWAP(grpc_pollset_set *, a, b);
+ }
+ gpr_mu *a_mu = &a->mu;
+ gpr_mu *b_mu = &b->mu;
+ gpr_mu_lock(a_mu);
+ gpr_mu_lock(b_mu);
+ if (a->parent != NULL) {
+ a = a->parent;
+ } else if (b->parent != NULL) {
+ b = b->parent;
+ } else {
+ break; // exit loop, both pollset_sets locked
+ }
+ gpr_mu_unlock(a_mu);
+ gpr_mu_unlock(b_mu);
+ }
+ // try to do the least copying possible
+ // TODO(ctiller): there's probably a better heuristic here
+ const size_t a_size = a->fd_count + a->pollset_count;
+ const size_t b_size = b->fd_count + b->pollset_count;
+ if (b_size > a_size) {
+ GPR_SWAP(grpc_pollset_set *, a, b);
+ }
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
+ }
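+ /* b's parent pointer holds a ref on a; b's fds and pollsets are migrated
+ into a below. */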
+ gpr_ref(&a->refs);
+ b->parent = a;
+ if (a->fd_capacity < a->fd_count + b->fd_count) {
+ a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
+ a->fds = (grpc_fd **)gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds));
+ }
+ size_t initial_a_fd_count = a->fd_count;
+ a->fd_count = 0;
+ append_error(&error, add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count,
+ b->pollsets, b->pollset_count,
+ "merge_a2b", a->fds, &a->fd_count),
+ err_desc);
+ append_error(&error, add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count,
+ a->pollsets, a->pollset_count,
+ "merge_b2a", a->fds, &a->fd_count),
+ err_desc);
+ if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
+ a->pollset_capacity =
+ GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
+ a->pollsets = (grpc_pollset **)gpr_realloc(
+ a->pollsets, a->pollset_capacity * sizeof(*a->pollsets));
+ }
+ if (b->pollset_count > 0) {
+ memcpy(a->pollsets + a->pollset_count, b->pollsets,
+ b->pollset_count * sizeof(*b->pollsets));
+ }
+ a->pollset_count += b->pollset_count;
+ gpr_free(b->fds);
+ gpr_free(b->pollsets);
+ b->fds = NULL;
+ b->pollsets = NULL;
+ b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
+ gpr_mu_unlock(&a->mu);
+ gpr_mu_unlock(&b->mu);
+}
+
+static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_pollset_set *bag,
+ grpc_pollset_set *item) {}
+
+/*******************************************************************************
+ * Event engine binding
+ */
+
+static void shutdown_engine(void) {
+ fd_global_shutdown();
+ pollset_global_shutdown();
+}
+
+static const grpc_event_engine_vtable vtable = {
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_unref, // destroy ==> unref 1 public ref
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
+};
+
+const grpc_event_engine_vtable *grpc_init_epollex_linux(
+ bool explicitly_requested) {
+ if (!explicitly_requested) {
+ return NULL;
+ }
+
+ if (!grpc_has_wakeup_fd()) {
+ return NULL;
+ }
+
+ if (!grpc_is_epollexclusive_available()) {
+ return NULL;
+ }
+
+#ifndef NDEBUG
+ grpc_register_tracer(&grpc_trace_pollable_refcount);
+#endif
+
+ fd_global_init();
+
+ if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
+ pollset_global_shutdown();
+ fd_global_shutdown();
+ return NULL;
+ }
+
+ return &vtable;
+}
+
+#else /* defined(GRPC_LINUX_EPOLL) */
+#if defined(GRPC_POSIX_SOCKET)
+#include "src/core/lib/iomgr/ev_epollex_linux.h"
+/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
+ * NULL */
+const grpc_event_engine_vtable *grpc_init_epollex_linux(
+ bool explicitly_requested) {
+ return NULL;
+}
+#endif /* defined(GRPC_POSIX_SOCKET) */
+
+#endif /* !defined(GRPC_LINUX_EPOLL) */
diff --git a/src/core/lib/iomgr/ev_epollex_linux.h b/src/core/lib/iomgr/ev_epollex_linux.h
index cff9b43c02..2849a23283 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.h
+++ b/src/core/lib/iomgr/ev_epollex_linux.h
@@ -22,7 +22,15 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
const grpc_event_engine_vtable *grpc_init_epollex_linux(
bool explicitly_requested);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 59c7cdc285..035bdc4cb5 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.c
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <grpc/grpc_posix.h>
+
/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
@@ -25,6 +27,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
@@ -40,13 +43,13 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
@@ -363,7 +366,8 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
- pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
+ pi->fds =
+ (grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
}
pi->fds[pi->fd_cnt++] = fds[i];
@@ -466,7 +470,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
- pi = gpr_malloc(sizeof(*pi));
+ pi = (polling_island *)gpr_malloc(sizeof(*pi));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -810,7 +814,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->po.mu);
}
@@ -1020,10 +1024,11 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
}
/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
const char *err_desc = "Kick Failure";
grpc_pollset_worker *worker = specific_worker;
if (worker != NULL) {
@@ -1087,30 +1092,16 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->shutdown_done = NULL;
}
-/* Convert a timespec to milliseconds:
- - Very small or negative poll times are clamped to zero to do a non-blocking
- poll (which becomes spin polling)
- - Other small values are rounded up to one millisecond
- - Longer than a millisecond polls are rounded up to the next nearest
- millisecond to avoid spinning
- - Infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
-
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis millis) {
+ if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
+ grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+ if (delta > INT_MAX)
+ return INT_MAX;
+ else if (delta < 0)
return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- int millis = gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
- return millis >= 1 ? millis : 1;
+ else
+ return (int)delta;
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
@@ -1131,7 +1122,8 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps, char *reason) {
+ grpc_pollset *ps,
+ const char *reason) {
if (ps->po.pi != NULL) {
PI_UNREF(exec_ctx, ps->po.pi, reason);
}
@@ -1157,7 +1149,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
/* If the pollset has any workers, we cannot call finish_shutdown_locked()
because it would release the underlying polling island. In such a case, we
@@ -1240,7 +1232,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
ep_rv =
epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (ep_rv < 0) {
if (errno != EINTR) {
gpr_asprintf(&err_msg,
@@ -1273,7 +1265,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
- grpc_fd *fd = data_ptr;
+ grpc_fd *fd = (grpc_fd *)data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@@ -1307,10 +1299,10 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
GPR_TIMER_BEGIN("pollset_work", 0);
grpc_error *error = GRPC_ERROR_NONE;
- int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);
+ int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline);
sigset_t new_mask;
@@ -1569,7 +1561,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
+ grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifndef NDEBUG
@@ -1647,8 +1639,8 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
}
bool grpc_are_polling_islands_equal(void *p, void *q) {
- polling_island *p1 = p;
- polling_island *p2 = q;
+ polling_island *p1 = (polling_island *)p;
+ polling_island *p2 = (polling_island *)q;
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */
@@ -1669,34 +1661,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
@@ -1753,7 +1745,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux(
#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
-#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/ev_epollsig_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
const grpc_event_engine_vtable *grpc_init_epollsig_linux(
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.h b/src/core/lib/iomgr/ev_epollsig_linux.h
index 88328682b3..c04ff27400 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.h
+++ b/src/core/lib/iomgr/ev_epollsig_linux.h
@@ -22,6 +22,10 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
const grpc_event_engine_vtable *grpc_init_epollsig_linux(bool explicit_request);
#ifdef GRPC_LINUX_EPOLL
@@ -30,4 +34,8 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps);
bool grpc_are_polling_islands_equal(void *p, void *q);
#endif /* defined(GRPC_LINUX_EPOLL) */
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.cc
index fbd265f3ce..036a35690c 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -24,6 +24,7 @@
#include <assert.h>
#include <errno.h>
+#include <limits.h>
#include <poll.h>
#include <string.h>
#include <sys/socket.h>
@@ -37,12 +38,11 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
-#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_cv.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/murmur_hash.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
@@ -50,7 +50,6 @@
/*******************************************************************************
* FD declarations
*/
-
typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
@@ -200,8 +199,8 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now);
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline);
/* Allow kick to wakeup the currently polling worker */
#define GRPC_POLLSET_CAN_KICK_SELF 1
@@ -209,7 +208,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
@@ -327,7 +326,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *r = gpr_malloc(sizeof(*r));
+ grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@@ -365,36 +364,39 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
return notifier;
}
-static grpc_error *pollset_kick_locked(grpc_fd_watcher *watcher) {
+static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- grpc_error *err = pollset_kick_ext(watcher->pollset, watcher->worker,
- GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+ grpc_error *err =
+ pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
+ GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
-static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
+ grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
- pollset_kick_locked(fd->inactive_watcher_root.next);
+ pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
} else if (fd->write_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
-static void wake_all_watchers_locked(grpc_fd *fd) {
+static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
- pollset_kick_locked(watcher);
+ pollset_kick_locked(exec_ctx, watcher);
}
if (fd->read_watcher) {
- pollset_kick_locked(fd->read_watcher);
+ pollset_kick_locked(exec_ctx, fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
- pollset_kick_locked(fd->write_watcher);
+ pollset_kick_locked(exec_ctx, fd->write_watcher);
}
}
@@ -435,7 +437,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
if (!has_watchers(fd)) {
close_fd_locked(exec_ctx, fd);
} else {
- wake_all_watchers_locked(fd);
+ wake_all_watchers_locked(exec_ctx, fd);
}
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -479,7 +481,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
@@ -648,7 +650,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
}
}
if (kick) {
- maybe_wake_one_watcher_locked(fd);
+ maybe_wake_one_watcher_locked(exec_ctx, fd);
}
if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
close_fd_locked(exec_ctx, fd);
@@ -712,11 +714,12 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error *pollset_kick_ext(grpc_pollset *p,
+static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
grpc_error *error = GRPC_ERROR_NONE;
+ GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
if (specific_worker != NULL) {
@@ -782,9 +785,9 @@ static grpc_error *pollset_kick_ext(grpc_pollset *p,
return error;
}
-static grpc_error *pollset_kick(grpc_pollset *p,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
- return pollset_kick_ext(p, specific_worker, 0);
+ return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
@@ -842,12 +845,12 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
- pollset->fds =
- gpr_realloc(pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
+ pollset->fds = (grpc_fd **)gpr_realloc(
+ pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
exit:
gpr_mu_unlock(&pollset->mu);
}
@@ -872,7 +875,7 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) {
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
grpc_error *error = GRPC_ERROR_NONE;
@@ -895,7 +898,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
- worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
+ worker.wakeup_fd =
+ (grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@@ -940,7 +944,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd_watcher *watchers;
struct pollfd *pfds;
- timeout = poll_deadline_to_millis_timeout(deadline, now);
+ timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (pollset->fd_count + 2 <= inline_elements) {
pfds = pollfd_space;
@@ -950,8 +954,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
void *buf = gpr_malloc(pfd_size + watch_size);
- pfds = buf;
- watchers = (void *)((char *)buf + pfd_size);
+ pfds = (struct pollfd *)buf;
+ watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
}
fd_count = 0;
@@ -986,7 +990,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GRPC_SCHEDULING_START_BLOCKING_REGION;
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
r = grpc_poll_function(pfds, pfd_count, timeout);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
+ }
if (r < 0) {
if (errno != EINTR) {
@@ -1008,6 +1016,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
+ }
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@@ -1015,6 +1026,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
+ if (GRPC_TRACER_ON(grpc_polling_trace)) {
+ gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
+ pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
+ (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
+ }
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@@ -1051,13 +1067,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (queued_work || worker.kicked_specifically) {
/* If there's queued work on the list, then set the deadline to be
immediate so we get back out of the polling loop quickly */
- deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+ deadline = 0;
}
keep_polling = 1;
}
- if (keep_polling) {
- now = gpr_now(now.clock_type);
- }
}
gpr_tls_set(&g_current_thread_poller, 0);
if (added_worker) {
@@ -1070,7 +1083,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* check shutdown conditions */
if (pollset->shutting_down) {
if (pollset_has_workers(pollset)) {
- pollset_kick(pollset, NULL);
+ pollset_kick(exec_ctx, pollset, NULL);
} else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
@@ -1099,7 +1112,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
- pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset_has_workers(pollset)) {
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
}
@@ -1109,21 +1122,14 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
-static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
- return -1;
- }
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- return gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
+ if (deadline == 0) return 0;
+ grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx);
+ if (n < 0) return 0;
+ if (n > INT_MAX) return -1;
+ return (int)n;
}
/*******************************************************************************
@@ -1131,7 +1137,8 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
*/
static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
+ grpc_pollset_set *pollset_set =
+ (grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
@@ -1174,9 +1181,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets =
- gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
- sizeof(*pollset_set->pollsets));
+ pollset_set->pollsets = (grpc_pollset **)gpr_realloc(
+ pollset_set->pollsets,
+ pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
@@ -1225,9 +1232,9 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets =
- gpr_realloc(bag->pollset_sets,
- bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
+ bag->pollset_sets = (grpc_pollset_set **)gpr_realloc(
+ bag->pollset_sets,
+ bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
@@ -1264,7 +1271,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = gpr_realloc(
+ pollset_set->fds = (grpc_fd **)gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
@@ -1318,11 +1325,12 @@ static void cache_insert_locked(poll_args *args) {
}
static void init_result(poll_args *pargs) {
- pargs->result = gpr_malloc(sizeof(poll_result));
+ pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = NULL;
pargs->result->watchcount = 0;
- pargs->result->fds = gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
+ pargs->result->fds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@@ -1361,7 +1369,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
- poll_args *pargs = gpr_malloc(sizeof(struct poll_args));
+ poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@@ -1408,7 +1416,8 @@ static void cache_poller_locked(poll_args *args) {
poll_args **old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * poll_cache.size);
+ poll_cache.active_pollers =
+ (poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1513,17 +1522,17 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
nfds_t nsockfds = 0;
poll_result *result = NULL;
gpr_mu_lock(&g_cvfds.mu);
- pollcv = gpr_malloc(sizeof(cv_node));
+ pollcv = (cv_node *)gpr_malloc(sizeof(cv_node));
pollcv->next = NULL;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
- cv_node *fd_cvs = gpr_malloc(nfds * sizeof(cv_node));
+ cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- idx = FD_TO_IDX(fds[i].fd);
+ idx = GRPC_FD_TO_IDX(fds[i].fd);
fd_cvs[i].cv = &pollcv_cv;
fd_cvs[i].prev = NULL;
fd_cvs[i].next = g_cvfds.cvfds[idx].cvs;
@@ -1550,7 +1559,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
- struct pollfd *pollfds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ struct pollfd *pollfds =
+ (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@@ -1585,8 +1595,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd < 0 && (fds[i].events & POLLIN)) {
- remove_cvn(&g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
- if (g_cvfds.cvfds[FD_TO_IDX(fds[i].fd)].is_set) {
+ remove_cvn(&g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].cvs, &(fd_cvs[i]));
+ if (g_cvfds.cvfds[GRPC_FD_TO_IDX(fds[i].fd)].is_set) {
fds[i].revents = POLLIN;
if (res >= 0) res++;
}
@@ -1613,7 +1623,8 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
- g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.cvfds =
+ (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1630,7 +1641,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = NULL;
- poll_cache.active_pollers = gpr_malloc(sizeof(void *) * 32);
+ poll_cache.active_pollers = (poll_args **)gpr_malloc(sizeof(void *) * 32);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1670,34 +1681,34 @@ static void shutdown_engine(void) {
}
static const grpc_event_engine_vtable vtable = {
- .pollset_size = sizeof(grpc_pollset),
-
- .fd_create = fd_create,
- .fd_wrapped_fd = fd_wrapped_fd,
- .fd_orphan = fd_orphan,
- .fd_shutdown = fd_shutdown,
- .fd_is_shutdown = fd_is_shutdown,
- .fd_notify_on_read = fd_notify_on_read,
- .fd_notify_on_write = fd_notify_on_write,
- .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
-
- .pollset_init = pollset_init,
- .pollset_shutdown = pollset_shutdown,
- .pollset_destroy = pollset_destroy,
- .pollset_work = pollset_work,
- .pollset_kick = pollset_kick,
- .pollset_add_fd = pollset_add_fd,
-
- .pollset_set_create = pollset_set_create,
- .pollset_set_destroy = pollset_set_destroy,
- .pollset_set_add_pollset = pollset_set_add_pollset,
- .pollset_set_del_pollset = pollset_set_del_pollset,
- .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
- .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
- .pollset_set_add_fd = pollset_set_add_fd,
- .pollset_set_del_fd = pollset_set_del_fd,
-
- .shutdown_engine = shutdown_engine,
+ sizeof(grpc_pollset),
+
+ fd_create,
+ fd_wrapped_fd,
+ fd_orphan,
+ fd_shutdown,
+ fd_notify_on_read,
+ fd_notify_on_write,
+ fd_is_shutdown,
+ fd_get_read_notifier_pollset,
+
+ pollset_init,
+ pollset_shutdown,
+ pollset_destroy,
+ pollset_work,
+ pollset_kick,
+ pollset_add_fd,
+
+ pollset_set_create,
+ pollset_set_destroy,
+ pollset_set_add_pollset,
+ pollset_set_del_pollset,
+ pollset_set_add_pollset_set,
+ pollset_set_del_pollset_set,
+ pollset_set_add_fd,
+ pollset_set_del_fd,
+
+ shutdown_engine,
};
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
diff --git a/src/core/lib/iomgr/ev_poll_posix.h b/src/core/lib/iomgr/ev_poll_posix.h
index d444e60944..861257204b 100644
--- a/src/core/lib/iomgr/ev_poll_posix.h
+++ b/src/core/lib/iomgr/ev_poll_posix.h
@@ -21,7 +21,15 @@
#include "src/core/lib/iomgr/ev_posix.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request);
const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.cc
index 424b40e425..677ee675a6 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -31,8 +31,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h"
-#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h"
#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
#include "src/core/lib/iomgr/ev_poll_posix.h"
@@ -63,14 +61,40 @@ typedef struct {
event_engine_factory_fn factory;
} event_engine_factory;
+namespace {
+
+extern "C" {
+
+grpc_poll_function_type real_poll_function;
+
+int dummy_poll(struct pollfd fds[], nfds_t nfds, int timeout) {
+ if (timeout == 0) {
+ return real_poll_function(fds, nfds, 0);
+ } else {
+ gpr_log(GPR_ERROR, "Attempted a blocking poll when declared non-polling.");
+ GPR_ASSERT(false);
+ return -1;
+ }
+}
+} // extern "C"
+
+const grpc_event_engine_vtable *init_non_polling(bool explicit_request) {
+ if (!explicit_request) {
+ return nullptr;
+ }
+ // return the simplest engine as a dummy but also override the poller
+ auto ret = grpc_init_poll_posix(explicit_request);
+ real_poll_function = grpc_poll_function;
+ grpc_poll_function = dummy_poll;
+
+ return ret;
+}
+} // namespace
+
static const event_engine_factory g_factories[] = {
- {"epoll1", grpc_init_epoll1_linux},
- {"epollsig", grpc_init_epollsig_linux},
- {"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
- {"epoll-limited", grpc_init_epoll_limited_pollers_linux},
- {"poll", grpc_init_poll_posix},
- {"poll-cv", grpc_init_poll_cv_posix},
- {"epollex", grpc_init_epollex_linux},
+ {"epollex", grpc_init_epollex_linux}, {"epoll1", grpc_init_epoll1_linux},
+ {"epollsig", grpc_init_epollsig_linux}, {"poll", grpc_init_poll_posix},
+ {"poll-cv", grpc_init_poll_cv_posix}, {"none", init_non_polling},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
@@ -80,10 +104,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = gpr_malloc(len + 1);
+ s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}
@@ -209,14 +233,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline) {
- return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline);
+ grpc_pollset_worker **worker,
+ grpc_millis deadline) {
+ return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline);
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
- return g_event_engine->pollset_kick(pollset, specific_worker);
+ return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 1108e46ef8..bc4456c2a2 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -27,6 +27,10 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */
typedef struct grpc_fd grpc_fd;
@@ -52,9 +56,9 @@ typedef struct grpc_event_engine_vtable {
grpc_closure *closure);
void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline);
- grpc_error *(*pollset_kick)(grpc_pollset *pollset,
+ grpc_pollset_worker **worker,
+ grpc_millis deadline);
+ grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
@@ -158,4 +162,8 @@ extern grpc_poll_function_type grpc_poll_function;
void grpc_set_event_engine_test_only(const grpc_event_engine_vtable *);
const grpc_event_engine_vtable *grpc_get_event_engine_test_only();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_windows.c b/src/core/lib/iomgr/ev_windows.cc
index c24dfaeaf7..c24dfaeaf7 100644
--- a/src/core/lib/iomgr/ev_windows.c
+++ b/src/core/lib/iomgr/ev_windows.cc
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.cc
index 41c69add17..0394a00f3e 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -104,9 +104,78 @@ static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
-void grpc_exec_ctx_global_init(void) {}
+static gpr_timespec
+ g_start_time[GPR_TIMESPAN + 1]; // assumes GPR_TIMESPAN is the
+ // last enum value in
+ // gpr_clock_type
+
+void grpc_exec_ctx_global_init(void) {
+ for (int i = 0; i < GPR_TIMESPAN; i++) {
+ g_start_time[i] = gpr_now((gpr_clock_type)i);
+ }
+ // allows uniform treatment in conversion functions
+ g_start_time[GPR_TIMESPAN] = gpr_time_0(GPR_TIMESPAN);
+}
+
void grpc_exec_ctx_global_shutdown(void) {}
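+/* Convert a gpr_timespec to milliseconds relative to the recorded start time
+   of its clock, rounding down and clamping to [0, GPR_ATM_MAX]. */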
+static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time[ts.clock_type]);
+ double x =
+ GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
+
+static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
+ ts = gpr_time_sub(ts, g_start_time[ts.clock_type]);
+ double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
+ (double)ts.tv_nsec / GPR_NS_PER_MS +
+ (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
+ if (x < 0) return 0;
+ if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
+ return (gpr_atm)x;
+}
+
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) {
+ if (!exec_ctx->now_is_valid) {
+ exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+ exec_ctx->now_is_valid = true;
+ }
+ return exec_ctx->now;
+}
+
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx) {
+ exec_ctx->now_is_valid = false;
+}
+
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
+ gpr_clock_type clock_type) {
+ // special-case infinities as grpc_millis can be 32bit on some platforms
+ // while gpr_time_from_millis always takes an int64_t.
+ if (millis == GRPC_MILLIS_INF_FUTURE) {
+ return gpr_inf_future(clock_type);
+ }
+ if (millis == GRPC_MILLIS_INF_PAST) {
+ return gpr_inf_past(clock_type);
+ }
+
+ if (clock_type == GPR_TIMESPAN) {
+ return gpr_time_from_millis(millis, GPR_TIMESPAN);
+ }
+ return gpr_time_add(g_start_time[clock_type],
+ gpr_time_from_millis(millis, GPR_TIMESPAN));
+}
+
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) {
+ return timespec_to_atm_round_down(ts);
+}
+
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
+ return timespec_to_atm_round_up(ts);
+}
+
static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
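
The new exec_ctx.cc code above converts between gpr_timespec and the new grpc_millis representation by subtracting a per-clock start time captured at init and rounding either down or up. A standalone sketch of that arithmetic with plain C++ types (Timespec and g_start are stand-ins, not gRPC types), assuming a single clock:

    #include <cstdint>
    #include <limits>

    struct Timespec { std::int64_t sec; std::int32_t nsec; };
    Timespec g_start = {0, 0};  // captured once at startup, like g_start_time

    std::int64_t to_millis_round_down(Timespec ts) {
      double ms = 1000.0 * (double)(ts.sec - g_start.sec) +
                  (double)(ts.nsec - g_start.nsec) / 1e6;
      if (ms < 0) return 0;
      if (ms > (double)std::numeric_limits<std::int64_t>::max())
        return std::numeric_limits<std::int64_t>::max();
      return (std::int64_t)ms;  // truncation rounds toward zero
    }

    std::int64_t to_millis_round_up(Timespec ts) {
      double ms = 1000.0 * (double)(ts.sec - g_start.sec) +
                  (double)(ts.nsec - g_start.nsec) / 1e6 +
                  (1e6 - 1.0) / 1e6;  // bias by just under one millisecond
      if (ms < 0) return 0;
      if (ms > (double)std::numeric_limits<std::int64_t>::max())
        return std::numeric_limits<std::int64_t>::max();
      return (std::int64_t)ms;
    }

Offering both rounding directions presumably lets callers convert conservatively in either direction (for example, a deadline rounded up never fires early); the diff itself only adds the primitives.
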
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index c89792c8c4..44b9be7aa9 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -19,10 +19,19 @@
#ifndef GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
#define GRPC_CORE_LIB_IOMGR_EXEC_CTX_H
+#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
+
#include "src/core/lib/iomgr/closure.h"
-/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef gpr_atm grpc_millis;
+
+#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX
+#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN
/** A workqueue represents a list of work to be executed asynchronously.
Forward declared here to avoid a circular dependency with workqueue.h. */
@@ -66,6 +75,9 @@ struct grpc_exec_ctx {
unsigned starting_cpu;
void *check_ready_to_finish_arg;
bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
+
+ bool now_is_valid;
+ grpc_millis now;
};
/* initializer for grpc_exec_ctx:
@@ -73,7 +85,7 @@ struct grpc_exec_ctx {
#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
{ \
GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \
- finish_check_arg, finish_check \
+ finish_check_arg, finish_check, false, 0 \
}
/* initialize an execution context at the top level of an API call into grpc
@@ -106,4 +118,14 @@ void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_shutdown(void);
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx);
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx);
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */
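
exec_ctx.h now carries a cached clock reading (now_is_valid / now) behind grpc_exec_ctx_now() and grpc_exec_ctx_invalidate_now(). A minimal sketch of the same pattern with std::chrono in place of the gpr clock API (ExecCtxSketch is an illustrative name):

    #include <chrono>
    #include <cstdint>

    struct ExecCtxSketch {
      bool now_is_valid = false;
      std::int64_t now_millis = 0;

      // Repeated calls within one context reuse the cached reading.
      std::int64_t now() {
        if (!now_is_valid) {
          auto t = std::chrono::steady_clock::now().time_since_epoch();
          now_millis =
              std::chrono::duration_cast<std::chrono::milliseconds>(t).count();
          now_is_valid = true;
        }
        return now_millis;
      }

      // Called after an operation that may have blocked (as the iocp and
      // pollset changes below do) so the next now() refetches the clock.
      void invalidate_now() { now_is_valid = false; }
    };
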
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
deleted file mode 100644
index 7621a7fe75..0000000000
--- a/src/core/lib/iomgr/executor.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/executor.h"
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/cpu.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/tls.h>
-#include <grpc/support/useful.h>
-
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/support/spinlock.h"
-
-#define MAX_DEPTH 2
-
-typedef struct {
- gpr_mu mu;
- gpr_cv cv;
- grpc_closure_list elems;
- size_t depth;
- bool shutdown;
- gpr_thd_id id;
-} thread_state;
-
-static thread_state *g_thread_state;
-static size_t g_max_threads;
-static gpr_atm g_cur_threads;
-static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
-
-GPR_TLS_DECL(g_this_thread_state);
-
-static void executor_thread(void *arg);
-
-static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
- size_t n = 0;
-
- grpc_closure *c = list.head;
- while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
-#ifndef NDEBUG
- c->scheduled = false;
-#endif
- c->cb(exec_ctx, c->cb_arg, error);
- GRPC_ERROR_UNREF(error);
- c = next;
- n++;
- }
-
- return n;
-}
-
-bool grpc_executor_is_threaded() {
- return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
-}
-
-void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
- gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
- if (threading) {
- if (cur_threads > 0) return;
- g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
- gpr_atm_no_barrier_store(&g_cur_threads, 1);
- gpr_tls_init(&g_this_thread_state);
- g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
- for (size_t i = 0; i < g_max_threads; i++) {
- gpr_mu_init(&g_thread_state[i].mu);
- gpr_cv_init(&g_thread_state[i].cv);
- g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
- }
-
- gpr_thd_options opt = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&opt);
- gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
- &opt);
- } else {
- if (cur_threads == 0) return;
- for (size_t i = 0; i < g_max_threads; i++) {
- gpr_mu_lock(&g_thread_state[i].mu);
- g_thread_state[i].shutdown = true;
- gpr_cv_signal(&g_thread_state[i].cv);
- gpr_mu_unlock(&g_thread_state[i].mu);
- }
- /* ensure no thread is adding a new thread... once this is past, then
- no thread will try to add a new one either (since shutdown is true) */
- gpr_spinlock_lock(&g_adding_thread_lock);
- gpr_spinlock_unlock(&g_adding_thread_lock);
- for (gpr_atm i = 0; i < g_cur_threads; i++) {
- gpr_thd_join(g_thread_state[i].id);
- }
- gpr_atm_no_barrier_store(&g_cur_threads, 0);
- for (size_t i = 0; i < g_max_threads; i++) {
- gpr_mu_destroy(&g_thread_state[i].mu);
- gpr_cv_destroy(&g_thread_state[i].cv);
- run_closures(exec_ctx, g_thread_state[i].elems);
- }
- gpr_free(g_thread_state);
- gpr_tls_destroy(&g_this_thread_state);
- }
-}
-
-void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
- gpr_atm_no_barrier_store(&g_cur_threads, 0);
- grpc_executor_set_threading(exec_ctx, true);
-}
-
-void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
- grpc_executor_set_threading(exec_ctx, false);
-}
-
-static void executor_thread(void *arg) {
- thread_state *ts = arg;
- gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
-
- grpc_exec_ctx exec_ctx =
- GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
-
- size_t subtract_depth = 0;
- for (;;) {
- gpr_mu_lock(&ts->mu);
- ts->depth -= subtract_depth;
- while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
- gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
- }
- if (ts->shutdown) {
- gpr_mu_unlock(&ts->mu);
- break;
- }
- grpc_closure_list exec = ts->elems;
- ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
- gpr_mu_unlock(&ts->mu);
-
- subtract_depth = run_closures(&exec_ctx, exec);
- grpc_exec_ctx_flush(&exec_ctx);
- }
- grpc_exec_ctx_finish(&exec_ctx);
-}
-
-static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
- size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count == 0) {
- grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
- return;
- }
- thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
- if (ts == NULL) {
- ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
- }
- gpr_mu_lock(&ts->mu);
- if (grpc_closure_list_empty(ts->elems)) {
- gpr_cv_signal(&ts->cv);
- }
- grpc_closure_list_append(&ts->elems, closure, error);
- ts->depth++;
- bool try_new_thread = ts->depth > MAX_DEPTH &&
- cur_thread_count < g_max_threads && !ts->shutdown;
- gpr_mu_unlock(&ts->mu);
- if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
- cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
- if (cur_thread_count < g_max_threads) {
- gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
-
- gpr_thd_options opt = gpr_thd_options_default();
- gpr_thd_options_set_joinable(&opt);
- gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
- &g_thread_state[cur_thread_count], &opt);
- }
- gpr_spinlock_unlock(&g_adding_thread_lock);
- }
-}
-
-static const grpc_closure_scheduler_vtable executor_vtable = {
- executor_push, executor_push, "executor"};
-static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
-grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
new file mode 100644
index 0000000000..92c3e70301
--- /dev/null
+++ b/src/core/lib/iomgr/executor.cc
@@ -0,0 +1,302 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/executor.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/tls.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/debug/stats.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/support/spinlock.h"
+
+#define MAX_DEPTH 2
+
+typedef struct {
+ gpr_mu mu;
+ gpr_cv cv;
+ grpc_closure_list elems;
+ size_t depth;
+ bool shutdown;
+ bool queued_long_job;
+ gpr_thd_id id;
+} thread_state;
+
+static thread_state *g_thread_state;
+static size_t g_max_threads;
+static gpr_atm g_cur_threads;
+static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
+
+GPR_TLS_DECL(g_this_thread_state);
+
+static grpc_tracer_flag executor_trace =
+ GRPC_TRACER_INITIALIZER(false, "executor");
+
+static void executor_thread(void *arg);
+
+static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
+ size_t n = 0;
+
+ grpc_closure *c = list.head;
+ while (c != NULL) {
+ grpc_closure *next = c->next_data.next;
+ grpc_error *error = c->error_data.error;
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
+ c->file_created, c->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
+#endif
+ }
+#ifndef NDEBUG
+ c->scheduled = false;
+#endif
+ c->cb(exec_ctx, c->cb_arg, error);
+ GRPC_ERROR_UNREF(error);
+ c = next;
+ n++;
+ grpc_exec_ctx_flush(exec_ctx);
+ }
+
+ return n;
+}
+
+bool grpc_executor_is_threaded() {
+ return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
+}
+
+void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
+ gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
+ if (threading) {
+ if (cur_threads > 0) return;
+ g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
+ gpr_atm_no_barrier_store(&g_cur_threads, 1);
+ gpr_tls_init(&g_this_thread_state);
+ g_thread_state =
+ (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_init(&g_thread_state[i].mu);
+ gpr_cv_init(&g_thread_state[i].cv);
+ g_thread_state[i].elems = GRPC_CLOSURE_LIST_INIT;
+ }
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
+ &opt);
+ } else {
+ if (cur_threads == 0) return;
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_lock(&g_thread_state[i].mu);
+ g_thread_state[i].shutdown = true;
+ gpr_cv_signal(&g_thread_state[i].cv);
+ gpr_mu_unlock(&g_thread_state[i].mu);
+ }
+ /* ensure no thread is adding a new thread... once this is past, then
+ no thread will try to add a new one either (since shutdown is true) */
+ gpr_spinlock_lock(&g_adding_thread_lock);
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ for (gpr_atm i = 0; i < g_cur_threads; i++) {
+ gpr_thd_join(g_thread_state[i].id);
+ }
+ gpr_atm_no_barrier_store(&g_cur_threads, 0);
+ for (size_t i = 0; i < g_max_threads; i++) {
+ gpr_mu_destroy(&g_thread_state[i].mu);
+ gpr_cv_destroy(&g_thread_state[i].cv);
+ run_closures(exec_ctx, g_thread_state[i].elems);
+ }
+ gpr_free(g_thread_state);
+ gpr_tls_destroy(&g_this_thread_state);
+ }
+}
+
+void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+ grpc_register_tracer(&executor_trace);
+ gpr_atm_no_barrier_store(&g_cur_threads, 0);
+ grpc_executor_set_threading(exec_ctx, true);
+}
+
+void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
+ grpc_executor_set_threading(exec_ctx, false);
+}
+
+static void executor_thread(void *arg) {
+ thread_state *ts = (thread_state *)arg;
+ gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
+
+ grpc_exec_ctx exec_ctx =
+ GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
+
+ size_t subtract_depth = 0;
+ for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
+ (int)(ts - g_thread_state), subtract_depth);
+ }
+ gpr_mu_lock(&ts->mu);
+ ts->depth -= subtract_depth;
+ while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+ ts->queued_long_job = false;
+ gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+ }
+ if (ts->shutdown) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
+ (int)(ts - g_thread_state));
+ }
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
+ grpc_closure_list exec = ts->elems;
+ ts->elems = GRPC_CLOSURE_LIST_INIT;
+ gpr_mu_unlock(&ts->mu);
+ if (GRPC_TRACER_ON(executor_trace)) {
+ gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
+ }
+
+ grpc_exec_ctx_invalidate_now(&exec_ctx);
+ subtract_depth = run_closures(&exec_ctx, exec);
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
+static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error, bool is_short) {
+ bool retry_push;
+ if (is_short) {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
+ }
+ do {
+ retry_push = false;
+ size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count == 0) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
+ closure, closure->file_created, closure->line_created);
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
+#endif
+ }
+ grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+ return;
+ }
+ thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ if (ts == NULL) {
+ ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+ } else {
+ GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+ }
+ thread_state *orig_ts = ts;
+
+ bool try_new_thread;
+ for (;;) {
+ if (GRPC_TRACER_ON(executor_trace)) {
+#ifndef NDEBUG
+ gpr_log(
+ GPR_DEBUG,
+ "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
+ closure, is_short ? "short" : "long", closure->file_created,
+ closure->line_created, (int)(ts - g_thread_state));
+#else
+ gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
+ closure, is_short ? "short" : "long",
+ (int)(ts - g_thread_state));
+#endif
+ }
+ gpr_mu_lock(&ts->mu);
+ if (ts->queued_long_job) {
+ // if there's a long job queued, we never queue anything else to this
+ // queue (since long jobs can take 'infinite' time and we need to
+ // guarantee no starvation)
+ // ... spin through queues and try again
+ gpr_mu_unlock(&ts->mu);
+ size_t idx = (size_t)(ts - g_thread_state);
+ ts = &g_thread_state[(idx + 1) % cur_thread_count];
+ if (ts == orig_ts) {
+ retry_push = true;
+ try_new_thread = true;
+ break;
+ }
+ continue;
+ }
+ if (grpc_closure_list_empty(ts->elems)) {
+ GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
+ gpr_cv_signal(&ts->cv);
+ }
+ grpc_closure_list_append(&ts->elems, closure, error);
+ ts->depth++;
+ try_new_thread = ts->depth > MAX_DEPTH &&
+ cur_thread_count < g_max_threads && !ts->shutdown;
+ if (!is_short) ts->queued_long_job = true;
+ gpr_mu_unlock(&ts->mu);
+ break;
+ }
+ if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
+ cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
+ if (cur_thread_count < g_max_threads) {
+ gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
+
+ gpr_thd_options opt = gpr_thd_options_default();
+ gpr_thd_options_set_joinable(&opt);
+ gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
+ &g_thread_state[cur_thread_count], &opt);
+ }
+ gpr_spinlock_unlock(&g_adding_thread_lock);
+ }
+ if (retry_push) {
+ GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+ }
+ } while (retry_push);
+}
+
+static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, true);
+}
+
+static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+ grpc_error *error) {
+ executor_push(exec_ctx, closure, error, false);
+}
+
+static const grpc_closure_scheduler_vtable executor_vtable_short = {
+ executor_push_short, executor_push_short, "executor"};
+static grpc_closure_scheduler executor_scheduler_short = {
+ &executor_vtable_short};
+
+static const grpc_closure_scheduler_vtable executor_vtable_long = {
+ executor_push_long, executor_push_long, "executor"};
+static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
+
+grpc_closure_scheduler *grpc_executor_scheduler(
+ grpc_executor_job_length length) {
+ return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
+ : &executor_scheduler_long;
+}
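
The rewritten executor above splits work into "short" and "long" closures and refuses to queue anything behind a long job, spinning to the next per-thread queue instead and, if every queue is busy, retrying and possibly growing the pool. A simplified, standalone sketch of just that queue-selection decision (types and names are illustrative, not the gRPC ones):

    #include <cstddef>
    #include <vector>

    // Returns the first queue, starting from the preferred one, that does not
    // already hold a long job; -1 means every queue is busy, which the real
    // executor_push treats as "retry, and consider adding a thread".
    int pick_queue(const std::vector<bool> &queued_long_job,
                   std::size_t preferred) {
      std::size_t n = queued_long_job.size();
      for (std::size_t step = 0; step < n; ++step) {
        std::size_t idx = (preferred + step) % n;
        if (!queued_long_job[idx]) return (int)idx;
      }
      return -1;
    }
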
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index c3382a0a12..ef5ac56c83 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -21,6 +21,15 @@
#include "src/core/lib/iomgr/closure.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ GRPC_EXECUTOR_SHORT,
+ GRPC_EXECUTOR_LONG
+} grpc_executor_job_length;
+
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@@ -28,7 +37,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
-extern grpc_closure_scheduler *grpc_executor_scheduler;
+grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
@@ -40,4 +49,8 @@ bool grpc_executor_is_threaded();
grpc_executor_shutdown */
void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
diff --git a/src/core/lib/iomgr/gethostname.h b/src/core/lib/iomgr/gethostname.h
index 9c6b9d8d42..f335fea586 100644
--- a/src/core/lib/iomgr/gethostname.h
+++ b/src/core/lib/iomgr/gethostname.h
@@ -19,8 +19,16 @@
#ifndef GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H
#define GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Returns the hostname of the local machine.
// Caller takes ownership of result.
char *grpc_gethostname();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H */
diff --git a/src/core/lib/iomgr/gethostname_fallback.c b/src/core/lib/iomgr/gethostname_fallback.cc
index 6229461568..e6f4c2f760 100644
--- a/src/core/lib/iomgr/gethostname_fallback.c
+++ b/src/core/lib/iomgr/gethostname_fallback.cc
@@ -16,6 +16,7 @@
*
*/
+#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_GETHOSTNAME_FALLBACK
diff --git a/src/core/lib/iomgr/gethostname_host_name_max.c b/src/core/lib/iomgr/gethostname_host_name_max.cc
index 4d0511412e..cdaf097c3e 100644
--- a/src/core/lib/iomgr/gethostname_host_name_max.c
+++ b/src/core/lib/iomgr/gethostname_host_name_max.cc
@@ -16,6 +16,7 @@
*
*/
+#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_POSIX_HOST_NAME_MAX
diff --git a/src/core/lib/iomgr/gethostname_sysconf.c b/src/core/lib/iomgr/gethostname_sysconf.cc
index 51bac5d69d..8441e0615e 100644
--- a/src/core/lib/iomgr/gethostname_sysconf.c
+++ b/src/core/lib/iomgr/gethostname_sysconf.cc
@@ -16,6 +16,7 @@
*
*/
+#include "src/core/lib/iomgr/gethostname.h"
#include "src/core/lib/iomgr/port.h"
#ifdef GRPC_POSIX_SYSCONF
diff --git a/src/core/lib/iomgr/iocp_windows.c b/src/core/lib/iomgr/iocp_windows.cc
index c082179c0b..78185cc084 100644
--- a/src/core/lib/iomgr/iocp_windows.c
+++ b/src/core/lib/iomgr/iocp_windows.cc
@@ -21,11 +21,13 @@
#ifdef GRPC_WINSOCK_SOCKET
#include <winsock2.h>
+#include <limits>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/log_windows.h>
#include <grpc/support/thd.h>
+#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iocp_windows.h"
@@ -40,25 +42,20 @@ static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static DWORD deadline_to_millis_timeout(gpr_timespec deadline,
- gpr_timespec now) {
- gpr_timespec timeout;
- static const int64_t max_spin_polling_us = 10;
- if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
+static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+ grpc_millis deadline) {
+ if (deadline == GRPC_MILLIS_INF_FUTURE) {
return INFINITE;
}
- if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
- max_spin_polling_us,
- GPR_TIMESPAN))) <= 0) {
- return 0;
- }
- timeout = gpr_time_sub(deadline, now);
- return (DWORD)gpr_time_to_millis(gpr_time_add(
- timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (deadline < now) return 0;
+ grpc_millis timeout = deadline - now;
+ if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE;
+ return static_cast<DWORD>(deadline - now);
}
grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@@ -67,9 +64,10 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
- success = GetQueuedCompletionStatus(
- g_iocp, &bytes, &completion_key, &overlapped,
- deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type)));
+ success =
+ GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
+ deadline_to_millis_timeout(exec_ctx, deadline));
+ grpc_exec_ctx_invalidate_now(exec_ctx);
if (success == 0 && overlapped == NULL) {
return GRPC_IOCP_WORK_TIMEOUT;
}
@@ -121,7 +119,7 @@ void grpc_iocp_flush(void) {
grpc_iocp_work_status work_status;
do {
- work_status = grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC));
+ work_status = grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_PAST);
} while (work_status == GRPC_IOCP_WORK_KICK ||
grpc_exec_ctx_flush(&exec_ctx));
}
@@ -129,7 +127,7 @@ void grpc_iocp_flush(void) {
void grpc_iocp_shutdown(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (gpr_atm_acq_load(&g_custom_events)) {
- grpc_iocp_work(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_FUTURE);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
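
deadline_to_millis_timeout above now maps an absolute grpc_millis deadline to the 32-bit wait argument GetQueuedCompletionStatus accepts. A portable sketch of the same clamping (kInfinite stands in for the Win32 INFINITE constant, and now is the caller's cached clock reading):

    #include <cstdint>
    #include <limits>

    constexpr std::uint32_t kInfinite = 0xFFFFFFFFu;
    constexpr std::int64_t kMillisInfFuture =
        std::numeric_limits<std::int64_t>::max();

    std::uint32_t deadline_to_timeout(std::int64_t deadline, std::int64_t now) {
      if (deadline == kMillisInfFuture) return kInfinite;  // wait forever
      if (deadline < now) return 0;                        // already expired
      std::int64_t timeout = deadline - now;
      if (timeout > (std::int64_t)std::numeric_limits<std::uint32_t>::max())
        return kInfinite;  // too far out to represent in 32 bits
      return (std::uint32_t)timeout;
    }
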
diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h
index 9c89e868c5..4efbc94645 100644
--- a/src/core/lib/iomgr/iocp_windows.h
+++ b/src/core/lib/iomgr/iocp_windows.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/socket_windows.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_IOCP_WORK_WORK,
GRPC_IOCP_WORK_TIMEOUT,
@@ -30,11 +34,15 @@ typedef enum {
} grpc_iocp_work_status;
grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
- gpr_timespec deadline);
+ grpc_millis deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_flush(void);
void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_IOCP_WINDOWS_H */
diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.cc
index 3d19953eeb..d6a5b4a76c 100644
--- a/src/core/lib/iomgr/iomgr.c
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -16,8 +16,11 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/iomgr/iomgr.h"
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -48,9 +51,9 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
gpr_cv_init(&g_rcv);
grpc_exec_ctx_global_init();
grpc_executor_init(exec_ctx);
- grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_list_init(exec_ctx);
g_root_object.next = g_root_object.prev = &g_root_object;
- g_root_object.name = "root";
+ g_root_object.name = (char *)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}
@@ -95,8 +98,9 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
}
last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
}
- if (grpc_timer_check(exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL) ==
- GRPC_TIMERS_FIRED) {
+ exec_ctx->now_is_valid = true;
+ exec_ctx->now = GRPC_MILLIS_INF_FUTURE;
+ if (grpc_timer_check(exec_ctx, NULL) == GRPC_TIMERS_FIRED) {
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(exec_ctx);
grpc_iomgr_platform_flush();
@@ -164,13 +168,7 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
bool grpc_iomgr_abort_on_leaks(void) {
char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
- if (env == NULL) return false;
- static const char *truthy[] = {"yes", "Yes", "YES", "true",
- "True", "TRUE", "1"};
- bool should_we = false;
- for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
- if (0 == strcmp(env, truthy[i])) should_we = true;
- }
+ bool should_we = gpr_is_true(env);
gpr_free(env);
return should_we;
}
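
The grpc_iomgr_abort_on_leaks change above collapses the inline truthy-string loop into a single gpr_is_true(env) call. The helper's implementation is not part of this diff; a hypothetical sketch of what such a centralized check could look like, reusing the value list the removed code accepted:

    #include <cstring>

    bool env_is_true(const char *value) {  // illustrative, not gpr_is_true
      if (value == nullptr) return false;
      static const char *truthy[] = {"yes",  "Yes",  "YES", "true",
                                     "True", "TRUE", "1"};
      for (const char *t : truthy) {
        if (std::strcmp(value, t) == 0) return true;
      }
      return false;
    }
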
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index e3cd6ebe79..6c0a08b918 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -22,6 +22,10 @@
#include <grpc/impl/codegen/exec_ctx_fwd.h>
#include "src/core/lib/iomgr/port.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Initializes the iomgr. */
void grpc_iomgr_init(grpc_exec_ctx *exec_ctx);
@@ -32,4 +36,8 @@ void grpc_iomgr_start(grpc_exec_ctx *exec_ctx);
* exec_ctx. */
void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_H */
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index 836d82515d..52db37c89a 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/iomgr.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_iomgr_object {
char *name;
struct grpc_iomgr_object *next;
@@ -40,4 +44,8 @@ void grpc_iomgr_platform_shutdown(void);
bool grpc_iomgr_abort_on_leaks(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/iomgr_posix.c b/src/core/lib/iomgr/iomgr_posix.cc
index f5875a247e..f5875a247e 100644
--- a/src/core/lib/iomgr/iomgr_posix.c
+++ b/src/core/lib/iomgr/iomgr_posix.cc
diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.cc
index df5d23af3b..df5d23af3b 100644
--- a/src/core/lib/iomgr/iomgr_uv.c
+++ b/src/core/lib/iomgr/iomgr_uv.cc
diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_uv.h
index 3b4daaa73b..bc42ca8c1c 100644
--- a/src/core/lib/iomgr/iomgr_uv.h
+++ b/src/core/lib/iomgr/iomgr_uv.h
@@ -23,10 +23,18 @@
#include <grpc/support/thd.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* The thread ID of the thread on which grpc was initialized. Used to verify
* that all calls into libuv are made on that same thread */
extern gpr_thd_id g_init_thread;
+#ifdef __cplusplus
+}
+#endif
+
#ifdef GRPC_UV_THREAD_CHECK
#define GRPC_UV_ASSERT_SAME_THREAD() \
GPR_ASSERT(gpr_thd_currentid() == g_init_thread)
diff --git a/src/core/lib/iomgr/iomgr_windows.c b/src/core/lib/iomgr/iomgr_windows.cc
index 630370166d..630370166d 100644
--- a/src/core/lib/iomgr/iomgr_windows.c
+++ b/src/core/lib/iomgr/iomgr_windows.cc
diff --git a/src/core/lib/iomgr/is_epollexclusive_available.c b/src/core/lib/iomgr/is_epollexclusive_available.cc
index e8a7d4d52c..d08844c0df 100644
--- a/src/core/lib/iomgr/is_epollexclusive_available.c
+++ b/src/core/lib/iomgr/is_epollexclusive_available.cc
@@ -57,12 +57,12 @@ bool grpc_is_epollexclusive_available(void) {
close(fd);
return false;
}
- struct epoll_event ev = {
- /* choose events that should cause an error on
- EPOLLEXCLUSIVE enabled kernels - specifically the combination of
- EPOLLONESHOT and EPOLLEXCLUSIVE */
- .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT),
- .data.ptr = NULL};
+ struct epoll_event ev;
+ /* choose events that should cause an error on
+ EPOLLEXCLUSIVE enabled kernels - specifically the combination of
+ EPOLLONESHOT and EPOLLEXCLUSIVE */
+ ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT);
+ ev.data.ptr = NULL;
if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) {
if (errno != EINVAL) {
if (!logged_why_not) {
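
The epoll_event rewrite above drops a C99 designated initializer in favor of member assignment, presumably because the file now builds as C++, where designated initializers (particularly the nested .data.ptr form) are not available before C++20. A generic illustration with a made-up struct:

    // Event is a stand-in; the point is only the initializer syntax.
    struct Event {
      unsigned events;
      void *ptr;
    };

    void init_event() {
      // C (and C++20) designated-initializer style:
      //   struct Event ev = {.events = 1u, .ptr = NULL};
      // Portable C++ style, matching the rewrite above:
      Event ev;
      ev.events = 1u;
      ev.ptr = nullptr;
      (void)ev;
    }
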
diff --git a/src/core/lib/iomgr/is_epollexclusive_available.h b/src/core/lib/iomgr/is_epollexclusive_available.h
index 1d2e133a3b..9ae9c5c191 100644
--- a/src/core/lib/iomgr/is_epollexclusive_available.h
+++ b/src/core/lib/iomgr/is_epollexclusive_available.h
@@ -21,6 +21,14 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
bool grpc_is_epollexclusive_available(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H */
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.cc
index ba77a52afc..5cb4099ea4 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.cc
@@ -25,7 +25,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include "src/core/lib/support/block_annotate.h"
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/support/string.h"
grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
@@ -47,7 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
- contents = gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
+ contents = (unsigned char *)gpr_malloc(contents_size +
+ (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");
@@ -72,6 +73,6 @@ end:
GRPC_ERROR_UNREF(error);
error = error_out;
}
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
return error;
}
diff --git a/src/core/lib/iomgr/lockfree_event.c b/src/core/lib/iomgr/lockfree_event.cc
index f967b22ba9..f967b22ba9 100644
--- a/src/core/lib/iomgr/lockfree_event.c
+++ b/src/core/lib/iomgr/lockfree_event.cc
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 6a14a0f3b2..02229e569e 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_lfev_init(gpr_atm *state);
void grpc_lfev_destroy(gpr_atm *state);
bool grpc_lfev_is_shutdown(gpr_atm *state);
@@ -37,4 +41,8 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
const char *variable);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */
diff --git a/src/core/lib/iomgr/network_status_tracker.c b/src/core/lib/iomgr/network_status_tracker.cc
index 4e5c1d5408..57a7faa9f1 100644
--- a/src/core/lib/iomgr/network_status_tracker.c
+++ b/src/core/lib/iomgr/network_status_tracker.cc
@@ -16,6 +16,7 @@
*
*/
+#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/endpoint.h"
void grpc_network_status_shutdown(void) {}
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index c0295c1f74..cba38d4530 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -20,6 +20,10 @@
#define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H
#include "src/core/lib/iomgr/endpoint.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_network_status_init(void);
void grpc_network_status_shutdown(void);
@@ -27,4 +31,8 @@ void grpc_network_status_register_endpoint(grpc_endpoint *ep);
void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
void grpc_network_status_shutdown_all_endpoints();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */
diff --git a/src/core/lib/iomgr/polling_entity.c b/src/core/lib/iomgr/polling_entity.cc
index 74d8794af5..8591a5518e 100644
--- a/src/core/lib/iomgr/polling_entity.c
+++ b/src/core/lib/iomgr/polling_entity.cc
@@ -25,7 +25,7 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
grpc_pollset_set *pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
- pollent.tag = POPS_POLLSET_SET;
+ pollent.tag = GRPC_POLLS_POLLSET_SET;
return pollent;
}
@@ -33,12 +33,12 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset(
grpc_pollset *pollset) {
grpc_polling_entity pollent;
pollent.pollent.pollset = pollset;
- pollent.tag = POPS_POLLSET;
+ pollent.tag = GRPC_POLLS_POLLSET;
return pollent;
}
grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
return pollent->pollent.pollset;
}
return NULL;
@@ -46,23 +46,23 @@ grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
grpc_pollset_set *grpc_polling_entity_pollset_set(
grpc_polling_entity *pollent) {
- if (pollent->tag == POPS_POLLSET_SET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
return pollent->pollent.pollset_set;
}
return NULL;
}
bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
- return pollent->tag == POPS_NONE;
+ return pollent->tag == GRPC_POLLS_NONE;
}
void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
@@ -75,10 +75,10 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst) {
- if (pollent->tag == POPS_POLLSET) {
+ if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
- } else if (pollent->tag == POPS_POLLSET_SET) {
+ } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
GPR_ASSERT(pollent->pollent.pollset_set != NULL);
grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
pollent->pollent.pollset_set);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 971fd88b42..009f968fac 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -22,6 +22,16 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum grpc_pollset_tag {
+ GRPC_POLLS_NONE,
+ GRPC_POLLS_POLLSET,
+ GRPC_POLLS_POLLSET_SET
+} grpc_pollset_tag;
+
/* A grpc_polling_entity is a pollset-or-pollset_set container. It allows
* functions that accept a pollset XOR a pollset_set to do so through an
* abstract interface. No ownership is taken. */
@@ -31,7 +41,7 @@ typedef struct grpc_polling_entity {
grpc_pollset *pollset;
grpc_pollset_set *pollset_set;
} pollent;
- enum pops_tag { POPS_NONE, POPS_POLLSET, POPS_POLLSET_SET } tag;
+ grpc_pollset_tag tag;
} grpc_polling_entity;
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
@@ -58,4 +68,8 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_polling_entity *pollent,
grpc_pollset_set *pss_dst);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */
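
polling_entity.h hoists the tag enum out of the struct and renames its values from POPS_* to GRPC_POLLS_*. One likely reason: in C++ an enum declared inside a struct is scoped to that struct, so the old unqualified names would stop resolving in the .cc files, and a file-scope enum needs prefixed names to avoid collisions. A small C++-only illustration (Widget and GlobalTag are made-up names):

    struct Widget {
      enum Tag { TAG_NONE, TAG_SET } tag;  // C++: members are Widget::TAG_NONE
    };

    enum GlobalTag { GLOBAL_NONE, GLOBAL_SET };  // file-scope, usable unqualified

    int demo() {
      Widget w;
      w.tag = Widget::TAG_NONE;   // qualification required outside the struct
      GlobalTag g = GLOBAL_NONE;  // works unqualified, as the C code expects
      return (int)w.tag + (int)g;
    }
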
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index a609a3877a..799fae154c 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_fd_refcount;
#endif
@@ -71,13 +75,17 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
pollset
lock */
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline) GRPC_MUST_USE_RESULT;
+ grpc_pollset_worker **worker,
+ grpc_millis deadline) GRPC_MUST_USE_RESULT;
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker)
GRPC_MUST_USE_RESULT;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_H */
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index 29c0f03561..5455eda02f 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -21,6 +21,10 @@
#include "src/core/lib/iomgr/pollset.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A grpc_pollset_set is a set of pollsets that are interested in an
action. Adding a pollset to a pollset_set automatically adds any
fd's (etc) that have been registered with the set_set to that pollset.
@@ -44,4 +48,8 @@ void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_H */
diff --git a/src/core/lib/iomgr/pollset_set_uv.c b/src/core/lib/iomgr/pollset_set_uv.cc
index 90186edbb7..90186edbb7 100644
--- a/src/core/lib/iomgr/pollset_set_uv.c
+++ b/src/core/lib/iomgr/pollset_set_uv.cc
diff --git a/src/core/lib/iomgr/pollset_set_windows.c b/src/core/lib/iomgr/pollset_set_windows.cc
index 2105a47ad4..2105a47ad4 100644
--- a/src/core/lib/iomgr/pollset_set_windows.c
+++ b/src/core/lib/iomgr/pollset_set_windows.cc
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.cc
index a79fe89d3e..b9901bf8ef 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -65,7 +65,7 @@ void dummy_handle_close_cb(uv_handle_t *handle) { gpr_free(handle); }
void grpc_pollset_global_init(void) {
gpr_mu_init(&grpc_polling_mu);
- dummy_uv_handle = gpr_malloc(sizeof(uv_timer_t));
+ dummy_uv_handle = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), dummy_uv_handle);
grpc_pollset_work_run_loop = 1;
}
@@ -116,13 +116,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
uint64_t timeout;
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_unlock(&grpc_polling_mu);
if (grpc_pollset_work_run_loop) {
- if (gpr_time_cmp(deadline, now) >= 0) {
- timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (deadline >= now) {
+ timeout = deadline - now;
} else {
timeout = 0;
}
@@ -145,7 +146,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
diff --git a/src/core/lib/iomgr/pollset_uv.h b/src/core/lib/iomgr/pollset_uv.h
index 566c110ca6..5cc9faf4ff 100644
--- a/src/core/lib/iomgr/pollset_uv.h
+++ b/src/core/lib/iomgr/pollset_uv.h
@@ -19,9 +19,17 @@
#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
#define GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern int grpc_pollset_work_run_loop;
void grpc_pollset_global_init(void);
void grpc_pollset_global_shutdown(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.cc
index ea017a6054..01aff02c36 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -98,7 +98,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
pollset->shutting_down = 1;
- grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
+ grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
@@ -110,7 +110,7 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {}
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
- gpr_timespec now, gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
@@ -159,9 +159,12 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
&worker);
added_worker = 1;
while (!worker.kicked) {
- if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) {
+ if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+ grpc_exec_ctx_invalidate_now(exec_ctx);
break;
}
+ grpc_exec_ctx_invalidate_now(exec_ctx);
}
} else {
pollset->kicked_without_pollers = 0;
@@ -181,7 +184,7 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_pollset *p,
+grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -209,7 +212,7 @@ grpc_error *grpc_pollset_kick(grpc_pollset *p,
specific_worker =
pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
- grpc_pollset_kick(p, specific_worker);
+ grpc_pollset_kick(exec_ctx, p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {
diff --git a/src/core/lib/iomgr/pollset_windows.h b/src/core/lib/iomgr/pollset_windows.h
index 71878c3d30..2479b25286 100644
--- a/src/core/lib/iomgr/pollset_windows.h
+++ b/src/core/lib/iomgr/pollset_windows.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/socket_windows.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. A Windows "pollset" is merely a mutex
used to synchronize with the IOCP, and workers are condition variables
@@ -60,4 +64,8 @@ struct grpc_pollset {
void grpc_pollset_global_init(void);
void grpc_pollset_global_shutdown(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_WINDOWS_H */
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index 42033d0ba4..1cc6d98491 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -109,6 +109,16 @@
#define GRPC_POSIX_SOCKETUTILS 1
#define GRPC_POSIX_WAKEUP_FD 1
#define GRPC_TIMER_USE_GENERIC 1
+#elif defined(GPR_OPENBSD)
+#define GRPC_HAVE_IFADDRS 1
+#define GRPC_HAVE_IPV6_RECVPKTINFO 1
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETADDR 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_NACL)
#define GRPC_HAVE_ARPA_NAMESER 1
#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index fe1dd78576..5f0634299e 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -25,6 +25,10 @@
#define GRPC_MAX_SOCKADDR_SIZE 128
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
char addr[GRPC_MAX_SOCKADDR_SIZE];
size_t len;
@@ -52,4 +56,8 @@ extern grpc_error *(*grpc_blocking_resolve_address)(
const char *name, const char *default_port,
grpc_resolved_addresses **addresses);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H */
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.cc
index 35dedc23de..1b783495df 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -33,10 +33,10 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
static grpc_error *blocking_resolve_address_impl(
@@ -81,16 +81,16 @@ static grpc_error *blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
/* Retry if well-known service name is recognized */
- char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, svc[i][1], &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
break;
}
}
@@ -112,13 +112,14 @@ static grpc_error *blocking_resolve_address_impl(
}
/* Success path: set addrs non-NULL, fill it in */
- *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addresses =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs =
- gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -153,7 +154,7 @@ typedef struct {
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
- request *r = rp;
+ request *r = (request *)rp;
GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
@@ -174,9 +175,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
- request *r = gpr_malloc(sizeof(request));
+ request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.cc
index 2d438e8b48..4f7f234877 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.cc
@@ -49,11 +49,12 @@ static int retry_named_port_failure(int status, request *r,
uv_getaddrinfo_cb getaddrinfo_cb) {
if (status != 0) {
// This loop is copied from resolve_address_posix.c
- char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(r->port, svc[i][0]) == 0) {
int retry_status;
- uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t));
+ uv_getaddrinfo_t *req =
+ (uv_getaddrinfo_t *)gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
r->port = gpr_strdup(svc[i][1]);
retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
@@ -85,13 +86,14 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
grpc_slice_from_static_string(uv_strerror(status)));
return error;
}
- (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses) =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs =
- gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -174,6 +176,7 @@ static grpc_error *blocking_resolve_address_impl(
int s;
grpc_error *err;
int retry_status;
+ request r;
GRPC_UV_ASSERT_SAME_THREAD();
@@ -191,8 +194,10 @@ static grpc_error *blocking_resolve_address_impl(
hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */
s = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints);
- request r = {
- .addresses = addresses, .hints = &hints, .host = host, .port = port};
+ r.addresses = addresses;
+ r.hints = &hints;
+ r.host = host;
+ r.port = port;
retry_status = retry_named_port_failure(s, &r, NULL);
if (retry_status <= 0) {
s = retry_status;
@@ -239,16 +244,16 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
gpr_free(port);
return;
}
- r = gpr_malloc(sizeof(request));
+ r = (request *)gpr_malloc(sizeof(request));
r->on_done = on_done;
r->addresses = addrs;
r->host = host;
r->port = port;
- req = gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req = (uv_getaddrinfo_t *)gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
/* Call getaddrinfo */
- hints = gpr_malloc(sizeof(struct addrinfo));
+ hints = (addrinfo *)gpr_malloc(sizeof(struct addrinfo));
memset(hints, 0, sizeof(struct addrinfo));
hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
hints->ai_socktype = SOCK_STREAM; /* stream socket */
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.cc
index 45cfd7248d..451f01a701 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -23,6 +23,7 @@
#include "src/core/lib/iomgr/resolve_address.h"
+#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
@@ -33,10 +34,10 @@
#include <grpc/support/string_util.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
typedef struct {
@@ -86,20 +87,21 @@ static grpc_error *blocking_resolve_address_impl(
GRPC_SCHEDULING_START_BLOCKING_REGION;
s = getaddrinfo(host, port, &hints, &result);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
+ GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
if (s != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getaddrinfo");
goto done;
}
/* Success path: set addrs non-NULL, fill it in */
- (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses) =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs =
- gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@@ -132,7 +134,7 @@ grpc_error *(*grpc_blocking_resolve_address)(
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
- request *r = rp;
+ request *r = (request *)rp;
if (error == GRPC_ERROR_NONE) {
error =
grpc_blocking_resolve_address(r->name, r->default_port, r->addresses);
@@ -157,9 +159,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addresses) {
- request *r = gpr_malloc(sizeof(request));
+ request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
- grpc_executor_scheduler);
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.cc
index a31d9eef93..ecb5747da8 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -18,10 +18,12 @@
#include "src/core/lib/iomgr/resource_quota.h"
+#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>
+#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -87,6 +89,8 @@ struct grpc_resource_user {
grpc_closure_list on_allocated;
/* True if we are currently trying to allocate from the quota, false if not */
bool allocating;
+ /* How many bytes of allocations are outstanding */
+ int64_t outstanding_allocations;
/* True if we are currently trying to add ourselves to the non-free quota
list, false otherwise */
bool added_to_free_pool;
@@ -151,6 +155,9 @@ struct grpc_resource_quota {
char *name;
};
+static void ru_unref_by(grpc_exec_ctx *exec_ctx,
+ grpc_resource_user *resource_user, gpr_atm amount);
+
/*******************************************************************************
* list management
*/
@@ -241,7 +248,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->step_scheduled = false;
do {
if (rq_alloc(exec_ctx, resource_quota)) goto done;
@@ -287,6 +294,25 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
+ if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
+ " free_pool=%" PRId64,
+ resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
+ resource_user->free_pool);
+ }
+ if (gpr_atm_no_barrier_load(&resource_user->shutdown)) {
+ resource_user->allocating = false;
+ grpc_closure_list_fail_all(
+ &resource_user->on_allocated,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown"));
+ int64_t aborted_allocations = resource_user->outstanding_allocations;
+ resource_user->outstanding_allocations = 0;
+ resource_user->free_pool += aborted_allocations;
+ GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
+ gpr_mu_unlock(&resource_user->mu);
+ ru_unref_by(exec_ctx, resource_user, (gpr_atm)aborted_allocations);
+ continue;
+ }
if (resource_user->free_pool < 0 &&
-resource_user->free_pool <= resource_quota->free_pool) {
int64_t amt = -resource_user->free_pool;
@@ -306,6 +332,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
}
if (resource_user->free_pool >= 0) {
resource_user->allocating = false;
+ resource_user->outstanding_allocations = 0;
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
gpr_mu_unlock(&resource_user->mu);
} else {
@@ -380,12 +407,12 @@ typedef struct {
} ru_slice_refcount;
static void ru_slice_ref(void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
gpr_ref(&rc->refs);
}
static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- ru_slice_refcount *rc = p;
+ ru_slice_refcount *rc = (ru_slice_refcount *)p;
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
@@ -398,7 +425,8 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
size_t size) {
- ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
+ ru_slice_refcount *rc =
+ (ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size);
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
@@ -417,7 +445,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
*/
static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -427,7 +455,7 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -454,7 +482,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -469,7 +497,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -485,7 +513,10 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
+ gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
+ }
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
@@ -494,10 +525,13 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
resource_user->reclaimers[1] = NULL;
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
+ if (resource_user->allocating) {
+ rq_step_sched(exec_ctx, resource_user->resource_quota);
+ }
}
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = ru;
+ grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
@@ -518,7 +552,8 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_resource_user_slice_allocator *slice_allocator = arg;
+ grpc_resource_user_slice_allocator *slice_allocator =
+ (grpc_resource_user_slice_allocator *)arg;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@@ -541,7 +576,7 @@ typedef struct {
} rq_resize_args;
static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- rq_resize_args *a = args;
+ rq_resize_args *a = (rq_resize_args *)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@@ -553,7 +588,7 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_error *error) {
- grpc_resource_quota *resource_quota = rq;
+ grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->reclaiming = false;
rq_step_sched(exec_ctx, resource_quota);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -565,7 +600,8 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
/* Public API */
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
- grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
+ grpc_resource_quota *resource_quota =
+ (grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@@ -629,7 +665,7 @@ double grpc_resource_quota_get_memory_pressure(
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- rq_resize_args *a = gpr_malloc(sizeof(*a));
+ rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
@@ -653,7 +689,7 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@@ -663,12 +699,12 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
}
static void *rq_copy(void *rq) {
- grpc_resource_quota_ref(rq);
+ grpc_resource_quota_ref((grpc_resource_quota *)rq);
return rq;
}
static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) {
- grpc_resource_quota_unref_internal(exec_ctx, rq);
+ grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq);
}
static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
@@ -684,7 +720,8 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota *resource_quota, const char *name) {
- grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
+ grpc_resource_user *resource_user =
+ (grpc_resource_user *)gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
@@ -712,6 +749,7 @@ grpc_resource_user *grpc_resource_user_create(
resource_user->reclaimers[1] = NULL;
resource_user->new_reclaimers[0] = NULL;
resource_user->new_reclaimers[1] = NULL;
+ resource_user->outstanding_allocations = 0;
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = NULL;
}
@@ -772,6 +810,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&resource_user->mu);
ru_ref_by(resource_user, (gpr_atm)size);
resource_user->free_pool -= (int64_t)size;
+ resource_user->outstanding_allocations += (int64_t)size;
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size,
@@ -786,6 +825,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
} else {
+ resource_user->outstanding_allocations -= (int64_t)size;
GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
}
gpr_mu_unlock(&resource_user->mu);
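The resource_quota.cc hunks add an outstanding_allocations counter so that a resource user which shuts down while still waiting on the quota can fail its pending on_allocated closures and hand the unfulfilled bytes back to its free pool. A toy model (not gRPC code) of just that bookkeeping invariant, assuming the alloc and abort steps run under the user's lock as they do in the real code:

    #include <cstdint>
    #include <cstdio>

    struct ToyUser {
      int64_t free_pool = 0;                // goes negative while waiting on the quota
      int64_t outstanding_allocations = 0;  // bytes requested but not yet granted
    };

    static void toy_alloc(ToyUser &u, int64_t size) {
      u.free_pool -= size;
      u.outstanding_allocations += size;
    }

    static void toy_shutdown_abort(ToyUser &u) {
      // Mirrors the new rq_alloc() shutdown branch: return the aborted bytes.
      u.free_pool += u.outstanding_allocations;
      u.outstanding_allocations = 0;
    }

    int main() {
      ToyUser u;
      toy_alloc(u, 4096);
      toy_shutdown_abort(u);
      std::printf("free_pool=%lld outstanding=%lld\n", (long long)u.free_pool,
                  (long long)u.outstanding_allocations);
      return 0;
    }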
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index d66f9ae774..1d4249b7e2 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -24,6 +24,10 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** \file Tracks resource usage against a pool.
The current implementation tracks only memory usage, but in the future
@@ -150,4 +154,8 @@ grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
size_t size);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
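resource_quota.h is the first of many headers in this patch to gain the same guard: wrapping the declarations in extern "C" keeps their symbols at C linkage while the implementation files move to C++. A generic sketch of the pattern (all names here are placeholders, not gRPC identifiers):

    #ifndef EXAMPLE_MODULE_H
    #define EXAMPLE_MODULE_H

    #include <stddef.h>

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* Declarations keep C linkage whether the including file is .c or .cc. */
    int example_do_work(const char *name, size_t len);

    #ifdef __cplusplus
    }
    #endif

    #endif /* EXAMPLE_MODULE_H */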
diff --git a/src/core/lib/iomgr/sockaddr_utils.c b/src/core/lib/iomgr/sockaddr_utils.cc
index 3f4145d104..8a2e6ed89b 100644
--- a/src/core/lib/iomgr/sockaddr_utils.c
+++ b/src/core/lib/iomgr/sockaddr_utils.cc
@@ -19,6 +19,7 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include <errno.h>
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index a589a19705..1fd552febb 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -21,6 +21,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Returns true if addr is an IPv4-mapped IPv6 address within the
::ffff:0.0.0.0/96 range, or false otherwise.
@@ -77,4 +81,8 @@ const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr);
int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */
diff --git a/src/core/lib/iomgr/socket_factory_posix.c b/src/core/lib/iomgr/socket_factory_posix.cc
index 0f82dea570..8e907703ae 100644
--- a/src/core/lib/iomgr/socket_factory_posix.c
+++ b/src/core/lib/iomgr/socket_factory_posix.cc
@@ -69,11 +69,11 @@ void grpc_socket_factory_unref(grpc_socket_factory *factory) {
}
static void *socket_factory_arg_copy(void *p) {
- return grpc_socket_factory_ref(p);
+ return grpc_socket_factory_ref((grpc_socket_factory *)p);
}
static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_factory_unref(p);
+ grpc_socket_factory_unref((grpc_socket_factory *)p);
}
static int socket_factory_cmp(void *a, void *b) {
@@ -85,8 +85,8 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_FACTORY, factory,
- &socket_factory_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
+ factory, &socket_factory_arg_vtable);
}
#endif
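The (char *)GRPC_ARG_SOCKET_FACTORY cast exists because grpc_channel_arg_pointer_create takes a non-const char * key, and in C++ the string literal behind a GRPC_ARG_* macro is const char[], so passing it without a cast no longer compiles. A standalone sketch (make_arg and EXAMPLE_ARG_KEY are stand-ins, not the real API):

    #include <cstdio>

    #define EXAMPLE_ARG_KEY "grpc.example.key"  // plays the role of a GRPC_ARG_* macro

    // Stand-in for grpc_channel_arg_pointer_create's non-const key parameter.
    static void make_arg(char *key, void *value) { std::printf("%s -> %p\n", key, value); }

    int main() {
      int payload = 0;
      // String literals are const char[] in C++, hence the cast at the call site.
      make_arg((char *)EXAMPLE_ARG_KEY, &payload);
      return 0;
    }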
diff --git a/src/core/lib/iomgr/socket_mutator.c b/src/core/lib/iomgr/socket_mutator.cc
index 5d6c2c400e..b0435d5a07 100644
--- a/src/core/lib/iomgr/socket_mutator.c
+++ b/src/core/lib/iomgr/socket_mutator.cc
@@ -60,11 +60,11 @@ void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
}
static void *socket_mutator_arg_copy(void *p) {
- return grpc_socket_mutator_ref(p);
+ return grpc_socket_mutator_ref((grpc_socket_mutator *)p);
}
static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_mutator_unref(p);
+ grpc_socket_mutator_unref((grpc_socket_mutator *)p);
}
static int socket_mutator_cmp(void *a, void *b) {
@@ -76,6 +76,6 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SOCKET_MUTATOR, mutator,
- &socket_mutator_arg_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
+ mutator, &socket_mutator_arg_vtable);
}
diff --git a/src/core/lib/iomgr/socket_utils.h b/src/core/lib/iomgr/socket_utils.h
index 03fe46e5e9..d6c538ec6f 100644
--- a/src/core/lib/iomgr/socket_utils.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -21,7 +21,15 @@
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H */
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.cc
index b8e2a0cdfd..b8e2a0cdfd 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
diff --git a/src/core/lib/iomgr/socket_utils_linux.c b/src/core/lib/iomgr/socket_utils_linux.cc
index e7b094d216..e7b094d216 100644
--- a/src/core/lib/iomgr/socket_utils_linux.c
+++ b/src/core/lib/iomgr/socket_utils_linux.cc
diff --git a/src/core/lib/iomgr/socket_utils_posix.c b/src/core/lib/iomgr/socket_utils_posix.cc
index dfd1ffd1e3..dfd1ffd1e3 100644
--- a/src/core/lib/iomgr/socket_utils_posix.c
+++ b/src/core/lib/iomgr/socket_utils_posix.cc
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index eef80b439e..73809b68d3 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -29,6 +29,10 @@
#include "src/core/lib/iomgr/socket_factory_posix.h"
#include "src/core/lib/iomgr/socket_mutator.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* a wrapper for accept or accept4 */
int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
int cloexec);
@@ -129,4 +133,8 @@ grpc_error *grpc_create_dualstack_socket_using_factory(
grpc_socket_factory *factory, const grpc_resolved_address *addr, int type,
int protocol, grpc_dualstack_mode *dsmode, int *newfd);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H */
diff --git a/src/core/lib/iomgr/socket_utils_uv.c b/src/core/lib/iomgr/socket_utils_uv.cc
index 0f7de4dfad..0f7de4dfad 100644
--- a/src/core/lib/iomgr/socket_utils_uv.c
+++ b/src/core/lib/iomgr/socket_utils_uv.cc
diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.cc
index 2732c159aa..6e85e4b61f 100644
--- a/src/core/lib/iomgr/socket_utils_windows.c
+++ b/src/core/lib/iomgr/socket_utils_windows.cc
@@ -26,12 +26,8 @@
#include <grpc/support/log.h>
const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
-#ifdef GPR_WIN_INET_NTOP
- return inet_ntop(af, src, dst, size);
-#else
/* Windows InetNtopA wants a mutable ip pointer */
return InetNtopA(af, (void *)src, dst, size);
-#endif /* GPR_WIN_INET_NTOP */
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.cc
index a0d731b942..8c7f7cf683 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.cc
@@ -38,7 +38,7 @@
grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) {
char *final_name;
- grpc_winsocket *r = gpr_malloc(sizeof(grpc_winsocket));
+ grpc_winsocket *r = (grpc_winsocket *)gpr_malloc(sizeof(grpc_winsocket));
memset(r, 0, sizeof(grpc_winsocket));
r->socket = socket;
gpr_mu_init(&r->state_mu);
diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h
index 67dc4ca53e..84fa071e89 100644
--- a/src/core/lib/iomgr/socket_windows.h
+++ b/src/core/lib/iomgr/socket_windows.h
@@ -28,6 +28,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* This holds the data for an outstanding read or write on a socket.
The mutex to protect the concurrent access to that data is the one
inside the winsocket wrapper. */
@@ -107,4 +111,8 @@ void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx,
grpc_winsocket *winsocket,
grpc_winsocket_callback_info *ci);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index 6c9e51ae84..b2f365f2af 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Asynchronously connect to an address (specified as (addr, len)), and call
cb with arg and the completed connection when done (or call cb with arg and
NULL on failure).
@@ -35,6 +39,10 @@ void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline);
+ grpc_millis deadline);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.cc
index a25fba4527..5611dd9062 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -48,7 +48,6 @@ extern grpc_tracer_flag grpc_tcp_trace;
typedef struct {
gpr_mu mu;
grpc_fd *fd;
- gpr_timespec deadline;
grpc_timer alarm;
grpc_closure on_alarm;
int refs;
@@ -80,7 +79,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
- grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
+ grpc_socket_mutator *mutator =
+ (grpc_socket_mutator *)channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@@ -98,7 +98,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
@@ -126,7 +126,7 @@ grpc_endpoint *grpc_tcp_client_create_from_fd(
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
int so_error = 0;
socklen_t so_error_size;
int err;
@@ -243,7 +243,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
@@ -304,7 +304,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
- ac = gpr_malloc(sizeof(async_connect));
+ ac = (async_connect *)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
@@ -324,9 +324,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&ac->mu);
GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &ac->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu);
@@ -336,18 +334,20 @@ done:
}
// overridden by api_fuzzer.c
+extern "C" {
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
+}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
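Alongside the casts, this file switches the connect deadline from gpr_timespec to grpc_millis: grpc_timer_init now takes a single monotonic millisecond value and the per-call gpr_convert_clock_type/gpr_now conversion disappears. Elsewhere in this patch such deadlines are built as grpc_exec_ctx_now(exec_ctx) + N * GPR_MS_PER_SEC. A toy std::chrono analogue of that style (not gRPC code):

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    using millis = int64_t;  // plays the role of grpc_millis

    static millis now_ms() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    }

    int main() {
      const millis kMsPerSec = 1000;                // role of GPR_MS_PER_SEC
      millis deadline = now_ms() + 13 * kMsPerSec;  // e.g. "13 seconds from now"
      std::printf("time left: %lld ms\n", (long long)(deadline - now_ms()));
      return 0;
    }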
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index b5a3814799..8740511804 100644
--- a/src/core/lib/iomgr/tcp_client_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -23,8 +23,16 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/tcp_client.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_endpoint *grpc_tcp_client_create_from_fd(
grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
const char *addr_str);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.cc
index 786c456b73..f3e9366299 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -58,7 +58,7 @@ static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); }
static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
grpc_error *error) {
int done;
- grpc_uv_tcp_connect *connect = acp;
+ grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
@@ -77,7 +77,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
}
static void uv_tc_on_connect(uv_connect_t *req, int status) {
- grpc_uv_tcp_connect *connect = req->data;
+ grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)req->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error *error = GRPC_ERROR_NONE;
int done;
@@ -119,7 +119,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *resolved_addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_uv_tcp_connect *connect;
grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
(void)channel_args;
@@ -132,20 +132,20 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
- connect = gpr_zalloc(sizeof(grpc_uv_tcp_connect));
+ connect = (grpc_uv_tcp_connect *)gpr_zalloc(sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
- connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
+ connect->tcp_handle = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
- connect->refs = 1;
+ connect->refs = 2; // One for the connect operation, one for the timer.
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
@@ -158,24 +158,24 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &connect->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &connect->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);
}
// overridden by api_fuzzer.c
+extern "C" {
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
+}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.cc
index fc62105cc9..9adf7ee4e9 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <inttypes.h>
+
#ifdef GRPC_WINSOCK_SOCKET
#include "src/core/lib/iomgr/sockaddr_windows.h"
@@ -41,7 +43,6 @@ typedef struct {
grpc_closure *on_done;
gpr_mu mu;
grpc_winsocket *socket;
- gpr_timespec deadline;
grpc_timer alarm;
grpc_closure on_alarm;
char *addr_name;
@@ -66,7 +67,7 @@ static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
gpr_mu_lock(&ac->mu);
grpc_winsocket *socket = ac->socket;
ac->socket = NULL;
@@ -77,7 +78,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
}
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = acp;
+ async_connect *ac = (async_connect *)acp;
grpc_endpoint **ep = ac->endpoint;
GPR_ASSERT(*ep == NULL);
grpc_closure *on_done = ac->on_done;
@@ -124,7 +125,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
static void tcp_client_connect_impl(
grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr, gpr_timespec deadline) {
+ const grpc_resolved_address *addr, grpc_millis deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
@@ -193,7 +194,7 @@ static void tcp_client_connect_impl(
}
}
- ac = gpr_malloc(sizeof(async_connect));
+ ac = (async_connect *)gpr_malloc(sizeof(async_connect));
ac->on_done = on_done;
ac->socket = socket;
gpr_mu_init(&ac->mu);
@@ -204,8 +205,7 @@ static void tcp_client_connect_impl(
GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
- grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm,
- gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
return;
@@ -226,18 +226,20 @@ failure:
}
// overridden by api_fuzzer.c
+extern "C" {
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) = tcp_client_connect_impl;
+ grpc_millis deadline) = tcp_client_connect_impl;
+}
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
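Each tcp_client_*.cc also wraps its test-overridable grpc_tcp_client_connect_impl function pointer in extern "C", so the symbol keeps C linkage (and stays reachable and reassignable from C code such as the fuzzer named in the comment) now that the defining file is C++. A standalone sketch of that shape with placeholder names:

    #include <cstdio>

    static void default_connect(const char *addr) { std::printf("connect %s\n", addr); }

    extern "C" {
    // C-linkage global: C translation units can still find and reassign it.
    void (*example_connect_impl)(const char *addr) = default_connect;
    }

    int main() {
      example_connect_impl("localhost:443");
      return 0;
    }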
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.cc
index 7e789cc5b5..b7c1803ded 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -43,6 +43,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -67,7 +68,6 @@ typedef struct {
grpc_fd *em_fd;
int fd;
bool finished_edge;
- msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */
double target_length;
double bytes_read_this_round;
gpr_refcount refcount;
@@ -91,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
- grpc_closure read_closure;
- grpc_closure write_closure;
+ grpc_closure read_done_closure;
+ grpc_closure write_done_closure;
char *peer_string;
@@ -100,6 +100,146 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
+typedef struct backup_poller {
+ gpr_mu *pollset_mu;
+ grpc_closure run_poller;
+} backup_poller;
+
+#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
+
+static gpr_atm g_uncovered_notifications_pending;
+static gpr_atm g_backup_poller; /* backup_poller* */
+
+static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
+ grpc_error *error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg /* grpc_tcp */,
+ grpc_error *error);
+
+static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
+ }
+ grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
+ gpr_free(p);
+}
+
+static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
+ grpc_error *error_ignored) {
+ backup_poller *p = (backup_poller *)bp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
+ }
+ gpr_mu_lock(p->pollset_mu);
+ grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 13 * GPR_MS_PER_SEC;
+ GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
+ GRPC_LOG_IF_ERROR(
+ "backup_poller:pollset_work",
+ grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, deadline));
+ gpr_mu_unlock(p->pollset_mu);
+ /* last "uncovered" notification is the ref that keeps us polling, if we get
+ * there try a cas to release it */
+ if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
+ gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
+ gpr_mu_lock(p->pollset_mu);
+ bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
+ }
+ gpr_mu_unlock(p->pollset_mu);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
+ }
+ grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
+ GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
+ grpc_schedule_on_exec_ctx));
+ } else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
+ }
+}
+
+static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
+ (int)old_count - 1);
+ }
+ GPR_ASSERT(old_count != 1);
+}
+
+static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ backup_poller *p;
+ gpr_atm old_count =
+ gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
+ 2 + (int)old_count);
+ }
+ if (old_count == 0) {
+ GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
+ p = (backup_poller *)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
+ }
+ grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
+ gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
+ grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
+ GRPC_ERROR_NONE);
+ } else {
+ while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
+ // spin waiting for backup poller
+ }
+ }
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
+ }
+ grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+ if (old_count != 0) {
+ drop_uncovered(exec_ctx, tcp);
+ }
+}
+
+static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
+ }
+ GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
+}
+
+static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
+ }
+ cover_self(exec_ctx, tcp);
+ GRPC_CLOSURE_INIT(&tcp->write_done_closure,
+ tcp_drop_uncovered_then_handle_write, tcp,
+ grpc_schedule_on_exec_ctx);
+ grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
+}
+
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
+ void *arg, grpc_error *error) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
+ }
+ drop_uncovered(exec_ctx, (grpc_tcp *)arg);
+ tcp_handle_write(exec_ctx, arg, error);
+}
+
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@@ -215,6 +355,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@@ -240,7 +381,6 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t i;
GPR_ASSERT(!tcp->finished_edge);
- GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
@@ -252,11 +392,14 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_iov = iov;
- msg.msg_iovlen = tcp->iov_size;
+ msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
+ GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length);
+ GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count);
+
GPR_TIMER_BEGIN("recvmsg", 0);
do {
GRPC_STATS_INC_SYSCALL_READ(exec_ctx);
@@ -270,7 +413,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@@ -287,6 +430,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
+ GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes);
add_to_estimate(tcp, (size_t)read_bytes);
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -305,7 +449,11 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
- grpc_tcp *tcp = tcpp;
+ grpc_tcp *tcp = (grpc_tcp *)tcpp;
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
+ grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@@ -321,9 +469,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
+ }
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
+ }
tcp_do_read(exec_ctx, tcp);
}
}
@@ -332,6 +486,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
+ if (GRPC_TRACER_ON(grpc_tcp_trace)) {
+ gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
+ }
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@@ -355,9 +512,9 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
- grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
+ notify_on_read(exec_ctx, tcp);
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
@@ -403,6 +560,9 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
msg.msg_controllen = 0;
msg.msg_flags = 0;
+ GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length);
+ GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size);
+
GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
@@ -467,7 +627,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@@ -520,7 +680,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
- grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
+ notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@@ -544,6 +704,13 @@ static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
}
+static void tcp_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
+ grpc_pollset_set *pollset_set) {
+ grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_pollset_set_del_fd(exec_ctx, pollset_set, tcp->em_fd);
+}
+
static char *tcp_get_peer(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return gpr_strdup(tcp->peer_string);
@@ -559,10 +726,16 @@ static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
return tcp->resource_user;
}
-static const grpc_endpoint_vtable vtable = {
- tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
- tcp_shutdown, tcp_destroy, tcp_get_resource_user, tcp_get_peer,
- tcp_get_fd};
+static const grpc_endpoint_vtable vtable = {tcp_read,
+ tcp_write,
+ tcp_add_to_pollset,
+ tcp_add_to_pollset_set,
+ tcp_delete_from_pollset_set,
+ tcp_shutdown,
+ tcp_destroy,
+ tcp_get_resource_user,
+ tcp_get_peer,
+ tcp_get_fd};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
@@ -597,7 +770,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@@ -621,16 +794,11 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
tcp->min_read_chunk_size = tcp_min_read_chunk_size;
tcp->max_read_chunk_size = tcp_max_read_chunk_size;
tcp->bytes_read_this_round = 0;
- tcp->iov_size = 1;
tcp->finished_edge = true;
/* paired with unref in grpc_tcp_destroy */
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
- GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
- grpc_schedule_on_exec_ctx);
- GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
- grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
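The large addition to tcp_posix.cc is the backup poller: an endpoint that arms a write notification first "covers" itself on a shared pollset driven by the executor, and the whole machine is kept alive by an atomic count of uncovered notifications plus one keep-alive reference held by the poller itself. A toy model (not gRPC code) of just that counting lifecycle, using std::atomic in place of the gpr_atm calls:

    #include <atomic>
    #include <cstdio>

    static std::atomic<long> g_uncovered{0};

    static void cover_self() {
      // +2: one for the pending notification, one keep-alive for the poller.
      long old_count = g_uncovered.fetch_add(2);
      if (old_count == 0) {
        std::printf("create backup poller\n");
      } else {
        // A poller already holds the keep-alive reference; give one back.
        g_uncovered.fetch_sub(1);
      }
    }

    static void drop_uncovered() { g_uncovered.fetch_sub(1); }

    static void run_poller_once() {
      // Mirrors run_poller(): when only the keep-alive remains, try to retire.
      long expected = 1;
      if (g_uncovered.compare_exchange_strong(expected, 0)) {
        std::printf("retire backup poller\n");
      }
    }

    int main() {
      cover_self();       // first covered endpoint -> poller created (count 2)
      drop_uncovered();   // its write notification fired (count 1)
      run_poller_once();  // only the keep-alive left -> poller retires (count 0)
      return 0;
    }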
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 6831a4a57f..47e78fa67e 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -33,6 +33,10 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern grpc_tracer_flag grpc_tcp_trace;
/* Create a tcp endpoint given a file desciptor and a read slice size.
@@ -53,4 +57,8 @@ int grpc_tcp_fd(grpc_endpoint *ep);
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
int *fd, grpc_closure *done);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 8a126b6dee..8f9ce3819e 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Forward decl of grpc_tcp_server */
typedef struct grpc_tcp_server grpc_tcp_server;
@@ -98,4 +102,8 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
grpc_tcp_server *s);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.cc
index 0fc5c0fd86..06612d639c 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -74,7 +74,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
- grpc_tcp_server *s = gpr_zalloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
@@ -138,7 +138,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_tcp_server *s = server;
+ grpc_tcp_server *s = (grpc_tcp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -197,13 +197,13 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
- grpc_tcp_listener *sp = arg;
-
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
+ grpc_pollset *read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
- grpc_pollset *read_notifier_pollset =
+ read_notifier_pollset =
sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
&sp->server->next_pollset_to_assign, 1) %
sp->server->pollset_count];
@@ -251,7 +251,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor *acceptor =
+ (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@@ -365,7 +366,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original
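One change in on_read() is there purely for C++'s benefit: the original code declared and initialized grpc_pollset *read_notifier_pollset after a goto error, and C++ rejects a jump that bypasses a variable's initialization, so the declaration is hoisted and the initialization becomes a plain assignment. A standalone sketch of the same fix (names are placeholders):

    #include <cstdio>

    static int lookup(int key) { return key * 2; }

    static int handle(int err, int key) {
      int value;  // declared up front, so the goto below skips no initialization
      if (err != 0) {
        goto error;
      }
      value = lookup(key);  // assignment rather than initialization, as in on_read()
      return value;
    error:
      std::printf("error %d\n", err);
      return -1;
    }

    int main() { return handle(0, 21) == 42 ? 0 : 1; }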
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h
index 85dea515d9..6746333960 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix.h
+++ b/src/core/lib/iomgr/tcp_server_utils_posix.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_server.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* one listening port */
typedef struct grpc_tcp_listener {
int fd;
@@ -117,4 +121,8 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd,
/* Ruturn true if the platform supports ifaddrs */
bool grpc_tcp_server_have_ifaddrs(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
index ad535bc43e..a828bee074 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
@@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
index a243b69f35..a243b69f35 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
index 34eab20d6a..34eab20d6a 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.cc
index 3b9332321f..348838c495 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.cc
@@ -77,14 +77,14 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
- grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server));
s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
- s->resource_quota =
- grpc_resource_quota_ref_internal(args->args[i].value.pointer.p);
+ s->resource_quota = grpc_resource_quota_ref_internal(
+ (grpc_resource_quota *)args->args[i].value.pointer.p);
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
@@ -190,15 +190,16 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) {
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
- uv_tcp_t *client;
+ grpc_tcp_server_acceptor *acceptor =
+ (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
+ uv_tcp_t *client = NULL;
grpc_endpoint *ep = NULL;
grpc_resolved_address peer_name;
char *peer_name_string;
int err;
uv_tcp_t *server = sp->handle;
- client = gpr_malloc(sizeof(uv_tcp_t));
+ client = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
uv_tcp_init(uv_default_loop(), client);
// UV documentation says this is guaranteed to succeed
uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
@@ -303,7 +304,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
GPR_ASSERT(port >= 0);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_zalloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_zalloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -355,7 +356,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
(int *)&sockname_temp.len)) {
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ allocated_addr = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
@@ -376,7 +378,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
addr = &wildcard;
}
- handle = gpr_malloc(sizeof(uv_tcp_t));
+ handle = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
family = grpc_sockaddr_get_family(addr);
status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family);
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.cc
index 0162afc1ad..f198aaaa5b 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.cc
@@ -22,6 +22,7 @@
#include "src/core/lib/iomgr/sockaddr.h"
+#include <inttypes.h>
#include <io.h>
#include <grpc/support/alloc.h>
@@ -97,7 +98,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
- grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server));
s->channel_args = grpc_channel_args_copy(args);
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
@@ -115,7 +116,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_tcp_server *s = arg;
+ grpc_tcp_server *s = (grpc_tcp_server *)arg;
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
@@ -188,6 +189,7 @@ static grpc_error *prepare_socket(SOCKET sock,
int *port) {
grpc_resolved_address sockname_temp;
grpc_error *error = GRPC_ERROR_NONE;
+ int sockname_temp_len;
error = grpc_tcp_prepare_socket(sock);
if (error != GRPC_ERROR_NONE) {
@@ -205,7 +207,7 @@ static grpc_error *prepare_socket(SOCKET sock,
goto failure;
}
- int sockname_temp_len = sizeof(struct sockaddr_storage);
+ sockname_temp_len = sizeof(struct sockaddr_storage);
if (getsockname(sock, (struct sockaddr *)sockname_temp.addr,
&sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
@@ -294,7 +296,7 @@ failure:
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_tcp_listener *sp = arg;
+ grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL;
@@ -368,7 +370,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
managed to accept a connection, and created an endpoint. */
if (ep) {
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor *acceptor =
+ (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
@@ -421,7 +424,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
GPR_ASSERT(port >= 0);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -472,7 +475,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
sockname_temp.len = (size_t)sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ allocated_addr = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.cc
index a05c19b4ac..99b9f1ea2d 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -114,7 +114,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void uv_close_callback(uv_handle_t *handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = handle->data;
+ grpc_tcp *tcp = (grpc_tcp *)handle->data;
TCP_UNREF(&exec_ctx, tcp, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -128,7 +128,7 @@ static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx,
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = handle->data;
+ grpc_tcp *tcp = (grpc_tcp *)handle->data;
(void)suggested_size;
buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
@@ -140,7 +140,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
grpc_slice sub;
grpc_error *error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = stream->data;
+ grpc_tcp *tcp = (grpc_tcp *)stream->data;
grpc_closure *cb = tcp->read_cb;
if (nread == 0) {
// Nothing happened. Wait for the next callback
@@ -207,7 +207,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
static void write_callback(uv_write_t *req, int status) {
- grpc_tcp *tcp = req->data;
+ grpc_tcp *tcp = (grpc_tcp *)req->data;
grpc_error *error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure *cb = tcp->write_cb;
@@ -269,7 +269,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
- buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ buffers = (uv_buf_t *)gpr_malloc(sizeof(uv_buf_t) * buffer_count);
grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
@@ -304,6 +304,15 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
(void)pollset;
}
+static void uv_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
+ grpc_pollset_set *pollset) {
+ // No-op. We're ignoring pollsets currently
+ (void)exec_ctx;
+ (void)ep;
+ (void)pollset;
+}
+
static void shutdown_callback(uv_shutdown_t *req, int status) {}
static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -340,10 +349,16 @@ static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
static int uv_get_fd(grpc_endpoint *ep) { return -1; }
-static grpc_endpoint_vtable vtable = {
- uv_endpoint_read, uv_endpoint_write, uv_add_to_pollset,
- uv_add_to_pollset_set, uv_endpoint_shutdown, uv_destroy,
- uv_get_resource_user, uv_get_peer, uv_get_fd};
+static grpc_endpoint_vtable vtable = {uv_endpoint_read,
+ uv_endpoint_write,
+ uv_add_to_pollset,
+ uv_add_to_pollset_set,
+ uv_delete_from_pollset_set,
+ uv_endpoint_shutdown,
+ uv_destroy,
+ uv_get_resource_user,
+ uv_get_peer,
+ uv_get_fd};
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index 0e67481d35..3399535b42 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -38,8 +38,16 @@ extern grpc_tracer_flag grpc_tcp_trace;
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
char *peer_string);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
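The header half of this migration repeats one idiom: once C++ translation units include these headers while other callers and definitions are still compiled as C, the declarations need C linkage, so they are wrapped in guards that a plain C compiler never sees. The skeleton, with a hypothetical function name standing in for the real declarations:

    #ifdef __cplusplus
    extern "C" {
    #endif

    void grpc_example_function(void); /* hypothetical declaration, for illustration only */

    #ifdef __cplusplus
    }
    #endif
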
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.cc
index 2cbb97403b..6efcff84b8 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -37,6 +37,7 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_windows.h"
#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/iomgr/tcp_windows.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -158,7 +159,7 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
- grpc_tcp *tcp = tcpp;
+ grpc_tcp *tcp = (grpc_tcp *)tcpp;
grpc_closure *cb = tcp->read_cb;
grpc_winsocket *socket = tcp->socket;
grpc_slice sub;
@@ -370,6 +371,10 @@ static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_iocp_add_socket(tcp->socket);
}
+static void win_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *ep,
+ grpc_pollset_set *pss) {}
+
/* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
for the potential read and write operations. It is up to the caller to
guarantee this isn't called in parallel to a read or write request, so
@@ -411,21 +416,27 @@ static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
static int win_get_fd(grpc_endpoint *ep) { return -1; }
-static grpc_endpoint_vtable vtable = {
- win_read, win_write, win_add_to_pollset, win_add_to_pollset_set,
- win_shutdown, win_destroy, win_get_resource_user, win_get_peer,
- win_get_fd};
+static grpc_endpoint_vtable vtable = {win_read,
+ win_write,
+ win_add_to_pollset,
+ win_add_to_pollset_set,
+ win_delete_from_pollset_set,
+ win_shutdown,
+ win_destroy,
+ win_get_resource_user,
+ win_get_peer,
+ win_get_fd};
grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
grpc_channel_args *channel_args,
- char *peer_string) {
+ const char *peer_string) {
grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@@ -441,6 +452,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
+ grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
return &tcp->base;
}
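The added grpc_resource_quota_unref_internal() closes a reference leak in grpc_tcp_create: the function either creates a quota (one local ref) or takes an extra ref out of the channel args, and grpc_resource_user_create holds its own reference, so the local one must be dropped before returning. A sketch of the intended balance, with error handling elided:

    grpc_resource_quota *rq = grpc_resource_quota_create(NULL);      /* local ref */
    /* ... possibly replaced by a ref taken from the channel args ... */
    tcp->resource_user = grpc_resource_user_create(rq, peer_string); /* its own ref */
    grpc_resource_quota_unref_internal(exec_ctx, rq);                /* drop local ref */
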
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 864184ce84..f3697f707c 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -32,13 +32,21 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/socket_windows.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
grpc_channel_args *channel_args,
- char *peer_string);
+ const char *peer_string);
grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TCP_WINDOWS_H */
diff --git a/src/core/lib/iomgr/time_averaged_stats.c b/src/core/lib/iomgr/time_averaged_stats.cc
index 3bddec04dd..3bddec04dd 100644
--- a/src/core/lib/iomgr/time_averaged_stats.c
+++ b/src/core/lib/iomgr/time_averaged_stats.cc
diff --git a/src/core/lib/iomgr/time_averaged_stats.h b/src/core/lib/iomgr/time_averaged_stats.h
index 8745f7fa13..d38ed272b6 100644
--- a/src/core/lib/iomgr/time_averaged_stats.h
+++ b/src/core/lib/iomgr/time_averaged_stats.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H
#define GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* This tracks a time-decaying weighted average. It works by collecting
batches of samples and then mixing their average into a time-decaying
weighted mean. It is designed for batch operations where we do many adds
@@ -70,4 +74,8 @@ void grpc_time_averaged_stats_add_sample(grpc_time_averaged_stats* stats,
value. */
double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats* stats);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H */
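For readers landing on this header through the new C++ includes, a minimal usage sketch may help; the tuning constants below are illustrative (the real ones live in timer_generic.cc), and it assumes the grpc_time_averaged_stats_init(stats, init_avg, regress_weight, persistence_factor) declaration from the unshown part of this header:

    grpc_time_averaged_stats stats;
    /* start from a 15.0 estimate, regress 10% of the way back each batch,
       and carry 50% of the old aggregate weight forward */
    grpc_time_averaged_stats_init(&stats, 15.0, 0.1, 0.5);
    grpc_time_averaged_stats_add_sample(&stats, 20.0);
    grpc_time_averaged_stats_add_sample(&stats, 5.0);
    double avg = grpc_time_averaged_stats_update_average(&stats);
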
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index b92b8fb8b8..419e834cf1 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -32,6 +32,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_timer grpc_timer;
/* Initialize *timer. When expired or canceled, closure will be called with
@@ -41,8 +45,11 @@ typedef struct grpc_timer grpc_timer;
application callback is also responsible for maintaining information about
when to free up any user-level state. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now);
+ grpc_millis deadline, grpc_closure *closure);
+
+/* Initialize *timer without setting it. This can later be passed through
+ the regular init or cancel */
+void grpc_timer_init_unset(grpc_timer *timer);
/* Note that there is no timer destroy function. This is because the
timer is a one-time occurrence with a guarantee that the callback will
@@ -88,8 +95,8 @@ typedef enum {
with high probability at least one thread in the system will see an update
at any time slice. */
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next);
-void grpc_timer_list_init(gpr_timespec now);
+ grpc_millis *next);
+void grpc_timer_list_init(grpc_exec_ctx *exec_ctx);
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx);
/* Consume a kick issued by grpc_kick_poller */
@@ -99,4 +106,8 @@ void grpc_timer_consume_kick(void);
void grpc_kick_poller(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_H */
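The timer API change here is the heart of the millisecond migration: callers stop juggling gpr_timespec pairs and instead pass a grpc_millis deadline measured against the exec_ctx clock. A rough before/after sketch for a hypothetical 100ms timer (closure setup omitted):

    /* before this change */
    grpc_timer_init(exec_ctx, &timer,
                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                 gpr_time_from_millis(100, GPR_TIMESPAN)),
                    &on_timer, gpr_now(GPR_CLOCK_MONOTONIC));

    /* after: the deadline is relative to grpc_exec_ctx_now() */
    grpc_timer_init(exec_ctx, &timer, grpc_exec_ctx_now(exec_ctx) + 100,
                    &on_timer);
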
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.cc
index 12efce241f..b8e895de6f 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/iomgr/port.h"
+#include <inttypes.h>
+
#ifdef GRPC_TIMER_USE_GENERIC
#include "src/core/lib/iomgr/timer.h"
@@ -41,9 +43,11 @@
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
+extern "C" {
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
grpc_tracer_flag grpc_timer_check_trace =
GRPC_TRACER_INITIALIZER(false, "timer_check");
+}
/* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
* deadlines earlier than 'queue_deadline" cap are maintained in the heap and
@@ -79,6 +83,125 @@ static timer_shard g_shards[NUM_SHARDS];
* Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];
+#ifndef NDEBUG
+
+/* == Hash table for duplicate timer detection == */
+
+#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
+
+static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
+static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
+
+static void init_timer_ht() {
+ for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
+ gpr_mu_init(&g_hash_mu[i]);
+ }
+}
+
+static bool is_in_ht(grpc_timer *t) {
+ size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+
+ gpr_mu_lock(&g_hash_mu[i]);
+ grpc_timer *p = g_timer_ht[i];
+ while (p != NULL && p != t) {
+ p = p->hash_table_next;
+ }
+ gpr_mu_unlock(&g_hash_mu[i]);
+
+ return (p == t);
+}
+
+static void add_to_ht(grpc_timer *t) {
+ GPR_ASSERT(!t->hash_table_next);
+ size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+
+ gpr_mu_lock(&g_hash_mu[i]);
+ grpc_timer *p = g_timer_ht[i];
+ while (p != NULL && p != t) {
+ p = p->hash_table_next;
+ }
+
+ if (p == t) {
+ grpc_closure *c = t->closure;
+ gpr_log(GPR_ERROR,
+ "** Duplicate timer (%p) being added. Closure: (%p), created at: "
+ "(%s:%d), scheduled at: (%s:%d) **",
+ t, c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated);
+ abort();
+ }
+
+ /* Timer not present in the bucket. Insert at head of the list */
+ t->hash_table_next = g_timer_ht[i];
+ g_timer_ht[i] = t;
+ gpr_mu_unlock(&g_hash_mu[i]);
+}
+
+static void remove_from_ht(grpc_timer *t) {
+ size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
+ bool removed = false;
+
+ gpr_mu_lock(&g_hash_mu[i]);
+ if (g_timer_ht[i] == t) {
+ g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
+ removed = true;
+ } else if (g_timer_ht[i] != NULL) {
+ grpc_timer *p = g_timer_ht[i];
+ while (p->hash_table_next != NULL && p->hash_table_next != t) {
+ p = p->hash_table_next;
+ }
+
+ if (p->hash_table_next == t) {
+ p->hash_table_next = t->hash_table_next;
+ removed = true;
+ }
+ }
+ gpr_mu_unlock(&g_hash_mu[i]);
+
+ if (!removed) {
+ grpc_closure *c = t->closure;
+ gpr_log(GPR_ERROR,
+ "** Removing timer (%p) that is not added to hash table. Closure "
+ "(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
+ t, c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated);
+ abort();
+ }
+
+ t->hash_table_next = NULL;
+}
+
+/* If a timer is added to a timer shard (either heap or a list), it must
+ * be pending. A timer is added to the hash table only if it is added to a
+ * timer shard.
+ * Therefore, if timer->pending is false, it cannot be in the hash table */
+static void validate_non_pending_timer(grpc_timer *t) {
+ if (!t->pending && is_in_ht(t)) {
+ grpc_closure *c = t->closure;
+ gpr_log(GPR_ERROR,
+ "** gpr_timer_cancel() called on a non-pending timer (%p) which "
+ "is in the hash table. Closure: (%p), created at: (%s:%d), "
+ "scheduled at: (%s:%d) **",
+ t, c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated);
+ abort();
+ }
+}
+
+#define INIT_TIMER_HASH_TABLE() init_timer_ht()
+#define ADD_TO_HASH_TABLE(t) add_to_ht((t))
+#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
+#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
+
+#else
+
+#define INIT_TIMER_HASH_TABLE()
+#define ADD_TO_HASH_TABLE(t)
+#define REMOVE_FROM_HASH_TABLE(t)
+#define VALIDATE_NON_PENDING_TIMER(t)
+
+#endif
+
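In case the duplicate-detection machinery is easier to read without the macros: each timer is hashed by its address into one of NUM_HASH_BUCKETS intrusive chains, so inserting the same pointer twice (or cancelling a timer that already fired) is caught and aborted before it can corrupt a shard. A stripped-down, single-threaded toy version of the same idea (names invented for illustration, no locking; GPR_HASH_POINTER does the real bucket selection):

    #include <stddef.h>
    #include <stdint.h>

    #define N_BUCKETS 1009
    typedef struct node { struct node *next; } node;
    static node *buckets[N_BUCKETS];

    static size_t bucket_of(void *p) {
      return (size_t)(((uintptr_t)p >> 4) % N_BUCKETS);
    }

    /* returns 0 (and changes nothing) if n is already chained in its bucket */
    static int insert_once(node *n) {
      size_t i = bucket_of(n);
      for (node *p = buckets[i]; p != NULL; p = p->next) {
        if (p == n) return 0;
      }
      n->next = buckets[i];
      buckets[i] = n;
      return 1;
    }
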
/* Thread local variable that stores the deadline of the next timer the thread
* has last-seen. This is an optimization to prevent the thread from checking
* shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
@@ -95,12 +218,7 @@ struct shared_mutables {
gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
-static struct shared_mutables g_shared_mutables = {
- .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
-};
-
-static gpr_clock_type g_clock_type;
-static gpr_timespec g_start_time;
+static struct shared_mutables g_shared_mutables;
static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
if (a > GPR_ATM_MAX - b) {
@@ -114,51 +232,19 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
gpr_atm *next,
grpc_error *error);
-static gpr_timespec dbl_to_ts(double d) {
- gpr_timespec ts;
- ts.tv_sec = (int64_t)d;
- ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
- ts.clock_type = GPR_TIMESPAN;
- return ts;
-}
-
-static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
- ts = gpr_time_sub(ts, g_start_time);
- double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
- (double)ts.tv_nsec / GPR_NS_PER_MS +
- (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
- if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
-}
-
-static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
- ts = gpr_time_sub(ts, g_start_time);
- double x =
- GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
- if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return (gpr_atm)x;
-}
-
-static gpr_timespec atm_to_timespec(gpr_atm x) {
- return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
-}
-
static gpr_atm compute_min_deadline(timer_shard *shard) {
return grpc_timer_heap_is_empty(&shard->heap)
? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init(gpr_timespec now) {
+void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
uint32_t i;
g_shared_mutables.initialized = true;
+ g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
- g_clock_type = now.clock_type;
- g_start_time = now;
- g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
+ g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
grpc_register_tracer(&grpc_timer_trace);
@@ -176,6 +262,8 @@ void grpc_timer_list_init(gpr_timespec now) {
shard->min_deadline = compute_min_deadline(shard);
g_shard_queue[i] = shard;
}
+
+ INIT_TIMER_HASH_TABLE();
}
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
@@ -193,10 +281,6 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
g_shared_mutables.initialized = false;
}
-static double ts_to_dbl(gpr_timespec ts) {
- return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
-}
-
/* returns true if the first element in the list */
static void list_join(grpc_timer *head, grpc_timer *timer) {
timer->next = head;
@@ -234,21 +318,23 @@ static void note_deadline_change(timer_shard *shard) {
}
}
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
+
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now) {
+ grpc_millis deadline, grpc_closure *closure) {
int is_first_timer = 0;
timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
- GPR_ASSERT(deadline.clock_type == g_clock_type);
- GPR_ASSERT(now.clock_type == g_clock_type);
timer->closure = closure;
- gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);
+ timer->deadline = deadline;
+
+#ifndef NDEBUG
+ timer->hash_table_next = NULL;
+#endif
if (GRPC_TRACER_ON(grpc_timer_trace)) {
- gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
- "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
- timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec,
- now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb);
+ gpr_log(GPR_DEBUG,
+ "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
+ deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
}
if (!g_shared_mutables.initialized) {
@@ -261,7 +347,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_mu_lock(&shard->mu);
timer->pending = true;
- if (gpr_time_cmp(deadline, now) <= 0) {
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+ if (deadline <= now) {
timer->pending = false;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
gpr_mu_unlock(&shard->mu);
@@ -270,8 +357,11 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
grpc_time_averaged_stats_add_sample(&shard->stats,
- ts_to_dbl(gpr_time_sub(deadline, now)));
- if (deadline_atm < shard->queue_deadline_cap) {
+ (double)(deadline - now) / 1000.0);
+
+ ADD_TO_HASH_TABLE(timer);
+
+ if (deadline < shard->queue_deadline_cap) {
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
} else {
timer->heap_index = INVALID_HEAP_INDEX;
@@ -302,12 +392,12 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
shard->min_deadline);
}
- if (deadline_atm < shard->min_deadline) {
+ if (deadline < shard->min_deadline) {
gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
- shard->min_deadline = deadline_atm;
+ shard->min_deadline = deadline;
note_deadline_change(shard);
- if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) {
- gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm);
+ if (shard->shard_queue_index == 0 && deadline < old_min_deadline) {
+ gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline);
grpc_kick_poller();
}
}
@@ -332,7 +422,10 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
+
if (timer->pending) {
+ REMOVE_FROM_HASH_TABLE(timer);
+
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
timer->pending = false;
if (timer->heap_index == INVALID_HEAP_INDEX) {
@@ -340,6 +433,8 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
} else {
grpc_timer_heap_remove(&shard->heap, timer);
}
+ } else {
+ VALIDATE_NON_PENDING_TIMER(timer);
}
gpr_mu_unlock(&shard->mu);
}
@@ -406,8 +501,9 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
}
if (timer->deadline > now) return NULL;
if (GRPC_TRACER_ON(grpc_timer_trace)) {
- gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
- now - timer->deadline);
+ gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+ timer, now - timer->deadline,
+ timer->closure->scheduler->vtable->name);
}
timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
@@ -423,11 +519,16 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
grpc_timer *timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
+ REMOVE_FROM_HASH_TABLE(timer);
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
n++;
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
+ if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR,
+ (int)(shard - g_shards), n);
+ }
return n;
}
@@ -500,29 +601,27 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
}
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next) {
+ grpc_millis *next) {
// prelude
- GPR_ASSERT(now.clock_type == g_clock_type);
- gpr_atm now_atm = timespec_to_atm_round_down(now);
+ grpc_millis now = grpc_exec_ctx_now(exec_ctx);
/* fetch from a thread-local first: this avoids contention on a globally
mutable cacheline in the common case */
- gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer);
- if (now_atm < min_timer) {
+ grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer);
+ if (now < min_timer) {
if (next != NULL) {
- *next =
- atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
+ *next = GPR_MIN(*next, min_timer);
}
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG,
- "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
- now_atm, min_timer);
+ "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
+ min_timer);
}
return GRPC_TIMERS_CHECKED_AND_EMPTY;
}
grpc_error *shutdown_error =
- gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
+ now != GRPC_MILLIS_INF_FUTURE
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
@@ -532,34 +631,24 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
- next->tv_nsec, timespec_to_atm_round_down(*next));
+ gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
- gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR
- "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
- now.tv_sec, now.tv_nsec, now_atm, next_str,
- gpr_tls_get(&g_last_seen_min_timer),
+ gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR
+ " next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
+ now, next_str, gpr_tls_get(&g_last_seen_min_timer),
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
gpr_free(next_str);
}
// actual code
- grpc_timer_check_result r;
- gpr_atm next_atm;
- if (next == NULL) {
- r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error);
- } else {
- next_atm = timespec_to_atm_round_down(*next);
- r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error);
- *next = atm_to_timespec(next_atm);
- }
+ grpc_timer_check_result r =
+ run_some_expired_timers(exec_ctx, now, next, shutdown_error);
// tracing
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
char *next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
- next->tv_nsec, next_atm);
+ gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str);
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index 72a4ac1f10..f0597f6ea0 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -29,6 +29,9 @@ struct grpc_timer {
struct grpc_timer *next;
struct grpc_timer *prev;
grpc_closure *closure;
+#ifndef NDEBUG
+ struct grpc_timer *hash_table_next;
+#endif
};
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */
diff --git a/src/core/lib/iomgr/timer_heap.c b/src/core/lib/iomgr/timer_heap.cc
index a70e3942b2..2648d5da5d 100644
--- a/src/core/lib/iomgr/timer_heap.c
+++ b/src/core/lib/iomgr/timer_heap.cc
@@ -74,8 +74,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
}
@@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
- heap->timers =
- gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer **)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
diff --git a/src/core/lib/iomgr/timer_heap.h b/src/core/lib/iomgr/timer_heap.h
index 0d64199ab9..228d038ab3 100644
--- a/src/core/lib/iomgr/timer_heap.h
+++ b/src/core/lib/iomgr/timer_heap.h
@@ -21,6 +21,10 @@
#include "src/core/lib/iomgr/timer.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
grpc_timer **timers;
uint32_t timer_count;
@@ -39,4 +43,8 @@ void grpc_timer_heap_pop(grpc_timer_heap *heap);
int grpc_timer_heap_is_empty(grpc_timer_heap *heap);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H */
diff --git a/src/core/lib/iomgr/timer_manager.c b/src/core/lib/iomgr/timer_manager.cc
index 631f7935d9..1248f82189 100644
--- a/src/core/lib/iomgr/timer_manager.c
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -20,8 +20,11 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
#include <grpc/support/thd.h>
+#include <inttypes.h>
+
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/timer.h"
@@ -30,7 +33,7 @@ typedef struct completed_thread {
struct completed_thread *next;
} completed_thread;
-extern grpc_tracer_flag grpc_timer_check_trace;
+extern "C" grpc_tracer_flag grpc_timer_check_trace;
// global mutex
static gpr_mu g_mu;
@@ -52,7 +55,7 @@ static bool g_kicked;
static bool g_has_timed_waiter;
// the deadline of the current timed waiter thread (only relevant if
// g_has_timed_waiter is true)
-static gpr_timespec g_timed_waiter_deadline;
+static grpc_millis g_timed_waiter_deadline;
// generation counter to track which thread is waiting for the next timer
static uint64_t g_timed_waiter_generation;
@@ -83,7 +86,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
- completed_thread *ct = gpr_malloc(sizeof(*ct));
+ completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
@@ -96,9 +99,8 @@ static void start_timer_thread_and_unlock(void) {
void grpc_timer_manager_tick() {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- grpc_timer_check(&exec_ctx, now, &next);
+ grpc_millis next = GRPC_MILLIS_INF_FUTURE;
+ grpc_timer_check(&exec_ctx, &next);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -121,6 +123,9 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
gpr_mu_unlock(&g_mu);
}
// without our lock, flush the exec_ctx
+ if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
+ gpr_log(GPR_DEBUG, "flush exec_ctx");
+ }
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&g_mu);
// garbage collect any threads hanging out that are dead
@@ -133,8 +138,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
// wait until 'next' (or forever if there is already a timed waiter in the pool)
// returns true if the thread should continue executing (false if it should
// shutdown)
-static bool wait_until(gpr_timespec next) {
- const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
gpr_mu_lock(&g_mu);
// if we're not threaded anymore, leave
if (!g_threaded) {
@@ -168,30 +172,29 @@ static bool wait_until(gpr_timespec next) {
unless their 'next' is earlier than the current timed-waiter's deadline
(in which case the thread with earlier 'next' takes over as the new timed
waiter) */
- if (gpr_time_cmp(next, inf_future) != 0) {
- if (!g_has_timed_waiter ||
- (gpr_time_cmp(next, g_timed_waiter_deadline) < 0)) {
+ if (next != GRPC_MILLIS_INF_FUTURE) {
+ if (!g_has_timed_waiter || (next < g_timed_waiter_deadline)) {
my_timed_waiter_generation = ++g_timed_waiter_generation;
g_has_timed_waiter = true;
g_timed_waiter_deadline = next;
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
- gpr_timespec wait_time =
- gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC));
- gpr_log(GPR_DEBUG, "sleep for a %" PRId64 ".%09d seconds",
- wait_time.tv_sec, wait_time.tv_nsec);
+ grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx);
+ gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
+ wait_time);
}
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
- next = inf_future;
+ next = GRPC_MILLIS_INF_FUTURE;
}
}
if (GRPC_TRACER_ON(grpc_timer_check_trace) &&
- gpr_time_cmp(next, inf_future) == 0) {
+ next == GRPC_MILLIS_INF_FUTURE) {
gpr_log(GPR_DEBUG, "sleep until kicked");
}
- gpr_cv_wait(&g_cv_wait, &g_mu, next);
+ gpr_cv_wait(&g_cv_wait, &g_mu,
+ grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME));
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
@@ -203,7 +206,7 @@ static bool wait_until(gpr_timespec next) {
// there's work to do after checking timers (code above)
if (my_timed_waiter_generation == g_timed_waiter_generation) {
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
}
}
@@ -219,12 +222,11 @@ static bool wait_until(gpr_timespec next) {
}
static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
- const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC);
for (;;) {
- gpr_timespec next = inf_future;
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ grpc_millis next = GRPC_MILLIS_INF_FUTURE;
+ grpc_exec_ctx_invalidate_now(exec_ctx);
// check timer state, updates next to the next time to run a check
- switch (grpc_timer_check(exec_ctx, now, &next)) {
+ switch (grpc_timer_check(exec_ctx, &next)) {
case GRPC_TIMERS_FIRED:
run_some_timers(exec_ctx);
break;
@@ -241,10 +243,10 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, "timers not checked: expect another thread to");
}
- next = inf_future;
+ next = GRPC_MILLIS_INF_FUTURE;
/* fall through */
case GRPC_TIMERS_CHECKED_AND_EMPTY:
- if (!wait_until(next)) {
+ if (!wait_until(exec_ctx, next)) {
return;
}
break;
@@ -276,7 +278,7 @@ static void timer_thread(void *completed_thread_ptr) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
timer_main_loop(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- timer_thread_cleanup(completed_thread_ptr);
+ timer_thread_cleanup((completed_thread *)completed_thread_ptr);
}
static void start_threads(void) {
@@ -300,7 +302,7 @@ void grpc_timer_manager_init(void) {
g_completed_threads = NULL;
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
start_threads();
}
@@ -347,7 +349,7 @@ void grpc_kick_poller(void) {
gpr_mu_lock(&g_mu);
g_kicked = true;
g_has_timed_waiter = false;
- g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE;
++g_timed_waiter_generation;
gpr_cv_signal(&g_cv_wait);
gpr_mu_unlock(&g_mu);
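Throughout timer_manager.cc the timed waiter's deadline is now tracked as grpc_millis; the only place the old gpr_timespec representation survives is the condvar wait itself, where the deadline is converted at the last moment. A minimal sketch of that boundary, reusing this file's g_mu/g_cv_wait statics and an arbitrary 50ms wait:

    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 50;
    gpr_mu_lock(&g_mu);
    /* gpr_cv_wait still takes an absolute gpr_timespec, so convert at the edge */
    gpr_cv_wait(&g_cv_wait, &g_mu,
                grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME));
    gpr_mu_unlock(&g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
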
diff --git a/src/core/lib/iomgr/timer_manager.h b/src/core/lib/iomgr/timer_manager.h
index 0ba502928a..72960d6ffc 100644
--- a/src/core/lib/iomgr/timer_manager.h
+++ b/src/core/lib/iomgr/timer_manager.h
@@ -21,6 +21,10 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Timer Manager tries to keep one thread waiting for the next timeout at all
times */
@@ -34,4 +38,8 @@ void grpc_timer_manager_set_threading(bool enabled);
* disabled */
void grpc_timer_manager_tick(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_TIMER_MANAGER_H */
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.cc
index 70f49bcbe8..ccbbe357ae 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -29,9 +29,11 @@
#include <uv.h>
+extern "C" {
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
grpc_tracer_flag grpc_timer_check_trace =
GRPC_TRACER_INITIALIZER(false, "timer_check");
+}
static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); }
@@ -53,20 +55,19 @@ void run_expired_timer(uv_timer_t *handle) {
}
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- gpr_timespec deadline, grpc_closure *closure,
- gpr_timespec now) {
+ grpc_millis deadline, grpc_closure *closure) {
uint64_t timeout;
uv_timer_t *uv_timer;
GRPC_UV_ASSERT_SAME_THREAD();
timer->closure = closure;
- if (gpr_time_cmp(deadline, now) <= 0) {
+ if (deadline <= grpc_exec_ctx_now(exec_ctx)) {
timer->pending = 0;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
return;
}
timer->pending = 1;
- timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
- uv_timer = gpr_malloc(sizeof(uv_timer_t));
+ timeout = (uint64_t)(deadline - grpc_exec_ctx_now(exec_ctx));
+ uv_timer = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
uv_timer->data = timer;
timer->uv_timer = uv_timer;
@@ -77,6 +78,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_unref((uv_handle_t *)uv_timer);
}
+void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; }
+
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
GRPC_UV_ASSERT_SAME_THREAD();
if (timer->pending) {
@@ -87,11 +90,11 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
}
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- gpr_timespec now, gpr_timespec *next) {
+ grpc_millis *next) {
return GRPC_TIMERS_NOT_CHECKED;
}
-void grpc_timer_list_init(gpr_timespec now) {}
+void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {}
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {}
void grpc_timer_consume_kick(void) {}
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.cc
index 88fa34cb7a..00b2e68bb5 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -118,14 +118,14 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
- return arg->value.pointer.p;
+ return (grpc_socket_factory *)arg->value.pointer.p;
}
}
return NULL;
}
grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
- grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
+ grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@@ -176,7 +176,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
- grpc_udp_server *s = server;
+ grpc_udp_server *s = (grpc_udp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -237,7 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
+ struct shutdown_fd_args *args =
+ (struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -331,7 +332,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -354,7 +355,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = arg;
+ grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
@@ -393,7 +394,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
- sp = gpr_malloc(sizeof(grpc_udp_listener));
+ sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -444,7 +445,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t *)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
+ allocated_addr = (grpc_resolved_address *)gpr_malloc(
+ sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index 881468ea2c..e887cb1bcf 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Forward decl of struct grpc_server */
/* This is not typedef'ed to avoid a typedef-redefinition error */
struct grpc_server;
@@ -73,4 +77,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
grpc_closure *on_done);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_UDP_SERVER_H */
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.cc
index 0c8627c8c6..35f898f13a 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.cc
@@ -49,9 +49,11 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
gpr_free(err_msg);
return err;
}
- *addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
+ *addrs =
+ (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addrs)->naddrs = 1;
- (*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
+ (*addrs)->addrs =
+ (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
un = (struct sockaddr_un *)(*addrs)->addrs->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, name);
diff --git a/src/core/lib/iomgr/unix_sockets_posix.h b/src/core/lib/iomgr/unix_sockets_posix.h
index 25b64b3eec..3e7f9c7d1e 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.h
+++ b/src/core/lib/iomgr/unix_sockets_posix.h
@@ -25,6 +25,10 @@
#include "src/core/lib/iomgr/resolve_address.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_create_socketpair_if_unix(int sv[2]);
grpc_error *grpc_resolve_unix_domain_address(
@@ -38,4 +42,8 @@ void grpc_unlink_if_unix_domain_socket(
char *grpc_sockaddr_to_uri_unix_if_possible(
const grpc_resolved_address *resolved_addr);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H */
diff --git a/src/core/lib/iomgr/unix_sockets_posix_noop.c b/src/core/lib/iomgr/unix_sockets_posix_noop.cc
index e46b1c003d..e46b1c003d 100644
--- a/src/core/lib/iomgr/unix_sockets_posix_noop.c
+++ b/src/core/lib/iomgr/unix_sockets_posix_noop.cc
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.c b/src/core/lib/iomgr/wakeup_fd_cv.cc
index 075a0b6426..268e0175dd 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.c
+++ b/src/core/lib/iomgr/wakeup_fd_cv.cc
@@ -42,7 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
- g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+ g_cvfds.cvfds =
+ (fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;
@@ -56,7 +57,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
g_cvfds.free_fds = g_cvfds.free_fds->next_free;
g_cvfds.cvfds[idx].cvs = NULL;
g_cvfds.cvfds[idx].is_set = 0;
- fd_info->read_fd = IDX_TO_FD(idx);
+ fd_info->read_fd = GRPC_IDX_TO_FD(idx);
fd_info->write_fd = -1;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
@@ -65,8 +66,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
cv_node* cvn;
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 1;
- cvn = g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
+ cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
while (cvn) {
gpr_cv_signal(cvn->cv);
cvn = cvn->next;
@@ -77,7 +78,7 @@ static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
static grpc_error* cv_fd_consume(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].is_set = 0;
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 0;
gpr_mu_unlock(&g_cvfds.mu);
return GRPC_ERROR_NONE;
}
@@ -88,9 +89,9 @@ static void cv_fd_destroy(grpc_wakeup_fd* fd_info) {
}
gpr_mu_lock(&g_cvfds.mu);
// Assert that there are no active pollers
- GPR_ASSERT(!g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].cvs);
- g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
- g_cvfds.free_fds = &g_cvfds.cvfds[FD_TO_IDX(fd_info->read_fd)];
+ GPR_ASSERT(!g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs);
+ g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].next_free = g_cvfds.free_fds;
+ g_cvfds.free_fds = &g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)];
gpr_mu_unlock(&g_cvfds.mu);
}
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
index 46e84f5843..dcd7bdb560 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.h
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -37,8 +37,12 @@
#include "src/core/lib/iomgr/ev_posix.h"
-#define FD_TO_IDX(fd) (-(fd)-1)
-#define IDX_TO_FD(idx) (-(idx)-1)
+#define GRPC_FD_TO_IDX(fd) (-(fd)-1)
+#define GRPC_IDX_TO_FD(idx) (-(idx)-1)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
typedef struct cv_node {
gpr_cv* cv;
@@ -62,4 +66,10 @@ typedef struct cv_fd_table {
grpc_poll_function_type poll;
} cv_fd_table;
+extern const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */
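The GRPC_ prefix is the only change to these macros; the encoding itself is unchanged: condition-variable wakeup "fds" are negative integers, which can never collide with real file descriptors, and the two macros map them to and from slots in the cvfds table. With the definitions above:

    int fd = GRPC_IDX_TO_FD(7);      /* -(7)-1 == -8, clearly not a real fd   */
    size_t idx = GRPC_FD_TO_IDX(fd); /* -(-8)-1 == 7, back to the table slot  */
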
diff --git a/src/core/lib/iomgr/wakeup_fd_eventfd.c b/src/core/lib/iomgr/wakeup_fd_eventfd.cc
index 81cb7ee280..81cb7ee280 100644
--- a/src/core/lib/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/lib/iomgr/wakeup_fd_eventfd.cc
diff --git a/src/core/lib/iomgr/wakeup_fd_nospecial.c b/src/core/lib/iomgr/wakeup_fd_nospecial.cc
index 4c20b8c1b7..4c20b8c1b7 100644
--- a/src/core/lib/iomgr/wakeup_fd_nospecial.c
+++ b/src/core/lib/iomgr/wakeup_fd_nospecial.cc
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.c b/src/core/lib/iomgr/wakeup_fd_pipe.cc
index 4189488f8a..05d69dc9cc 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.c
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.cc
@@ -20,6 +20,7 @@
#ifdef GRPC_POSIX_WAKEUP_FD
+#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include <errno.h>
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.h b/src/core/lib/iomgr/wakeup_fd_pipe.h
index f860406bda..9bbb5e2ff7 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.h
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.h
@@ -21,6 +21,14 @@
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-extern grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable;
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable;
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_PIPE_H */
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.c b/src/core/lib/iomgr/wakeup_fd_posix.cc
index 25daa7d3fb..9af96d314b 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.c
+++ b/src/core/lib/iomgr/wakeup_fd_posix.cc
@@ -25,7 +25,6 @@
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-extern grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
int grpc_allow_specialized_wakeup_fd = 1;
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.h b/src/core/lib/iomgr/wakeup_fd_posix.h
index a9584d0d48..ae7849f98c 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.h
+++ b/src/core/lib/iomgr/wakeup_fd_posix.h
@@ -49,6 +49,10 @@
#include "src/core/lib/iomgr/error.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_wakeup_fd_global_init(void);
void grpc_wakeup_fd_global_destroy(void);
@@ -91,4 +95,8 @@ void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info);
* wakeup_fd_nospecial.c if no such implementation exists. */
extern const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_POSIX_H */
diff --git a/src/core/lib/json/json.c b/src/core/lib/json/json.cc
index 25eee05532..4ad51f662a 100644
--- a/src/core/lib/json/json.c
+++ b/src/core/lib/json/json.cc
@@ -23,7 +23,7 @@
#include "src/core/lib/json/json.h"
grpc_json* grpc_json_create(grpc_json_type type) {
- grpc_json* json = gpr_zalloc(sizeof(*json));
+ grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json));
json->type = type;
return json;
diff --git a/src/core/lib/json/json.h b/src/core/lib/json/json.h
index bbd43025eb..c9fdec4ecb 100644
--- a/src/core/lib/json/json.h
+++ b/src/core/lib/json/json.h
@@ -23,6 +23,10 @@
#include "src/core/lib/json/json_common.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A tree-like structure to hold json values. The key and value pointers
* are not owned by it.
*/
@@ -70,4 +74,8 @@ char* grpc_json_dump_to_string(grpc_json* json, int indent);
grpc_json* grpc_json_create(grpc_json_type type);
void grpc_json_destroy(grpc_json* json);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_JSON_JSON_H */
diff --git a/src/core/lib/json/json_reader.c b/src/core/lib/json/json_reader.cc
index 094a35176c..094a35176c 100644
--- a/src/core/lib/json/json_reader.c
+++ b/src/core/lib/json/json_reader.cc
diff --git a/src/core/lib/json/json_reader.h b/src/core/lib/json/json_reader.h
index 577fbbbaf6..7f14a9a9c8 100644
--- a/src/core/lib/json/json_reader.h
+++ b/src/core/lib/json/json_reader.h
@@ -22,6 +22,10 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/json/json_common.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
GRPC_JSON_STATE_OBJECT_KEY_BEGIN,
GRPC_JSON_STATE_OBJECT_KEY_STRING,
@@ -142,4 +146,8 @@ void grpc_json_reader_init(grpc_json_reader *reader,
*/
int grpc_json_reader_is_complete(grpc_json_reader *reader);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_JSON_JSON_READER_H */
diff --git a/src/core/lib/json/json_string.c b/src/core/lib/json/json_string.cc
index 65b5f0f482..3178d2d2b4 100644
--- a/src/core/lib/json/json_string.c
+++ b/src/core/lib/json/json_string.cc
@@ -63,19 +63,19 @@ typedef struct {
* bytes at a time (or multiples thereof).
*/
static void json_writer_output_check(void *userdata, size_t needed) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xffU;
- state->output = gpr_realloc(state->output, state->allocated + needed);
+ state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
}
/* These are needed by the writer's implementation. */
static void json_writer_output_char(void *userdata, char c) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
@@ -83,7 +83,7 @@ static void json_writer_output_char(void *userdata, char c) {
static void json_writer_output_string_with_len(void *userdata, const char *str,
size_t len) {
- json_writer_userdata *state = userdata;
+ json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
@@ -99,7 +99,7 @@ static void json_writer_output_string(void *userdata, const char *str) {
* the end of the current string, and advance our output pointer.
*/
static void json_reader_string_clear(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -108,7 +108,7 @@ static void json_reader_string_clear(void *userdata) {
}
static void json_reader_string_add_char(void *userdata, uint32_t c) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (uint8_t)c;
@@ -149,7 +149,7 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
*/
static uint32_t json_reader_read_char(void *userdata) {
uint32_t r;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -168,7 +168,7 @@ static uint32_t json_reader_read_char(void *userdata) {
* our tree-in-progress inside our opaque structure.
*/
static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
@@ -194,7 +194,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
}
static void json_reader_container_begins(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -215,7 +215,7 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
*/
static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->current_container);
@@ -236,18 +236,18 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
static void json_reader_set_key(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
state->key = state->string;
}
static void json_reader_set_string(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
json->value = (char *)state->string;
}
static int json_reader_set_number(void *userdata) {
- json_reader_userdata *state = userdata;
+ json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
json->value = (char *)state->string;
return 1;
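Every reader/writer callback in this file receives its state through a void *userdata slot in a vtable, which is why each one now opens with an explicit cast when compiled as C++. Reduced to a toy callback matching the output_char shape from grpc_json_writer_vtable (the counter_state type is invented for illustration):

    typedef struct counter_state { int chars; } counter_state;

    static void count_char(void *userdata, char c) {
      counter_state *state = (counter_state *)userdata; /* explicit cast for C++ */
      (void)c;
      state->chars++;
    }
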
diff --git a/src/core/lib/json/json_writer.c b/src/core/lib/json/json_writer.cc
index eab1bff7a6..eab1bff7a6 100644
--- a/src/core/lib/json/json_writer.c
+++ b/src/core/lib/json/json_writer.cc
diff --git a/src/core/lib/json/json_writer.h b/src/core/lib/json/json_writer.h
index 8779039d42..132d1f24e8 100644
--- a/src/core/lib/json/json_writer.h
+++ b/src/core/lib/json/json_writer.h
@@ -35,6 +35,10 @@
#include "src/core/lib/json/json_common.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_json_writer_vtable {
/* Adds a character to the output stream. */
void (*output_char)(void *userdata, char);
@@ -79,4 +83,8 @@ void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
void grpc_json_writer_value_string(grpc_json_writer *writer,
const char *string);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_JSON_JSON_WRITER_H */
diff --git a/src/core/lib/profiling/basic_timers.c b/src/core/lib/profiling/basic_timers.cc
index c7645b74f2..0ae7d7f600 100644
--- a/src/core/lib/profiling/basic_timers.c
+++ b/src/core/lib/profiling/basic_timers.cc
@@ -18,10 +18,10 @@
#include <grpc/support/port_platform.h>
-#ifdef GRPC_BASIC_PROFILER
-
#include "src/core/lib/profiling/timers.h"
+#ifdef GRPC_BASIC_PROFILER
+
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
@@ -209,9 +209,9 @@ static void init_output() {
static void rotate_log() {
/* Using malloc here, as this code could end up being called by gpr_malloc */
- gpr_timer_log *new = malloc(sizeof(*new));
+ gpr_timer_log *log = static_cast<gpr_timer_log *>(malloc(sizeof(*log)));
gpr_once_init(&g_once_init, init_output);
- new->num_entries = 0;
+ log->num_entries = 0;
pthread_mutex_lock(&g_mu);
if (g_thread_log != NULL) {
timer_log_remove(&g_in_progress_logs, g_thread_log);
@@ -221,9 +221,9 @@ static void rotate_log() {
} else {
g_thread_id = g_next_thread_id++;
}
- timer_log_push_back(&g_in_progress_logs, new);
+ timer_log_push_back(&g_in_progress_logs, log);
pthread_mutex_unlock(&g_mu);
- g_thread_log = new;
+ g_thread_log = log;
}
static void gpr_timers_log_add(const char *tagstr, marker_type type,
diff --git a/src/core/lib/profiling/stap_timers.c b/src/core/lib/profiling/stap_timers.cc
index c86d74f058..c86d74f058 100644
--- a/src/core/lib/profiling/stap_timers.c
+++ b/src/core/lib/profiling/stap_timers.cc
diff --git a/src/core/lib/security/context/security_context.c b/src/core/lib/security/context/security_context.cc
index 8fff2c92c5..31d800b9b4 100644
--- a/src/core/lib/security/context/security_context.c
+++ b/src/core/lib/security/context/security_context.cc
@@ -82,7 +82,8 @@ void grpc_auth_context_release(grpc_auth_context *context) {
/* --- grpc_client_security_context --- */
grpc_client_security_context *grpc_client_security_context_create(void) {
- return gpr_zalloc(sizeof(grpc_client_security_context));
+ return (grpc_client_security_context *)gpr_zalloc(
+ sizeof(grpc_client_security_context));
}
void grpc_client_security_context_destroy(void *ctx) {
@@ -100,7 +101,8 @@ void grpc_client_security_context_destroy(void *ctx) {
/* --- grpc_server_security_context --- */
grpc_server_security_context *grpc_server_security_context_create(void) {
- return gpr_zalloc(sizeof(grpc_server_security_context));
+ return (grpc_server_security_context *)gpr_zalloc(
+ sizeof(grpc_server_security_context));
}
void grpc_server_security_context_destroy(void *ctx) {
@@ -117,7 +119,8 @@ void grpc_server_security_context_destroy(void *ctx) {
static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL};
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
- grpc_auth_context *ctx = gpr_zalloc(sizeof(grpc_auth_context));
+ grpc_auth_context *ctx =
+ (grpc_auth_context *)gpr_zalloc(sizeof(grpc_auth_context));
gpr_ref_init(&ctx->refcount, 1);
if (chained != NULL) {
ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained");
@@ -258,9 +261,9 @@ static void ensure_auth_context_capacity(grpc_auth_context *ctx) {
if (ctx->properties.count == ctx->properties.capacity) {
ctx->properties.capacity =
GPR_MAX(ctx->properties.capacity + 8, ctx->properties.capacity * 2);
- ctx->properties.array =
- gpr_realloc(ctx->properties.array,
- ctx->properties.capacity * sizeof(grpc_auth_property));
+ ctx->properties.array = (grpc_auth_property *)gpr_realloc(
+ ctx->properties.array,
+ ctx->properties.capacity * sizeof(grpc_auth_property));
}
}
@@ -275,7 +278,7 @@ void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
ensure_auth_context_capacity(ctx);
prop = &ctx->properties.array[ctx->properties.count++];
prop->name = gpr_strdup(name);
- prop->value = gpr_malloc(value_length + 1);
+ prop->value = (char *)gpr_malloc(value_length + 1);
memcpy(prop->value, value, value_length);
prop->value[value_length] = '\0';
prop->value_length = value_length;
@@ -302,11 +305,12 @@ void grpc_auth_property_reset(grpc_auth_property *property) {
}
static void auth_context_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- GRPC_AUTH_CONTEXT_UNREF(p, "auth_context_pointer_arg");
+ GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context *)p, "auth_context_pointer_arg");
}
static void *auth_context_pointer_arg_copy(void *p) {
- return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg");
+ return GRPC_AUTH_CONTEXT_REF((grpc_auth_context *)p,
+ "auth_context_pointer_arg");
}
static int auth_context_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
@@ -316,7 +320,7 @@ static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
auth_context_pointer_cmp};
grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
- return grpc_channel_arg_pointer_create(GRPC_AUTH_CONTEXT_ARG, p,
+ return grpc_channel_arg_pointer_create((char *)GRPC_AUTH_CONTEXT_ARG, p,
&auth_context_pointer_vtable);
}
@@ -327,7 +331,7 @@ grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) {
GRPC_AUTH_CONTEXT_ARG);
return NULL;
}
- return arg->value.pointer.p;
+ return (grpc_auth_context *)arg->value.pointer.p;
}
grpc_auth_context *grpc_find_auth_context_in_args(
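Aside (not part of the diff): most of the security_context changes are the same mechanical fix, adding an explicit cast wherever an allocation result or an opaque `void *` channel-arg pointer is assigned to a typed pointer, since C++ drops C's implicit `void *` conversion. An illustrative standalone version of the copy-vtable pattern (the type and names here are stand-ins, not the gRPC ones):

#include <stdlib.h>

// Stand-in for a refcounted context stored behind a void * channel arg.
struct auth_context {
  int refcount;
};

static void *context_arg_copy(void *p) {
  // In C, `auth_context *ctx = p;` compiles; C++ needs the cast, which is
  // exactly what the hunks above add around arg->value.pointer.p.
  auth_context *ctx = (auth_context *)p;
  ctx->refcount++;
  return ctx;
}

int main() {
  auth_context *ctx = (auth_context *)calloc(1, sizeof(auth_context));
  ctx->refcount = 1;
  context_arg_copy(ctx);
  free(ctx);
  return 0;
}
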
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.cc
index 09fd60a12c..779300ac07 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.c
+++ b/src/core/lib/security/credentials/composite/composite_credentials.cc
@@ -79,7 +79,8 @@ static bool composite_call_get_request_metadata(
grpc_error **error) {
grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
grpc_composite_call_credentials_metadata_context *ctx;
- ctx = gpr_zalloc(sizeof(grpc_composite_call_credentials_metadata_context));
+ ctx = (grpc_composite_call_credentials_metadata_context *)gpr_zalloc(
+ sizeof(grpc_composite_call_credentials_metadata_context));
ctx->composite_creds = c;
ctx->pollent = pollent;
ctx->auth_md_context = auth_md_context;
@@ -87,6 +88,7 @@ static bool composite_call_get_request_metadata(
ctx->on_request_metadata = on_request_metadata;
GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata,
composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx);
+ bool synchronous = true;
while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
grpc_call_credentials *inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
@@ -95,19 +97,12 @@ static bool composite_call_get_request_metadata(
ctx->md_array, &ctx->internal_on_request_metadata, error)) {
if (*error != GRPC_ERROR_NONE) break;
} else {
+ synchronous = false; // Async return.
break;
}
}
- // If we got through all creds synchronously or we got a synchronous
- // error on one of them, return synchronously.
- if (ctx->creds_index == ctx->composite_creds->inner.num_creds ||
- *error != GRPC_ERROR_NONE) {
- gpr_free(ctx);
- return true;
- }
- // At least one inner cred is returning asynchronously, so we'll
- // return asynchronously as well.
- return false;
+ if (synchronous) gpr_free(ctx);
+ return synchronous;
}
static void composite_call_cancel_get_request_metadata(
@@ -152,7 +147,8 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(creds1 != NULL);
GPR_ASSERT(creds2 != NULL);
- c = gpr_zalloc(sizeof(grpc_composite_call_credentials));
+ c = (grpc_composite_call_credentials *)gpr_zalloc(
+ sizeof(grpc_composite_call_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE;
c->base.vtable = &composite_call_credentials_vtable;
gpr_ref_init(&c->base.refcount, 1);
@@ -160,7 +156,8 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
creds2_array = get_creds_array(&creds2);
c->inner.num_creds = creds1_array.num_creds + creds2_array.num_creds;
creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials *);
- c->inner.creds_array = gpr_zalloc(creds_array_byte_size);
+ c->inner.creds_array =
+ (grpc_call_credentials **)gpr_zalloc(creds_array_byte_size);
for (i = 0; i < creds1_array.num_creds; i++) {
grpc_call_credentials *cur_creds = creds1_array.creds_array[i];
c->inner.creds_array[i] = grpc_call_credentials_ref(cur_creds);
@@ -254,7 +251,8 @@ static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
grpc_channel_credentials *grpc_composite_channel_credentials_create(
grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
void *reserved) {
- grpc_composite_channel_credentials *c = gpr_zalloc(sizeof(*c));
+ grpc_composite_channel_credentials *c =
+ (grpc_composite_channel_credentials *)gpr_zalloc(sizeof(*c));
GPR_ASSERT(channel_creds != NULL && call_creds != NULL && reserved == NULL);
GRPC_API_TRACE(
"grpc_composite_channel_credentials_create(channel_creds=%p, "
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index 3076afcb7e..6e9f9a8f6f 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -21,6 +21,10 @@
#include "src/core/lib/security/credentials/credentials.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
grpc_call_credentials **creds_array;
size_t num_creds;
@@ -53,5 +57,9 @@ typedef struct {
grpc_call_credentials_array inner;
} grpc_composite_call_credentials;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \
*/
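Aside (not part of the diff): each converted header gains the guard shown above so its declarations keep C linkage regardless of whether the including translation unit is built as C or as C++. The general shape, with an illustrative header name:

/* example_api.h -- illustrative, not one of the gRPC headers above. */
#ifndef EXAMPLE_API_H
#define EXAMPLE_API_H

#ifdef __cplusplus
extern "C" {
#endif

/* Unmangled C linkage, so the symbol can be defined in a .c or .cc file
   and referenced from both during the migration. */
void example_api_init(void);

#ifdef __cplusplus
}
#endif

#endif /* EXAMPLE_API_H */
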
diff --git a/src/core/lib/security/credentials/credentials.c b/src/core/lib/security/credentials/credentials.cc
index 8a67c9865b..ebbf350865 100644
--- a/src/core/lib/security/credentials/credentials.c
+++ b/src/core/lib/security/credentials/credentials.cc
@@ -40,7 +40,8 @@
grpc_credentials_metadata_request *grpc_credentials_metadata_request_create(
grpc_call_credentials *creds) {
grpc_credentials_metadata_request *r =
- gpr_zalloc(sizeof(grpc_credentials_metadata_request));
+ (grpc_credentials_metadata_request *)gpr_zalloc(
+ sizeof(grpc_credentials_metadata_request));
r->creds = grpc_call_credentials_ref(creds);
return r;
}
@@ -148,11 +149,11 @@ grpc_channel_credentials_duplicate_without_call_credentials(
}
static void credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_channel_credentials_unref(exec_ctx, p);
+ grpc_channel_credentials_unref(exec_ctx, (grpc_channel_credentials *)p);
}
static void *credentials_pointer_arg_copy(void *p) {
- return grpc_channel_credentials_ref(p);
+ return grpc_channel_credentials_ref((grpc_channel_credentials *)p);
}
static int credentials_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
@@ -163,8 +164,9 @@ static const grpc_arg_pointer_vtable credentials_pointer_vtable = {
grpc_arg grpc_channel_credentials_to_arg(
grpc_channel_credentials *credentials) {
- return grpc_channel_arg_pointer_create(
- GRPC_ARG_CHANNEL_CREDENTIALS, credentials, &credentials_pointer_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_CHANNEL_CREDENTIALS,
+ credentials,
+ &credentials_pointer_vtable);
}
grpc_channel_credentials *grpc_channel_credentials_from_arg(
@@ -175,7 +177,7 @@ grpc_channel_credentials *grpc_channel_credentials_from_arg(
GRPC_ARG_CHANNEL_CREDENTIALS);
return NULL;
}
- return arg->value.pointer.p;
+ return (grpc_channel_credentials *)arg->value.pointer.p;
}
grpc_channel_credentials *grpc_channel_credentials_find_in_args(
@@ -244,11 +246,11 @@ void grpc_server_credentials_set_auth_metadata_processor(
static void server_credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx,
void *p) {
- grpc_server_credentials_unref(exec_ctx, p);
+ grpc_server_credentials_unref(exec_ctx, (grpc_server_credentials *)p);
}
static void *server_credentials_pointer_arg_copy(void *p) {
- return grpc_server_credentials_ref(p);
+ return grpc_server_credentials_ref((grpc_server_credentials *)p);
}
static int server_credentials_pointer_cmp(void *a, void *b) {
@@ -260,7 +262,7 @@ static const grpc_arg_pointer_vtable cred_ptr_vtable = {
server_credentials_pointer_cmp};
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
- return grpc_channel_arg_pointer_create(GRPC_SERVER_CREDENTIALS_ARG, p,
+ return grpc_channel_arg_pointer_create((char *)GRPC_SERVER_CREDENTIALS_ARG, p,
&cred_ptr_vtable);
}
@@ -271,7 +273,7 @@ grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) {
GRPC_SERVER_CREDENTIALS_ARG);
return NULL;
}
- return arg->value.pointer.p;
+ return (grpc_server_credentials *)arg->value.pointer.p;
}
grpc_server_credentials *grpc_find_server_credentials_in_args(
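Aside (not part of the diff): the `(char *)` casts in front of GRPC_ARG_CHANNEL_CREDENTIALS and GRPC_SERVER_CREDENTIALS_ARG are there because those macros expand to string literals, which have type `const char[]` in C++ and no longer convert to the `char *` parameters of the channel-arg constructors. A standalone illustration (the API below is invented for the example):

#include <stdio.h>

/* Illustrative C-era API that takes a mutable char * key. */
static void register_arg(char *key, void *value) {
  printf("%s -> %p\n", key, value);
}

int main() {
  int credentials = 0;
  /* The literal is const char[] in C++, so the cast below is required;
     the hunks above add the same cast in front of the GRPC_ARG_* macros. */
  register_arg((char *)"example.channel_credentials", &credentials);
  return 0;
}
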
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 04a54b0ca8..73e39ae039 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -29,6 +29,10 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/security/transport/security_connector.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct grpc_http_response;
/* --- Constants. --- */
@@ -252,4 +256,8 @@ grpc_credentials_metadata_request *grpc_credentials_metadata_request_create(
void grpc_credentials_metadata_request_destroy(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/credentials_metadata.c b/src/core/lib/security/credentials/credentials_metadata.cc
index ccd39e610f..5ba98bda4e 100644
--- a/src/core/lib/security/credentials/credentials_metadata.c
+++ b/src/core/lib/security/credentials/credentials_metadata.cc
@@ -33,7 +33,8 @@ static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array *list,
while (new_size < target_size) {
new_size *= 2;
}
- list->md = gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size);
+ list->md =
+ (grpc_mdelem *)gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size);
}
void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list,
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.cc
index ac9017850f..cf10bf24c8 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.cc
@@ -38,7 +38,8 @@ static grpc_security_status fake_transport_security_create_security_connector(
grpc_call_credentials *call_creds, const char *target,
const grpc_channel_args *args, grpc_channel_security_connector **sc,
grpc_channel_args **new_args) {
- *sc = grpc_fake_channel_security_connector_create(call_creds, target, args);
+ *sc =
+ grpc_fake_channel_security_connector_create(c, call_creds, target, args);
return GRPC_SECURITY_OK;
}
@@ -46,7 +47,7 @@ static grpc_security_status
fake_transport_security_server_create_security_connector(
grpc_exec_ctx *exec_ctx, grpc_server_credentials *c,
grpc_server_security_connector **sc) {
- *sc = grpc_fake_server_security_connector_create();
+ *sc = grpc_fake_server_security_connector_create(c);
return GRPC_SECURITY_OK;
}
@@ -60,7 +61,8 @@ static grpc_server_credentials_vtable
grpc_channel_credentials *grpc_fake_transport_security_credentials_create(
void) {
- grpc_channel_credentials *c = gpr_zalloc(sizeof(grpc_channel_credentials));
+ grpc_channel_credentials *c =
+ (grpc_channel_credentials *)gpr_zalloc(sizeof(grpc_channel_credentials));
c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
c->vtable = &fake_transport_security_credentials_vtable;
gpr_ref_init(&c->refcount, 1);
@@ -69,7 +71,8 @@ grpc_channel_credentials *grpc_fake_transport_security_credentials_create(
grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
void) {
- grpc_server_credentials *c = gpr_malloc(sizeof(grpc_server_credentials));
+ grpc_server_credentials *c =
+ (grpc_server_credentials *)gpr_malloc(sizeof(grpc_server_credentials));
memset(c, 0, sizeof(grpc_server_credentials));
c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
gpr_ref_init(&c->refcount, 1);
@@ -78,8 +81,8 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
}
grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets) {
- return grpc_channel_arg_string_create(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS,
- expected_targets);
+ return grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
}
const char *grpc_fake_transport_get_expected_targets(
@@ -129,7 +132,8 @@ grpc_call_credentials *grpc_md_only_test_credentials_create(
grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value,
bool is_async) {
grpc_md_only_test_credentials *c =
- gpr_zalloc(sizeof(grpc_md_only_test_credentials));
+ (grpc_md_only_test_credentials *)gpr_zalloc(
+ sizeof(grpc_md_only_test_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &md_only_test_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.h b/src/core/lib/security/credentials/fake/fake_credentials.h
index aa0f3b6e20..ed3f893c58 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.h
+++ b/src/core/lib/security/credentials/fake/fake_credentials.h
@@ -21,6 +21,10 @@
#include "src/core/lib/security/credentials/credentials.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* -- Fake transport security credentials. -- */
/* Creates a fake transport security credentials object for testing. */
@@ -56,4 +60,8 @@ typedef struct {
bool is_async;
} grpc_md_only_test_credentials;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/google_default/credentials_generic.c b/src/core/lib/security/credentials/google_default/credentials_generic.cc
index 4f79718f3d..4f79718f3d 100644
--- a/src/core/lib/security/credentials/google_default/credentials_generic.c
+++ b/src/core/lib/security/credentials/google_default/credentials_generic.cc
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index a2a8e289ee..5b2ddceb4a 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -28,6 +28,7 @@
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/security/credentials/google_default/google_default_credentials.h"
#include "src/core/lib/security/credentials/jwt/jwt_credentials.h"
#include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -79,12 +80,13 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
detector->is_done = 1;
GRPC_LOG_IF_ERROR(
"Pollset kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), NULL));
+ grpc_pollset_kick(exec_ctx,
+ grpc_polling_entity_pollset(&detector->pollent), NULL));
gpr_mu_unlock(g_polling_mu);
}
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, grpc_error *e) {
- grpc_pollset_destroy(exec_ctx, p);
+ grpc_pollset_destroy(exec_ctx, (grpc_pollset *)p);
}
static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
@@ -95,9 +97,9 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
- gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
+ grpc_millis max_detection_delay = GPR_MS_PER_SEC;
- grpc_pollset *pollset = gpr_zalloc(grpc_pollset_size());
+ grpc_pollset *pollset = (grpc_pollset *)gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(pollset, &g_polling_mu);
detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
detector.is_done = 0;
@@ -105,8 +107,8 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
memset(&detector.response, 0, sizeof(detector.response));
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
- request.http.path = "/";
+ request.host = (char *)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
+ request.http.path = (char *)"/";
grpc_httpcli_context_init(&context);
@@ -114,7 +116,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
exec_ctx, &context, &detector.pollent, resource_quota, &request,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
+ grpc_exec_ctx_now(exec_ctx) + max_detection_delay,
GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector,
grpc_schedule_on_exec_ctx),
&detector.response);
@@ -131,8 +133,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
"pollset_work",
grpc_pollset_work(exec_ctx,
grpc_polling_entity_pollset(&detector.pollent),
- &worker, gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_inf_future(GPR_CLOCK_MONOTONIC)))) {
+ &worker, GRPC_MILLIS_INF_FUTURE))) {
detector.is_done = 1;
detector.success = 0;
}
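Aside (not part of the diff): the compute-engine detection hunks also switch its deadlines from wall-clock `gpr_timespec` values to `grpc_millis` offsets from `grpc_exec_ctx_now()`. A rough standalone analogue of that arithmetic using std::chrono (the names below are stand-ins, not the gRPC API):

#include <chrono>
#include <cstdint>
#include <cstdio>

typedef int64_t millis;               // plays the role of grpc_millis

static millis now_ms() {              // plays the role of grpc_exec_ctx_now()
  return std::chrono::duration_cast<std::chrono::milliseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

int main() {
  const millis kMsPerSec = 1000;                     // role of GPR_MS_PER_SEC
  millis max_detection_delay = 1 * kMsPerSec;        // the one-second budget
  millis deadline = now_ms() + max_detection_delay;  // absolute deadline
  printf("deadline in %lld ms\n", (long long)(deadline - now_ms()));
  return 0;
}
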
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index c3755e01a6..66677873ca 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -23,6 +23,10 @@
#include "src/core/lib/security/credentials/credentials.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#define GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY "gcloud"
#define GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE \
"application_default_credentials.json"
@@ -41,5 +45,9 @@
void grpc_flush_cached_google_default_credentials(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
*/
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.c b/src/core/lib/security/credentials/iam/iam_credentials.cc
index 3de8319d98..e9cf208c16 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.c
+++ b/src/core/lib/security/credentials/iam/iam_credentials.cc
@@ -64,7 +64,8 @@ grpc_call_credentials *grpc_google_iam_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(token != NULL);
GPR_ASSERT(authority_selector != NULL);
- grpc_google_iam_credentials *c = gpr_zalloc(sizeof(*c));
+ grpc_google_iam_credentials *c =
+ (grpc_google_iam_credentials *)gpr_zalloc(sizeof(*c));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_IAM;
c->base.vtable = &iam_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/jwt/json_token.c b/src/core/lib/security/credentials/jwt/json_token.cc
index fff71255a5..8c30353470 100644
--- a/src/core/lib/security/credentials/jwt/json_token.c
+++ b/src/core/lib/security/credentials/jwt/json_token.cc
@@ -20,6 +20,7 @@
#include <string.h>
+#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
@@ -29,9 +30,11 @@
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/support/string.h"
+extern "C" {
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
+}
/* --- Constants. --- */
@@ -96,7 +99,7 @@ grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) {
gpr_log(GPR_ERROR, "Could not write into openssl BIO.");
goto end;
}
- result.private_key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, "");
+ result.private_key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, (void *)"");
if (result.private_key == NULL) {
gpr_log(GPR_ERROR, "Could not deserialize private key.");
goto end;
@@ -214,7 +217,7 @@ static char *dot_concat_and_free_strings(char *str1, char *str2) {
size_t str1_len = strlen(str1);
size_t str2_len = strlen(str2);
size_t result_len = str1_len + 1 /* dot */ + str2_len;
- char *result = gpr_malloc(result_len + 1 /* NULL terminated */);
+ char *result = (char *)gpr_malloc(result_len + 1 /* NULL terminated */);
char *current = result;
memcpy(current, str1, str1_len);
current += str1_len;
@@ -266,7 +269,7 @@ char *compute_and_encode_signature(const grpc_auth_json_key *json_key,
gpr_log(GPR_ERROR, "DigestFinal (get signature length) failed.");
goto end;
}
- sig = gpr_malloc(sig_len);
+ sig = (unsigned char *)gpr_malloc(sig_len);
if (EVP_DigestSignFinal(md_ctx, sig, &sig_len) != 1) {
gpr_log(GPR_ERROR, "DigestFinal (signature compute) failed.");
goto end;
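Aside (not part of the diff): two C++-only issues show up in json_token. The OpenSSL includes are wrapped in extern "C" so their declarations are seen with C linkage when the file is compiled as C++, and the password argument of PEM_read_bio_RSAPrivateKey gains a `(void *)` cast because a string literal is a `const char *` and C++ will not drop the const when converting to `void *`. A minimal standalone sketch of that second point (the callback below is invented for the example):

/* Illustrative only; not an OpenSSL or gRPC declaration. */
static void takes_userdata(void *u) { (void)u; }

int main() {
  /* In C, "" converts to void * silently; in C++ the conversion from
     const char * to void * would drop const, so the cast is required --
     the same reason for the (void *)"" in the hunk above. */
  takes_userdata((void *)"");
  return 0;
}
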
diff --git a/src/core/lib/security/credentials/jwt/json_token.h b/src/core/lib/security/credentials/jwt/json_token.h
index e50790ef2e..b923b02df6 100644
--- a/src/core/lib/security/credentials/jwt/json_token.h
+++ b/src/core/lib/security/credentials/jwt/json_token.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <grpc/slice.h>
#include <openssl/rsa.h>
@@ -70,4 +74,8 @@ typedef char *(*grpc_jwt_encode_and_sign_override)(
void grpc_jwt_encode_and_sign_set_override(
grpc_jwt_encode_and_sign_override func);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.c b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
index 02c82e99ba..835dd677ed 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.c
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
@@ -16,8 +16,11 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/security/credentials/jwt/jwt_credentials.h"
+#include <inttypes.h>
#include <string.h>
#include "src/core/lib/surface/api_trace.h"
@@ -125,7 +128,8 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation");
return NULL;
}
- c = gpr_zalloc(sizeof(grpc_service_account_jwt_access_credentials));
+ c = (grpc_service_account_jwt_access_credentials *)gpr_zalloc(
+ sizeof(grpc_service_account_jwt_access_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_JWT;
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &jwt_vtable;
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.h b/src/core/lib/security/credentials/jwt/jwt_credentials.h
index 07f4022669..5cee6ed0da 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.h
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.h
@@ -22,6 +22,10 @@
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/jwt/json_token.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
grpc_call_credentials base;
@@ -45,4 +49,8 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
grpc_exec_ctx *exec_ctx, grpc_auth_json_key key,
gpr_timespec token_lifetime);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
index a27284bc50..39e72c195b 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
@@ -26,7 +26,10 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
+
+extern "C" {
#include <openssl/pem.h>
+}
#include "src/core/lib/http/httpcli.h"
#include "src/core/lib/iomgr/polling_entity.h"
@@ -129,7 +132,7 @@ static void jose_header_destroy(grpc_exec_ctx *exec_ctx, jose_header *h) {
static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx,
grpc_json *json, grpc_slice buffer) {
grpc_json *cur;
- jose_header *h = gpr_zalloc(sizeof(jose_header));
+ jose_header *h = (jose_header *)gpr_zalloc(sizeof(jose_header));
h->buffer = buffer;
for (cur = json->child; cur != NULL; cur = cur->next) {
if (strcmp(cur->key, "alg") == 0) {
@@ -231,7 +234,8 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx,
grpc_json *json, grpc_slice buffer) {
grpc_json *cur;
- grpc_jwt_claims *claims = gpr_malloc(sizeof(grpc_jwt_claims));
+ grpc_jwt_claims *claims =
+ (grpc_jwt_claims *)gpr_malloc(sizeof(grpc_jwt_claims));
memset(claims, 0, sizeof(grpc_jwt_claims));
claims->json = json;
claims->buffer = buffer;
@@ -347,7 +351,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
const char *signed_jwt, size_t signed_jwt_len, void *user_data,
grpc_jwt_verification_done_cb cb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- verifier_cb_ctx *ctx = gpr_zalloc(sizeof(verifier_cb_ctx));
+ verifier_cb_ctx *ctx = (verifier_cb_ctx *)gpr_zalloc(sizeof(verifier_cb_ctx));
ctx->verifier = verifier;
ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
ctx->header = header;
@@ -380,7 +384,7 @@ void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) {
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
/* Max delay defaults to one minute. */
-gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN};
+grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC;
typedef struct {
char *email_domain;
@@ -676,6 +680,7 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_json *json = json_from_http(response);
grpc_httpcli_request req;
const char *jwks_uri;
+ grpc_resource_quota *resource_quota = NULL;
/* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. */
if (json == NULL) goto error;
@@ -693,9 +698,9 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
jwks_uri += 8;
req.handshaker = &grpc_httpcli_ssl;
req.host = gpr_strdup(jwks_uri);
- req.http.path = strchr(jwks_uri, '/');
+ req.http.path = (char *)strchr(jwks_uri, '/');
if (req.http.path == NULL) {
- req.http.path = "";
+ req.http.path = (char *)"";
} else {
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
@@ -703,11 +708,10 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
- grpc_resource_quota *resource_quota =
- grpc_resource_quota_create("jwt_verifier");
+ resource_quota = grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
+ grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -760,7 +764,8 @@ const char *grpc_jwt_issuer_email_domain(const char *issuer) {
if (dot == NULL || dot == email_domain) return email_domain;
GPR_ASSERT(dot > email_domain);
/* There may be a subdomain, we just want the domain. */
- dot = gpr_memrchr(email_domain, '.', (size_t)(dot - email_domain));
+ dot = (const char *)gpr_memrchr((void *)email_domain, '.',
+ (size_t)(dot - email_domain));
if (dot == NULL) return email_domain;
return dot + 1;
}
@@ -773,6 +778,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
char *path_prefix = NULL;
const char *iss;
grpc_httpcli_request req;
+ grpc_resource_quota *resource_quota = NULL;
memset(&req, 0, sizeof(grpc_httpcli_request));
req.handshaker = &grpc_httpcli_ssl;
http_response_index rsp_idx;
@@ -831,12 +837,11 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
- grpc_resource_quota *resource_quota =
- grpc_resource_quota_create("jwt_verifier");
- grpc_httpcli_get(
- exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
- http_cb, &ctx->responses[rsp_idx]);
+ resource_quota = grpc_resource_quota_create("jwt_verifier");
+ grpc_httpcli_get(exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent,
+ resource_quota, &req,
+ grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
+ http_cb, &ctx->responses[rsp_idx]);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
@@ -901,12 +906,14 @@ error:
grpc_jwt_verifier *grpc_jwt_verifier_create(
const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
size_t num_mappings) {
- grpc_jwt_verifier *v = gpr_zalloc(sizeof(grpc_jwt_verifier));
+ grpc_jwt_verifier *v =
+ (grpc_jwt_verifier *)gpr_zalloc(sizeof(grpc_jwt_verifier));
grpc_httpcli_context_init(&v->http_ctx);
/* We know at least of one mapping. */
v->allocated_mappings = 1 + num_mappings;
- v->mappings = gpr_malloc(v->allocated_mappings * sizeof(email_key_mapping));
+ v->mappings = (email_key_mapping *)gpr_malloc(v->allocated_mappings *
+ sizeof(email_key_mapping));
verifier_put_mapping(v, GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN,
GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX);
/* User-Provided mappings. */
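Aside (not part of the diff): besides switching grpc_jwt_verifier_max_delay to `grpc_millis`, the two jwt_verifier hunks hoist the `resource_quota` declaration to the top of each function and initialize it to NULL. A likely reason is that both functions contain `goto error`, and C++ (unlike C) rejects a goto that jumps over the initialization of a variable. A standalone sketch of that rule (names are illustrative):

#include <stdlib.h>

static int fetch(int fail_early) {
  void *resource_quota = NULL;   /* hoisted declaration, as in the hunks */
  if (fail_early) goto error;    /* legal: no initialization is skipped */
  resource_quota = malloc(16);   /* plain assignment further down */
  free(resource_quota);
  return 0;
error:
  return 1;
}

int main() { return fetch(0); }
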
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index 8fac452d4e..998365e75c 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -32,6 +32,10 @@
#define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \
"www.googleapis.com/robot/v1/metadata/x509"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* --- grpc_jwt_verifier_status. --- */
typedef enum {
@@ -81,7 +85,7 @@ typedef struct {
/* Globals to control the verifier. Not thread-safe. */
extern gpr_timespec grpc_jwt_verifier_clock_skew;
-extern gpr_timespec grpc_jwt_verifier_max_delay;
+extern grpc_millis grpc_jwt_verifier_max_delay;
/* The verifier can be created with some custom mappings to help with key
discovery in the case where the issuer is an email address.
@@ -122,4 +126,8 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
const char *audience);
const char *grpc_jwt_issuer_email_domain(const char *issuer);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
index 10b270c49c..7867105f56 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
@@ -117,7 +117,7 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx,
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
grpc_exec_ctx *exec_ctx, const grpc_http_response *response,
- grpc_mdelem *token_md, gpr_timespec *token_lifetime) {
+ grpc_mdelem *token_md, grpc_millis *token_lifetime) {
char *null_terminated_body = NULL;
char *new_access_token = NULL;
grpc_credentials_status status = GRPC_CREDENTIALS_OK;
@@ -130,7 +130,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
}
if (response->body_length > 0) {
- null_terminated_body = gpr_malloc(response->body_length + 1);
+ null_terminated_body = (char *)gpr_malloc(response->body_length + 1);
null_terminated_body[response->body_length] = '\0';
memcpy(null_terminated_body, response->body, response->body_length);
}
@@ -183,9 +183,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
}
gpr_asprintf(&new_access_token, "%s %s", token_type->value,
access_token->value);
- token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
- token_lifetime->tv_nsec = 0;
- token_lifetime->clock_type = GPR_TIMESPAN;
+ *token_lifetime = strtol(expires_in->value, NULL, 10) * GPR_MS_PER_SEC;
if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md);
*token_md = grpc_mdelem_from_slices(
exec_ctx,
@@ -214,7 +212,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)r->creds;
grpc_mdelem access_token_md = GRPC_MDNULL;
- gpr_timespec token_lifetime;
+ grpc_millis token_lifetime;
grpc_credentials_status status =
grpc_oauth2_token_fetcher_credentials_parse_server_response(
exec_ctx, &r->response, &access_token_md, &token_lifetime);
@@ -222,10 +220,9 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&c->mu);
c->token_fetch_pending = false;
c->access_token_md = GRPC_MDELEM_REF(access_token_md);
- c->token_expiration =
- status == GRPC_CREDENTIALS_OK
- ? gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime)
- : gpr_inf_past(GPR_CLOCK_REALTIME);
+ c->token_expiration = status == GRPC_CREDENTIALS_OK
+ ? grpc_exec_ctx_now(exec_ctx) + token_lifetime
+ : 0;
grpc_oauth2_pending_get_request_metadata *pending_request =
c->pending_requests;
c->pending_requests = NULL;
@@ -260,14 +257,12 @@ static bool oauth2_token_fetcher_get_request_metadata(
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
// Check if we can use the cached token.
- gpr_timespec refresh_threshold = gpr_time_from_seconds(
- GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
+ grpc_millis refresh_threshold =
+ GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC;
grpc_mdelem cached_access_token_md = GRPC_MDNULL;
gpr_mu_lock(&c->mu);
if (!GRPC_MDISNULL(c->access_token_md) &&
- (gpr_time_cmp(
- gpr_time_sub(c->token_expiration, gpr_now(GPR_CLOCK_REALTIME)),
- refresh_threshold) > 0)) {
+ (c->token_expiration - grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) {
cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md);
}
if (!GRPC_MDISNULL(cached_access_token_md)) {
@@ -296,10 +291,10 @@ static bool oauth2_token_fetcher_get_request_metadata(
gpr_mu_unlock(&c->mu);
if (start_fetch) {
grpc_call_credentials_ref(creds);
- c->fetch_func(
- exec_ctx, grpc_credentials_metadata_request_create(creds),
- &c->httpcli_context, &c->pollent, on_oauth2_token_fetcher_http_response,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), refresh_threshold));
+ c->fetch_func(exec_ctx, grpc_credentials_metadata_request_create(creds),
+ &c->httpcli_context, &c->pollent,
+ on_oauth2_token_fetcher_http_response,
+ grpc_exec_ctx_now(exec_ctx) + refresh_threshold);
}
return false;
}
@@ -340,7 +335,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
- c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
+ c->token_expiration = 0;
c->fetch_func = fetch_func;
c->pollent =
grpc_polling_entity_create_from_pollset_set(grpc_pollset_set_create());
@@ -358,12 +353,12 @@ static grpc_call_credentials_vtable compute_engine_vtable = {
static void compute_engine_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
- grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
- grpc_http_header header = {"Metadata-Flavor", "Google"};
+ grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
+ grpc_http_header header = {(char *)"Metadata-Flavor", (char *)"Google"};
grpc_httpcli_request request;
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = GRPC_COMPUTE_ENGINE_METADATA_HOST;
- request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
+ request.host = (char *)GRPC_COMPUTE_ENGINE_METADATA_HOST;
+ request.http.path = (char *)GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
@@ -381,7 +376,8 @@ static void compute_engine_fetch_oauth2(
grpc_call_credentials *grpc_google_compute_engine_credentials_create(
void *reserved) {
grpc_oauth2_token_fetcher_credentials *c =
- gpr_malloc(sizeof(grpc_oauth2_token_fetcher_credentials));
+ (grpc_oauth2_token_fetcher_credentials *)gpr_malloc(
+ sizeof(grpc_oauth2_token_fetcher_credentials));
GRPC_API_TRACE("grpc_compute_engine_credentials_create(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);
@@ -409,19 +405,19 @@ static grpc_call_credentials_vtable refresh_token_vtable = {
static void refresh_token_fetch_oauth2(
grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
- grpc_iomgr_cb_func response_cb, gpr_timespec deadline) {
+ grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
grpc_google_refresh_token_credentials *c =
(grpc_google_refresh_token_credentials *)metadata_req->creds;
- grpc_http_header header = {"Content-Type",
- "application/x-www-form-urlencoded"};
+ grpc_http_header header = {(char *)"Content-Type",
+ (char *)"application/x-www-form-urlencoded"};
grpc_httpcli_request request;
char *body = NULL;
gpr_asprintf(&body, GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING,
c->refresh_token.client_id, c->refresh_token.client_secret,
c->refresh_token.refresh_token);
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
- request.http.path = GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
+ request.host = (char *)GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
+ request.http.path = (char *)GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
@@ -447,7 +443,8 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation");
return NULL;
}
- c = gpr_zalloc(sizeof(grpc_google_refresh_token_credentials));
+ c = (grpc_google_refresh_token_credentials *)gpr_zalloc(
+ sizeof(grpc_google_refresh_token_credentials));
init_oauth2_token_fetcher(&c->base, refresh_token_fetch_oauth2);
c->base.base.vtable = &refresh_token_vtable;
c->refresh_token = refresh_token;
@@ -515,7 +512,8 @@ static grpc_call_credentials_vtable access_token_vtable = {
grpc_call_credentials *grpc_access_token_credentials_create(
const char *access_token, void *reserved) {
grpc_access_token_credentials *c =
- gpr_zalloc(sizeof(grpc_access_token_credentials));
+ (grpc_access_token_credentials *)gpr_zalloc(
+ sizeof(grpc_access_token_credentials));
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=<redacted>, "
"reserved=%p)",
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
index d9ad6691b8..c12db896f3 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
@@ -22,6 +22,10 @@
#include "src/core/lib/json/json.h"
#include "src/core/lib/security/credentials/credentials.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// auth_refresh_token parsing.
typedef struct {
const char *type;
@@ -57,7 +61,7 @@ typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
grpc_httpcli_context *http_context,
grpc_polling_entity *pollent,
grpc_iomgr_cb_func cb,
- gpr_timespec deadline);
+ grpc_millis deadline);
typedef struct grpc_oauth2_pending_get_request_metadata {
grpc_credentials_mdelem_array *md_array;
@@ -70,7 +74,7 @@ typedef struct {
grpc_call_credentials base;
gpr_mu mu;
grpc_mdelem access_token_md;
- gpr_timespec token_expiration;
+ grpc_millis token_expiration;
bool token_fetch_pending;
grpc_oauth2_pending_get_request_metadata *pending_requests;
grpc_httpcli_context httpcli_context;
@@ -100,6 +104,10 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response,
- grpc_mdelem *token_md, gpr_timespec *token_lifetime);
+ grpc_mdelem *token_md, grpc_millis *token_lifetime);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H */
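Aside (not part of the diff): the oauth2 hunks above carry the token lifetime and expiration as `grpc_millis` instead of `gpr_timespec`, so the parsed `expires_in` seconds are simply multiplied by GPR_MS_PER_SEC and added to the current time. A rough standalone analogue of that arithmetic (names and values below are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  const int64_t kMsPerSec = 1000;      /* role of GPR_MS_PER_SEC */
  const char *expires_in = "3599";     /* typical OAuth2 response value */
  int64_t token_lifetime = strtol(expires_in, NULL, 10) * kMsPerSec;
  int64_t now = 0;                     /* stand-in for grpc_exec_ctx_now() */
  int64_t token_expiration = now + token_lifetime;
  printf("expires at t+%lld ms\n", (long long)token_expiration);
  return 0;
}
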
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
index 73e0c23e0f..8106a730fe 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.c
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
@@ -31,6 +31,9 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/validate_metadata.h"
+grpc_tracer_flag grpc_plugin_credentials_trace =
+ GRPC_TRACER_INITIALIZER(false, "plugin_credentials");
+
static void plugin_destruct(grpc_exec_ctx *exec_ctx,
grpc_call_credentials *creds) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
@@ -53,6 +56,62 @@ static void pending_request_remove_locked(
}
}
+// Checks if the request has been cancelled.
+// If not, removes it from the pending list, so that it cannot be
+// cancelled out from under us.
+// When this returns, r->cancelled indicates whether the request was
+// cancelled before completion.
+static void pending_request_complete(
+ grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) {
+ gpr_mu_lock(&r->creds->mu);
+ if (!r->cancelled) pending_request_remove_locked(r->creds, r);
+ gpr_mu_unlock(&r->creds->mu);
+ // Ref to credentials not needed anymore.
+ grpc_call_credentials_unref(exec_ctx, &r->creds->base);
+}
+
+static grpc_error *process_plugin_result(
+ grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r,
+ const grpc_metadata *md, size_t num_md, grpc_status_code status,
+ const char *error_details) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (status != GRPC_STATUS_OK) {
+ char *msg;
+ gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
+ error_details);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ gpr_free(msg);
+ } else {
+ bool seen_illegal_header = false;
+ for (size_t i = 0; i < num_md; ++i) {
+ if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
+ grpc_validate_header_key_is_legal(md[i].key))) {
+ seen_illegal_header = true;
+ break;
+ } else if (!grpc_is_binary_header(md[i].key) &&
+ !GRPC_LOG_IF_ERROR(
+ "validate_metadata_from_plugin",
+ grpc_validate_header_nonbin_value_is_legal(md[i].value))) {
+ gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
+ seen_illegal_header = true;
+ break;
+ }
+ }
+ if (seen_illegal_header) {
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata");
+ } else {
+ for (size_t i = 0; i < num_md; ++i) {
+ grpc_mdelem mdelem = grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(md[i].key),
+ grpc_slice_ref_internal(md[i].value));
+ grpc_credentials_mdelem_array_add(r->md_array, mdelem);
+ GRPC_MDELEM_UNREF(exec_ctx, mdelem);
+ }
+ }
+ }
+ return error;
+}
+
static void plugin_md_request_metadata_ready(void *request,
const grpc_metadata *md,
size_t num_md,
@@ -64,54 +123,24 @@ static void plugin_md_request_metadata_ready(void *request,
NULL, NULL);
grpc_plugin_credentials_pending_request *r =
(grpc_plugin_credentials_pending_request *)request;
- // Check if the request has been cancelled.
- // If not, remove it from the pending list, so that it cannot be
- // cancelled out from under us.
- gpr_mu_lock(&r->creds->mu);
- if (!r->cancelled) pending_request_remove_locked(r->creds, r);
- gpr_mu_unlock(&r->creds->mu);
- grpc_call_credentials_unref(&exec_ctx, &r->creds->base);
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO,
+ "plugin_credentials[%p]: request %p: plugin returned "
+ "asynchronously",
+ r->creds, r);
+ }
+ // Remove request from pending list if not previously cancelled.
+ pending_request_complete(&exec_ctx, r);
// If it has not been cancelled, process it.
if (!r->cancelled) {
- if (status != GRPC_STATUS_OK) {
- char *msg;
- gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
- error_details);
- GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata,
- GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg));
- gpr_free(msg);
- } else {
- bool seen_illegal_header = false;
- for (size_t i = 0; i < num_md; ++i) {
- if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin",
- grpc_validate_header_key_is_legal(md[i].key))) {
- seen_illegal_header = true;
- break;
- } else if (!grpc_is_binary_header(md[i].key) &&
- !GRPC_LOG_IF_ERROR(
- "validate_metadata_from_plugin",
- grpc_validate_header_nonbin_value_is_legal(
- md[i].value))) {
- gpr_log(GPR_ERROR, "Plugin added invalid metadata value.");
- seen_illegal_header = true;
- break;
- }
- }
- if (seen_illegal_header) {
- GRPC_CLOSURE_SCHED(
- &exec_ctx, r->on_request_metadata,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"));
- } else {
- for (size_t i = 0; i < num_md; ++i) {
- grpc_mdelem mdelem = grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_ref_internal(md[i].key),
- grpc_slice_ref_internal(md[i].value));
- grpc_credentials_mdelem_array_add(r->md_array, mdelem);
- GRPC_MDELEM_UNREF(&exec_ctx, mdelem);
- }
- GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, GRPC_ERROR_NONE);
- }
- }
+ grpc_error *error =
+ process_plugin_result(&exec_ctx, r, md, num_md, status, error_details);
+ GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error);
+ } else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO,
+ "plugin_credentials[%p]: request %p: plugin was previously "
+ "cancelled",
+ r->creds, r);
}
gpr_free(r);
grpc_exec_ctx_finish(&exec_ctx);
@@ -125,6 +154,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_closure *on_request_metadata,
grpc_error **error) {
grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+ bool retval = true; // Synchronous return.
if (c->plugin.get_metadata != NULL) {
// Create pending_request object.
grpc_plugin_credentials_pending_request *pending_request =
@@ -142,12 +172,60 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
c->pending_requests = pending_request;
gpr_mu_unlock(&c->mu);
// Invoke the plugin. The callback holds a ref to us.
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin",
+ c, pending_request);
+ }
grpc_call_credentials_ref(creds);
- c->plugin.get_metadata(c->plugin.state, context,
- plugin_md_request_metadata_ready, pending_request);
- return false;
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
+ size_t num_creds_md = 0;
+ grpc_status_code status = GRPC_STATUS_OK;
+ const char *error_details = NULL;
+ if (!c->plugin.get_metadata(c->plugin.state, context,
+ plugin_md_request_metadata_ready,
+ pending_request, creds_md, &num_creds_md,
+ &status, &error_details)) {
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO,
+ "plugin_credentials[%p]: request %p: plugin will return "
+ "asynchronously",
+ c, pending_request);
+ }
+ return false; // Asynchronous return.
+ }
+ // Returned synchronously.
+ // Remove request from pending list if not previously cancelled.
+ pending_request_complete(exec_ctx, pending_request);
+ // If the request was cancelled, the error will have been returned
+ // asynchronously by plugin_cancel_get_request_metadata(), so return
+ // false. Otherwise, process the result.
+ if (pending_request->cancelled) {
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO,
+ "plugin_credentials[%p]: request %p was cancelled, error "
+ "will be returned asynchronously",
+ c, pending_request);
+ }
+ retval = false;
+ } else {
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO,
+ "plugin_credentials[%p]: request %p: plugin returned "
+ "synchronously",
+ c, pending_request);
+ }
+ *error = process_plugin_result(exec_ctx, pending_request, creds_md,
+ num_creds_md, status, error_details);
+ }
+ // Clean up.
+ for (size_t i = 0; i < num_creds_md; ++i) {
+ grpc_slice_unref_internal(exec_ctx, creds_md[i].key);
+ grpc_slice_unref_internal(exec_ctx, creds_md[i].value);
+ }
+ gpr_free((void *)error_details);
+ gpr_free(pending_request);
}
- return true;
+ return retval;
}
static void plugin_cancel_get_request_metadata(
@@ -159,6 +237,10 @@ static void plugin_cancel_get_request_metadata(
c->pending_requests;
pending_request != NULL; pending_request = pending_request->next) {
if (pending_request->md_array == md_array) {
+ if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
+ gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c,
+ pending_request);
+ }
pending_request->cancelled = true;
GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata,
GRPC_ERROR_REF(error));
@@ -176,7 +258,8 @@ static grpc_call_credentials_vtable plugin_vtable = {
grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void *reserved) {
- grpc_plugin_credentials *c = gpr_zalloc(sizeof(*c));
+ grpc_plugin_credentials *c =
+ (grpc_plugin_credentials *)gpr_zalloc(sizeof(*c));
GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);
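Aside (not part of the diff): the plugin_credentials rewrite lets the metadata plugin complete either synchronously (fill the output arrays and return true) or asynchronously (return false and invoke the callback later), with the caller branching on that boolean. A toy standalone version of the same control flow (types and names here are invented, not the gRPC plugin API):

#include <stdio.h>

typedef void (*done_cb)(int result, void *user_data);

/* A provider that happens to finish immediately: it fills *result_out and
   returns true. An asynchronous provider would stash cb/user_data, return
   false, and invoke cb later. */
static bool get_metadata(done_cb cb, void *user_data, int *result_out) {
  (void)cb;
  (void)user_data;
  *result_out = 42;
  return true;
}

int main() {
  int result = 0;
  if (get_metadata(NULL, NULL, &result)) {
    printf("completed synchronously: %d\n", result);  /* no callback will fire */
  } else {
    printf("waiting for callback\n");
  }
  return 0;
}
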
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.h b/src/core/lib/security/credentials/plugin/plugin_credentials.h
index 57266d589a..f56df9eac5 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.h
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.h
@@ -21,6 +21,8 @@
#include "src/core/lib/security/credentials/credentials.h"
+extern grpc_tracer_flag grpc_plugin_credentials_trace;
+
struct grpc_plugin_credentials;
typedef struct grpc_plugin_credentials_pending_request {
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.c b/src/core/lib/security/credentials/ssl/ssl_credentials.c
deleted file mode 100644
index 006db1ec76..0000000000
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
-
-#include <string.h>
-
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/surface/api_trace.h"
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-//
-// SSL Channel Credentials.
-//
-
-static void ssl_config_pem_key_cert_pair_destroy(
- tsi_ssl_pem_key_cert_pair *kp) {
- if (kp == NULL) return;
- gpr_free((void *)kp->private_key);
- gpr_free((void *)kp->cert_chain);
-}
-
-static void ssl_destruct(grpc_exec_ctx *exec_ctx,
- grpc_channel_credentials *creds) {
- grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
- gpr_free(c->config.pem_root_certs);
- ssl_config_pem_key_cert_pair_destroy(&c->config.pem_key_cert_pair);
-}
-
-static grpc_security_status ssl_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds,
- grpc_call_credentials *call_creds, const char *target,
- const grpc_channel_args *args, grpc_channel_security_connector **sc,
- grpc_channel_args **new_args) {
- grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
- grpc_security_status status = GRPC_SECURITY_OK;
- const char *overridden_target_name = NULL;
- for (size_t i = 0; args && i < args->num_args; i++) {
- grpc_arg *arg = &args->args[i];
- if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 &&
- arg->type == GRPC_ARG_STRING) {
- overridden_target_name = arg->value.string;
- break;
- }
- }
- status = grpc_ssl_channel_security_connector_create(
- exec_ctx, call_creds, &c->config, target, overridden_target_name, sc);
- if (status != GRPC_SECURITY_OK) {
- return status;
- }
- grpc_arg new_arg =
- grpc_channel_arg_string_create(GRPC_ARG_HTTP2_SCHEME, "https");
- *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
- return status;
-}
-
-static grpc_channel_credentials_vtable ssl_vtable = {
- ssl_destruct, ssl_create_security_connector, NULL};
-
-static void ssl_build_config(const char *pem_root_certs,
- grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
- grpc_ssl_config *config) {
- if (pem_root_certs != NULL) {
- config->pem_root_certs = gpr_strdup(pem_root_certs);
- }
- if (pem_key_cert_pair != NULL) {
- GPR_ASSERT(pem_key_cert_pair->private_key != NULL);
- GPR_ASSERT(pem_key_cert_pair->cert_chain != NULL);
- config->pem_key_cert_pair.cert_chain =
- gpr_strdup(pem_key_cert_pair->cert_chain);
- config->pem_key_cert_pair.private_key =
- gpr_strdup(pem_key_cert_pair->private_key);
- }
-}
-
-grpc_channel_credentials *grpc_ssl_credentials_create(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
- void *reserved) {
- grpc_ssl_credentials *c = gpr_zalloc(sizeof(grpc_ssl_credentials));
- GRPC_API_TRACE(
- "grpc_ssl_credentials_create(pem_root_certs=%s, "
- "pem_key_cert_pair=%p, "
- "reserved=%p)",
- 3, (pem_root_certs, pem_key_cert_pair, reserved));
- GPR_ASSERT(reserved == NULL);
- c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
- c->base.vtable = &ssl_vtable;
- gpr_ref_init(&c->base.refcount, 1);
- ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config);
- return &c->base;
-}
-
-//
-// SSL Server Credentials.
-//
-
-static void ssl_server_destruct(grpc_exec_ctx *exec_ctx,
- grpc_server_credentials *creds) {
- grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
- size_t i;
- for (i = 0; i < c->config.num_key_cert_pairs; i++) {
- ssl_config_pem_key_cert_pair_destroy(&c->config.pem_key_cert_pairs[i]);
- }
- gpr_free(c->config.pem_key_cert_pairs);
- gpr_free(c->config.pem_root_certs);
-}
-
-static grpc_security_status ssl_server_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
- grpc_server_security_connector **sc) {
- grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
- return grpc_ssl_server_security_connector_create(exec_ctx, &c->config, sc);
-}
-
-static grpc_server_credentials_vtable ssl_server_vtable = {
- ssl_server_destruct, ssl_server_create_security_connector};
-
-static void ssl_build_server_config(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs,
- grpc_ssl_client_certificate_request_type client_certificate_request,
- grpc_ssl_server_config *config) {
- size_t i;
- config->client_certificate_request = client_certificate_request;
- if (pem_root_certs != NULL) {
- config->pem_root_certs = gpr_strdup(pem_root_certs);
- }
- if (num_key_cert_pairs > 0) {
- GPR_ASSERT(pem_key_cert_pairs != NULL);
- config->pem_key_cert_pairs =
- gpr_zalloc(num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair));
- }
- config->num_key_cert_pairs = num_key_cert_pairs;
- for (i = 0; i < num_key_cert_pairs; i++) {
- GPR_ASSERT(pem_key_cert_pairs[i].private_key != NULL);
- GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != NULL);
- config->pem_key_cert_pairs[i].cert_chain =
- gpr_strdup(pem_key_cert_pairs[i].cert_chain);
- config->pem_key_cert_pairs[i].private_key =
- gpr_strdup(pem_key_cert_pairs[i].private_key);
- }
-}
-
-grpc_server_credentials *grpc_ssl_server_credentials_create(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, int force_client_auth, void *reserved) {
- return grpc_ssl_server_credentials_create_ex(
- pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs,
- force_client_auth
- ? GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
- : GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
- reserved);
-}
-
-grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs,
- grpc_ssl_client_certificate_request_type client_certificate_request,
- void *reserved) {
- grpc_ssl_server_credentials *c =
- gpr_zalloc(sizeof(grpc_ssl_server_credentials));
- GRPC_API_TRACE(
- "grpc_ssl_server_credentials_create_ex("
- "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
- "client_certificate_request=%d, reserved=%p)",
- 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
- client_certificate_request, reserved));
- GPR_ASSERT(reserved == NULL);
- c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
- gpr_ref_init(&c->base.refcount, 1);
- c->base.vtable = &ssl_server_vtable;
- ssl_build_server_config(pem_root_certs, pem_key_cert_pairs,
- num_key_cert_pairs, client_certificate_request,
- &c->config);
- return &c->base;
-}
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.cc b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
new file mode 100644
index 0000000000..2085e2b8e7
--- /dev/null
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
@@ -0,0 +1,343 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
+
+#include <string.h>
+
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/surface/api_trace.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+//
+// SSL Channel Credentials.
+//
+
+void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair *kp,
+ size_t num_key_cert_pairs) {
+ if (kp == NULL) return;
+ for (size_t i = 0; i < num_key_cert_pairs; i++) {
+ gpr_free((void *)kp[i].private_key);
+ gpr_free((void *)kp[i].cert_chain);
+ }
+ gpr_free(kp);
+}
+
+static void ssl_destruct(grpc_exec_ctx *exec_ctx,
+ grpc_channel_credentials *creds) {
+ grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
+ gpr_free(c->config.pem_root_certs);
+ grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pair, 1);
+}
+
+static grpc_security_status ssl_create_security_connector(
+ grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds,
+ grpc_call_credentials *call_creds, const char *target,
+ const grpc_channel_args *args, grpc_channel_security_connector **sc,
+ grpc_channel_args **new_args) {
+ grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
+ grpc_security_status status = GRPC_SECURITY_OK;
+ const char *overridden_target_name = NULL;
+ for (size_t i = 0; args && i < args->num_args; i++) {
+ grpc_arg *arg = &args->args[i];
+ if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 &&
+ arg->type == GRPC_ARG_STRING) {
+ overridden_target_name = arg->value.string;
+ break;
+ }
+ }
+ status = grpc_ssl_channel_security_connector_create(
+ exec_ctx, creds, call_creds, &c->config, target, overridden_target_name,
+ sc);
+ if (status != GRPC_SECURITY_OK) {
+ return status;
+ }
+ grpc_arg new_arg = grpc_channel_arg_string_create(
+ (char *)GRPC_ARG_HTTP2_SCHEME, (char *)"https");
+ *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
+ return status;
+}
+
+static grpc_channel_credentials_vtable ssl_vtable = {
+ ssl_destruct, ssl_create_security_connector, NULL};
+
+static void ssl_build_config(const char *pem_root_certs,
+ grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
+ grpc_ssl_config *config) {
+ if (pem_root_certs != NULL) {
+ config->pem_root_certs = gpr_strdup(pem_root_certs);
+ }
+ if (pem_key_cert_pair != NULL) {
+ GPR_ASSERT(pem_key_cert_pair->private_key != NULL);
+ GPR_ASSERT(pem_key_cert_pair->cert_chain != NULL);
+ config->pem_key_cert_pair = (tsi_ssl_pem_key_cert_pair *)gpr_zalloc(
+ sizeof(tsi_ssl_pem_key_cert_pair));
+ config->pem_key_cert_pair->cert_chain =
+ gpr_strdup(pem_key_cert_pair->cert_chain);
+ config->pem_key_cert_pair->private_key =
+ gpr_strdup(pem_key_cert_pair->private_key);
+ }
+}
+
+grpc_channel_credentials *grpc_ssl_credentials_create(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
+ void *reserved) {
+ grpc_ssl_credentials *c =
+ (grpc_ssl_credentials *)gpr_zalloc(sizeof(grpc_ssl_credentials));
+ GRPC_API_TRACE(
+ "grpc_ssl_credentials_create(pem_root_certs=%s, "
+ "pem_key_cert_pair=%p, "
+ "reserved=%p)",
+ 3, (pem_root_certs, pem_key_cert_pair, reserved));
+ GPR_ASSERT(reserved == NULL);
+ c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
+ c->base.vtable = &ssl_vtable;
+ gpr_ref_init(&c->base.refcount, 1);
+ ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config);
+ return &c->base;
+}
+
+//
+// SSL Server Credentials.
+//
+
+struct grpc_ssl_server_credentials_options {
+ grpc_ssl_client_certificate_request_type client_certificate_request;
+ grpc_ssl_server_certificate_config *certificate_config;
+ grpc_ssl_server_certificate_config_fetcher *certificate_config_fetcher;
+};
+
+static void ssl_server_destruct(grpc_exec_ctx *exec_ctx,
+ grpc_server_credentials *creds) {
+ grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
+ grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pairs,
+ c->config.num_key_cert_pairs);
+ gpr_free(c->config.pem_root_certs);
+}
+
+static grpc_security_status ssl_server_create_security_connector(
+ grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
+ grpc_server_security_connector **sc) {
+ return grpc_ssl_server_security_connector_create(exec_ctx, creds, sc);
+}
+
+static grpc_server_credentials_vtable ssl_server_vtable = {
+ ssl_server_destruct, ssl_server_create_security_connector};
+
+tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(
+ const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs) {
+ tsi_ssl_pem_key_cert_pair *tsi_pairs = NULL;
+ if (num_key_cert_pairs > 0) {
+ GPR_ASSERT(pem_key_cert_pairs != NULL);
+ tsi_pairs = (tsi_ssl_pem_key_cert_pair *)gpr_zalloc(
+ num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair));
+ }
+ for (size_t i = 0; i < num_key_cert_pairs; i++) {
+ GPR_ASSERT(pem_key_cert_pairs[i].private_key != NULL);
+ GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != NULL);
+ tsi_pairs[i].cert_chain = gpr_strdup(pem_key_cert_pairs[i].cert_chain);
+ tsi_pairs[i].private_key = gpr_strdup(pem_key_cert_pairs[i].private_key);
+ }
+ return tsi_pairs;
+}
+
+static void ssl_build_server_config(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs,
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_config *config) {
+ config->client_certificate_request = client_certificate_request;
+ if (pem_root_certs != NULL) {
+ config->pem_root_certs = gpr_strdup(pem_root_certs);
+ }
+ config->pem_key_cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
+ pem_key_cert_pairs, num_key_cert_pairs);
+ config->num_key_cert_pairs = num_key_cert_pairs;
+}
+
+grpc_ssl_server_certificate_config *grpc_ssl_server_certificate_config_create(
+ const char *pem_root_certs,
+ const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs) {
+ grpc_ssl_server_certificate_config *config =
+ (grpc_ssl_server_certificate_config *)gpr_zalloc(
+ sizeof(grpc_ssl_server_certificate_config));
+ if (pem_root_certs != NULL) {
+ config->pem_root_certs = gpr_strdup(pem_root_certs);
+ }
+ if (num_key_cert_pairs > 0) {
+ GPR_ASSERT(pem_key_cert_pairs != NULL);
+ config->pem_key_cert_pairs = (grpc_ssl_pem_key_cert_pair *)gpr_zalloc(
+ num_key_cert_pairs * sizeof(grpc_ssl_pem_key_cert_pair));
+ }
+ config->num_key_cert_pairs = num_key_cert_pairs;
+ for (size_t i = 0; i < num_key_cert_pairs; i++) {
+ GPR_ASSERT(pem_key_cert_pairs[i].private_key != NULL);
+ GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != NULL);
+ config->pem_key_cert_pairs[i].cert_chain =
+ gpr_strdup(pem_key_cert_pairs[i].cert_chain);
+ config->pem_key_cert_pairs[i].private_key =
+ gpr_strdup(pem_key_cert_pairs[i].private_key);
+ }
+ return config;
+}
+
+void grpc_ssl_server_certificate_config_destroy(
+ grpc_ssl_server_certificate_config *config) {
+ if (config == NULL) return;
+ for (size_t i = 0; i < config->num_key_cert_pairs; i++) {
+ gpr_free((void *)config->pem_key_cert_pairs[i].private_key);
+ gpr_free((void *)config->pem_key_cert_pairs[i].cert_chain);
+ }
+ gpr_free(config->pem_key_cert_pairs);
+ gpr_free(config->pem_root_certs);
+ gpr_free(config);
+}
+
+grpc_ssl_server_credentials_options *
+grpc_ssl_server_credentials_create_options_using_config(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config *config) {
+ grpc_ssl_server_credentials_options *options = NULL;
+ if (config == NULL) {
+ gpr_log(GPR_ERROR, "Certificate config must not be NULL.");
+ goto done;
+ }
+ options = (grpc_ssl_server_credentials_options *)gpr_zalloc(
+ sizeof(grpc_ssl_server_credentials_options));
+ options->client_certificate_request = client_certificate_request;
+ options->certificate_config = config;
+done:
+ return options;
+}
+
+grpc_ssl_server_credentials_options *
+grpc_ssl_server_credentials_create_options_using_config_fetcher(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config_callback cb, void *user_data) {
+ if (cb == NULL) {
+ gpr_log(GPR_ERROR, "Invalid certificate config callback parameter.");
+ return NULL;
+ }
+
+ grpc_ssl_server_certificate_config_fetcher *fetcher =
+ (grpc_ssl_server_certificate_config_fetcher *)gpr_zalloc(
+ sizeof(grpc_ssl_server_certificate_config_fetcher));
+ fetcher->cb = cb;
+ fetcher->user_data = user_data;
+
+ grpc_ssl_server_credentials_options *options =
+ (grpc_ssl_server_credentials_options *)gpr_zalloc(
+ sizeof(grpc_ssl_server_credentials_options));
+ options->client_certificate_request = client_certificate_request;
+ options->certificate_config_fetcher = fetcher;
+
+ return options;
+}
+
+grpc_server_credentials *grpc_ssl_server_credentials_create(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs, int force_client_auth, void *reserved) {
+ return grpc_ssl_server_credentials_create_ex(
+ pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs,
+ force_client_auth
+ ? GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+ : GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+ reserved);
+}
+
+grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs,
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ void *reserved) {
+ GRPC_API_TRACE(
+ "grpc_ssl_server_credentials_create_ex("
+ "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
+ "client_certificate_request=%d, reserved=%p)",
+ 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
+ client_certificate_request, reserved));
+ GPR_ASSERT(reserved == NULL);
+
+ grpc_ssl_server_certificate_config *cert_config =
+ grpc_ssl_server_certificate_config_create(
+ pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs);
+ grpc_ssl_server_credentials_options *options =
+ grpc_ssl_server_credentials_create_options_using_config(
+ client_certificate_request, cert_config);
+
+ return grpc_ssl_server_credentials_create_with_options(options);
+}
+
+grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
+ grpc_ssl_server_credentials_options *options) {
+ grpc_server_credentials *retval = NULL;
+ grpc_ssl_server_credentials *c = NULL;
+
+ if (options == NULL) {
+ gpr_log(GPR_ERROR,
+ "Invalid options trying to create SSL server credentials.");
+ goto done;
+ }
+
+ if (options->certificate_config == NULL &&
+ options->certificate_config_fetcher == NULL) {
+ gpr_log(GPR_ERROR,
+ "SSL server credentials options must specify either "
+ "certificate config or fetcher.");
+ goto done;
+ } else if (options->certificate_config_fetcher != NULL &&
+ options->certificate_config_fetcher->cb == NULL) {
+ gpr_log(GPR_ERROR, "Certificate config fetcher callback must not be NULL.");
+ goto done;
+ }
+
+ c = (grpc_ssl_server_credentials *)gpr_zalloc(
+ sizeof(grpc_ssl_server_credentials));
+ c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
+ gpr_ref_init(&c->base.refcount, 1);
+ c->base.vtable = &ssl_server_vtable;
+
+ if (options->certificate_config_fetcher != NULL) {
+ c->config.client_certificate_request = options->client_certificate_request;
+ c->certificate_config_fetcher = *options->certificate_config_fetcher;
+ } else {
+ ssl_build_server_config(options->certificate_config->pem_root_certs,
+ options->certificate_config->pem_key_cert_pairs,
+ options->certificate_config->num_key_cert_pairs,
+ options->client_certificate_request, &c->config);
+ }
+
+ retval = &c->base;
+
+done:
+ grpc_ssl_server_credentials_options_destroy(options);
+ return retval;
+}
+
+void grpc_ssl_server_credentials_options_destroy(
+ grpc_ssl_server_credentials_options *o) {
+ if (o == NULL) return;
+ gpr_free(o->certificate_config_fetcher);
+ grpc_ssl_server_certificate_config_destroy(o->certificate_config);
+ gpr_free(o);
+}
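
Note on the API introduced above: the new ssl_credentials.cc adds an options-based server-credentials path whose certificate-config fetcher lets a server reload certificates without restarting. Below is a minimal sketch of how an application could drive it; it assumes the public declarations live in <grpc/grpc_security.h>, infers the callback signature from the fetcher call site later in this diff (cb(user_data, &config) returning a grpc_ssl_certificate_config_reload_status), and uses placeholder names (cert_reload_state, its PEM fields, the changed flag) that are not part of gRPC.

#include <stdbool.h>
#include <grpc/grpc_security.h>

typedef struct {
  const char *root_certs_pem;  /* PEM root certs, loaded by the application */
  const char *private_key_pem; /* PEM private key */
  const char *cert_chain_pem;  /* PEM certificate chain */
  bool changed;                /* set by whatever watches the cert files */
} cert_reload_state;

/* Hypothetical reload callback, invoked by gRPC when a config fetcher is
   installed on the server credentials. */
static grpc_ssl_certificate_config_reload_status cert_reload_cb(
    void *user_data, grpc_ssl_server_certificate_config **config) {
  cert_reload_state *st = (cert_reload_state *)user_data;
  if (!st->changed) return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED;
  grpc_ssl_pem_key_cert_pair pair = {.private_key = st->private_key_pem,
                                     .cert_chain = st->cert_chain_pem};
  /* The config is deep-copied by gRPC and destroyed after it is consumed
     (see try_fetch_ssl_server_credentials further down in this diff). */
  *config = grpc_ssl_server_certificate_config_create(st->root_certs_pem,
                                                      &pair, 1);
  st->changed = false;
  return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW;
}

static grpc_server_credentials *make_reloading_server_creds(
    cert_reload_state *st) {
  grpc_ssl_server_credentials_options *opts =
      grpc_ssl_server_credentials_create_options_using_config_fetcher(
          GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, cert_reload_cb, st);
  /* Takes ownership of opts (and of the fetcher inside it). */
  return grpc_ssl_server_credentials_create_with_options(opts);
}
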
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.h b/src/core/lib/security/credentials/ssl/ssl_credentials.h
index b43c656cd7..5542484aae 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.h
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.h
@@ -20,14 +20,41 @@
#include "src/core/lib/security/credentials/credentials.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct {
grpc_channel_credentials base;
grpc_ssl_config config;
} grpc_ssl_credentials;
+struct grpc_ssl_server_certificate_config {
+ grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs;
+ size_t num_key_cert_pairs;
+ char *pem_root_certs;
+};
+
+typedef struct {
+ grpc_ssl_server_certificate_config_callback cb;
+ void *user_data;
+} grpc_ssl_server_certificate_config_fetcher;
+
typedef struct {
grpc_server_credentials base;
grpc_ssl_server_config config;
+ grpc_ssl_server_certificate_config_fetcher certificate_config_fetcher;
} grpc_ssl_server_credentials;
+tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(
+ const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs);
+
+void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair *kp,
+ size_t num_key_cert_pairs);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_SSL_SSL_CREDENTIALS_H */
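
The header now also exports a matched helper pair: grpc_convert_grpc_to_tsi_cert_pairs deep-copies an array of grpc_ssl_pem_key_cert_pair into freshly allocated TSI pairs, and grpc_tsi_ssl_pem_key_cert_pairs_destroy releases that copy. A small sketch of the intended pairing inside core code follows; the PEM literals are placeholders.

#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"

static void convert_and_release_example(void) {
  grpc_ssl_pem_key_cert_pair pairs[2] = {
      {.private_key = "<key 1 PEM>", .cert_chain = "<chain 1 PEM>"},
      {.private_key = "<key 2 PEM>", .cert_chain = "<chain 2 PEM>"},
  };
  /* Each private_key/cert_chain is gpr_strdup'd into the new TSI array. */
  tsi_ssl_pem_key_cert_pair *tsi_pairs =
      grpc_convert_grpc_to_tsi_cert_pairs(pairs, 2);
  /* ... use tsi_pairs while it is alive ... */
  /* Frees the duplicated strings and the array itself; NULL-safe. */
  grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_pairs, 2);
}
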
diff --git a/src/core/lib/security/transport/auth_filters.h b/src/core/lib/security/transport/auth_filters.h
index bd5902a128..ba5df7fe70 100644
--- a/src/core/lib/security/transport/auth_filters.h
+++ b/src/core/lib/security/transport/auth_filters.h
@@ -21,7 +21,15 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H */
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.cc
index 531a88434f..a8464dbf9e 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -39,6 +39,8 @@
/* We can have a per-call credentials. */
typedef struct {
+ grpc_call_stack *owning_call;
+ grpc_call_combiner *call_combiner;
grpc_call_credentials *creds;
bool have_host;
bool have_method;
@@ -49,17 +51,12 @@ typedef struct {
pollset_set so that work can progress when this call wants work to progress
*/
grpc_polling_entity *pollent;
- gpr_atm security_context_set;
- gpr_mu security_context_mu;
grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
- grpc_closure closure;
- // Either 0 (no cancellation and no async operation in flight),
- // a grpc_closure* (if the lowest bit is 0),
- // or a grpc_error* (if the lowest bit is 1).
- gpr_atm cancellation_state;
- grpc_closure cancel_closure;
+ grpc_closure async_result_closure;
+ grpc_closure check_call_host_cancel_closure;
+ grpc_closure get_request_metadata_cancel_closure;
} call_data;
/* We can have a per-channel credentials. */
@@ -68,43 +65,6 @@ typedef struct {
grpc_auth_context *auth_context;
} channel_data;
-static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func,
- grpc_error **error) {
- // If the lowest bit is 1, the value is a grpc_error*.
- // Otherwise, if non-zero, the value is a grpc_closure*.
- if (cancel_state & 1) {
- *error = (grpc_error *)(cancel_state & ~(gpr_atm)1);
- } else if (cancel_state != 0) {
- *func = (grpc_closure *)cancel_state;
- }
-}
-
-static gpr_atm encode_cancel_state_error(grpc_error *error) {
- // Set the lowest bit to 1 to indicate that it's an error.
- return (gpr_atm)1 | (gpr_atm)error;
-}
-
-// Returns an error if the call has been cancelled. Otherwise, sets the
-// cancellation function to be called upon cancellation.
-static grpc_error *set_cancel_func(grpc_call_element *elem,
- grpc_iomgr_cb_func func) {
- call_data *calld = (call_data *)elem->call_data;
- // Decode original state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *original_error = GRPC_ERROR_NONE;
- grpc_closure *original_func = NULL;
- decode_cancel_state(original_state, &original_func, &original_error);
- // If error is set, return it.
- if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
- // Otherwise, store func.
- GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
- grpc_schedule_on_exec_ctx);
- GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
- gpr_atm_rel_store(&calld->cancellation_state,
- (gpr_atm)&calld->cancel_closure);
- return GRPC_ERROR_NONE;
-}
-
static void reset_auth_metadata_context(
grpc_auth_metadata_context *auth_md_context) {
if (auth_md_context->service_url != NULL) {
@@ -133,8 +93,9 @@ static void add_error(grpc_error **combined, grpc_error *error) {
static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *input_error) {
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem =
+ (grpc_call_element *)batch->handler_private.extra_arg;
+ call_data *calld = (call_data *)elem->call_data;
reset_auth_metadata_context(&calld->auth_md_context);
grpc_error *error = GRPC_ERROR_REF(input_error);
if (error == GRPC_ERROR_NONE) {
@@ -153,7 +114,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
} else {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAUTHENTICATED);
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error);
+ grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+ calld->call_combiner);
}
}
@@ -191,15 +153,19 @@ static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
- grpc_call_credentials_cancel_get_request_metadata(
- exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_call_credentials_cancel_get_request_metadata(
+ exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+ "cancel_get_request_metadata");
}
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_client_security_context *ctx =
(grpc_client_security_context *)batch->payload
->context[GRPC_CONTEXT_SECURITY]
@@ -223,7 +189,8 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
return;
}
} else {
@@ -234,31 +201,34 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
build_auth_metadata_context(&chand->security_connector->base,
chand->auth_context, calld);
- grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
- return;
- }
GPR_ASSERT(calld->pollent != NULL);
- GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
- grpc_schedule_on_exec_ctx);
+
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata,
+ batch, grpc_schedule_on_exec_ctx);
grpc_error *error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
- &calld->md_array, &calld->closure, &error)) {
+ &calld->md_array, &calld->async_result_closure, &error)) {
// Synchronous return; invoke on_credentials_metadata() directly.
on_credentials_metadata(exec_ctx, batch, error);
GRPC_ERROR_UNREF(error);
+ } else {
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
+ cancel_get_request_metadata, elem,
+ grpc_schedule_on_exec_ctx));
}
}
static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
- grpc_call_element *elem = batch->handler_private.extra_arg;
- call_data *calld = elem->call_data;
-
+ grpc_call_element *elem =
+ (grpc_call_element *)batch->handler_private.extra_arg;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch);
} else {
@@ -271,7 +241,8 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, batch,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAUTHENTICATED));
+ GRPC_STATUS_UNAUTHENTICATED),
+ calld->call_combiner);
gpr_free(error_msg);
}
}
@@ -281,9 +252,12 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
- grpc_channel_security_connector_cancel_check_call_host(
- exec_ctx, chand->security_connector, &calld->closure,
- GRPC_ERROR_REF(error));
+ if (error != GRPC_ERROR_NONE) {
+ grpc_channel_security_connector_cancel_check_call_host(
+ exec_ctx, chand->security_connector, &calld->async_result_closure,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host");
}
static void auth_start_transport_stream_op_batch(
@@ -292,55 +266,24 @@ static void auth_start_transport_stream_op_batch(
GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0);
/* grab pointers to our data from the call element */
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
-
- if (batch->cancel_stream) {
- while (true) {
- // Decode the original cancellation state.
- gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *func = NULL;
- decode_cancel_state(original_state, &func, &cancel_error);
- // If we had already set a cancellation error, there's nothing
- // more to do.
- if (cancel_error != GRPC_ERROR_NONE) break;
- // If there's a cancel func, call it.
- // Note that even if the cancel func has been changed by some
- // other thread between when we decoded it and now, it will just
- // be a no-op.
- cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
- if (func != NULL) {
- GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error));
- }
- // Encode the new error into cancellation state.
- if (gpr_atm_full_cas(&calld->cancellation_state, original_state,
- encode_cancel_state_error(cancel_error))) {
- break; // Success.
- }
- // The cas failed, so try again.
- }
- } else {
- /* double checked lock over security context to ensure it's set once */
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- gpr_mu_lock(&calld->security_context_mu);
- if (gpr_atm_acq_load(&calld->security_context_set) == 0) {
- GPR_ASSERT(batch->payload->context != NULL);
- if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
- batch->payload->context[GRPC_CONTEXT_SECURITY].value =
- grpc_client_security_context_create();
- batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
- grpc_client_security_context_destroy;
- }
- grpc_client_security_context *sec_ctx =
- batch->payload->context[GRPC_CONTEXT_SECURITY].value;
- GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
- sec_ctx->auth_context =
- GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
- gpr_atm_rel_store(&calld->security_context_set, 1);
- }
- gpr_mu_unlock(&calld->security_context_mu);
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+
+ if (!batch->cancel_stream) {
+ GPR_ASSERT(batch->payload->context != NULL);
+ if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) {
+ batch->payload->context[GRPC_CONTEXT_SECURITY].value =
+ grpc_client_security_context_create();
+ batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
+ grpc_client_security_context_destroy;
}
+ grpc_client_security_context *sec_ctx =
+ (grpc_client_security_context *)batch->payload
+ ->context[GRPC_CONTEXT_SECURITY]
+ .value;
+ GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
+ sec_ctx->auth_context =
+ GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter");
}
if (batch->send_initial_metadata) {
@@ -365,26 +308,27 @@ static void auth_start_transport_stream_op_batch(
}
}
if (calld->have_host) {
- grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host);
- if (cancel_error != GRPC_ERROR_NONE) {
- grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
- cancel_error);
+ batch->handler_private.extra_arg = elem;
+ GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch,
+ grpc_schedule_on_exec_ctx);
+ char *call_host = grpc_slice_to_c_string(calld->host);
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (grpc_channel_security_connector_check_call_host(
+ exec_ctx, chand->security_connector, call_host,
+ chand->auth_context, &calld->async_result_closure, &error)) {
+ // Synchronous return; invoke on_host_checked() directly.
+ on_host_checked(exec_ctx, batch, error);
+ GRPC_ERROR_UNREF(error);
} else {
- char *call_host = grpc_slice_to_c_string(calld->host);
- batch->handler_private.extra_arg = elem;
- grpc_error *error = GRPC_ERROR_NONE;
- if (grpc_channel_security_connector_check_call_host(
- exec_ctx, chand->security_connector, call_host,
- chand->auth_context,
- GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch,
- grpc_schedule_on_exec_ctx),
- &error)) {
- // Synchronous return; invoke on_host_checked() directly.
- on_host_checked(exec_ctx, batch, error);
- GRPC_ERROR_UNREF(error);
- }
- gpr_free(call_host);
+ // Async return; register cancellation closure with call combiner.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host");
+ grpc_call_combiner_set_notify_on_cancel(
+ exec_ctx, calld->call_combiner,
+ GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
+ cancel_check_call_host, elem,
+ grpc_schedule_on_exec_ctx));
}
+ gpr_free(call_host);
GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
return; /* early exit */
}
@@ -399,16 +343,16 @@ static void auth_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- memset(calld, 0, sizeof(*calld));
- gpr_mu_init(&calld->security_context_mu);
+ call_data *calld = (call_data *)elem->call_data;
+ calld->owning_call = args->call_stack;
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
calld->pollent = pollent;
}
@@ -416,7 +360,7 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_credentials_mdelem_array_destroy(exec_ctx, &calld->md_array);
grpc_call_credentials_unref(exec_ctx, calld->creds);
if (calld->have_host) {
@@ -426,12 +370,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_slice_unref_internal(exec_ctx, calld->method);
}
reset_auth_metadata_context(&calld->auth_md_context);
- gpr_mu_destroy(&calld->security_context_mu);
- gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state);
- grpc_error *cancel_error = GRPC_ERROR_NONE;
- grpc_closure *cancel_func = NULL;
- decode_cancel_state(cancel_state, &cancel_func, &cancel_error);
- GRPC_ERROR_UNREF(cancel_error);
}
/* Constructor for channel_data */
@@ -452,7 +390,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* grab pointers to our data from the channel element */
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
@@ -472,7 +410,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_channel_security_connector *sc = chand->security_connector;
if (sc != NULL) {
GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "client_auth_filter");
@@ -490,6 +428,5 @@ const grpc_channel_filter grpc_client_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"client-auth"};
diff --git a/src/core/lib/security/transport/lb_targets_info.c b/src/core/lib/security/transport/lb_targets_info.cc
index 5583a4e0ff..947fc1addf 100644
--- a/src/core/lib/security/transport/lb_targets_info.c
+++ b/src/core/lib/security/transport/lb_targets_info.cc
@@ -25,19 +25,22 @@
* secure naming purposes. */
#define GRPC_ARG_LB_SECURE_NAMING_MAP "grpc.lb_secure_naming_map"
-static void *targets_info_copy(void *p) { return grpc_slice_hash_table_ref(p); }
+static void *targets_info_copy(void *p) {
+ return grpc_slice_hash_table_ref((grpc_slice_hash_table *)p);
+}
static void targets_info_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_slice_hash_table_unref(exec_ctx, p);
+ grpc_slice_hash_table_unref(exec_ctx, (grpc_slice_hash_table *)p);
}
static int targets_info_cmp(void *a, void *b) {
- return grpc_slice_hash_table_cmp(a, b);
+ return grpc_slice_hash_table_cmp((const grpc_slice_hash_table *)a,
+ (const grpc_slice_hash_table *)b);
}
static const grpc_arg_pointer_vtable server_to_balancer_names_vtable = {
targets_info_copy, targets_info_destroy, targets_info_cmp};
grpc_arg grpc_lb_targets_info_create_channel_arg(
grpc_slice_hash_table *targets_info) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_LB_SECURE_NAMING_MAP,
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_LB_SECURE_NAMING_MAP,
targets_info,
&server_to_balancer_names_vtable);
}
@@ -48,7 +51,7 @@ grpc_slice_hash_table *grpc_lb_targets_info_find_in_args(
grpc_channel_args_find(args, GRPC_ARG_LB_SECURE_NAMING_MAP);
if (targets_info_arg != NULL) {
GPR_ASSERT(targets_info_arg->type == GRPC_ARG_POINTER);
- return targets_info_arg->value.pointer.p;
+ return (grpc_slice_hash_table *)targets_info_arg->value.pointer.p;
}
return NULL;
}
diff --git a/src/core/lib/security/transport/lb_targets_info.h b/src/core/lib/security/transport/lb_targets_info.h
index c3d685df5f..43f0e64556 100644
--- a/src/core/lib/security/transport/lb_targets_info.h
+++ b/src/core/lib/security/transport/lb_targets_info.h
@@ -21,6 +21,10 @@
#include "src/core/lib/slice/slice_hash_table.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Return a channel argument containing \a targets_info. */
grpc_arg grpc_lb_targets_info_create_channel_arg(
grpc_slice_hash_table *targets_info);
@@ -29,4 +33,8 @@ grpc_arg grpc_lb_targets_info_create_channel_arg(
grpc_slice_hash_table *grpc_lb_targets_info_find_in_args(
const grpc_channel_args *args);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H */
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.cc
index 5e41b94ff8..859d04ae5a 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -34,7 +34,7 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
-#include "src/core/tsi/transport_security_interface.h"
+#include "src/core/tsi/transport_security_grpc.h"
#define STAGING_BUFFER_SIZE 8192
@@ -42,6 +42,7 @@ typedef struct {
grpc_endpoint base;
grpc_endpoint *wrapped_ep;
struct tsi_frame_protector *protector;
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
grpc_closure *read_cb;
@@ -67,6 +68,7 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint *ep = secure_ep;
grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
+ tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector);
grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes);
grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer);
grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer);
@@ -159,51 +161,58 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
return;
}
- /* TODO(yangg) check error, maybe bail out early */
- for (i = 0; i < ep->source_buffer.count; i++) {
- grpc_slice encrypted = ep->source_buffer.slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
- size_t message_size = GRPC_SLICE_LENGTH(encrypted);
-
- while (message_size > 0 || keep_looping) {
- size_t unprotected_buffer_size_written = (size_t)(end - cur);
- size_t processed_message_size = message_size;
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
- &processed_message_size, cur,
- &unprotected_buffer_size_written);
- gpr_mu_unlock(&ep->protector_mu);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Decryption error: %s",
- tsi_result_to_string(result));
- break;
- }
- message_bytes += processed_message_size;
- message_size -= processed_message_size;
- cur += unprotected_buffer_size_written;
-
- if (cur == end) {
- flush_read_staging_buffer(ep, &cur, &end);
- /* Force to enter the loop again to extract buffered bytes in protector.
- The bytes could be buffered because of running out of staging_buffer.
- If this happens at the end of all slices, doing another unprotect
- avoids leaving data in the protector. */
- keep_looping = 1;
- } else if (unprotected_buffer_size_written > 0) {
- keep_looping = 1;
- } else {
- keep_looping = 0;
+ if (ep->zero_copy_protector != NULL) {
+ // Use zero-copy grpc protector to unprotect.
+ result = tsi_zero_copy_grpc_protector_unprotect(
+ exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer);
+ } else {
+ // Use frame protector to unprotect.
+ /* TODO(yangg) check error, maybe bail out early */
+ for (i = 0; i < ep->source_buffer.count; i++) {
+ grpc_slice encrypted = ep->source_buffer.slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
+ size_t message_size = GRPC_SLICE_LENGTH(encrypted);
+
+ while (message_size > 0 || keep_looping) {
+ size_t unprotected_buffer_size_written = (size_t)(end - cur);
+ size_t processed_message_size = message_size;
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_unprotect(
+ ep->protector, message_bytes, &processed_message_size, cur,
+ &unprotected_buffer_size_written);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Decryption error: %s",
+ tsi_result_to_string(result));
+ break;
+ }
+ message_bytes += processed_message_size;
+ message_size -= processed_message_size;
+ cur += unprotected_buffer_size_written;
+
+ if (cur == end) {
+ flush_read_staging_buffer(ep, &cur, &end);
+ /* Force to enter the loop again to extract buffered bytes in
+ protector. The bytes could be buffered because of running out of
+ staging_buffer. If this happens at the end of all slices, doing
+ another unprotect avoids leaving data in the protector. */
+ keep_looping = 1;
+ } else if (unprotected_buffer_size_written > 0) {
+ keep_looping = 1;
+ } else {
+ keep_looping = 0;
+ }
}
+ if (result != TSI_OK) break;
}
- if (result != TSI_OK) break;
- }
- if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
- grpc_slice_buffer_add(
- ep->read_buffer,
- grpc_slice_split_head(
- &ep->read_staging_buffer,
- (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
+ if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
+ grpc_slice_buffer_add(
+ ep->read_buffer,
+ grpc_slice_split_head(
+ &ep->read_staging_buffer,
+ (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
+ }
}
/* TODO(yangg) experiment with moving this block after read_cb to see if it
@@ -270,54 +279,62 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
}
}
- for (i = 0; i < slices->count; i++) {
- grpc_slice plain = slices->slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
- size_t message_size = GRPC_SLICE_LENGTH(plain);
- while (message_size > 0) {
- size_t protected_buffer_size_to_send = (size_t)(end - cur);
- size_t processed_message_size = message_size;
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_protect(ep->protector, message_bytes,
- &processed_message_size, cur,
- &protected_buffer_size_to_send);
- gpr_mu_unlock(&ep->protector_mu);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Encryption error: %s",
- tsi_result_to_string(result));
- break;
- }
- message_bytes += processed_message_size;
- message_size -= processed_message_size;
- cur += protected_buffer_size_to_send;
-
- if (cur == end) {
- flush_write_staging_buffer(ep, &cur, &end);
+ if (ep->zero_copy_protector != NULL) {
+ // Use zero-copy grpc protector to protect.
+ result = tsi_zero_copy_grpc_protector_protect(
+ exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer);
+ } else {
+ // Use frame protector to protect.
+ for (i = 0; i < slices->count; i++) {
+ grpc_slice plain = slices->slices[i];
+ uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
+ size_t message_size = GRPC_SLICE_LENGTH(plain);
+ while (message_size > 0) {
+ size_t protected_buffer_size_to_send = (size_t)(end - cur);
+ size_t processed_message_size = message_size;
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_protect(ep->protector, message_bytes,
+ &processed_message_size, cur,
+ &protected_buffer_size_to_send);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Encryption error: %s",
+ tsi_result_to_string(result));
+ break;
+ }
+ message_bytes += processed_message_size;
+ message_size -= processed_message_size;
+ cur += protected_buffer_size_to_send;
+
+ if (cur == end) {
+ flush_write_staging_buffer(ep, &cur, &end);
+ }
}
- }
- if (result != TSI_OK) break;
- }
- if (result == TSI_OK) {
- size_t still_pending_size;
- do {
- size_t protected_buffer_size_to_send = (size_t)(end - cur);
- gpr_mu_lock(&ep->protector_mu);
- result = tsi_frame_protector_protect_flush(ep->protector, cur,
- &protected_buffer_size_to_send,
- &still_pending_size);
- gpr_mu_unlock(&ep->protector_mu);
if (result != TSI_OK) break;
- cur += protected_buffer_size_to_send;
- if (cur == end) {
- flush_write_staging_buffer(ep, &cur, &end);
+ }
+ if (result == TSI_OK) {
+ size_t still_pending_size;
+ do {
+ size_t protected_buffer_size_to_send = (size_t)(end - cur);
+ gpr_mu_lock(&ep->protector_mu);
+ result = tsi_frame_protector_protect_flush(
+ ep->protector, cur, &protected_buffer_size_to_send,
+ &still_pending_size);
+ gpr_mu_unlock(&ep->protector_mu);
+ if (result != TSI_OK) break;
+ cur += protected_buffer_size_to_send;
+ if (cur == end) {
+ flush_write_staging_buffer(ep, &cur, &end);
+ }
+ } while (still_pending_size > 0);
+ if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
+ grpc_slice_buffer_add(
+ &ep->output_buffer,
+ grpc_slice_split_head(
+ &ep->write_staging_buffer,
+ (size_t)(cur -
+ GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
}
- } while (still_pending_size > 0);
- if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
- grpc_slice_buffer_add(
- &ep->output_buffer,
- grpc_slice_split_head(
- &ep->write_staging_buffer,
- (size_t)(cur - GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
}
}
@@ -362,6 +379,13 @@ static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
}
+static void endpoint_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
+ grpc_endpoint *secure_ep,
+ grpc_pollset_set *pollset_set) {
+ secure_endpoint *ep = (secure_endpoint *)secure_ep;
+ grpc_endpoint_delete_from_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
+}
+
static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
return grpc_endpoint_get_peer(ep->wrapped_ep);
@@ -382,6 +406,7 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_write,
endpoint_add_to_pollset,
endpoint_add_to_pollset_set,
+ endpoint_delete_from_pollset_set,
endpoint_shutdown,
endpoint_destroy,
endpoint_get_resource_user,
@@ -389,13 +414,16 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_get_fd};
grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector, grpc_endpoint *transport,
- grpc_slice *leftover_slices, size_t leftover_nslices) {
+ struct tsi_frame_protector *protector,
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector,
+ grpc_endpoint *transport, grpc_slice *leftover_slices,
+ size_t leftover_nslices) {
size_t i;
secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
ep->base.vtable = &vtable;
ep->wrapped_ep = transport;
ep->protector = protector;
+ ep->zero_copy_protector = zero_copy_protector;
grpc_slice_buffer_init(&ep->leftover_bytes);
for (i = 0; i < leftover_nslices; i++) {
grpc_slice_buffer_add(&ep->leftover_bytes,
diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h
index 1c5555f3df..980449c03e 100644
--- a/src/core/lib/security/transport/secure_endpoint.h
+++ b/src/core/lib/security/transport/secure_endpoint.h
@@ -22,13 +22,26 @@
#include <grpc/slice.h>
#include "src/core/lib/iomgr/endpoint.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct tsi_frame_protector;
+struct tsi_zero_copy_grpc_protector;
extern grpc_tracer_flag grpc_trace_secure_endpoint;
-/* Takes ownership of protector and to_wrap, and refs leftover_slices. */
+/* Takes ownership of protector, zero_copy_protector, and to_wrap, and refs
+ * leftover_slices. If zero_copy_protector is not NULL, protector will never be
+ * used. */
grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector, grpc_endpoint *to_wrap,
- grpc_slice *leftover_slices, size_t leftover_nslices);
+ struct tsi_frame_protector *protector,
+ struct tsi_zero_copy_grpc_protector *zero_copy_protector,
+ grpc_endpoint *to_wrap, grpc_slice *leftover_slices,
+ size_t leftover_nslices);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */
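
With the widened constructor above, a transport chooses between the two protectors at creation time: per the header comment, a non-NULL zero_copy_protector means the frame protector is never used. A minimal sketch of the two call shapes follows; wrap_with_tsi is a made-up helper, and passing NULL for the unused protector is an assumption based on that comment rather than something this diff states.

static grpc_endpoint *wrap_with_tsi(
    struct tsi_frame_protector *fp,
    struct tsi_zero_copy_grpc_protector *zc_fp, grpc_endpoint *tcp_ep) {
  if (zc_fp != NULL) {
    /* Zero-copy path: every protect/unprotect goes through zc_fp, so the
       frame-protector slot is left empty (assumed acceptable here). */
    return grpc_secure_endpoint_create(NULL, zc_fp, tcp_ep,
                                       /*leftover_slices=*/NULL, 0);
  }
  /* Byte-oriented path: only a frame protector is supplied. */
  return grpc_secure_endpoint_create(fp, NULL, tcp_ep, NULL, 0);
}
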
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.cc
index a7568b995f..06160d0caa 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.cc
@@ -34,6 +34,7 @@
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
+#include "src/core/lib/security/credentials/ssl/ssl_credentials.h"
#include "src/core/lib/security/transport/lb_targets_info.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_handshaker.h"
@@ -136,6 +137,39 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
}
}
+int grpc_security_connector_cmp(grpc_security_connector *sc,
+ grpc_security_connector *other) {
+ if (sc == NULL || other == NULL) return GPR_ICMP(sc, other);
+ int c = GPR_ICMP(sc->vtable, other->vtable);
+ if (c != 0) return c;
+ return sc->vtable->cmp(sc, other);
+}
+
+int grpc_channel_security_connector_cmp(grpc_channel_security_connector *sc1,
+ grpc_channel_security_connector *sc2) {
+ GPR_ASSERT(sc1->channel_creds != NULL);
+ GPR_ASSERT(sc2->channel_creds != NULL);
+ int c = GPR_ICMP(sc1->channel_creds, sc2->channel_creds);
+ if (c != 0) return c;
+ c = GPR_ICMP(sc1->request_metadata_creds, sc2->request_metadata_creds);
+ if (c != 0) return c;
+ c = GPR_ICMP((void *)sc1->check_call_host, (void *)sc2->check_call_host);
+ if (c != 0) return c;
+ c = GPR_ICMP((void *)sc1->cancel_check_call_host,
+ (void *)sc2->cancel_check_call_host);
+ if (c != 0) return c;
+ return GPR_ICMP((void *)sc1->add_handshakers, (void *)sc2->add_handshakers);
+}
+
+int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1,
+ grpc_server_security_connector *sc2) {
+ GPR_ASSERT(sc1->server_creds != NULL);
+ GPR_ASSERT(sc2->server_creds != NULL);
+ int c = GPR_ICMP(sc1->server_creds, sc2->server_creds);
+ if (c != 0) return c;
+ return GPR_ICMP((void *)sc1->add_handshakers, (void *)sc2->add_handshakers);
+}
+
bool grpc_channel_security_connector_check_call_host(
grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
const char *host, grpc_auth_context *auth_context,
@@ -199,23 +233,27 @@ void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
if (gpr_unref(&sc->refcount)) sc->vtable->destroy(exec_ctx, sc);
}
-static void connector_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, "connector_pointer_arg_destroy");
+static void connector_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
+ GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, (grpc_security_connector *)p,
+ "connector_arg_destroy");
}
-static void *connector_pointer_arg_copy(void *p) {
- return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy");
+static void *connector_arg_copy(void *p) {
+ return GRPC_SECURITY_CONNECTOR_REF((grpc_security_connector *)p,
+ "connector_arg_copy");
}
-static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+static int connector_cmp(void *a, void *b) {
+ return grpc_security_connector_cmp((grpc_security_connector *)a,
+ (grpc_security_connector *)b);
+}
-static const grpc_arg_pointer_vtable connector_pointer_vtable = {
- connector_pointer_arg_copy, connector_pointer_arg_destroy,
- connector_pointer_cmp};
+static const grpc_arg_pointer_vtable connector_arg_vtable = {
+ connector_arg_copy, connector_arg_destroy, connector_cmp};
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) {
- return grpc_channel_arg_pointer_create(GRPC_ARG_SECURITY_CONNECTOR, sc,
- &connector_pointer_vtable);
+ return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SECURITY_CONNECTOR,
+ sc, &connector_arg_vtable);
}
grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg) {
@@ -225,7 +263,7 @@ grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg) {
GRPC_ARG_SECURITY_CONNECTOR);
return NULL;
}
- return arg->value.pointer.p;
+ return (grpc_security_connector *)arg->value.pointer.p;
}
grpc_security_connector *grpc_security_connector_find_in_args(
@@ -240,6 +278,30 @@ grpc_security_connector *grpc_security_connector_find_in_args(
return NULL;
}
+static tsi_client_certificate_request_type
+get_tsi_client_certificate_request_type(
+ grpc_ssl_client_certificate_request_type grpc_request_type) {
+ switch (grpc_request_type) {
+ case GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE:
+ return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
+
+ case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
+ return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
+
+ case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY:
+ return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY;
+
+ case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
+ return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
+
+ case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY:
+ return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
+
+ default:
+ return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
+ }
+}
+
/* -- Fake implementation. -- */
typedef struct {
@@ -380,6 +442,32 @@ static void fake_server_check_peer(grpc_exec_ctx *exec_ctx,
fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
}
+static int fake_channel_cmp(grpc_security_connector *sc1,
+ grpc_security_connector *sc2) {
+ grpc_fake_channel_security_connector *c1 =
+ (grpc_fake_channel_security_connector *)sc1;
+ grpc_fake_channel_security_connector *c2 =
+ (grpc_fake_channel_security_connector *)sc2;
+ int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base);
+ if (c != 0) return c;
+ c = strcmp(c1->target, c2->target);
+ if (c != 0) return c;
+ if (c1->expected_targets == NULL || c2->expected_targets == NULL) {
+ c = GPR_ICMP(c1->expected_targets, c2->expected_targets);
+ } else {
+ c = strcmp(c1->expected_targets, c2->expected_targets);
+ }
+ if (c != 0) return c;
+ return GPR_ICMP(c1->is_lb_channel, c2->is_lb_channel);
+}
+
+static int fake_server_cmp(grpc_security_connector *sc1,
+ grpc_security_connector *sc2) {
+ return grpc_server_security_connector_cmp(
+ (grpc_server_security_connector *)sc1,
+ (grpc_server_security_connector *)sc2);
+}
+
static bool fake_channel_check_call_host(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc,
const char *host,
@@ -416,18 +504,21 @@ static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx,
}
static grpc_security_connector_vtable fake_channel_vtable = {
- fake_channel_destroy, fake_channel_check_peer};
+ fake_channel_destroy, fake_channel_check_peer, fake_channel_cmp};
static grpc_security_connector_vtable fake_server_vtable = {
- fake_server_destroy, fake_server_check_peer};
+ fake_server_destroy, fake_server_check_peer, fake_server_cmp};
grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
+ grpc_channel_credentials *channel_creds,
grpc_call_credentials *request_metadata_creds, const char *target,
const grpc_channel_args *args) {
- grpc_fake_channel_security_connector *c = gpr_zalloc(sizeof(*c));
+ grpc_fake_channel_security_connector *c =
+ (grpc_fake_channel_security_connector *)gpr_zalloc(sizeof(*c));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
c->base.base.vtable = &fake_channel_vtable;
+ c->base.channel_creds = channel_creds;
c->base.request_metadata_creds =
grpc_call_credentials_ref(request_metadata_creds);
c->base.check_call_host = fake_channel_check_call_host;
@@ -441,12 +532,14 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
}
grpc_server_security_connector *grpc_fake_server_security_connector_create(
- void) {
+ grpc_server_credentials *server_creds) {
grpc_server_security_connector *c =
- gpr_zalloc(sizeof(grpc_server_security_connector));
+ (grpc_server_security_connector *)gpr_zalloc(
+ sizeof(grpc_server_security_connector));
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &fake_server_vtable;
c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
+ c->server_creds = server_creds;
c->add_handshakers = fake_server_add_handshakers;
return c;
}
@@ -455,24 +548,33 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_server_handshaker_factory *handshaker_factory;
+ tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector;
+static bool server_connector_has_cert_config_fetcher(
+ grpc_ssl_server_security_connector *c) {
+ GPR_ASSERT(c != NULL);
+ grpc_ssl_server_credentials *server_creds =
+ (grpc_ssl_server_credentials *)c->base.server_creds;
+ GPR_ASSERT(server_creds != NULL);
+ return server_creds->certificate_config_fetcher.cb != NULL;
+}
+
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
+ grpc_channel_credentials_unref(exec_ctx, c->base.channel_creds);
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
- if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
+ c->client_handshaker_factory = NULL;
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc);
@@ -482,9 +584,9 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- if (c->handshaker_factory != NULL) {
- tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
- }
+ grpc_server_credentials_unref(exec_ctx, c->base.server_creds);
+ tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
+ c->server_handshaker_factory = NULL;
gpr_free(sc);
}
@@ -496,7 +598,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
- c->handshaker_factory,
+ c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
: c->target_name,
&tsi_hs);
@@ -505,7 +607,6 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
tsi_result_to_string(result));
return;
}
-
// Create handshakers.
grpc_handshake_manager_add(
handshake_mgr,
@@ -513,21 +614,110 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
}
+static const char **fill_alpn_protocol_strings(size_t *num_alpn_protocols) {
+ GPR_ASSERT(num_alpn_protocols != NULL);
+ *num_alpn_protocols = grpc_chttp2_num_alpn_versions();
+ const char **alpn_protocol_strings =
+ (const char **)gpr_malloc(sizeof(const char *) * (*num_alpn_protocols));
+ for (size_t i = 0; i < *num_alpn_protocols; i++) {
+ alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
+ }
+ return alpn_protocol_strings;
+}
+
+/* Attempts to replace the server_handshaker_factory with a new factory using
+ * the provided grpc_ssl_server_certificate_config. Should new factory creation
+ * fail, the existing factory will not be replaced. Returns true on success (new
+ * factory created). */
+static bool try_replace_server_handshaker_factory(
+ grpc_ssl_server_security_connector *sc,
+ const grpc_ssl_server_certificate_config *config) {
+ if (config == NULL) {
+ gpr_log(GPR_ERROR,
+ "Server certificate config callback returned invalid (NULL) "
+ "config.");
+ return false;
+ }
+ gpr_log(GPR_DEBUG, "Using new server certificate config (%p).", config);
+
+ size_t num_alpn_protocols = 0;
+ const char **alpn_protocol_strings =
+ fill_alpn_protocol_strings(&num_alpn_protocols);
+ tsi_ssl_pem_key_cert_pair *cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
+ config->pem_key_cert_pairs, config->num_key_cert_pairs);
+ tsi_ssl_server_handshaker_factory *new_handshaker_factory = NULL;
+ grpc_ssl_server_credentials *server_creds =
+ (grpc_ssl_server_credentials *)sc->base.server_creds;
+ tsi_result result = tsi_create_ssl_server_handshaker_factory_ex(
+ cert_pairs, config->num_key_cert_pairs, config->pem_root_certs,
+ get_tsi_client_certificate_request_type(
+ server_creds->config.client_certificate_request),
+ ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
+ &new_handshaker_factory);
+ gpr_free(cert_pairs);
+ gpr_free((void *)alpn_protocol_strings);
+
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
+ tsi_result_to_string(result));
+ return false;
+ }
+ tsi_ssl_server_handshaker_factory_unref(sc->server_handshaker_factory);
+ sc->server_handshaker_factory = new_handshaker_factory;
+ return true;
+}
+
+/* Attempts to fetch the server certificate config if a callback is available.
+ * Current certificate config will continue to be used if the callback returns
+ * an error. Returns true if new credentials were successfully loaded. */
+static bool try_fetch_ssl_server_credentials(
+ grpc_ssl_server_security_connector *sc) {
+ grpc_ssl_server_certificate_config *certificate_config = NULL;
+ bool status;
+
+ GPR_ASSERT(sc != NULL);
+ if (!server_connector_has_cert_config_fetcher(sc)) return false;
+
+ grpc_ssl_server_credentials *server_creds =
+ (grpc_ssl_server_credentials *)sc->base.server_creds;
+ grpc_ssl_certificate_config_reload_status cb_result =
+ server_creds->certificate_config_fetcher.cb(
+ server_creds->certificate_config_fetcher.user_data,
+ &certificate_config);
+ if (cb_result == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED) {
+ gpr_log(GPR_DEBUG, "No change in SSL server credentials.");
+ status = false;
+ } else if (cb_result == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW) {
+ status = try_replace_server_handshaker_factory(sc, certificate_config);
+ } else {
+ // Log error, continue using previously-loaded credentials.
+ gpr_log(GPR_ERROR,
+ "Failed fetching new server credentials, continuing to "
+ "use previously-loaded credentials.");
+ status = false;
+ }
+
+ if (certificate_config != NULL) {
+ grpc_ssl_server_certificate_config_destroy(certificate_config);
+ }
+ return status;
+}
+
static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc,
grpc_handshake_manager *handshake_mgr) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
// Instantiate TSI handshaker.
+ try_fetch_ssl_server_credentials(c);
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
- c->handshaker_factory, &tsi_hs);
+ c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
return;
}
-
// Create handshakers.
grpc_handshake_manager_add(
handshake_mgr,
@@ -639,6 +829,29 @@ static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
}
+static int ssl_channel_cmp(grpc_security_connector *sc1,
+ grpc_security_connector *sc2) {
+ grpc_ssl_channel_security_connector *c1 =
+ (grpc_ssl_channel_security_connector *)sc1;
+ grpc_ssl_channel_security_connector *c2 =
+ (grpc_ssl_channel_security_connector *)sc2;
+ int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base);
+ if (c != 0) return c;
+ c = strcmp(c1->target_name, c2->target_name);
+ if (c != 0) return c;
+ return (c1->overridden_target_name == NULL ||
+ c2->overridden_target_name == NULL)
+ ? GPR_ICMP(c1->overridden_target_name, c2->overridden_target_name)
+ : strcmp(c1->overridden_target_name, c2->overridden_target_name);
+}
+
+static int ssl_server_cmp(grpc_security_connector *sc1,
+ grpc_security_connector *sc2) {
+ return grpc_server_security_connector_cmp(
+ (grpc_server_security_connector *)sc1,
+ (grpc_server_security_connector *)sc2);
+}
+
static void add_shallow_auth_property_to_peer(tsi_peer *peer,
const grpc_auth_property *prop,
const char *tsi_prop_name) {
@@ -660,7 +873,8 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++;
if (max_num_props > 0) {
- peer.properties = gpr_malloc(max_num_props * sizeof(tsi_peer_property));
+ peer.properties = (tsi_peer_property *)gpr_malloc(
+ max_num_props * sizeof(tsi_peer_property));
it = grpc_auth_context_property_iterator(auth_context);
while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) {
@@ -714,10 +928,10 @@ static void ssl_channel_cancel_check_call_host(
}
static grpc_security_connector_vtable ssl_channel_vtable = {
- ssl_channel_destroy, ssl_channel_check_peer};
+ ssl_channel_destroy, ssl_channel_check_peer, ssl_channel_cmp};
static grpc_security_connector_vtable ssl_server_vtable = {
- ssl_server_destroy, ssl_server_check_peer};
+ ssl_server_destroy, ssl_server_check_peer, ssl_server_cmp};
/* returns a NULL terminated slice. */
static grpc_slice compute_default_pem_root_certs_once(void) {
@@ -765,31 +979,6 @@ grpc_slice grpc_get_default_ssl_roots_for_testing(void) {
return compute_default_pem_root_certs_once();
}
-static tsi_client_certificate_request_type
-get_tsi_client_certificate_request_type(
- grpc_ssl_client_certificate_request_type grpc_request_type) {
- switch (grpc_request_type) {
- case GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE:
- return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
-
- case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
- return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
-
- case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY:
- return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY;
-
- case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
- return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY;
-
- case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY:
- return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
-
- default:
- // Is this a sane default
- return TSI_DONT_REQUEST_CLIENT_CERTIFICATE;
- }
-}
-
const char *grpc_get_default_ssl_roots(void) {
/* TODO(jboeuf@google.com): Maybe revisit the approach which consists in
loading all the roots once for the lifetime of the process. */
@@ -801,21 +990,18 @@ const char *grpc_get_default_ssl_roots(void) {
}
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *request_metadata_creds,
+ grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds,
+ grpc_call_credentials *request_metadata_creds,
const grpc_ssl_config *config, const char *target_name,
const char *overridden_target_name, grpc_channel_security_connector **sc) {
- size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
+ size_t num_alpn_protocols = 0;
const char **alpn_protocol_strings =
- gpr_malloc(sizeof(const char *) * num_alpn_protocols);
+ fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_result result = TSI_OK;
grpc_ssl_channel_security_connector *c;
- size_t i;
const char *pem_root_certs;
char *port;
-
- for (i = 0; i < num_alpn_protocols; i++) {
- alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
- }
+ bool has_key_cert_pair;
if (config == NULL || target_name == NULL) {
gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name.");
@@ -831,11 +1017,13 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
pem_root_certs = config->pem_root_certs;
}
- c = gpr_zalloc(sizeof(grpc_ssl_channel_security_connector));
+ c = (grpc_ssl_channel_security_connector *)gpr_zalloc(
+ sizeof(grpc_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.vtable = &ssl_channel_vtable;
c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
+ c->base.channel_creds = grpc_channel_credentials_ref(channel_creds);
c->base.request_metadata_creds =
grpc_call_credentials_ref(request_metadata_creds);
c->base.check_call_host = ssl_channel_check_call_host;
@@ -847,12 +1035,13 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
c->overridden_target_name = gpr_strdup(overridden_target_name);
}
- bool has_key_cert_pair = config->pem_key_cert_pair.private_key != NULL &&
- config->pem_key_cert_pair.cert_chain != NULL;
+ has_key_cert_pair = config->pem_key_cert_pair != NULL &&
+ config->pem_key_cert_pair->private_key != NULL &&
+ config->pem_key_cert_pair->cert_chain != NULL;
result = tsi_create_ssl_client_handshaker_factory(
- has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
+ has_key_cert_pair ? config->pem_key_cert_pair : NULL, pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->client_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@@ -869,48 +1058,64 @@ error:
return GRPC_SECURITY_ERROR;
}
+static grpc_ssl_server_security_connector *
+grpc_ssl_server_security_connector_initialize(
+ grpc_server_credentials *server_creds) {
+ grpc_ssl_server_security_connector *c =
+ (grpc_ssl_server_security_connector *)gpr_zalloc(
+ sizeof(grpc_ssl_server_security_connector));
+ gpr_ref_init(&c->base.base.refcount, 1);
+ c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
+ c->base.base.vtable = &ssl_server_vtable;
+ c->base.add_handshakers = ssl_server_add_handshakers;
+ c->base.server_creds = grpc_server_credentials_ref(server_creds);
+ return c;
+}
+
grpc_security_status grpc_ssl_server_security_connector_create(
- grpc_exec_ctx *exec_ctx, const grpc_ssl_server_config *config,
+ grpc_exec_ctx *exec_ctx, grpc_server_credentials *gsc,
grpc_server_security_connector **sc) {
- size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
- const char **alpn_protocol_strings =
- gpr_malloc(sizeof(const char *) * num_alpn_protocols);
tsi_result result = TSI_OK;
- grpc_ssl_server_security_connector *c;
- size_t i;
+ grpc_ssl_server_credentials *server_credentials =
+ (grpc_ssl_server_credentials *)gsc;
+ grpc_security_status retval = GRPC_SECURITY_OK;
- for (i = 0; i < num_alpn_protocols; i++) {
- alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
- }
+ GPR_ASSERT(server_credentials != NULL);
+ GPR_ASSERT(sc != NULL);
- if (config == NULL || config->num_key_cert_pairs == 0) {
- gpr_log(GPR_ERROR, "An SSL server needs a key and a cert.");
- goto error;
+ grpc_ssl_server_security_connector *c =
+ grpc_ssl_server_security_connector_initialize(gsc);
+ if (server_connector_has_cert_config_fetcher(c)) {
+ // Load initial credentials from certificate_config_fetcher:
+ if (!try_fetch_ssl_server_credentials(c)) {
+ gpr_log(GPR_ERROR, "Failed loading SSL server credentials from fetcher.");
+ retval = GRPC_SECURITY_ERROR;
+ }
+ } else {
+ size_t num_alpn_protocols = 0;
+ const char **alpn_protocol_strings =
+ fill_alpn_protocol_strings(&num_alpn_protocols);
+ result = tsi_create_ssl_server_handshaker_factory_ex(
+ server_credentials->config.pem_key_cert_pairs,
+ server_credentials->config.num_key_cert_pairs,
+ server_credentials->config.pem_root_certs,
+ get_tsi_client_certificate_request_type(
+ server_credentials->config.client_certificate_request),
+ ssl_cipher_suites(), alpn_protocol_strings,
+ (uint16_t)num_alpn_protocols, &c->server_handshaker_factory);
+ gpr_free((void *)alpn_protocol_strings);
+ if (result != TSI_OK) {
+ gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
+ tsi_result_to_string(result));
+ retval = GRPC_SECURITY_ERROR;
+ }
}
- c = gpr_zalloc(sizeof(grpc_ssl_server_security_connector));
- gpr_ref_init(&c->base.base.refcount, 1);
- c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
- c->base.base.vtable = &ssl_server_vtable;
- result = tsi_create_ssl_server_handshaker_factory_ex(
- config->pem_key_cert_pairs, config->num_key_cert_pairs,
- config->pem_root_certs, get_tsi_client_certificate_request_type(
- config->client_certificate_request),
- ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
- if (result != TSI_OK) {
- gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
- tsi_result_to_string(result));
- ssl_server_destroy(exec_ctx, &c->base.base);
- *sc = NULL;
- goto error;
+ if (retval == GRPC_SECURITY_OK) {
+ *sc = &c->base;
+ } else {
+ if (c != NULL) ssl_server_destroy(exec_ctx, &c->base.base);
+ if (sc != NULL) *sc = NULL;
}
- c->base.add_handshakers = ssl_server_add_handshakers;
- *sc = &c->base;
- gpr_free((void *)alpn_protocol_strings);
- return GRPC_SECURITY_OK;
-
-error:
- gpr_free((void *)alpn_protocol_strings);
- return GRPC_SECURITY_ERROR;
+ return retval;
}
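
Note on the reload path above: try_fetch_ssl_server_credentials() and try_replace_server_handshaker_factory() implement a fetch-and-swap scheme. The connector asks the credentials' certificate_config_fetcher callback for a possibly-new config, builds a fresh TSI server handshaker factory from it, and only unrefs and replaces the old factory once factory creation has succeeded, so a callback error or a TSI failure leaves the previously-loaded certificates in service. A minimal sketch of that pattern in isolation, using hypothetical my_* types in place of the TSI factory, the certificate config, and tsi_create_ssl_server_handshaker_factory_ex():

    #include <stdbool.h>
    #include <stdlib.h>

    /* Sketch only: my_factory, my_config, and build_factory are hypothetical
     * stand-ins for the TSI handshaker factory, the certificate config, and
     * the TSI factory-creation call. */
    typedef struct { int generation; } my_factory;
    typedef struct { int generation; } my_config;

    static bool build_factory(const my_config *cfg, my_factory **out) {
      if (cfg == NULL) return false; /* invalid config: keep the old factory */
      *out = (my_factory *)malloc(sizeof(**out));
      (*out)->generation = cfg->generation;
      return true;
    }

    /* Returns true only if a replacement factory was installed. */
    static bool try_swap(my_factory **current, const my_config *cfg) {
      my_factory *replacement = NULL;
      if (!build_factory(cfg, &replacement)) return false;
      free(*current);          /* old factory is released only on success */
      *current = replacement;
      return true;
    }

The real code preserves the same invariant: sc->server_handshaker_factory is only unreffed and overwritten after the new factory exists.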
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 4f9b63ad20..54a563bb2c 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -29,6 +29,10 @@
#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_security_connector_refcount;
#endif
@@ -56,13 +60,9 @@ typedef struct {
void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc,
tsi_peer peer, grpc_auth_context **auth_context,
grpc_closure *on_peer_checked);
+ int (*cmp)(grpc_security_connector *sc, grpc_security_connector *other);
} grpc_security_connector_vtable;
-typedef struct grpc_security_connector_handshake_list {
- void *handshake;
- struct grpc_security_connector_handshake_list *next;
-} grpc_security_connector_handshake_list;
-
struct grpc_security_connector {
const grpc_security_connector_vtable *vtable;
gpr_refcount refcount;
@@ -100,6 +100,10 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context,
grpc_closure *on_peer_checked);
+/* Compares two security connectors. */
+int grpc_security_connector_cmp(grpc_security_connector *sc,
+ grpc_security_connector *other);
+
/* Util to encapsulate the connector in a channel arg. */
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);
@@ -112,13 +116,14 @@ grpc_security_connector *grpc_security_connector_find_in_args(
/* --- channel_security_connector object. ---
- A channel security connector object represents away to configure the
+ A channel security connector object represents a way to configure the
underlying transport security mechanism on the client side. */
typedef struct grpc_channel_security_connector grpc_channel_security_connector;
struct grpc_channel_security_connector {
grpc_security_connector base;
+ grpc_channel_credentials *channel_creds;
grpc_call_credentials *request_metadata_creds;
bool (*check_call_host)(grpc_exec_ctx *exec_ctx,
grpc_channel_security_connector *sc, const char *host,
@@ -134,6 +139,10 @@ struct grpc_channel_security_connector {
grpc_handshake_manager *handshake_mgr);
};
+/// A helper function for use in grpc_security_connector_cmp() implementations.
+int grpc_channel_security_connector_cmp(grpc_channel_security_connector *sc1,
+ grpc_channel_security_connector *sc2);
+
/// Checks that the host that will be set for a call is acceptable.
/// Returns true if completed synchronously, in which case \a error will
/// be set to indicate the result. Otherwise, \a on_call_host_checked
@@ -157,18 +166,23 @@ void grpc_channel_security_connector_add_handshakers(
/* --- server_security_connector object. ---
- A server security connector object represents away to configure the
+ A server security connector object represents a way to configure the
underlying transport security mechanism on the server side. */
typedef struct grpc_server_security_connector grpc_server_security_connector;
struct grpc_server_security_connector {
grpc_security_connector base;
+ grpc_server_credentials *server_creds;
void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
grpc_server_security_connector *sc,
grpc_handshake_manager *handshake_mgr);
};
+/// A helper function for use in grpc_security_connector_cmp() implementations.
+int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1,
+ grpc_server_security_connector *sc2);
+
void grpc_server_security_connector_add_handshakers(
grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
grpc_handshake_manager *handshake_mgr);
@@ -178,18 +192,19 @@ void grpc_server_security_connector_add_handshakers(
/* For TESTING ONLY!
Creates a fake connector that emulates real channel security. */
grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
+ grpc_channel_credentials *channel_creds,
grpc_call_credentials *request_metadata_creds, const char *target,
const grpc_channel_args *args);
/* For TESTING ONLY!
Creates a fake connector that emulates real server security. */
grpc_server_security_connector *grpc_fake_server_security_connector_create(
- void);
+ grpc_server_credentials *server_creds);
/* Config for ssl clients. */
typedef struct {
- tsi_ssl_pem_key_cert_pair pem_key_cert_pair;
+ tsi_ssl_pem_key_cert_pair *pem_key_cert_pair;
char *pem_root_certs;
} grpc_ssl_config;
@@ -207,7 +222,8 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *request_metadata_creds,
+ grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds,
+ grpc_call_credentials *request_metadata_creds,
const grpc_ssl_config *config, const char *target_name,
const char *overridden_target_name, grpc_channel_security_connector **sc);
@@ -232,7 +248,7 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_server_security_connector_create(
- grpc_exec_ctx *exec_ctx, const grpc_ssl_server_config *config,
+ grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_credentials,
grpc_server_security_connector **sc);
/* Util. */
@@ -245,4 +261,8 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
const grpc_auth_context *auth_context);
void tsi_shallow_peer_destruct(tsi_peer *peer);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H */
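
The header change above adds a cmp hook to grpc_security_connector_vtable plus grpc_channel_security_connector_cmp()/grpc_server_security_connector_cmp() helpers, so connectors carried in channel args can be ordered and deduplicated. A concrete connector typically chains to the helper for the shared fields and then compares its own data, as ssl_channel_cmp() does earlier in this diff. A hedged sketch for a hypothetical connector with one extra string field (my_connector and my_connector_cmp are illustrative names, not part of the library):

    #include <string.h>
    #include "src/core/lib/security/transport/security_connector.h"

    /* Hypothetical connector layout: the common base plus one extra field. */
    typedef struct {
      grpc_channel_security_connector base;
      char *authority;
    } my_connector;

    static int my_connector_cmp(grpc_security_connector *sc1,
                                grpc_security_connector *sc2) {
      my_connector *c1 = (my_connector *)sc1;
      my_connector *c2 = (my_connector *)sc2;
      /* Order by the shared fields first, then by this connector's own data. */
      int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base);
      if (c != 0) return c;
      return strcmp(c1->authority, c2->authority);
    }

Such a function would then be installed as the third entry of the connector's grpc_security_connector_vtable.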
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.cc
index fc9c9f980f..3d19605617 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -32,6 +32,7 @@
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/tsi_error.h"
#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/tsi/transport_security_grpc.h"
#define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256
@@ -127,24 +128,36 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = arg;
- gpr_mu_lock(&h->mu);
+static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
+ security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
- goto done;
+ return;
}
- // Create frame protector.
- tsi_frame_protector *protector;
- tsi_result result = tsi_handshaker_result_create_frame_protector(
- h->handshaker_result, NULL, &protector);
- if (result != TSI_OK) {
+ // Create zero-copy frame protector, if implemented.
+ tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
+ tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
+ exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
+ if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
error = grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"),
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
- goto done;
+ return;
+ }
+ // Create frame protector if zero-copy frame protector is NULL.
+ tsi_frame_protector *protector = NULL;
+ if (zero_copy_protector == NULL) {
+ result = tsi_handshaker_result_create_frame_protector(h->handshaker_result,
+ NULL, &protector);
+ if (result != TSI_OK) {
+ error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Frame protector creation failed"),
+ result);
+ security_handshake_failed_locked(exec_ctx, h, error);
+ return;
+ }
}
// Get unused bytes.
const unsigned char *unused_bytes = NULL;
@@ -155,12 +168,12 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
if (unused_bytes_size > 0) {
grpc_slice slice =
grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size);
- h->args->endpoint =
- grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1);
+ h->args->endpoint = grpc_secure_endpoint_create(
+ protector, zero_copy_protector, h->args->endpoint, &slice, 1);
grpc_slice_unref_internal(exec_ctx, slice);
} else {
- h->args->endpoint =
- grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0);
+ h->args->endpoint = grpc_secure_endpoint_create(
+ protector, zero_copy_protector, h->args->endpoint, NULL, 0);
}
tsi_handshaker_result_destroy(h->handshaker_result);
h->handshaker_result = NULL;
@@ -177,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
-done:
+}
+
+static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ security_handshaker *h = (security_handshaker *)arg;
+ gpr_mu_lock(&h->mu);
+ on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@@ -239,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
static void on_handshake_next_done_grpc_wrapper(
tsi_result result, void *user_data, const unsigned char *bytes_to_send,
size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
- security_handshaker *h = user_data;
+ security_handshaker *h = (security_handshaker *)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -281,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -298,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
}
if (bytes_received_size > h->handshake_buffer_size) {
- h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
+ h->handshake_buffer =
+ (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@@ -323,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- security_handshaker *h = arg;
+ security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
@@ -400,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
static grpc_handshaker *security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
grpc_security_connector *connector) {
- security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
+ security_handshaker *h =
+ (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+ h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@@ -450,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_do_handshake};
static grpc_handshaker *fail_handshaker_create() {
- grpc_handshaker *h = gpr_malloc(sizeof(*h));
+ grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}
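
The security_handshaker change above prefers a zero-copy protector when the TSI implementation provides one: TSI_UNIMPLEMENTED means "fall back to the classic frame protector", and only a genuine error aborts the handshake, after which exactly one of the two protectors is handed to grpc_secure_endpoint_create(). The control flow reduces to the following sketch, where try_zero_copy() and try_copying() are hypothetical stand-ins for the two TSI creation calls:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { OK, UNIMPLEMENTED, ERR } status;

    /* Hypothetical creation helpers; each fills its out-param on OK. */
    static status try_zero_copy(void **zc) { *zc = NULL; return UNIMPLEMENTED; }
    static status try_copying(void **fp)  { *fp = (void *)1; return OK; }

    /* Returns false only on a hard failure; UNIMPLEMENTED just falls back. */
    static bool pick_protector(void **zc, void **fp) {
      status s = try_zero_copy(zc);
      if (s != OK && s != UNIMPLEMENTED) return false;
      if (*zc == NULL) {
        if (try_copying(fp) != OK) return false;
      }
      return true; /* exactly one of *zc / *fp is non-NULL on success */
    }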
diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h
index 95bf127fc6..178099bb94 100644
--- a/src/core/lib/security/transport/security_handshaker.h
+++ b/src/core/lib/security/transport/security_handshaker.h
@@ -23,6 +23,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/security/transport/security_connector.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// Creates a security handshaker using \a handshaker.
grpc_handshaker *grpc_security_handshaker_create(
grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
@@ -31,4 +35,8 @@ grpc_handshaker *grpc_security_handshaker_create(
/// Registers security handshaker factories.
void grpc_security_register_handshaker_factories();
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H */
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.cc
index 9bf3f0ca0f..f5e02f42fe 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -26,7 +26,15 @@
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/slice/slice_internal.h"
+typedef enum {
+ STATE_INIT = 0,
+ STATE_DONE,
+ STATE_CANCELLED,
+} async_state;
+
typedef struct call_data {
+ grpc_call_combiner *call_combiner;
+ grpc_call_stack *owning_call;
grpc_transport_stream_op_batch *recv_initial_metadata_batch;
grpc_closure *original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
@@ -34,6 +42,8 @@ typedef struct call_data {
const grpc_metadata *consumed_md;
size_t num_consumed_md;
grpc_auth_context *auth_context;
+ grpc_closure cancel_closure;
+ gpr_atm state; // async_state
} call_data;
typedef struct channel_data {
@@ -53,8 +63,8 @@ static grpc_metadata_array metadata_batch_to_md_array(
grpc_slice value = GRPC_MDVALUE(md);
if (result.count == result.capacity) {
result.capacity = GPR_MAX(result.capacity + 8, result.capacity * 2);
- result.metadata =
- gpr_realloc(result.metadata, result.capacity * sizeof(grpc_metadata));
+ result.metadata = (grpc_metadata *)gpr_realloc(
+ result.metadata, result.capacity * sizeof(grpc_metadata));
}
usr_md = &result.metadata[result.count++];
usr_md->key = grpc_slice_ref_internal(key);
@@ -66,8 +76,8 @@ static grpc_metadata_array metadata_batch_to_md_array(
static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_mdelem md) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
size_t i;
for (i = 0; i < calld->num_consumed_md; i++) {
const grpc_metadata *consumed_md = &calld->consumed_md[i];
@@ -78,54 +88,94 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md);
}
-/* called from application code */
-static void on_md_processing_done(
- void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
- const grpc_metadata *response_md, size_t num_response_md,
- grpc_status_code status, const char *error_details) {
- grpc_call_element *elem = user_data;
- call_data *calld = elem->call_data;
+static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
+ grpc_call_element *elem,
+ const grpc_metadata *consumed_md,
+ size_t num_consumed_md,
+ const grpc_metadata *response_md,
+ size_t num_response_md,
+ grpc_error *error) {
+ call_data *calld = (call_data *)elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
"response_md in auth metadata processing not supported for now. "
"Ignoring...");
}
- grpc_error *error = GRPC_ERROR_NONE;
- if (status == GRPC_STATUS_OK) {
+ if (error == GRPC_ERROR_NONE) {
calld->consumed_md = consumed_md;
calld->num_consumed_md = num_consumed_md;
error = grpc_metadata_batch_filter(
- &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
+ exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
- } else {
- if (error_details == NULL) {
- error_details = "Authentication metadata processing failed.";
+ }
+ GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
+ error);
+}
+
+// Called from application code.
+static void on_md_processing_done(
+ void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+ const grpc_metadata *response_md, size_t num_response_md,
+ grpc_status_code status, const char *error_details) {
+ grpc_call_element *elem = (grpc_call_element *)user_data;
+ call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ // If the call was not cancelled while we were in flight, process the result.
+ if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_DONE)) {
+ grpc_error *error = GRPC_ERROR_NONE;
+ if (status != GRPC_STATUS_OK) {
+ if (error_details == NULL) {
+ error_details = "Authentication metadata processing failed.";
+ }
+ error = grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status);
}
- error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status);
+ on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
+ response_md, num_response_md, error);
}
+ // Clean up.
for (size_t i = 0; i < calld->md.count; i++) {
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
}
grpc_metadata_array_destroy(&calld->md);
- GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready,
- error);
+ GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata");
grpc_exec_ctx_finish(&exec_ctx);
}
+static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)elem->call_data;
+ // If the result was not already processed, invoke the callback now.
+ if (error != GRPC_ERROR_NONE &&
+ gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
+ (gpr_atm)STATE_CANCELLED)) {
+ on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0,
+ GRPC_ERROR_REF(error));
+ }
+ GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
+}
+
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *elem = arg;
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)arg;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
+ // We're calling out to the application, so we need to make sure
+ // to drop the call combiner early if we get cancelled.
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call");
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
+ grpc_schedule_on_exec_ctx);
+ grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+ &calld->cancel_closure);
+ GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
calld->md = metadata_batch_to_md_array(
batch->payload->recv_initial_metadata.recv_initial_metadata);
chand->creds->processor.process(
@@ -141,7 +191,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static void auth_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (batch->recv_initial_metadata) {
// Inject our callback.
calld->recv_initial_metadata_batch = batch;
@@ -157,8 +207,10 @@ static void auth_start_transport_stream_op_batch(
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ calld->call_combiner = args->call_combiner;
+ calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -188,7 +240,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
grpc_auth_context *auth_context =
grpc_find_auth_context_in_args(args->channel_args);
GPR_ASSERT(auth_context != NULL);
@@ -203,7 +255,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
grpc_server_credentials_unref(exec_ctx, chand->creds);
}
@@ -218,6 +270,5 @@ const grpc_channel_filter grpc_server_auth_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server-auth"};
diff --git a/src/core/lib/security/transport/tsi_error.c b/src/core/lib/security/transport/tsi_error.cc
index 72f9600e84..72f9600e84 100644
--- a/src/core/lib/security/transport/tsi_error.c
+++ b/src/core/lib/security/transport/tsi_error.cc
diff --git a/src/core/lib/security/transport/tsi_error.h b/src/core/lib/security/transport/tsi_error.h
index 87a63a8a7c..4e19daf796 100644
--- a/src/core/lib/security/transport/tsi_error.h
+++ b/src/core/lib/security/transport/tsi_error.h
@@ -22,6 +22,14 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/tsi/transport_security_interface.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H */
diff --git a/src/core/lib/security/util/json_util.c b/src/core/lib/security/util/json_util.cc
index d847addef9..d847addef9 100644
--- a/src/core/lib/security/util/json_util.c
+++ b/src/core/lib/security/util/json_util.cc
diff --git a/src/core/lib/security/util/json_util.h b/src/core/lib/security/util/json_util.h
index 5ea831e27e..cdd8a7198a 100644
--- a/src/core/lib/security/util/json_util.h
+++ b/src/core/lib/security/util/json_util.h
@@ -28,6 +28,10 @@
#define GRPC_AUTH_JSON_TYPE_SERVICE_ACCOUNT "service_account"
#define GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER "authorized_user"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Gets a child property from a json node.
const char *grpc_json_get_string_property(const grpc_json *json,
const char *prop_name);
@@ -37,4 +41,8 @@ const char *grpc_json_get_string_property(const grpc_json *json,
bool grpc_copy_json_string_property(const grpc_json *json,
const char *prop_name, char **copied_value);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H */
diff --git a/src/core/lib/slice/b64.c b/src/core/lib/slice/b64.cc
index d02f303bdb..50264719a4 100644
--- a/src/core/lib/slice/b64.c
+++ b/src/core/lib/slice/b64.cc
@@ -58,7 +58,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
int multiline) {
size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
- char *result = gpr_malloc(result_projected_size);
+ char *result = (char *)gpr_malloc(result_projected_size);
grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
return result;
}
@@ -75,7 +75,7 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
int url_safe, int multiline) {
- const unsigned char *data = vdata;
+ const unsigned char *data = (const unsigned char *)vdata;
const char *base64_chars =
url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
const size_t result_projected_size =
diff --git a/src/core/lib/slice/b64.h b/src/core/lib/slice/b64.h
index 3fd15febe5..9b4dc65dbb 100644
--- a/src/core/lib/slice/b64.h
+++ b/src/core/lib/slice/b64.h
@@ -21,6 +21,10 @@
#include <grpc/slice.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Encodes data using base64. It is the caller's responsibility to free
the returned char * using gpr_free. Returns NULL on NULL input.
TODO(makdharma) : change the flags to bool from int */
@@ -47,4 +51,8 @@ grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64,
grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64,
size_t b64_len, int url_safe);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SLICE_B64_H */
diff --git a/src/core/lib/slice/percent_encoding.c b/src/core/lib/slice/percent_encoding.cc
index effc8d7ad6..effc8d7ad6 100644
--- a/src/core/lib/slice/percent_encoding.c
+++ b/src/core/lib/slice/percent_encoding.cc
diff --git a/src/core/lib/slice/percent_encoding.h b/src/core/lib/slice/percent_encoding.h
index faae26a683..14a4deb44b 100644
--- a/src/core/lib/slice/percent_encoding.h
+++ b/src/core/lib/slice/percent_encoding.h
@@ -30,6 +30,10 @@
#include <grpc/slice.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* URL percent encoding spec bitfield (usable as 'unreserved_bytes' in
grpc_percent_encode_slice, grpc_strict_percent_decode_slice).
Flags [A-Za-z0-9-_.~] as unreserved bytes for the percent encoding routines
@@ -60,4 +64,8 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
This cannot fail. */
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */
diff --git a/src/core/lib/slice/slice.c b/src/core/lib/slice/slice.cc
index 8a8087805c..0764eda052 100644
--- a/src/core/lib/slice/slice.c
+++ b/src/core/lib/slice/slice.cc
@@ -27,7 +27,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
char *grpc_slice_to_c_string(grpc_slice slice) {
- char *out = gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+ char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
out[GRPC_SLICE_LENGTH(slice)] = 0;
return out;
@@ -105,12 +105,12 @@ typedef struct new_slice_refcount {
} new_slice_refcount;
static void new_slice_ref(void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_slice_refcount *r = p;
+ new_slice_refcount *r = (new_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data);
gpr_free(r);
@@ -125,7 +125,8 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
void (*destroy)(void *),
void *user_data) {
grpc_slice slice;
- new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
+ new_slice_refcount *rc =
+ (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_slice_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -133,7 +134,7 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
rc->user_data = user_data;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -154,12 +155,12 @@ typedef struct new_with_len_slice_refcount {
} new_with_len_slice_refcount;
static void new_with_len_ref(void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
gpr_ref(&r->refs);
}
static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_with_len_slice_refcount *r = p;
+ new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data, r->user_length);
gpr_free(r);
@@ -173,8 +174,8 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = {
grpc_slice grpc_slice_new_with_len(void *p, size_t len,
void (*destroy)(void *, size_t)) {
grpc_slice slice;
- new_with_len_slice_refcount *rc =
- gpr_malloc(sizeof(new_with_len_slice_refcount));
+ new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc(
+ sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_with_len_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -183,7 +184,7 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len,
rc->user_length = len;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = p;
+ slice.data.refcounted.bytes = (uint8_t *)p;
slice.data.refcounted.length = len;
return slice;
}
@@ -205,12 +206,12 @@ typedef struct {
} malloc_refcount;
static void malloc_ref(void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
gpr_ref(&r->refs);
}
static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
- malloc_refcount *r = p;
+ malloc_refcount *r = (malloc_refcount *)p;
if (gpr_unref(&r->refs)) {
gpr_free(r);
}
@@ -232,7 +233,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
refcount is a malloc_refcount
bytes is an array of bytes of the requested length
Both parts are placed in the same allocation returned from gpr_malloc */
- malloc_refcount *rc = gpr_malloc(sizeof(malloc_refcount) + length);
+ malloc_refcount *rc =
+ (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length);
/* Initial refcount on rc is 1 - and it's up to the caller to release
this reference. */
@@ -451,7 +453,7 @@ int grpc_slice_rchr(grpc_slice s, char c) {
int grpc_slice_chr(grpc_slice s, char c) {
const char *b = (const char *)GRPC_SLICE_START_PTR(s);
- const char *p = memchr(b, c, GRPC_SLICE_LENGTH(s));
+ const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s));
return p == NULL ? -1 : (int)(p - b);
}
diff --git a/src/core/lib/slice/slice_buffer.c b/src/core/lib/slice/slice_buffer.cc
index a54a997a0d..63ffc0b00d 100644
--- a/src/core/lib/slice/slice_buffer.c
+++ b/src/core/lib/slice/slice_buffer.cc
@@ -45,11 +45,12 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
sb->capacity = GROW(sb->capacity);
GPR_ASSERT(sb->capacity > slice_count);
if (sb->base_slices == sb->inlined) {
- sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ sb->base_slices =
+ (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice));
memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
} else {
- sb->base_slices =
- gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
+ sb->base_slices = (grpc_slice *)gpr_realloc(
+ sb->base_slices, sb->capacity * sizeof(grpc_slice));
}
sb->slices = sb->base_slices + slice_offset;
@@ -291,7 +292,7 @@ void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n,
void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer *src, size_t n,
void *dst) {
- char *dstp = dst;
+ char *dstp = (char *)dst;
GPR_ASSERT(src->length >= n);
while (n > 0) {
diff --git a/src/core/lib/slice/slice_hash_table.c b/src/core/lib/slice/slice_hash_table.cc
index 1866ed25ac..6c2c9c201c 100644
--- a/src/core/lib/slice/slice_hash_table.c
+++ b/src/core/lib/slice/slice_hash_table.cc
@@ -60,14 +60,15 @@ grpc_slice_hash_table* grpc_slice_hash_table_create(
size_t num_entries, grpc_slice_hash_table_entry* entries,
void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
int (*value_cmp)(void* a, void* b)) {
- grpc_slice_hash_table* table = gpr_zalloc(sizeof(*table));
+ grpc_slice_hash_table* table =
+ (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table));
gpr_ref_init(&table->refs, 1);
table->destroy_value = destroy_value;
table->value_cmp = value_cmp;
// Keep load factor low to improve performance of lookups.
table->size = num_entries * 2;
const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
- table->entries = gpr_zalloc(entry_size);
+ table->entries = (grpc_slice_hash_table_entry*)gpr_zalloc(entry_size);
for (size_t i = 0; i < num_entries; ++i) {
grpc_slice_hash_table_entry* entry = &entries[i];
grpc_slice_hash_table_add(table, entry->key, entry->value);
diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index 339078fef5..41250df738 100644
--- a/src/core/lib/slice/slice_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -19,6 +19,10 @@
#include "src/core/lib/transport/metadata.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/** Hash table implementation.
*
* This implementation uses open addressing
@@ -67,4 +71,8 @@ void *grpc_slice_hash_table_get(const grpc_slice_hash_table *table,
int grpc_slice_hash_table_cmp(const grpc_slice_hash_table *a,
const grpc_slice_hash_table *b);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H */
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.cc
index a6d22c1e1f..1ea9a2aa67 100644
--- a/src/core/lib/slice/slice_intern.c
+++ b/src/core/lib/slice/slice_intern.cc
@@ -18,6 +18,7 @@
#include "src/core/lib/slice/slice_internal.h"
+#include <inttypes.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -69,7 +70,7 @@ static uint32_t max_static_metadata_hash_probe;
static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];
static void interned_slice_ref(void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
}
@@ -90,7 +91,7 @@ static void interned_slice_destroy(interned_slice_refcount *s) {
}
static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- interned_slice_refcount *s = p;
+ interned_slice_refcount *s = (interned_slice_refcount *)p;
if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
interned_slice_destroy(s);
}
@@ -129,7 +130,8 @@ static void grow_shard(slice_shard *shard) {
GPR_TIMER_BEGIN("grow_strtab", 0);
- strtab = gpr_zalloc(sizeof(interned_slice_refcount *) * capacity);
+ strtab = (interned_slice_refcount **)gpr_zalloc(
+ sizeof(interned_slice_refcount *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (s = shard->strs[i]; s; s = next) {
@@ -242,7 +244,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
/* not found: create a new string */
/* string data goes after the internal_string header */
- s = gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice));
+ s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
+ GRPC_SLICE_LENGTH(slice));
gpr_atm_rel_store(&s->refcnt, 1);
s->length = GRPC_SLICE_LENGTH(slice);
s->hash = hash;
@@ -280,7 +283,8 @@ void grpc_slice_intern_init(void) {
gpr_mu_init(&shard->mu);
shard->count = 0;
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->strs = gpr_zalloc(sizeof(*shard->strs) * shard->capacity);
+ shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) *
+ shard->capacity);
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
static_metadata_hash[i].hash = 0;
diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h
index 6df0b4b50b..fcf70a0e55 100644
--- a/src/core/lib/slice/slice_internal.h
+++ b/src/core/lib/slice/slice_internal.h
@@ -24,6 +24,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_slice grpc_slice_ref_internal(grpc_slice slice);
void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice);
void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx,
@@ -46,4 +50,8 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
uint32_t grpc_static_slice_hash(grpc_slice s);
int grpc_static_slice_eq(grpc_slice a, grpc_slice b);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H */
diff --git a/src/core/lib/slice/slice_string_helpers.c b/src/core/lib/slice/slice_string_helpers.cc
index d461c474d2..d461c474d2 100644
--- a/src/core/lib/slice/slice_string_helpers.c
+++ b/src/core/lib/slice/slice_string_helpers.cc
diff --git a/src/core/lib/slice/slice_traits.h b/src/core/lib/slice/slice_traits.h
index 4b898bdcd4..7fdb6752cb 100644
--- a/src/core/lib/slice/slice_traits.h
+++ b/src/core/lib/slice/slice_traits.h
@@ -22,8 +22,16 @@
#include <grpc/slice.h>
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
bool grpc_slice_is_legal_header(grpc_slice s);
bool grpc_slice_is_legal_nonbin_header(grpc_slice s);
bool grpc_slice_is_bin_suffixed(grpc_slice s);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */
diff --git a/src/core/lib/support/alloc.c b/src/core/lib/support/alloc.cc
index 886d69d64c..886d69d64c 100644
--- a/src/core/lib/support/alloc.c
+++ b/src/core/lib/support/alloc.cc
diff --git a/src/core/lib/support/arena.c b/src/core/lib/support/arena.cc
index 9e0f73ae3d..9e0f73ae3d 100644
--- a/src/core/lib/support/arena.c
+++ b/src/core/lib/support/arena.cc
diff --git a/src/core/lib/support/arena.h b/src/core/lib/support/arena.h
index 47f0e4d16b..8a50786348 100644
--- a/src/core/lib/support/arena.h
+++ b/src/core/lib/support/arena.h
@@ -27,6 +27,10 @@
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct gpr_arena gpr_arena;
// Create an arena, with \a initial_size bytes in the first allocated buffer
@@ -36,4 +40,8 @@ void *gpr_arena_alloc(gpr_arena *arena, size_t size);
// Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena *arena);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_ARENA_H */
diff --git a/src/core/lib/support/atm.c b/src/core/lib/support/atm.cc
index 2f37d62f76..2f37d62f76 100644
--- a/src/core/lib/support/atm.c
+++ b/src/core/lib/support/atm.cc
diff --git a/src/core/lib/support/avl.c b/src/core/lib/support/avl.cc
index 0e28b24c98..0e28b24c98 100644
--- a/src/core/lib/support/avl.c
+++ b/src/core/lib/support/avl.cc
diff --git a/src/core/lib/support/cmdline.c b/src/core/lib/support/cmdline.cc
index 9fb80d4460..9fb80d4460 100644
--- a/src/core/lib/support/cmdline.c
+++ b/src/core/lib/support/cmdline.cc
diff --git a/src/core/lib/support/cpu_iphone.c b/src/core/lib/support/cpu_iphone.cc
index dfd69b9fd8..2847e03ba5 100644
--- a/src/core/lib/support/cpu_iphone.c
+++ b/src/core/lib/support/cpu_iphone.cc
@@ -18,6 +18,8 @@
#include <grpc/support/port_platform.h>
+#include <grpc/support/cpu.h>
+
#ifdef GPR_CPU_IPHONE
/* Probably 2 instead of 1, but see comment on gpr_cpu_current_cpu. */
diff --git a/src/core/lib/support/cpu_linux.c b/src/core/lib/support/cpu_linux.cc
index 2280668442..2280668442 100644
--- a/src/core/lib/support/cpu_linux.c
+++ b/src/core/lib/support/cpu_linux.cc
diff --git a/src/core/lib/support/cpu_posix.c b/src/core/lib/support/cpu_posix.cc
index a1ba8202a8..503a96b4c8 100644
--- a/src/core/lib/support/cpu_posix.c
+++ b/src/core/lib/support/cpu_posix.cc
@@ -24,6 +24,7 @@
#include <string.h>
#include <unistd.h>
+#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
diff --git a/src/core/lib/support/cpu_windows.c b/src/core/lib/support/cpu_windows.cc
index af26ff3679..8d89453403 100644
--- a/src/core/lib/support/cpu_windows.c
+++ b/src/core/lib/support/cpu_windows.cc
@@ -19,6 +19,7 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_WINDOWS
+#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
unsigned gpr_cpu_num_cores(void) {
diff --git a/src/core/lib/support/env_linux.c b/src/core/lib/support/env_linux.cc
index 4c45a977ca..4c45a977ca 100644
--- a/src/core/lib/support/env_linux.c
+++ b/src/core/lib/support/env_linux.cc
diff --git a/src/core/lib/support/env_posix.c b/src/core/lib/support/env_posix.cc
index b88822ca02..b88822ca02 100644
--- a/src/core/lib/support/env_posix.c
+++ b/src/core/lib/support/env_posix.cc
diff --git a/src/core/lib/support/env_windows.c b/src/core/lib/support/env_windows.cc
index 652eeb61c6..c5a25dc201 100644
--- a/src/core/lib/support/env_windows.c
+++ b/src/core/lib/support/env_windows.cc
@@ -43,9 +43,12 @@ char *gpr_getenv(const char *name) {
DWORD ret;
ret = GetEnvironmentVariable(tname, NULL, 0);
- if (ret == 0) return NULL;
+ if (ret == 0) {
+ gpr_free(tname);
+ return NULL;
+ }
size = ret * (DWORD)sizeof(TCHAR);
- tresult = gpr_malloc(size);
+ tresult = (LPTSTR)gpr_malloc(size);
ret = GetEnvironmentVariable(tname, tresult, size);
gpr_free(tname);
if (ret == 0) {
diff --git a/src/core/lib/support/histogram.c b/src/core/lib/support/histogram.cc
index 6d5ead9aa6..6d5ead9aa6 100644
--- a/src/core/lib/support/histogram.c
+++ b/src/core/lib/support/histogram.cc
diff --git a/src/core/lib/support/host_port.c b/src/core/lib/support/host_port.cc
index 3302e574ab..3302e574ab 100644
--- a/src/core/lib/support/host_port.c
+++ b/src/core/lib/support/host_port.cc
diff --git a/src/core/lib/support/log.c b/src/core/lib/support/log.cc
index fadb4d9a2c..69f92e001c 100644
--- a/src/core/lib/support/log.c
+++ b/src/core/lib/support/log.cc
@@ -27,7 +27,7 @@
#include <stdio.h>
#include <string.h>
-extern void gpr_default_log(gpr_log_func_args *args);
+extern "C" void gpr_default_log(gpr_log_func_args *args);
static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
static gpr_atm g_min_severity_to_print = GPR_LOG_VERBOSITY_UNSET;
diff --git a/src/core/lib/support/log_android.c b/src/core/lib/support/log_android.cc
index 6f1cec51f1..9e8529cbac 100644
--- a/src/core/lib/support/log_android.c
+++ b/src/core/lib/support/log_android.cc
@@ -39,8 +39,8 @@ static android_LogPriority severity_to_log_priority(gpr_log_severity severity) {
return ANDROID_LOG_DEFAULT;
}
-void gpr_log(const char *file, int line, gpr_log_severity severity,
- const char *format, ...) {
+extern "C" void gpr_log(const char *file, int line, gpr_log_severity severity,
+ const char *format, ...) {
char *message = NULL;
va_list args;
va_start(args, format);
@@ -50,8 +50,8 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
free(message);
}
-void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+extern "C" void gpr_default_log(gpr_log_func_args *args) {
+ const char *final_slash;
const char *display_file;
char *output = NULL;
diff --git a/src/core/lib/support/log_linux.c b/src/core/lib/support/log_linux.cc
index 61d2346427..0914acedf4 100644
--- a/src/core/lib/support/log_linux.c
+++ b/src/core/lib/support/log_linux.cc
@@ -56,8 +56,8 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
free(message);
}
-void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+extern "C" void gpr_default_log(gpr_log_func_args *args) {
+ const char *final_slash;
char *prefix;
const char *display_file;
char time_buffer[64];
diff --git a/src/core/lib/support/log_posix.c b/src/core/lib/support/log_posix.cc
index 8b376fce41..29530c858f 100644
--- a/src/core/lib/support/log_posix.c
+++ b/src/core/lib/support/log_posix.cc
@@ -48,7 +48,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
- message = allocated = gpr_malloc((size_t)ret + 1);
+ message = allocated = (char *)gpr_malloc((size_t)ret + 1);
va_start(args, format);
vsnprintf(message, (size_t)(ret + 1), format, args);
va_end(args);
@@ -57,8 +57,8 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
gpr_free(allocated);
}
-void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+extern "C" void gpr_default_log(gpr_log_func_args *args) {
+ const char *final_slash;
const char *display_file;
char time_buffer[64];
time_t timer;
diff --git a/src/core/lib/support/log_windows.c b/src/core/lib/support/log_windows.cc
index 0fdab79ae6..ee52abea73 100644
--- a/src/core/lib/support/log_windows.c
+++ b/src/core/lib/support/log_windows.cc
@@ -47,7 +47,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
} else {
/* Allocate a new buffer, with space for the NUL terminator. */
size_t strp_buflen = (size_t)ret + 1;
- message = gpr_malloc(strp_buflen);
+ message = (char *)gpr_malloc(strp_buflen);
/* Print to the buffer. */
va_start(args, format);
@@ -65,8 +65,8 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
}
/* Simple starter implementation */
-void gpr_default_log(gpr_log_func_args *args) {
- char *final_slash;
+extern "C" void gpr_default_log(gpr_log_func_args *args) {
+ const char *final_slash;
const char *display_file;
char time_buffer[64];
time_t timer;
diff --git a/src/core/lib/support/manual_constructor.h b/src/core/lib/support/manual_constructor.h
new file mode 100644
index 0000000000..d753cf98a0
--- /dev/null
+++ b/src/core/lib/support/manual_constructor.h
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SUPPORT_MANUAL_CONSTRUCTOR_H
+#define GRPC_CORE_LIB_SUPPORT_MANUAL_CONSTRUCTOR_H
+
+// manually construct a region of memory with some type
+
+#include <stddef.h>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace grpc_core {
+
+template <typename Type>
+class ManualConstructor {
+ public:
+ // No constructor or destructor because one of the most useful uses of
+ // this class is as part of a union, and members of a union could not have
+ // constructors or destructors till C++11. And, anyway, the whole point of
+ // this class is to bypass constructor and destructor.
+
+ Type* get() { return reinterpret_cast<Type*>(&space_); }
+ const Type* get() const { return reinterpret_cast<const Type*>(&space_); }
+
+ Type* operator->() { return get(); }
+ const Type* operator->() const { return get(); }
+
+ Type& operator*() { return *get(); }
+ const Type& operator*() const { return *get(); }
+
+ void Init() { new (&space_) Type; }
+
+ // Init() constructs the Type instance using the given arguments
+ // (which are forwarded to Type's constructor).
+ //
+ // Note that Init() with no arguments performs default-initialization,
+ // not zero-initialization (i.e it behaves the same as "new Type;", not
+ // "new Type();"), so it will leave non-class types uninitialized.
+ template <typename... Ts>
+ void Init(Ts&&... args) {
+ new (&space_) Type(std::forward<Ts>(args)...);
+ }
+
+ // Init() that is equivalent to copy and move construction.
+ // Enables usage like this:
+ // ManualConstructor<std::vector<int>> v;
+ // v.Init({1, 2, 3});
+ void Init(const Type& x) { new (&space_) Type(x); }
+ void Init(Type&& x) { new (&space_) Type(std::move(x)); }
+
+ void Destroy() { get()->~Type(); }
+
+ private:
+ typename std::aligned_storage<sizeof(Type), alignof(Type)>::type space_;
+};
+
+} // namespace grpc_core
+
+#endif
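A minimal usage sketch for the new ManualConstructor (all names other than the class itself are illustrative): the wrapper is meant to sit inside a union or reused buffer, with the caller deciding exactly when the contained object is constructed and destroyed.

#include <string>
#include "src/core/lib/support/manual_constructor.h"

struct Slot {
  /* No std::string constructor or destructor runs when a Slot is created
     or destroyed; lifetime is driven entirely by Init()/Destroy(). */
  grpc_core::ManualConstructor<std::string> name;
};

void slot_example() {
  Slot s;
  s.name.Init("hello");      /* placement-new construction */
  s.name->append(" world");  /* operator-> forwards to the wrapped object */
  s.name.Destroy();          /* explicit destructor call, no deallocation */
}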
diff --git a/src/core/lib/support/memory.h b/src/core/lib/support/memory.h
index dc3d32e1c2..6b336681db 100644
--- a/src/core/lib/support/memory.h
+++ b/src/core/lib/support/memory.h
@@ -21,6 +21,7 @@
#include <grpc/support/alloc.h>
+#include <limits>
#include <memory>
#include <utility>
@@ -54,6 +55,46 @@ inline UniquePtr<T> MakeUnique(Args&&... args) {
return UniquePtr<T>(New<T>(std::forward<Args>(args)...));
}
+// an allocator that uses gpr_malloc/gpr_free
+template <class T>
+class Allocator {
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::false_type propagate_on_container_move_assignment;
+ template <class U>
+ struct rebind {
+ typedef Allocator<U> other;
+ };
+ typedef std::true_type is_always_equal;
+
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+ pointer allocate(std::size_t n,
+ std::allocator<void>::const_pointer hint = 0) {
+ return static_cast<pointer>(gpr_malloc(n * sizeof(T)));
+ }
+ void deallocate(T* p, std::size_t n) { gpr_free(p); }
+ size_t max_size() const {
+ return std::numeric_limits<size_type>::max() / sizeof(value_type);
+ }
+ void construct(pointer p, const_reference val) { new ((void*)p) T(val); }
+ template <class U, class... Args>
+ void construct(U* p, Args&&... args) {
+ ::new ((void*)p) U(std::forward<Args>(args)...);
+ }
+ void destroy(pointer p) { p->~T(); }
+ template <class U>
+ void destroy(U* p) {
+ p->~U();
+ }
+};
+
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_SUPPORT_MEMORY_H */
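A short sketch of how the new Allocator can back a standard container so that element storage flows through gpr_malloc/gpr_free rather than global new/delete; the container and values are illustrative only.

#include <vector>
#include "src/core/lib/support/memory.h"

void allocator_example() {
  std::vector<int, grpc_core::Allocator<int>> v;  /* storage via gpr_malloc */
  v.push_back(1);
  v.push_back(2);
}  /* vector destructor returns the buffer through gpr_free */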
diff --git a/src/core/lib/support/mpscq.c b/src/core/lib/support/mpscq.cc
index e9f893988d..e9f893988d 100644
--- a/src/core/lib/support/mpscq.c
+++ b/src/core/lib/support/mpscq.cc
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index daa51768f7..ca63a044bb 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -23,6 +23,10 @@
#include <stdbool.h>
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Multiple-producer single-consumer lock free queue, based upon the
// implementation from Dmitry Vyukov here:
// http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
@@ -50,4 +54,8 @@ gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q);
// Pop a node; sets *empty to true if the queue is empty, or false if it is not
gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_MPSCQ_H */
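With the guards above the queue keeps C linkage when included from .cc files. A rough consumer-side sketch, assuming items are intrusively linked and were allocated with gpr_malloc (the work_item type is made up):

#include <grpc/support/alloc.h>
#include "src/core/lib/support/mpscq.h"

typedef struct {
  gpr_mpscq_node node; /* intrusive link; placed first so the cast below works */
  int payload;
} work_item;

static void drain_one(gpr_mpscq *q) {
  /* Single-consumer side only. gpr_mpscq_pop may return NULL transiently
     while a producer is mid-push, which is why pop_and_check_end exists. */
  gpr_mpscq_node *n = gpr_mpscq_pop(q);
  if (n != NULL) {
    work_item *item = (work_item *)n;
    gpr_free(item);
  }
}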
diff --git a/src/core/lib/support/murmur_hash.c b/src/core/lib/support/murmur_hash.cc
index f06b970de7..f06b970de7 100644
--- a/src/core/lib/support/murmur_hash.c
+++ b/src/core/lib/support/murmur_hash.cc
diff --git a/src/core/lib/support/murmur_hash.h b/src/core/lib/support/murmur_hash.h
index 7510b4d09c..a4c642e49f 100644
--- a/src/core/lib/support/murmur_hash.h
+++ b/src/core/lib/support/murmur_hash.h
@@ -23,7 +23,15 @@
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* compute the hash of key (length len) */
uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_MURMUR_HASH_H */
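A trivial usage sketch; the seed is arbitrary but must be kept consistent between inserts and lookups of the same table.

#include <stdint.h>
#include <string.h>
#include "src/core/lib/support/murmur_hash.h"

static uint32_t hash_name(const char *name) {
  return gpr_murmur_hash3(name, strlen(name), 0);
}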
diff --git a/src/core/lib/support/spinlock.h b/src/core/lib/support/spinlock.h
index 37adda11b0..47584f6279 100644
--- a/src/core/lib/support/spinlock.h
+++ b/src/core/lib/support/spinlock.h
@@ -25,9 +25,14 @@
a concurrency code smell. */
typedef struct { gpr_atm atm; } gpr_spinlock;
+#ifdef __cplusplus
+#define GPR_SPINLOCK_INITIALIZER (gpr_spinlock{0})
+#else
#define GPR_SPINLOCK_INITIALIZER ((gpr_spinlock){0})
+#endif
#define GPR_SPINLOCK_STATIC_INITIALIZER \
{ 0 }
+
#define gpr_spinlock_trylock(lock) (gpr_atm_acq_cas(&(lock)->atm, 0, 1))
#define gpr_spinlock_unlock(lock) (gpr_atm_rel_store(&(lock)->atm, 0))
#define gpr_spinlock_lock(lock) \
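The split above exists because the compound literal ((gpr_spinlock){0}) is a C-only construct; under C++ the macro now expands to an aggregate-initialized temporary instead. Usage is unchanged in either language, roughly:

#include "src/core/lib/support/spinlock.h"

static gpr_spinlock g_lock = GPR_SPINLOCK_STATIC_INITIALIZER;

static void guarded_work(void) {
  gpr_spinlock_lock(&g_lock);   /* spins on a CAS from 0 to 1 */
  /* short, non-blocking critical section only */
  gpr_spinlock_unlock(&g_lock); /* releases by storing 0 */
}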
diff --git a/src/core/lib/support/stack_lockfree.c b/src/core/lib/support/stack_lockfree.cc
index 0fb64ed001..0fb64ed001 100644
--- a/src/core/lib/support/stack_lockfree.c
+++ b/src/core/lib/support/stack_lockfree.cc
diff --git a/src/core/lib/support/stack_lockfree.h b/src/core/lib/support/stack_lockfree.h
index 6324211b72..706f63fbf6 100644
--- a/src/core/lib/support/stack_lockfree.h
+++ b/src/core/lib/support/stack_lockfree.h
@@ -21,6 +21,10 @@
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct gpr_stack_lockfree gpr_stack_lockfree;
/* This stack must specify the maximum number of entries to track.
@@ -35,4 +39,8 @@ int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
/* Returns -1 on empty or the actual entry number */
int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H */
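A small sketch using only the operations declared in this header; entries are small non-negative integers (typically indices into caller-owned storage), and the value pushed must stay below the capacity the stack was created with.

#include "src/core/lib/support/stack_lockfree.h"

static void stack_example(gpr_stack_lockfree *stack) {
  gpr_stack_lockfree_push(stack, 42);         /* 42 is illustrative */
  int entry = gpr_stack_lockfree_pop(stack);  /* -1 when the stack is empty */
  (void)entry;
}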
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.cc
index b65009754a..d55863892f 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.cc
@@ -27,6 +27,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
+#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
char *gpr_strdup(const char *src) {
@@ -276,7 +277,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs,
void gpr_string_split(const char *input, const char *sep, char ***strs,
size_t *nstrs) {
- char *next;
+ const char *next;
*strs = NULL;
*nstrs = 0;
size_t capstrs = 0;
@@ -298,3 +299,17 @@ void *gpr_memrchr(const void *s, int c, size_t n) {
}
return NULL;
}
+
+bool gpr_is_true(const char *s) {
+ size_t i;
+ if (s == NULL) {
+ return false;
+ }
+ static const char *truthy[] = {"yes", "true", "1"};
+ for (i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
+ if (0 == gpr_stricmp(s, truthy[i])) {
+ return true;
+ }
+ }
+ return false;
+}
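A sketch of the intended use of the new gpr_is_true helper; the environment variable name is hypothetical, and gpr_getenv comes from src/core/lib/support/env.h.

#include <stdbool.h>
#include <grpc/support/alloc.h>
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"

static bool example_switch_enabled(void) {
  /* NULL (unset) and anything other than "yes"/"true"/"1" (case-insensitive)
     read as false. */
  char *value = gpr_getenv("GRPC_EXAMPLE_SWITCH"); /* hypothetical name */
  bool enabled = gpr_is_true(value);
  gpr_free(value);
  return enabled;
}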
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index e11df8439d..5a56fa3a0a 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -19,6 +19,7 @@
#ifndef GRPC_CORE_LIB_SUPPORT_STRING_H
#define GRPC_CORE_LIB_SUPPORT_STRING_H
+#include <stdbool.h>
#include <stddef.h>
#include <grpc/support/port_platform.h>
@@ -106,6 +107,8 @@ int gpr_stricmp(const char *a, const char *b);
void *gpr_memrchr(const void *s, int c, size_t n);
+/** Return true if lower(s) equals "true", "yes" or "1", otherwise false. */
+bool gpr_is_true(const char *s);
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/support/string_posix.c b/src/core/lib/support/string_posix.cc
index e768faf739..92de21a6e1 100644
--- a/src/core/lib/support/string_posix.c
+++ b/src/core/lib/support/string_posix.cc
@@ -25,6 +25,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
int gpr_asprintf(char **strp, const char *format, ...) {
va_list args;
diff --git a/src/core/lib/support/string_util_windows.c b/src/core/lib/support/string_util_windows.cc
index 2a03404448..b365512ee3 100644
--- a/src/core/lib/support/string_util_windows.c
+++ b/src/core/lib/support/string_util_windows.cc
@@ -26,15 +26,18 @@
anything else, especially strsafe.h. */
#include <wchar.h>
+#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <strsafe.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/support/string.h"
+#include "src/core/lib/support/string_windows.h"
#if defined UNICODE || defined _UNICODE
LPTSTR
@@ -42,7 +45,7 @@ gpr_char_to_tchar(LPCSTR input) {
LPTSTR ret;
int needed = MultiByteToWideChar(CP_UTF8, 0, input, -1, NULL, 0);
if (needed <= 0) return NULL;
- ret = gpr_malloc((unsigned)needed * sizeof(TCHAR));
+ ret = (LPTSTR)gpr_malloc((unsigned)needed * sizeof(TCHAR));
MultiByteToWideChar(CP_UTF8, 0, input, -1, ret, needed);
return ret;
}
@@ -52,14 +55,14 @@ gpr_tchar_to_char(LPCTSTR input) {
LPSTR ret;
int needed = WideCharToMultiByte(CP_UTF8, 0, input, -1, NULL, 0, NULL, NULL);
if (needed <= 0) return NULL;
- ret = gpr_malloc((unsigned)needed);
+ ret = (LPSTR)gpr_malloc((unsigned)needed);
WideCharToMultiByte(CP_UTF8, 0, input, -1, ret, needed, NULL, NULL);
return ret;
}
#else
-char *gpr_tchar_to_char(LPTSTR input) { return gpr_strdup(input); }
+LPSTR gpr_tchar_to_char(LPCTSTR input) { return (LPSTR)gpr_strdup(input); }
-char *gpr_char_to_tchar(LPTSTR input) { return gpr_strdup(input); }
+LPTSTR gpr_char_to_tchar(LPCTSTR input) { return (LPTSTR)gpr_strdup(input); }
#endif
char *gpr_format_message(int messageid) {
diff --git a/src/core/lib/support/string_windows.c b/src/core/lib/support/string_windows.cc
index 50278d9559..d37863c066 100644
--- a/src/core/lib/support/string_windows.c
+++ b/src/core/lib/support/string_windows.cc
@@ -27,6 +27,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
#include "src/core/lib/support/string.h"
@@ -46,7 +47,7 @@ int gpr_asprintf(char **strp, const char *format, ...) {
/* Allocate a new buffer, with space for the NUL terminator. */
strp_buflen = (size_t)ret + 1;
- if ((*strp = gpr_malloc(strp_buflen)) == NULL) {
+ if ((*strp = (char *)gpr_malloc(strp_buflen)) == NULL) {
/* This shouldn't happen, because gpr_malloc() calls abort(). */
return -1;
}
diff --git a/src/core/lib/support/string_windows.h b/src/core/lib/support/string_windows.h
index 7c7f31e7aa..6771647581 100644
--- a/src/core/lib/support/string_windows.h
+++ b/src/core/lib/support/string_windows.h
@@ -21,6 +21,10 @@
#include <grpc/support/port_platform.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifdef GPR_WINDOWS
/* These allocate new strings using gpr_malloc to convert from and to utf-8. */
@@ -29,4 +33,8 @@ LPSTR gpr_tchar_to_char(LPCTSTR input);
#endif /* GPR_WINDOWS */
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H */
diff --git a/src/core/lib/support/subprocess_posix.c b/src/core/lib/support/subprocess_posix.cc
index af75162ee9..af75162ee9 100644
--- a/src/core/lib/support/subprocess_posix.c
+++ b/src/core/lib/support/subprocess_posix.cc
diff --git a/src/core/lib/support/subprocess_windows.c b/src/core/lib/support/subprocess_windows.cc
index 7412f8d344..6769f1d3a4 100644
--- a/src/core/lib/support/subprocess_windows.c
+++ b/src/core/lib/support/subprocess_windows.cc
@@ -61,7 +61,7 @@ gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
}
gpr_free(args_tchar);
- r = gpr_malloc(sizeof(gpr_subprocess));
+ r = (gpr_subprocess *)gpr_malloc(sizeof(gpr_subprocess));
memset(r, 0, sizeof(*r));
r->pi = pi;
return r;
diff --git a/src/core/lib/support/sync.c b/src/core/lib/support/sync.cc
index 994dcb0e14..994dcb0e14 100644
--- a/src/core/lib/support/sync.c
+++ b/src/core/lib/support/sync.cc
diff --git a/src/core/lib/support/sync_posix.c b/src/core/lib/support/sync_posix.cc
index 62d800b18c..62d800b18c 100644
--- a/src/core/lib/support/sync_posix.c
+++ b/src/core/lib/support/sync_posix.cc
diff --git a/src/core/lib/support/sync_windows.c b/src/core/lib/support/sync_windows.cc
index 008c5aecba..62fdd40af7 100644
--- a/src/core/lib/support/sync_windows.c
+++ b/src/core/lib/support/sync_windows.cc
@@ -104,7 +104,7 @@ struct run_once_func_arg {
void (*init_function)(void);
};
static BOOL CALLBACK run_once_func(gpr_once *once, void *v, void **pv) {
- struct run_once_func_arg *arg = v;
+ struct run_once_func_arg *arg = (struct run_once_func_arg *)v;
(*arg->init_function)();
return 1;
}
diff --git a/src/core/lib/support/thd.c b/src/core/lib/support/thd.cc
index ca62615d65..ca62615d65 100644
--- a/src/core/lib/support/thd.c
+++ b/src/core/lib/support/thd.cc
diff --git a/src/core/lib/support/thd_posix.c b/src/core/lib/support/thd_posix.cc
index 98afd10df7..98afd10df7 100644
--- a/src/core/lib/support/thd_posix.c
+++ b/src/core/lib/support/thd_posix.cc
diff --git a/src/core/lib/support/thd_windows.c b/src/core/lib/support/thd_windows.cc
index 54533e9412..1a82805dd9 100644
--- a/src/core/lib/support/thd_windows.c
+++ b/src/core/lib/support/thd_windows.cc
@@ -66,7 +66,7 @@ static DWORD WINAPI thread_body(void *v) {
int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
const gpr_thd_options *options) {
HANDLE handle;
- struct thd_info *info = gpr_malloc(sizeof(*info));
+ struct thd_info *info = (struct thd_info *)gpr_malloc(sizeof(*info));
info->body = thd_body;
info->arg = arg;
*t = 0;
diff --git a/src/core/lib/support/time.c b/src/core/lib/support/time.cc
index 6903674d75..6903674d75 100644
--- a/src/core/lib/support/time.c
+++ b/src/core/lib/support/time.cc
diff --git a/src/core/lib/support/time_posix.c b/src/core/lib/support/time_posix.cc
index 3ead40d807..3f8a9094fd 100644
--- a/src/core/lib/support/time_posix.c
+++ b/src/core/lib/support/time_posix.cc
@@ -30,7 +30,6 @@
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
-#include "src/core/lib/support/block_annotate.h"
static struct timespec timespec_from_gpr(gpr_timespec gts) {
struct timespec rv;
@@ -43,7 +42,7 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
return rv;
}
-#if _POSIX_TIMERS > 0
+#if _POSIX_TIMERS > 0 || defined(__OpenBSD__)
static gpr_timespec gpr_from_timespec(struct timespec ts,
gpr_clock_type clock_type) {
/*
@@ -128,7 +127,9 @@ static gpr_timespec now_impl(gpr_clock_type clock) {
}
#endif
+extern "C" {
gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type) = now_impl;
+}
#ifdef GPR_LOW_LEVEL_COUNTERS
gpr_atm gpr_now_call_count;
@@ -157,9 +158,7 @@ void gpr_sleep_until(gpr_timespec until) {
delta = gpr_time_sub(until, now);
delta_ts = timespec_from_gpr(delta);
- GRPC_SCHEDULING_START_BLOCKING_REGION;
ns_result = nanosleep(&delta_ts, NULL);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
if (ns_result == 0) {
break;
}
diff --git a/src/core/lib/support/time_precise.c b/src/core/lib/support/time_precise.cc
index 6ce19e53cc..05ef7c59bc 100644
--- a/src/core/lib/support/time_precise.c
+++ b/src/core/lib/support/time_precise.cc
@@ -20,6 +20,8 @@
#include <grpc/support/time.h>
#include <stdio.h>
+#include "src/core/lib/support/time_precise.h"
+
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
static void gpr_get_cycle_counter(int64_t int *clk) {
diff --git a/src/core/lib/support/time_precise.h b/src/core/lib/support/time_precise.h
index aa28d6d7c4..cb15cdf919 100644
--- a/src/core/lib/support/time_precise.h
+++ b/src/core/lib/support/time_precise.h
@@ -21,7 +21,15 @@
#include <grpc/support/time.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void gpr_precise_clock_init(void);
void gpr_precise_clock_now(gpr_timespec *clk);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SUPPORT_TIME_PRECISE_H */
diff --git a/src/core/lib/support/time_windows.c b/src/core/lib/support/time_windows.cc
index 40df3761c0..08c1b22964 100644
--- a/src/core/lib/support/time_windows.c
+++ b/src/core/lib/support/time_windows.cc
@@ -28,7 +28,6 @@
#include <process.h>
#include <sys/timeb.h>
-#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/time_precise.h"
static LARGE_INTEGER g_start_time;
@@ -69,7 +68,9 @@ static gpr_timespec now_impl(gpr_clock_type clock) {
return now_tv;
}
+extern "C" {
gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type) = now_impl;
+}
gpr_timespec gpr_now(gpr_clock_type clock_type) {
return gpr_now_impl(clock_type);
@@ -92,9 +93,7 @@ void gpr_sleep_until(gpr_timespec until) {
sleep_millis =
delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
GPR_ASSERT((sleep_millis >= 0) && (sleep_millis <= INT_MAX));
- GRPC_SCHEDULING_START_BLOCKING_REGION;
Sleep((DWORD)sleep_millis);
- GRPC_SCHEDULING_END_BLOCKING_REGION;
}
}
diff --git a/src/core/lib/support/tls_pthread.c b/src/core/lib/support/tls_pthread.cc
index 9ebee577fe..9ebee577fe 100644
--- a/src/core/lib/support/tls_pthread.c
+++ b/src/core/lib/support/tls_pthread.cc
diff --git a/src/core/lib/support/tmpfile_msys.c b/src/core/lib/support/tmpfile_msys.cc
index 614c0a4a18..614c0a4a18 100644
--- a/src/core/lib/support/tmpfile_msys.c
+++ b/src/core/lib/support/tmpfile_msys.cc
diff --git a/src/core/lib/support/tmpfile_posix.c b/src/core/lib/support/tmpfile_posix.cc
index 7ad3af0a57..7ad3af0a57 100644
--- a/src/core/lib/support/tmpfile_posix.c
+++ b/src/core/lib/support/tmpfile_posix.cc
diff --git a/src/core/lib/support/tmpfile_windows.c b/src/core/lib/support/tmpfile_windows.cc
index 47b4510a72..47b4510a72 100644
--- a/src/core/lib/support/tmpfile_windows.c
+++ b/src/core/lib/support/tmpfile_windows.cc
diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/support/vector.h
index 2203358a7e..4a7db80676 100644
--- a/src/core/lib/debug/stats_data.c
+++ b/src/core/lib/support/vector.h
@@ -1,4 +1,5 @@
/*
+ *
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,14 +13,20 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
+ *
*/
-/*
- * Automatically generated by tools/codegen/core/gen_stats_data.py
- */
+#ifndef GRPC_CORE_LIB_SUPPORT_VECTOR_H
+#define GRPC_CORE_LIB_SUPPORT_VECTOR_H
+
+#include "absl/container/inlined_vector.h"
+#include "src/core/lib/support/memory.h"
+
+namespace grpc_core {
+
+template <typename T, size_t N>
+using InlinedVector = absl::InlinedVector<T, N, Allocator<T>>;
+
+} // namespace grpc_core
-#include "src/core/lib/debug/stats_data.h"
-const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
- "client_calls_created", "server_calls_created", "syscall_write",
- "syscall_read", "syscall_poll", "syscall_wait",
-};
+#endif
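A sketch of the new alias in use, assuming Abseil's inlined_vector.h is on the include path as the header above requires.

#include "src/core/lib/support/vector.h"

void inlined_vector_example() {
  /* Up to 4 elements live inline in the object; growing past that allocates
     through grpc_core::Allocator, i.e. gpr_malloc/gpr_free. */
  grpc_core::InlinedVector<int, 4> values;
  values.push_back(10);
  values.push_back(20);
}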
diff --git a/src/core/lib/support/wrap_memcpy.c b/src/core/lib/support/wrap_memcpy.cc
index cff056dc3b..c2362bf5b8 100644
--- a/src/core/lib/support/wrap_memcpy.c
+++ b/src/core/lib/support/wrap_memcpy.cc
@@ -26,6 +26,7 @@
* Enable by setting LDFLAGS=-Wl,-wrap,memcpy when linking.
*/
+extern "C" {
#ifdef __linux__
#if defined(__x86_64__) && !defined(GPR_MUSL_LIBC_COMPAT)
__asm__(".symver memcpy,memcpy@GLIBC_2.2.5");
@@ -38,3 +39,4 @@ void *__wrap_memcpy(void *destination, const void *source, size_t num) {
}
#endif
#endif
+}
diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.cc
index 7d60b1de17..16a16bfd93 100644
--- a/src/core/lib/surface/alarm.c
+++ b/src/core/lib/surface/alarm.cc
@@ -15,6 +15,10 @@
* limitations under the License.
*
*/
+#include <grpc/support/port_platform.h>
+
+#include <inttypes.h>
+
#include "src/core/lib/surface/alarm_internal.h"
#include <grpc/grpc.h>
@@ -44,7 +48,9 @@ static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); }
static void alarm_unref(grpc_alarm *alarm) {
if (gpr_unref(&alarm->refs)) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm");
+ if (alarm->cq != NULL) {
+ GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm");
+ }
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(alarm);
}
@@ -78,12 +84,12 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
grpc_cq_completion *c) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
}
static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_alarm *alarm = arg;
+ grpc_alarm *alarm = (grpc_alarm *)arg;
/* We are queuing an op on completion queue. This means, the alarm's structure
cannot be destroyed until the op is dequeued. Adding an extra ref
@@ -93,12 +99,8 @@ static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
(void *)alarm, &alarm->completion);
}
-grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
- void *tag) {
- grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
- gpr_ref_init(&alarm->refs, 1);
+grpc_alarm *grpc_alarm_create(void *reserved) {
+ grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
@@ -106,27 +108,35 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
}
#endif
+ gpr_ref_init(&alarm->refs, 1);
+ grpc_timer_init_unset(&alarm->alarm);
+ alarm->cq = NULL;
+ GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
+ grpc_schedule_on_exec_ctx);
+ return alarm;
+}
+
+void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
+ gpr_timespec deadline, void *tag, void *reserved) {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
GRPC_CQ_INTERNAL_REF(cq, "alarm");
alarm->cq = cq;
alarm->tag = tag;
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
- GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
- grpc_schedule_on_exec_ctx);
grpc_timer_init(&exec_ctx, &alarm->alarm,
- gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
- &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
+ grpc_timespec_to_millis_round_up(deadline), &alarm->on_alarm);
grpc_exec_ctx_finish(&exec_ctx);
- return alarm;
}
-void grpc_alarm_cancel(grpc_alarm *alarm) {
+void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_timer_cancel(&exec_ctx, &alarm->alarm);
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_alarm_destroy(grpc_alarm *alarm) {
- grpc_alarm_cancel(alarm);
+void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) {
+ grpc_alarm_cancel(alarm, reserved);
GRPC_ALARM_UNREF(alarm, "alarm_destroy");
}
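The hunk above splits alarm creation from arming. A rough sketch of the resulting call sequence, assuming a completion queue the caller already owns and polls; error handling is omitted.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void alarm_example(grpc_completion_queue *cq, void *tag) {
  grpc_alarm *alarm = grpc_alarm_create(NULL); /* no cq or deadline yet */
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(5, GPR_TIMESPAN));
  grpc_alarm_set(alarm, cq, deadline, tag, NULL); /* arms the timer */
  /* Either the deadline expires and `tag` is delivered on cq as successful,
     or the alarm is cancelled first and the tag is delivered as failed. */
  grpc_alarm_cancel(alarm, NULL);
  grpc_alarm_destroy(alarm, NULL);
}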
diff --git a/src/core/lib/surface/alarm_internal.h b/src/core/lib/surface/alarm_internal.h
index 7f2126c5c9..136b60547f 100644
--- a/src/core/lib/surface/alarm_internal.h
+++ b/src/core/lib/surface/alarm_internal.h
@@ -22,6 +22,10 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_alarm_refcount;
@@ -37,4 +41,8 @@ extern grpc_tracer_flag grpc_trace_alarm_refcount;
#endif /* defined(NDEBUG) */
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_ALARM_INTERNAL_H */
diff --git a/src/core/lib/surface/api_trace.c b/src/core/lib/surface/api_trace.cc
index 56973303da..56973303da 100644
--- a/src/core/lib/surface/api_trace.c
+++ b/src/core/lib/surface/api_trace.cc
diff --git a/src/core/lib/surface/api_trace.h b/src/core/lib/surface/api_trace.h
index 849cbaaef6..105abdf629 100644
--- a/src/core/lib/surface/api_trace.h
+++ b/src/core/lib/surface/api_trace.h
@@ -22,6 +22,10 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern grpc_tracer_flag grpc_api_trace;
/* Provide unwrapping macros because we're in C89 and variadic macros weren't
@@ -47,4 +51,8 @@ extern grpc_tracer_flag grpc_api_trace;
gpr_log(GPR_INFO, fmt GRPC_API_TRACE_UNWRAP##nargs args); \
}
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_API_TRACE_H */
diff --git a/src/core/lib/surface/byte_buffer.c b/src/core/lib/surface/byte_buffer.cc
index 0bc990d487..7ed550ef87 100644
--- a/src/core/lib/surface/byte_buffer.c
+++ b/src/core/lib/surface/byte_buffer.cc
@@ -32,7 +32,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_slice *slices, size_t nslices,
grpc_compression_algorithm compression) {
size_t i;
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
@@ -45,7 +46,8 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
grpc_byte_buffer_reader *reader) {
- grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer *bb =
+ (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
grpc_slice slice;
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
diff --git a/src/core/lib/surface/byte_buffer_reader.c b/src/core/lib/surface/byte_buffer_reader.cc
index 87bd3239c0..87bd3239c0 100644
--- a/src/core/lib/surface/byte_buffer_reader.c
+++ b/src/core/lib/surface/byte_buffer_reader.cc
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.cc
index e68c201134..8216aa0ec8 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.cc
@@ -41,6 +41,7 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/call_test_only.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/validate_metadata.h"
@@ -93,10 +94,11 @@ static gpr_atm pack_received_status(received_status r) {
}
static received_status unpack_received_status(gpr_atm atm) {
- return (atm & 1) == 0
- ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE}
- : (received_status){.is_set = true,
- .error = (grpc_error *)(atm & ~(gpr_atm)1)};
+ if ((atm & 1) == 0) {
+ return {false, GRPC_ERROR_NONE};
+ } else {
+ return {true, (grpc_error *)(atm & ~(gpr_atm)1)};
+ }
}
#define MAX_ERRORS_PER_BATCH 4
@@ -122,6 +124,7 @@ typedef struct batch_control {
bool is_closure;
} notify_tag;
} completion_data;
+ grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
@@ -151,12 +154,13 @@ typedef struct {
struct grpc_call {
gpr_refcount ext_ref;
gpr_arena *arena;
+ grpc_call_combiner call_combiner;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
- child_call *child_call;
+ child_call *child;
/* client or server call */
bool is_client;
@@ -184,6 +188,11 @@ struct grpc_call {
Element 0 is initial metadata, element 1 is trailing metadata. */
grpc_metadata_array *buffered_metadata[2];
+ grpc_metadata compression_md;
+
+ // A char* indicating the peer name.
+ gpr_atm peer_string;
+
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
@@ -207,7 +216,7 @@ struct grpc_call {
server, it's trailing metadata */
grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT];
int send_extra_metadata_count;
- gpr_timespec send_deadline;
+ grpc_millis send_deadline;
grpc_slice_buffer_stream sending_stream;
@@ -262,8 +271,9 @@ grpc_tracer_flag grpc_compression_trace =
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op);
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *op,
+ grpc_closure *start_batch_closure);
static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_status_code status,
const char *description);
@@ -273,7 +283,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
grpc_error *error);
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error);
-static void get_final_status(grpc_call *call,
+static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
void (*set_value)(grpc_status_code code,
void *user_data),
void *set_value_user_data, grpc_slice *details);
@@ -285,11 +295,11 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
grpc_error *error, bool has_cancelled);
-static void add_init_error(grpc_error **composite, grpc_error *new) {
- if (new == GRPC_ERROR_NONE) return;
+static void add_init_error(grpc_error **composite, grpc_error *new_err) {
+ if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
- *composite = grpc_error_add_child(*composite, new);
+ *composite = grpc_error_add_child(*composite, new_err);
}
void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
@@ -299,7 +309,7 @@ void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
static parent_call *get_or_create_parent_call(grpc_call *call) {
parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
- p = gpr_arena_alloc(call->arena, sizeof(*p));
+ p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
@@ -322,12 +332,14 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- gpr_arena *arena =
- gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
- call = gpr_arena_alloc(arena,
- sizeof(grpc_call) + channel_stack->call_stack_size);
+ size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
+ GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
+ gpr_arena *arena = gpr_arena_create(initial_size);
+ call = (grpc_call *)gpr_arena_alloc(
+ arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
+ grpc_call_combiner_init(&call->call_combiner);
*out_call = call;
call->channel = args->channel;
call->cq = args->cq;
@@ -360,32 +372,28 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
- call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
}
}
- gpr_timespec send_deadline =
- gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC);
+ grpc_millis send_deadline = args->send_deadline;
bool immediately_cancel = false;
- if (args->parent_call != NULL) {
- child_call *cc = call->child_call =
- gpr_arena_alloc(arena, sizeof(child_call));
- call->child_call->parent = args->parent_call;
+ if (args->parent != NULL) {
+ child_call *cc = call->child =
+ (child_call *)gpr_arena_alloc(arena, sizeof(child_call));
+ call->child->parent = args->parent;
- GRPC_CALL_INTERNAL_REF(args->parent_call, "child");
+ GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
- GPR_ASSERT(!args->parent_call->is_client);
+ GPR_ASSERT(!args->parent->is_client);
- parent_call *pc = get_or_create_parent_call(args->parent_call);
+ parent_call *pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) {
- send_deadline = gpr_time_min(
- gpr_convert_clock_type(send_deadline,
- args->parent_call->send_deadline.clock_type),
- args->parent_call->send_deadline);
+ send_deadline = GPR_MIN(send_deadline, args->parent->send_deadline);
}
/* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
* GRPC_PROPAGATE_STATS_CONTEXT */
@@ -397,9 +405,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
"Census tracing propagation requested "
"without Census context propagation"));
}
- grpc_call_context_set(
- call, GRPC_CONTEXT_TRACING,
- args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
+ grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
+ args->parent->context[GRPC_CONTEXT_TRACING].value,
+ NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Census context propagation requested "
@@ -407,7 +415,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
- if (gpr_atm_acq_load(&args->parent_call->received_final_op_atm)) {
+ if (gpr_atm_acq_load(&args->parent->received_final_op_atm)) {
immediately_cancel = true;
}
}
@@ -417,9 +425,9 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
cc->sibling_next = cc->sibling_prev = call;
} else {
cc->sibling_next = pc->first_child;
- cc->sibling_prev = pc->first_child->child_call->sibling_prev;
- cc->sibling_next->child_call->sibling_prev =
- cc->sibling_prev->child_call->sibling_next = call;
+ cc->sibling_prev = pc->first_child->child->sibling_prev;
+ cc->sibling_next->child->sibling_prev =
+ cc->sibling_prev->child->sibling_next = call;
}
gpr_mu_unlock(&pc->child_list_mu);
@@ -429,14 +437,14 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_unref */
- grpc_call_element_args call_args = {
- .call_stack = CALL_STACK_FROM_CALL(call),
- .server_transport_data = args->server_transport_data,
- .context = call->context,
- .path = path,
- .start_time = call->start_time,
- .deadline = send_deadline,
- .arena = call->arena};
+ grpc_call_element_args call_args = {CALL_STACK_FROM_CALL(call),
+ args->server_transport_data,
+ call->context,
+ path,
+ call->start_time,
+ send_deadline,
+ call->arena,
+ &call->call_combiner};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) {
@@ -501,8 +509,10 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
static void release_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
grpc_channel *channel = c->channel;
+ grpc_call_combiner_destroy(&c->call_combiner);
+ gpr_free((char *)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
}
@@ -512,7 +522,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
size_t i;
int ii;
- grpc_call *c = call;
+ grpc_call *c = (grpc_call *)call;
GPR_TIMER_BEGIN("destroy_call", 0);
for (i = 0; i < 2; i++) {
grpc_metadata_batch_destroy(
@@ -537,8 +547,8 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
}
- get_final_status(call, set_status_value_directly, &c->final_info.final_status,
- NULL);
+ get_final_status(exec_ctx, c, set_status_value_directly,
+ &c->final_info.final_status, NULL);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
@@ -558,7 +568,7 @@ void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
void grpc_call_unref(grpc_call *c) {
if (!gpr_unref(&c->ext_ref)) return;
- child_call *cc = c->child_call;
+ child_call *cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
@@ -573,8 +583,8 @@ void grpc_call_unref(grpc_call *c) {
pc->first_child = NULL;
}
}
- cc->sibling_prev->child_call->sibling_next = cc->sibling_next;
- cc->sibling_next->child_call->sibling_prev = cc->sibling_prev;
+ cc->sibling_prev->child->sibling_next = cc->sibling_next;
+ cc->sibling_next->child->sibling_prev = cc->sibling_prev;
gpr_mu_unlock(&pc->child_list_mu);
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
}
@@ -586,6 +596,12 @@ void grpc_call_unref(grpc_call *c) {
if (cancel) {
cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
+ } else {
+ // Unset the call combiner cancellation closure. This has the
+ // effect of scheduling the previously set cancellation closure, if
+ // any, so that it can release any internal references it may be
+ // holding to the call stack.
+ grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL);
}
GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
@@ -602,30 +618,37 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return GRPC_CALL_OK;
}
-static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op) {
- grpc_call_element *elem;
-
- GPR_TIMER_BEGIN("execute_op", 0);
- elem = CALL_ELEM_FROM_CALL(call, 0);
- elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op);
- GPR_TIMER_END("execute_op", 0);
+// This is called via the call combiner to start sending a batch down
+// the filter stack.
+static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *ignored) {
+ grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
+ grpc_call *call = (grpc_call *)batch->handler_private.extra_arg;
+ GPR_TIMER_BEGIN("execute_batch", 0);
+ grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
+ elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+ GPR_TIMER_END("execute_batch", 0);
+}
+
+// start_batch_closure points to a caller-allocated closure to be used
+// for entering the call combiner.
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_transport_stream_op_batch *batch,
+ grpc_closure *start_batch_closure) {
+ batch->handler_private.extra_arg = call;
+ GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
+ grpc_schedule_on_exec_ctx);
+ GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
+ GRPC_ERROR_NONE, "executing batch");
}
char *grpc_call_get_peer(grpc_call *call) {
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *result;
- GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
- result = elem->filter->get_peer(&exec_ctx, elem);
- if (result == NULL) {
- result = grpc_channel_get_target(call->channel);
- }
- if (result == NULL) {
- result = gpr_strdup("unknown");
- }
- grpc_exec_ctx_finish(&exec_ctx);
- return result;
+ char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string);
+ if (peer_string != NULL) return gpr_strdup(peer_string);
+ peer_string = grpc_channel_get_target(call->channel);
+ if (peer_string != NULL) return peer_string;
+ return gpr_strdup("unknown");
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
@@ -652,20 +675,41 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
return GRPC_CALL_OK;
}
-static void done_termination(grpc_exec_ctx *exec_ctx, void *call,
+typedef struct {
+ grpc_call *call;
+ grpc_closure start_batch;
+ grpc_closure finish_batch;
+} cancel_state;
+
+// The on_complete callback used when sending a cancel_stream batch down
+// the filter stack. Yields the call combiner when the batch is done.
+static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination");
+ cancel_state *state = (cancel_state *)arg;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
+ "on_complete for cancel_stream op");
+ GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
+ gpr_free(state);
}
static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
status_source source, grpc_error *error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
+ // Inform the call combiner of the cancellation, so that it can cancel
+ // any in-flight asynchronous actions that may be holding the call
+ // combiner. This ensures that the cancel_stream batch can be sent
+ // down the filter stack in a timely manner.
+ grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(
- GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx));
+ cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state));
+ state->call = c;
+ GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
+ grpc_schedule_on_exec_ctx);
+ grpc_transport_stream_op_batch *op =
+ grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
- execute_op(exec_ctx, c, op);
+ execute_batch(exec_ctx, c, op, &state->start_batch);
}
static grpc_error *error_from_status(grpc_status_code status,
@@ -690,13 +734,16 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
* FINAL STATUS CODE MANIPULATION
*/
-static bool get_final_status_from(
- grpc_call *call, grpc_error *error, bool allow_ok_status,
- void (*set_value)(grpc_status_code code, void *user_data),
- void *set_value_user_data, grpc_slice *details) {
+static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ grpc_error *error, bool allow_ok_status,
+ void (*set_value)(grpc_status_code code,
+ void *user_data),
+ void *set_value_user_data,
+ grpc_slice *details) {
grpc_status_code code;
grpc_slice slice = grpc_empty_slice();
- grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL);
+ grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice,
+ NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
@@ -708,7 +755,7 @@ static bool get_final_status_from(
return true;
}
-static void get_final_status(grpc_call *call,
+static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
void (*set_value)(grpc_status_code code,
void *user_data),
void *set_value_user_data, grpc_slice *details) {
@@ -733,8 +780,9 @@ static void get_final_status(grpc_call *call,
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set &&
grpc_error_has_clear_grpc_status(status[i].error)) {
- if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
- set_value, set_value_user_data, details)) {
+ if (get_final_status_from(exec_ctx, call, status[i].error,
+ allow_ok_status != 0, set_value,
+ set_value_user_data, details)) {
return;
}
}
@@ -742,8 +790,9 @@ static void get_final_status(grpc_call *call,
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
- if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
- set_value, set_value_user_data, details)) {
+ if (get_final_status_from(exec_ctx, call, status[i].error,
+ allow_ok_status != 0, set_value,
+ set_value_user_data, details)) {
return;
}
}
@@ -760,10 +809,8 @@ static void get_final_status(grpc_call *call,
static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
status_source source, grpc_error *error) {
if (!gpr_atm_rel_cas(&call->status[source],
- pack_received_status((received_status){
- .is_set = false, .error = GRPC_ERROR_NONE}),
- pack_received_status((received_status){
- .is_set = true, .error = error}))) {
+ pack_received_status({false, GRPC_ERROR_NONE}),
+ pack_received_status({true, error}))) {
GRPC_ERROR_UNREF(error);
}
}
@@ -1059,8 +1106,8 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
if (dest->count + b->list.count > dest->capacity) {
dest->capacity =
GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2);
- dest->metadata =
- gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity);
+ dest->metadata = (grpc_metadata *)gpr_realloc(
+ dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) {
mdusr = &dest->metadata[dest->count++];
@@ -1111,7 +1158,7 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
grpc_metadata_batch *b) {
- grpc_call *call = args;
+ grpc_call *call = (grpc_call *)args;
if (b->idx.named.grpc_status != NULL) {
uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
grpc_error *error =
@@ -1195,7 +1242,8 @@ static batch_control *allocate_batch_control(grpc_call *call,
int slot = batch_slot_for_op(ops[0].op);
batch_control **pslot = &call->active_batches[slot];
if (*pslot == NULL) {
- *pslot = gpr_arena_alloc(call->arena, sizeof(batch_control));
+ *pslot =
+ (batch_control *)gpr_arena_alloc(call->arena, sizeof(batch_control));
}
batch_control *bctl = *pslot;
if (bctl->call != NULL) {
@@ -1209,7 +1257,7 @@ static batch_control *allocate_batch_control(grpc_call *call,
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_cq_completion *storage) {
- batch_control *bctl = user_data;
+ batch_control *bctl = (batch_control *)user_data;
grpc_call *call = bctl->call;
bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
@@ -1269,7 +1317,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
child = pc->first_child;
if (child != NULL) {
do {
- next_child_call = child->child_call->sibling_next;
+ next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
@@ -1283,22 +1331,28 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
}
if (call->is_client) {
- get_final_status(call, set_status_value_directly,
+ get_final_status(exec_ctx, call, set_status_value_directly,
call->final_op.client.status,
call->final_op.client.status_details);
} else {
- get_final_status(call, set_cancelled_value,
+ get_final_status(exec_ctx, call, set_cancelled_value,
call->final_op.server.cancelled, NULL);
}
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
+ if (error != GRPC_ERROR_NONE && bctl->op.recv_message &&
+ *call->receiving_buffer != NULL) {
+ grpc_byte_buffer_destroy(*call->receiving_buffer);
+ *call->receiving_buffer = NULL;
+ }
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
bctl->call = NULL;
- GRPC_CLOSURE_RUN(exec_ctx, bctl->completion_data.notify_tag.tag, error);
+ GRPC_CLOSURE_RUN(
+ exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1352,7 +1406,7 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
grpc_byte_stream *bs = call->receiving_stream;
bool release_error = false;
@@ -1411,7 +1465,7 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
if (error != GRPC_ERROR_NONE) {
if (call->receiving_stream != NULL) {
@@ -1427,10 +1481,22 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
- process_data_after_md(exec_ctx, bctlp);
+ process_data_after_md(exec_ctx, bctl);
}
}
+// The recv_message_ready callback used when sending a batch containing
+// a recv_message op down the filter stack. Yields the call combiner
+// before processing the received message.
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
+ void *bctlp,
+ grpc_error *error) {
+ batch_control *bctl = (batch_control *)bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
+ receiving_stream_ready(exec_ctx, bctlp, error);
+}
+
static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
batch_control *bctl) {
grpc_call *call = bctl->call;
@@ -1451,7 +1517,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
algo_name);
@@ -1465,7 +1531,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
call->incoming_stream_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_stream_compression_algorithm_name(
call->incoming_stream_compression_algorithm, &algo_name);
gpr_log(
@@ -1492,7 +1558,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@@ -1508,7 +1574,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->encodings_accepted_by_peer,
call->incoming_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- char *algo_name = NULL;
+ const char *algo_name = NULL;
grpc_compression_algorithm_name(call->incoming_compression_algorithm,
&algo_name);
gpr_log(GPR_ERROR,
@@ -1534,9 +1600,12 @@ static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *bctlp, grpc_error *error) {
- batch_control *bctl = bctlp;
+ batch_control *bctl = (batch_control *)bctlp;
grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
+ "recv_initial_metadata_ready");
+
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch *md =
@@ -1548,11 +1617,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
validate_filtered_metadata(exec_ctx, bctl);
GPR_TIMER_END("validate_filtered_metadata", 0);
- if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
- 0 &&
- !call->is_client) {
- call->send_deadline =
- gpr_convert_clock_type(md->deadline, GPR_CLOCK_MONOTONIC);
+ if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) {
+ call->send_deadline = md->deadline;
}
}
@@ -1589,8 +1655,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_error *error) {
- batch_control *bctl = bctlp;
-
+ batch_control *bctl = (batch_control *)bctlp;
+ grpc_call *call = bctl->call;
+ GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
@@ -1609,9 +1676,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
batch_control *bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
-
- // sent_initial_metadata guards against variable reuse.
- grpc_metadata compression_md;
+ grpc_transport_stream_op_batch *stream_op;
+ grpc_transport_stream_op_batch_payload *stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1619,11 +1685,12 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (nops == 0) {
if (!is_notify_tag_closure) {
GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
- grpc_cq_end_op(exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
- free_no_op_completion, NULL,
- gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
+ free_no_op_completion, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1637,9 +1704,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
bctl->completion_data.notify_tag.is_closure =
(uint8_t)(is_notify_tag_closure != 0);
- grpc_transport_stream_op_batch *stream_op = &bctl->op;
- grpc_transport_stream_op_batch_payload *stream_op_payload =
- &call->stream_op_payload;
+ stream_op = &bctl->op;
+ stream_op_payload = &call->stream_op_payload;
/* rewrite batch ops into a transport op */
for (i = 0; i < nops; i++) {
@@ -1649,7 +1715,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
switch (op->op) {
- case GRPC_OP_SEND_INITIAL_METADATA:
+ case GRPC_OP_SEND_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (!are_initial_metadata_flags_valid(op->flags, call->is_client)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1660,7 +1726,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
goto done_with_error;
}
/* process compression level */
- memset(&compression_md, 0, sizeof(compression_md));
+ memset(&call->compression_md, 0, sizeof(call->compression_md));
size_t additional_metadata_count = 0;
grpc_compression_level effective_compression_level =
GRPC_COMPRESS_LEVEL_NONE;
@@ -1698,9 +1764,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
const grpc_stream_compression_algorithm calgo =
stream_compression_algorithm_for_level_locked(
call, effective_stream_compression_level);
- compression_md.key =
+ call->compression_md.key =
GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST;
- compression_md.value =
+ call->compression_md.value =
grpc_stream_compression_algorithm_slice(calgo);
} else {
const grpc_compression_algorithm calgo =
@@ -1708,8 +1774,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call, effective_compression_level);
/* the following will be picked up by the compress filter and used
* as the call's compression algorithm. */
- compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
- compression_md.value = grpc_compression_algorithm_slice(calgo);
+ call->compression_md.key =
+ GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST;
+ call->compression_md.value =
+ grpc_compression_algorithm_slice(calgo);
additional_metadata_count++;
}
}
@@ -1724,7 +1792,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
if (!prepare_application_metadata(
exec_ctx, call, (int)op->data.send_initial_metadata.count,
op->data.send_initial_metadata.metadata, 0, call->is_client,
- &compression_md, (int)additional_metadata_count)) {
+ &call->compression_md, (int)additional_metadata_count)) {
error = GRPC_CALL_ERROR_INVALID_METADATA;
goto done_with_error;
}
@@ -1736,8 +1804,13 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->send_initial_metadata.send_initial_metadata_flags =
op->flags;
+ if (call->is_client) {
+ stream_op_payload->send_initial_metadata.peer_string =
+ &call->peer_string;
+ }
break;
- case GRPC_OP_SEND_MESSAGE:
+ }
+ case GRPC_OP_SEND_MESSAGE: {
if (!are_write_flags_valid(op->flags)) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
goto done_with_error;
@@ -1766,7 +1839,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_message.send_message =
&call->sending_stream.base;
break;
- case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
+ }
+ case GRPC_OP_SEND_CLOSE_FROM_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1785,7 +1859,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_SEND_STATUS_FROM_SERVER:
+ }
+ case GRPC_OP_SEND_STATUS_FROM_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1847,7 +1922,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->send_trailing_metadata.send_trailing_metadata =
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
break;
- case GRPC_OP_RECV_INITIAL_METADATA:
+ }
+ case GRPC_OP_RECV_INITIAL_METADATA: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1868,9 +1944,14 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready =
&call->receiving_initial_metadata_ready;
+ if (!call->is_client) {
+ stream_op_payload->recv_initial_metadata.peer_string =
+ &call->peer_string;
+ }
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_MESSAGE:
+ }
+ case GRPC_OP_RECV_MESSAGE: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1884,13 +1965,15 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->recv_message = true;
call->receiving_buffer = op->data.recv_message.recv_message;
stream_op_payload->recv_message.recv_message = &call->receiving_stream;
- GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready,
- bctl, grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&call->receiving_stream_ready,
+ receiving_stream_ready_in_call_combiner, bctl,
+ grpc_schedule_on_exec_ctx);
stream_op_payload->recv_message.recv_message_ready =
&call->receiving_stream_ready;
num_completion_callbacks_needed++;
break;
- case GRPC_OP_RECV_STATUS_ON_CLIENT:
+ }
+ case GRPC_OP_RECV_STATUS_ON_CLIENT: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1917,7 +2000,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
- case GRPC_OP_RECV_CLOSE_ON_SERVER:
+ }
+ case GRPC_OP_RECV_CLOSE_ON_SERVER: {
/* Flag validation: currently allow no flags */
if (op->flags != 0) {
error = GRPC_CALL_ERROR_INVALID_FLAGS;
@@ -1941,6 +2025,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op_payload->collect_stats.collect_stats =
&call->final_info.stats.transport_stream_stats;
break;
+ }
}
}
@@ -1955,7 +2040,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
stream_op->on_complete = &bctl->finish_batch;
gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
- execute_op(exec_ctx, call, stream_op);
+ execute_batch(exec_ctx, call, stream_op, &bctl->start_batch);
done:
GPR_TIMER_END("grpc_call_start_batch", 0);
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index d537637cbb..27c2f5243c 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -37,7 +37,7 @@ typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
typedef struct grpc_call_create_args {
grpc_channel *channel;
- grpc_call *parent_call;
+ grpc_call *parent;
uint32_t propagation_mask;
grpc_completion_queue *cq;
@@ -49,7 +49,7 @@ typedef struct grpc_call_create_args {
grpc_mdelem *add_initial_metadata;
size_t add_initial_metadata_count;
- gpr_timespec send_deadline;
+ grpc_millis send_deadline;
} grpc_call_create_args;
/* Create a new call based on \a args.
diff --git a/src/core/lib/surface/call_details.c b/src/core/lib/surface/call_details.cc
index ea9208c7e3..ea9208c7e3 100644
--- a/src/core/lib/surface/call_details.c
+++ b/src/core/lib/surface/call_details.cc
diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.cc
index 4a1c265817..5557927b7c 100644
--- a/src/core/lib/surface/call_log_batch.c
+++ b/src/core/lib/surface/call_log_batch.cc
@@ -18,6 +18,8 @@
#include "src/core/lib/surface/call.h"
+#include <inttypes.h>
+
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/slice/slice_string_helpers.h"
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.cc
index 850fbe6a69..860dcc82db 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.cc
@@ -18,6 +18,7 @@
#include "src/core/lib/surface/channel.h"
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
@@ -27,6 +28,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
@@ -77,6 +79,11 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
+ if (channel_stack_type == GRPC_SERVER_CHANNEL) {
+ GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
+ } else {
+ GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
+ }
grpc_error *error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
(void **)&channel);
@@ -138,9 +145,8 @@ grpc_channel *grpc_channel_create_with_builder(
channel->compression_options.default_level.level =
(grpc_compression_level)grpc_channel_arg_get_integer(
&args->args[i],
- (grpc_integer_options){GRPC_COMPRESS_LEVEL_NONE,
- GRPC_COMPRESS_LEVEL_NONE,
- GRPC_COMPRESS_LEVEL_COUNT - 1});
+ {GRPC_COMPRESS_LEVEL_NONE, GRPC_COMPRESS_LEVEL_NONE,
+ GRPC_COMPRESS_LEVEL_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
channel->compression_options.default_stream_compression_level.is_set =
@@ -148,17 +154,15 @@ grpc_channel *grpc_channel_create_with_builder(
channel->compression_options.default_stream_compression_level.level =
(grpc_stream_compression_level)grpc_channel_arg_get_integer(
&args->args[i],
- (grpc_integer_options){GRPC_STREAM_COMPRESS_LEVEL_NONE,
- GRPC_STREAM_COMPRESS_LEVEL_NONE,
- GRPC_STREAM_COMPRESS_LEVEL_COUNT - 1});
+ {GRPC_STREAM_COMPRESS_LEVEL_NONE, GRPC_STREAM_COMPRESS_LEVEL_NONE,
+ GRPC_STREAM_COMPRESS_LEVEL_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
channel->compression_options.default_algorithm.is_set = true;
channel->compression_options.default_algorithm.algorithm =
(grpc_compression_algorithm)grpc_channel_arg_get_integer(
- &args->args[i],
- (grpc_integer_options){GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
- GRPC_COMPRESS_ALGORITHMS_COUNT - 1});
+ &args->args[i], {GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
+ GRPC_COMPRESS_ALGORITHMS_COUNT - 1});
} else if (0 == strcmp(args->args[i].key,
GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
channel->compression_options.default_stream_compression_algorithm.is_set =
@@ -167,9 +171,8 @@ grpc_channel *grpc_channel_create_with_builder(
.algorithm =
(grpc_stream_compression_algorithm)grpc_channel_arg_get_integer(
&args->args[i],
- (grpc_integer_options){
- GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_NONE,
- GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1});
+ {GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_NONE,
+ GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1});
} else if (0 ==
strcmp(args->args[i].key,
GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
@@ -259,7 +262,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_completion_queue *cq,
grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem,
- grpc_mdelem authority_mdelem, gpr_timespec deadline) {
+ grpc_mdelem authority_mdelem, grpc_millis deadline) {
grpc_mdelem send_metadata[2];
size_t num_metadata = 0;
@@ -276,7 +279,7 @@ static grpc_call *grpc_channel_create_call_internal(
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = channel;
- args.parent_call = parent_call;
+ args.parent = parent_call;
args.propagation_mask = propagation_mask;
args.cq = cq;
args.pollset_set_alternative = pollset_set_alternative;
@@ -305,7 +308,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
host != NULL ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY,
grpc_slice_ref_internal(*host))
: GRPC_MDNULL,
- deadline);
+ grpc_timespec_to_millis_round_up(deadline));
grpc_exec_ctx_finish(&exec_ctx);
return call;
}
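
The conversion point for deadlines is here at the public surface: grpc_channel_create_call still accepts a gpr_timespec, and grpc_timespec_to_millis_round_up turns it into the grpc_millis value used by everything underneath. A hedged sketch of that boundary (the five-second deadline and variable names are illustrative):

/* Illustrative: build an absolute API deadline, convert it once at the
   surface, and use plain integer comparisons below that point. */
gpr_timespec api_deadline = gpr_time_add(
    gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
grpc_millis deadline = grpc_timespec_to_millis_round_up(api_deadline);
if (deadline != GRPC_MILLIS_INF_FUTURE) {
  /* grpc_millis is an integer, so no gpr_time_cmp is needed below here. */
}
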
@@ -313,7 +316,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, gpr_timespec deadline, void *reserved) {
+ const grpc_slice *host, grpc_millis deadline, void *reserved) {
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set,
@@ -327,7 +330,7 @@ grpc_call *grpc_channel_create_pollset_set_call(
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved) {
- registered_call *rc = gpr_malloc(sizeof(registered_call));
+ registered_call *rc = (registered_call *)gpr_malloc(sizeof(registered_call));
GRPC_API_TRACE(
"grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
4, (channel, method, host, reserved));
@@ -354,7 +357,7 @@ grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline, void *reserved) {
- registered_call *rc = registered_call_handle;
+ registered_call *rc = (registered_call *)registered_call_handle;
GRPC_API_TRACE(
"grpc_channel_create_registered_call("
"channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
@@ -369,7 +372,8 @@ grpc_call *grpc_channel_create_registered_call(
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL,
- GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), deadline);
+ GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
+ grpc_timespec_to_millis_round_up(deadline));
grpc_exec_ctx_finish(&exec_ctx);
return call;
}
@@ -392,7 +396,7 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_channel *channel = arg;
+ grpc_channel *channel = (grpc_channel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 528bb868e2..4d1c7e369f 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -23,6 +23,10 @@
#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_stack_type.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_args *args,
grpc_channel_stack_type channel_stack_type,
@@ -43,7 +47,7 @@ grpc_channel *grpc_channel_create_with_builder(
grpc_call *grpc_channel_create_pollset_set_call(
grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, gpr_timespec deadline, void *reserved);
+ const grpc_slice *host, grpc_millis deadline, void *reserved);
 /** Get a (borrowed) pointer to this channel's underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
@@ -81,4 +85,8 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_compression_options grpc_channel_compression_options(
const grpc_channel *channel);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
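
This header (and the other headers touched below) keeps C linkage for its declarations while the implementation files are renamed to .cc; without the extern "C" guard a C++ translation unit would mangle these symbols and existing C callers would fail to link. The guard added here follows the usual shape (the function name in the sketch is hypothetical):

/* Generic shape of the guard used in these headers. */
#ifdef __cplusplus
extern "C" {
#endif

void some_c_api_function(void); /* keeps C linkage even when seen by C++ */

#ifdef __cplusplus
}
#endif
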
diff --git a/src/core/lib/surface/channel_init.c b/src/core/lib/surface/channel_init.cc
index a1391ffe56..33f444b89e 100644
--- a/src/core/lib/surface/channel_init.c
+++ b/src/core/lib/surface/channel_init.cc
@@ -53,9 +53,9 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
GPR_ASSERT(!g_finalized);
if (g_slots[type].cap_slots == g_slots[type].num_slots) {
g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
- g_slots[type].slots =
- gpr_realloc(g_slots[type].slots,
- g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
+ g_slots[type].slots = (stage_slot *)gpr_realloc(
+ g_slots[type].slots,
+ g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
}
stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
s->insertion_order = g_slots[type].num_slots;
@@ -65,8 +65,8 @@ void grpc_channel_init_register_stage(grpc_channel_stack_type type,
}
static int compare_slots(const void *a, const void *b) {
- const stage_slot *sa = a;
- const stage_slot *sb = b;
+ const stage_slot *sa = (const stage_slot *)a;
+ const stage_slot *sb = (const stage_slot *)b;
int c = GPR_ICMP(sa->priority, sb->priority);
if (c != 0) return c;
@@ -85,7 +85,7 @@ void grpc_channel_init_finalize(void) {
void grpc_channel_init_shutdown(void) {
for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
gpr_free(g_slots[i].slots);
- g_slots[i].slots = (void *)(uintptr_t)0xdeadbeef;
+ g_slots[i].slots = (stage_slot *)(void *)(uintptr_t)0xdeadbeef;
}
}
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.cc
index e85b308850..f45b568958 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.cc
@@ -39,7 +39,7 @@ static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
}
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- ping_result *pr = arg;
+ ping_result *pr = (ping_result *)arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy,
pr, &pr->completion_storage);
}
@@ -49,7 +49,7 @@ void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
(channel, cq, tag, reserved));
grpc_transport_op *op = grpc_make_transport_op(NULL);
- ping_result *pr = gpr_malloc(sizeof(*pr));
+ ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr));
grpc_channel_element *top_elem =
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/lib/surface/channel_stack_type.c b/src/core/lib/surface/channel_stack_type.cc
index 5f5c877727..5f5c877727 100644
--- a/src/core/lib/surface/channel_stack_type.c
+++ b/src/core/lib/surface/channel_stack_type.cc
diff --git a/src/core/lib/surface/channel_stack_type.h b/src/core/lib/surface/channel_stack_type.h
index 3f0e14ffc0..c77848794c 100644
--- a/src/core/lib/surface/channel_stack_type.h
+++ b/src/core/lib/surface/channel_stack_type.h
@@ -21,6 +21,10 @@
#include <stdbool.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum {
// normal top-half client channel with load-balancing, connection management
GRPC_CLIENT_CHANNEL,
@@ -42,4 +46,8 @@ bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type);
const char *grpc_channel_stack_type_string(grpc_channel_stack_type type);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H */
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.cc
index 10e4e5ab0c..5009f786e6 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.cc
@@ -15,8 +15,11 @@
* limitations under the License.
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/surface/completion_queue.h"
+#include <inttypes.h>
#include <stdio.h>
#include <string.h>
@@ -25,7 +28,9 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
+#include <grpc/support/tls.h>
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@@ -44,6 +49,14 @@ grpc_tracer_flag grpc_trace_cq_refcount =
GRPC_TRACER_INITIALIZER(false, "cq_refcount");
#endif
+// Specifies a cq thread-local cache.
+// The first event that occurs on a thread with a cq cache will go into that
+// cache, and will only be returned on the thread that initialized the cache.
+// NOTE: Only one event will ever be cached.
+GPR_TLS_DECL(g_cached_event);
+GPR_TLS_DECL(g_cached_cq);
+
typedef struct {
grpc_pollset_worker **worker;
void *tag;
@@ -54,11 +67,10 @@ typedef struct {
bool can_listen;
size_t (*size)(void);
void (*init)(grpc_pollset *pollset, gpr_mu **mu);
- grpc_error *(*kick)(grpc_pollset *pollset,
+ grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *specific_worker);
grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, gpr_timespec now,
- gpr_timespec deadline);
+ grpc_pollset_worker **worker, grpc_millis deadline);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure);
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
@@ -96,8 +108,7 @@ static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx,
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,
grpc_pollset_worker **worker,
- gpr_timespec now,
- gpr_timespec deadline) {
+ grpc_millis deadline) {
non_polling_poller *npp = (non_polling_poller *)pollset;
if (npp->shutdown) return GRPC_ERROR_NONE;
non_polling_worker w;
@@ -111,7 +122,10 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
w.next->prev = w.prev->next = &w;
}
w.kicked = false;
- while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline))
+ gpr_timespec deadline_ts =
+ grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
+ while (!npp->shutdown && !w.kicked &&
+ !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts))
;
if (&w == npp->root) {
npp->root = w.next;
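
The non-polling poller just sleeps on a condition variable, and gpr_cv_wait still takes an absolute gpr_timespec, so the grpc_millis deadline is converted back at this one boundary with grpc_millis_to_timespec. A reduced sketch of that wait loop (done, cv and mu are placeholders for the shutdown/kicked state above):

/* Illustrative wait against a grpc_millis deadline using a gpr condvar.
   gpr_cv_wait returns nonzero on timeout, zero when signalled. */
gpr_timespec deadline_ts =
    grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
while (!done && !gpr_cv_wait(&cv, &mu, deadline_ts)) {
  /* Woken before the deadline: loop and re-check the predicate. */
}
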
@@ -130,7 +144,8 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
}
static grpc_error *non_polling_poller_kick(
- grpc_pollset *pollset, grpc_pollset_worker *specific_worker) {
+ grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
+ grpc_pollset_worker *specific_worker) {
non_polling_poller *p = (non_polling_poller *)pollset;
if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
if (specific_worker != NULL) {
@@ -162,32 +177,15 @@ static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
static const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
/* GRPC_CQ_DEFAULT_POLLING */
- {.can_get_pollset = true,
- .can_listen = true,
- .size = grpc_pollset_size,
- .init = grpc_pollset_init,
- .kick = grpc_pollset_kick,
- .work = grpc_pollset_work,
- .shutdown = grpc_pollset_shutdown,
- .destroy = grpc_pollset_destroy},
+ {true, true, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
+ grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
/* GRPC_CQ_NON_LISTENING */
- {.can_get_pollset = true,
- .can_listen = false,
- .size = grpc_pollset_size,
- .init = grpc_pollset_init,
- .kick = grpc_pollset_kick,
- .work = grpc_pollset_work,
- .shutdown = grpc_pollset_shutdown,
- .destroy = grpc_pollset_destroy},
+ {true, false, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
+ grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
/* GRPC_CQ_NON_POLLING */
- {.can_get_pollset = false,
- .can_listen = false,
- .size = non_polling_poller_size,
- .init = non_polling_poller_init,
- .kick = non_polling_poller_kick,
- .work = non_polling_poller_work,
- .shutdown = non_polling_poller_shutdown,
- .destroy = non_polling_poller_destroy},
+ {false, false, non_polling_poller_size, non_polling_poller_init,
+ non_polling_poller_kick, non_polling_poller_work,
+ non_polling_poller_shutdown, non_polling_poller_destroy},
};
typedef struct cq_vtable {
@@ -327,25 +325,12 @@ static void cq_destroy_pluck(void *data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
/* GRPC_CQ_NEXT */
- {.data_size = sizeof(cq_next_data),
- .cq_completion_type = GRPC_CQ_NEXT,
- .init = cq_init_next,
- .shutdown = cq_shutdown_next,
- .destroy = cq_destroy_next,
- .begin_op = cq_begin_op_for_next,
- .end_op = cq_end_op_for_next,
- .next = cq_next,
- .pluck = NULL},
+ {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
+ cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL},
/* GRPC_CQ_PLUCK */
- {.data_size = sizeof(cq_pluck_data),
- .cq_completion_type = GRPC_CQ_PLUCK,
- .init = cq_init_pluck,
- .shutdown = cq_shutdown_pluck,
- .destroy = cq_destroy_pluck,
- .begin_op = cq_begin_op_for_pluck,
- .end_op = cq_end_op_for_pluck,
- .next = NULL,
- .pluck = cq_pluck},
+ {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
+ cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL,
+ cq_pluck},
};
#define DATA_FROM_CQ(cq) ((void *)(cq + 1))
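
The two vtable initializers rewritten above (and the grpc_integer_options literals in channel.cc earlier in this diff) trade C99 designated initializers and compound literals, which C++ does not accept before C++20, for positional aggregate initialization; the cost is that the initializer order must now track the member order of the struct exactly. A toy illustration of the trade-off (the struct is made up for the example):

/* Hypothetical struct, not part of the tree. */
typedef struct {
  bool can_get_pollset;
  bool can_listen;
  size_t (*size)(void);
} tiny_vtable;

/* C99 style: order-independent and self-documenting, but rejected by the
   C++ compilers this code now has to build under:
     static const tiny_vtable v = {.can_listen = true,
                                   .can_get_pollset = true,
                                   .size = grpc_pollset_size};            */

/* C++-compatible positional form: must follow the declaration order. */
static const tiny_vtable v = {true, true, grpc_pollset_size};
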
@@ -369,6 +354,46 @@ grpc_tracer_flag grpc_cq_event_timeout_trace =
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cq,
grpc_error *error);
+void grpc_cq_global_init() {
+ gpr_tls_init(&g_cached_event);
+ gpr_tls_init(&g_cached_cq);
+}
+
+void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue *cq) {
+ if ((grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == nullptr) {
+ gpr_tls_set(&g_cached_event, (intptr_t)0);
+ gpr_tls_set(&g_cached_cq, (intptr_t)cq);
+ }
+}
+
+int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue *cq,
+ void **tag, int *ok) {
+ grpc_cq_completion *storage =
+ (grpc_cq_completion *)gpr_tls_get(&g_cached_event);
+ int ret = 0;
+ if (storage != NULL &&
+ (grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == cq) {
+ *tag = storage->tag;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ storage->done(&exec_ctx, storage->done_arg, storage);
+ *ok = (storage->next & (uintptr_t)(1)) == 1;
+ ret = 1;
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+ if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+ GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
+ gpr_mu_lock(cq->mu);
+ cq_finish_shutdown_next(&exec_ctx, cq);
+ gpr_mu_unlock(cq->mu);
+ GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "shutting_down");
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+ }
+ gpr_tls_set(&g_cached_event, (intptr_t)0);
+ gpr_tls_set(&g_cached_cq, (intptr_t)0);
+
+ return ret;
+}
+
static void cq_event_queue_init(grpc_cq_event_queue *q) {
gpr_mpscq_init(&q->queue);
q->queue_lock = GPR_SPINLOCK_INITIALIZER;
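
Taken together, the two new entry points above form a small per-thread fast path: a thread opts in with grpc_completion_queue_thread_local_cache_init(cq), the next completion produced on that thread for that cq is parked in thread-local storage instead of going through the MPSC queue, and the thread collects it with grpc_completion_queue_thread_local_cache_flush. A hedged caller-side sketch (the control flow and tag handling are illustrative):

/* Illustrative use of the thread-local cq cache from application code. */
grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
grpc_completion_queue_thread_local_cache_init(cq);

/* ... start exactly one operation on this thread that completes on cq ... */

void *tag = NULL;
int ok = 0;
if (grpc_completion_queue_thread_local_cache_flush(cq, &tag, &ok)) {
  /* The cached event was produced on this thread: no queue pop, no poll. */
} else {
  /* Nothing was cached; fall back to grpc_completion_queue_next(). */
}
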
@@ -386,11 +411,24 @@ static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) {
static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) {
grpc_cq_completion *c = NULL;
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+
if (gpr_spinlock_trylock(&q->queue_lock)) {
- c = (grpc_cq_completion *)gpr_mpscq_pop(&q->queue);
+ GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(&exec_ctx);
+
+ bool is_empty = false;
+ c = (grpc_cq_completion *)gpr_mpscq_pop_and_check_end(&q->queue, &is_empty);
gpr_spinlock_unlock(&q->queue_lock);
+
+ if (c == NULL && !is_empty) {
+ GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(&exec_ctx);
+ }
+ } else {
+ GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(&exec_ctx);
}
+ grpc_exec_ctx_finish(&exec_ctx);
+
if (c) {
gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1);
}
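
The switch to gpr_mpscq_pop_and_check_end matters because gpr_mpscq_pop can return NULL transiently even when the queue is non-empty (a producer may have advanced the head but not yet linked its node); the extra flag lets the consumer tell "really empty" from "try again", and the new stats counters record how often each case occurs. A reduced consumer-side sketch of that distinction (the retry policy is illustrative):

/* Illustrative consumer: retry only on transient pop failures. */
static gpr_mpscq_node *pop_one(gpr_mpscq *q) {
  for (;;) {
    bool is_empty = false;
    gpr_mpscq_node *n = gpr_mpscq_pop_and_check_end(q, &is_empty);
    if (n != NULL) return n;    /* got an item */
    if (is_empty) return NULL;  /* queue is genuinely empty */
    /* NULL but not empty: a producer is mid-push; retry. */
  }
}
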
@@ -420,8 +458,13 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
const cq_poller_vtable *poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
- cq = gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
- poller_vtable->size());
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
+ grpc_exec_ctx_finish(&exec_ctx);
+
+ cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
+ vtable->data_size +
+ poller_vtable->size());
cq->vtable = vtable;
cq->poller_vtable = poller_vtable;
@@ -441,7 +484,7 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
}
static void cq_init_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->shutdown_called = false;
@@ -450,13 +493,13 @@ static void cq_init_next(void *ptr) {
}
static void cq_destroy_next(void *ptr) {
- cq_next_data *cqd = ptr;
+ cq_next_data *cqd = (cq_next_data *)ptr;
GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0);
cq_event_queue_destroy(&cqd->queue);
}
static void cq_init_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->completed_tail = &cqd->completed_head;
@@ -468,7 +511,7 @@ static void cq_init_pluck(void *ptr) {
}
static void cq_destroy_pluck(void *ptr) {
- cq_pluck_data *cqd = ptr;
+ cq_pluck_data *cqd = (cq_pluck_data *)ptr;
GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
}
@@ -501,7 +544,7 @@ void grpc_cq_internal_ref(grpc_completion_queue *cq) {
static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_completion_queue *cq = arg;
+ grpc_completion_queue *cq = (grpc_completion_queue *)arg;
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy");
}
@@ -559,13 +602,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
* true if the increment was successful; false if the counter is zero */
static bool atm_inc_if_nonzero(gpr_atm *counter) {
while (true) {
- gpr_atm count = gpr_atm_no_barrier_load(counter);
+ gpr_atm count = gpr_atm_acq_load(counter);
    /* If zero, we are done. If not, we must do a CAS (instead of an atomic
* increment) to maintain the contract: do not increment the counter if it
* is zero. */
if (count == 0) {
return false;
- } else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) {
+ } else if (gpr_atm_full_cas(counter, count, count + 1)) {
break;
}
}
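
This helper increments the pending-events counter only if it has not already dropped to zero, which cannot be expressed as a plain fetch-add; it needs a compare-and-swap loop, and the load/CAS are strengthened to acquire/full semantics so they pair with the release decrement performed in cq_shutdown_next. The pattern, reduced to its essentials:

/* Reduced sketch of the increment-if-nonzero CAS loop used above. */
static bool inc_if_nonzero(gpr_atm *counter) {
  for (;;) {
    gpr_atm count = gpr_atm_acq_load(counter);
    if (count == 0) return false;  /* already shut down; do not revive */
    if (gpr_atm_full_cas(counter, count, count + 1)) return true;
    /* Lost a race with another thread; reload and try again. */
  }
}
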
@@ -574,12 +617,12 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
}
static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
@@ -588,9 +631,9 @@ bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) {
gpr_mu_lock(cq->mu);
if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
- cq->outstanding_tags =
- gpr_realloc(cq->outstanding_tags, sizeof(*cq->outstanding_tags) *
- cq->outstanding_tag_capacity);
+ cq->outstanding_tags = (void **)gpr_realloc(
+ cq->outstanding_tags,
+ sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity);
}
cq->outstanding_tags[cq->outstanding_tag_count++] = tag;
gpr_mu_unlock(cq->mu);
@@ -623,8 +666,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
}
}
-
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@@ -634,40 +676,50 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
cq_check_tag(cq, tag, true); /* Used in debug builds only */
- /* Add the completion to the queue */
- bool is_first = cq_event_queue_push(&cqd->queue, storage);
- gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
- bool will_definitely_shutdown =
- gpr_atm_no_barrier_load(&cqd->pending_events) == 1;
-
- if (!will_definitely_shutdown) {
- /* Only kick if this is the first item queued */
- if (is_first) {
- gpr_mu_lock(cq->mu);
- grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
- gpr_mu_unlock(cq->mu);
+ if ((grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == cq &&
+ (grpc_cq_completion *)gpr_tls_get(&g_cached_event) == nullptr) {
+ gpr_tls_set(&g_cached_event, (intptr_t)storage);
+ } else {
+ /* Add the completion to the queue */
+ bool is_first = cq_event_queue_push(&cqd->queue, storage);
+ gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
+
+    /* Since we do not hold the cq lock here, it is important to do an
+       'acquire' load (instead of a 'no_barrier' load) to match with the
+       release store (done via gpr_atm_full_fetch_add(pending_events, -1))
+       in cq_shutdown_next */
+ bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1;
+
+ if (!will_definitely_shutdown) {
+ /* Only kick if this is the first item queued */
+ if (is_first) {
+ gpr_mu_lock(cq->mu);
+ grpc_error *kick_error =
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
+ gpr_mu_unlock(cq->mu);
- if (kick_error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(kick_error);
- gpr_log(GPR_ERROR, "Kick failed: %s", msg);
- GRPC_ERROR_UNREF(kick_error);
+ if (kick_error != GRPC_ERROR_NONE) {
+ const char *msg = grpc_error_string(kick_error);
+ gpr_log(GPR_ERROR, "Kick failed: %s", msg);
+ GRPC_ERROR_UNREF(kick_error);
+ }
}
- }
- if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+ if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
+ GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
+ gpr_mu_lock(cq->mu);
+ cq_finish_shutdown_next(exec_ctx, cq);
+ gpr_mu_unlock(cq->mu);
+ GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
+ }
+ } else {
GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
+ gpr_atm_rel_store(&cqd->pending_events, 0);
gpr_mu_lock(cq->mu);
cq_finish_shutdown_next(exec_ctx, cq);
gpr_mu_unlock(cq->mu);
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
}
- } else {
- GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
- gpr_atm_rel_store(&cqd->pending_events, 0);
- gpr_mu_lock(cq->mu);
- cq_finish_shutdown_next(exec_ctx, cq);
- gpr_mu_unlock(cq->mu);
- GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
}
GPR_TIMER_END("cq_end_op_for_next", 0);
@@ -685,7 +737,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
void *done_arg,
grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@@ -731,7 +783,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
grpc_error *kick_error =
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
+ cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
@@ -759,16 +811,16 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
typedef struct {
gpr_atm last_seen_things_queued_ever;
grpc_completion_queue *cq;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_cq_completion *stolen_completion;
void *tag; /* for pluck */
bool first_loop;
} cq_is_finished_arg;
static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -788,8 +840,7 @@ static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
return true;
}
}
- return !a->first_loop &&
- gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+ return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
}
#ifndef NDEBUG
@@ -818,8 +869,7 @@ static void dump_pending_tags(grpc_completion_queue *cq) {}
static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
void *reserved) {
grpc_event ret;
- gpr_timespec now;
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -835,23 +885,20 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
dump_pending_tags(cq);
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
-
GRPC_CQ_INTERNAL_REF(cq, "next");
+ grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
cq_is_finished_arg is_finished_arg = {
- .last_seen_things_queued_ever =
- gpr_atm_no_barrier_load(&cqd->things_queued_ever),
- .cq = cq,
- .deadline = deadline,
- .stolen_completion = NULL,
- .tag = NULL,
- .first_loop = true};
+ gpr_atm_no_barrier_load(&cqd->things_queued_ever),
+ cq,
+ deadline_millis,
+ NULL,
+ NULL,
+ true};
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg);
-
for (;;) {
- gpr_timespec iteration_deadline = deadline;
+ grpc_millis iteration_deadline = deadline_millis;
if (is_finished_arg.stolen_completion != NULL) {
grpc_cq_completion *c = is_finished_arg.stolen_completion;
@@ -878,11 +925,11 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
attempt at popping. Not doing this can potentially deadlock this
thread forever (if the deadline is infinity) */
if (cq_event_queue_num_items(&cqd->queue) > 0) {
- iteration_deadline = gpr_time_0(GPR_CLOCK_MONOTONIC);
+ iteration_deadline = 0;
}
}
- if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) {
+ if (gpr_atm_acq_load(&cqd->pending_events) == 0) {
/* Before returning, check if the queue has any items left over (since
gpr_mpscq_pop() can sometimes return NULL even if the queue is not
empty. If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */
@@ -899,8 +946,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
break;
}
- now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop &&
+ grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
dump_pending_tags(cq);
@@ -911,7 +958,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
gpr_mu_lock(cq->mu);
cq->num_polls++;
grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
- NULL, now, iteration_deadline);
+ NULL, iteration_deadline);
gpr_mu_unlock(cq->mu);
if (err != GRPC_ERROR_NONE) {
@@ -928,9 +975,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
}
if (cq_event_queue_num_items(&cqd->queue) > 0 &&
- gpr_atm_no_barrier_load(&cqd->pending_events) > 0) {
+ gpr_atm_acq_load(&cqd->pending_events) > 0) {
gpr_mu_lock(cq->mu);
- cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL);
+ cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
}
@@ -952,7 +999,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
this function */
static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@@ -963,7 +1010,7 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_next_data *cqd = DATA_FROM_CQ(cq);
+ cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_next() below, which would call pollset shutdown.
@@ -979,6 +1026,9 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
return;
}
cqd->shutdown_called = true;
+  /* Doing a full_fetch_add (i.e. acq/release) here to match with
+   * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
+   * on this counter without necessarily holding a lock on cq */
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
cq_finish_shutdown_next(exec_ctx, cq);
}
@@ -993,7 +1043,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
static int add_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@@ -1005,7 +1055,7 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
static void del_plucker(grpc_completion_queue *cq, void *tag,
grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@@ -1017,9 +1067,9 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
}
static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = arg;
+ cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
grpc_completion_queue *cq = a->cq;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -1045,8 +1095,7 @@ static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
}
gpr_mu_unlock(cq->mu);
}
- return !a->first_loop &&
- gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0;
+ return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
}
static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
@@ -1055,8 +1104,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
grpc_cq_completion *c;
grpc_cq_completion *prev;
grpc_pollset_worker *worker = NULL;
- gpr_timespec now;
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1074,18 +1122,16 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
- deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
-
GRPC_CQ_INTERNAL_REF(cq, "pluck");
gpr_mu_lock(cq->mu);
+ grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
cq_is_finished_arg is_finished_arg = {
- .last_seen_things_queued_ever =
- gpr_atm_no_barrier_load(&cqd->things_queued_ever),
- .cq = cq,
- .deadline = deadline,
- .stolen_completion = NULL,
- .tag = tag,
- .first_loop = true};
+ gpr_atm_no_barrier_load(&cqd->things_queued_ever),
+ cq,
+ deadline_millis,
+ NULL,
+ tag,
+ true};
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, cq_is_pluck_finished, &is_finished_arg);
for (;;) {
@@ -1134,8 +1180,8 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
break;
}
- now = gpr_now(GPR_CLOCK_MONOTONIC);
- if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) {
+ if (!is_finished_arg.first_loop &&
+ grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
memset(&ret, 0, sizeof(ret));
@@ -1143,10 +1189,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
dump_pending_tags(cq);
break;
}
-
cq->num_polls++;
grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
- &worker, now, deadline);
+ &worker, deadline_millis);
if (err != GRPC_ERROR_NONE) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
@@ -1180,7 +1225,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@@ -1194,7 +1239,7 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
* merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
grpc_completion_queue *cq) {
- cq_pluck_data *cqd = DATA_FROM_CQ(cq);
+ cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_pluck() below, which would call pollset shutdown.
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 69d144bd95..c02bc5da07 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -70,6 +70,9 @@ void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc);
#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) grpc_cq_internal_unref(ec, cc)
#endif
+/* Initializes global variables used by completion queues */
+void grpc_cq_global_init();
+
/* Flag that an operation is beginning: the completion channel will not finish
   shutdown until a corresponding grpc_cq_end_* call is made.
\a tag is currently used only in debug builds. Return true on success, and
diff --git a/src/core/lib/surface/completion_queue_factory.c b/src/core/lib/surface/completion_queue_factory.cc
index aeecff5306..aeecff5306 100644
--- a/src/core/lib/surface/completion_queue_factory.c
+++ b/src/core/lib/surface/completion_queue_factory.cc
diff --git a/src/core/lib/surface/completion_queue_factory.h b/src/core/lib/surface/completion_queue_factory.h
index 89be8f8216..af8f3d60c3 100644
--- a/src/core/lib/surface/completion_queue_factory.h
+++ b/src/core/lib/surface/completion_queue_factory.h
@@ -22,6 +22,10 @@
#include <grpc/grpc.h>
#include "src/core/lib/surface/completion_queue.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_completion_queue_factory_vtable {
grpc_completion_queue* (*create)(const grpc_completion_queue_factory*,
const grpc_completion_queue_attributes*);
@@ -33,4 +37,8 @@ struct grpc_completion_queue_factory {
grpc_completion_queue_factory_vtable* vtable;
};
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H */
diff --git a/src/core/lib/surface/event_string.c b/src/core/lib/surface/event_string.cc
index f236272e2a..f236272e2a 100644
--- a/src/core/lib/surface/event_string.c
+++ b/src/core/lib/surface/event_string.cc
diff --git a/src/core/lib/surface/event_string.h b/src/core/lib/surface/event_string.h
index f00efca7f3..2d53cf0fac 100644
--- a/src/core/lib/surface/event_string.h
+++ b/src/core/lib/surface/event_string.h
@@ -21,7 +21,15 @@
#include <grpc/grpc.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Returns a string describing an event. Must be later freed with gpr_free() */
char *grpc_event_string(grpc_event *ev);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_EVENT_STRING_H */
diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.cc
index 898476daee..058e88f804 100644
--- a/src/core/lib/surface/init.c
+++ b/src/core/lib/surface/init.cc
@@ -31,10 +31,12 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/http/parser.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
+#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/alarm_internal.h"
@@ -62,6 +64,7 @@ static void do_basic_init(void) {
gpr_log_verbosity_init();
gpr_mu_init(&g_init_mu);
grpc_register_built_in_plugins();
+ grpc_cq_global_init();
g_initializations = 0;
}
@@ -129,6 +132,7 @@ void grpc_init(void) {
grpc_register_tracer(&grpc_trace_channel_stack_builder);
grpc_register_tracer(&grpc_http1_trace);
grpc_register_tracer(&grpc_cq_pluck_trace); // default on
+ grpc_register_tracer(&grpc_call_combiner_trace);
grpc_register_tracer(&grpc_combiner_trace);
grpc_register_tracer(&grpc_server_channel_trace);
grpc_register_tracer(&grpc_bdp_estimator_trace);
@@ -177,14 +181,16 @@ void grpc_shutdown(void) {
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
- grpc_iomgr_shutdown(&exec_ctx);
- gpr_timers_global_destroy();
- grpc_tracer_shutdown();
+ grpc_executor_shutdown(&exec_ctx);
+ grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();
}
}
+ grpc_iomgr_shutdown(&exec_ctx);
+ gpr_timers_global_destroy();
+ grpc_tracer_shutdown();
grpc_mdctx_global_shutdown(&exec_ctx);
grpc_handshaker_factory_registry_shutdown(&exec_ctx);
grpc_slice_intern_shutdown();
diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h
index 9353208332..d429026327 100644
--- a/src/core/lib/surface/init.h
+++ b/src/core/lib/surface/init.h
@@ -19,9 +19,17 @@
#ifndef GRPC_CORE_LIB_SURFACE_INIT_H
#define GRPC_CORE_LIB_SURFACE_INIT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
void grpc_register_security_filters(void);
void grpc_security_pre_init(void);
void grpc_security_init(void);
int grpc_is_initialized(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_INIT_H */
diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.cc
index 2366c24910..8fbde3d1b4 100644
--- a/src/core/lib/surface/init_secure.c
+++ b/src/core/lib/surface/init_secure.cc
@@ -25,6 +25,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/security/credentials/credentials.h"
+#include "src/core/lib/security/credentials/plugin/plugin_credentials.h"
#include "src/core/lib/security/transport/auth_filters.h"
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/security/transport/security_connector.h"
@@ -84,4 +85,7 @@ void grpc_register_security_filters(void) {
maybe_prepend_server_auth_filter, NULL);
}
-void grpc_security_init() { grpc_security_register_handshaker_factories(); }
+void grpc_security_init() {
+ grpc_security_register_handshaker_factories();
+ grpc_register_tracer(&grpc_plugin_credentials_trace);
+}
diff --git a/src/core/lib/surface/init_unsecure.c b/src/core/lib/surface/init_unsecure.cc
index b852cab985..b852cab985 100644
--- a/src/core/lib/surface/init_unsecure.c
+++ b/src/core/lib/surface/init_unsecure.cc
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index a0791080a9..88e26cbeb7 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,6 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -52,14 +53,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
- CallData *calld = static_cast<CallData *>(elem->call_data);
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
+ ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -73,12 +74,13 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
mdb->list.head = &calld->status;
mdb->list.tail = &calld->details;
mdb->list.count = 2;
- mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ mdb->deadline = GRPC_MILLIS_INF_FUTURE;
}
static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -87,12 +89,8 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
-}
-
-static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return NULL;
+ exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+ calld->call_combiner);
}
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@@ -122,6 +120,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -156,7 +156,6 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem,
- grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info,
"lame-client",
};
@@ -176,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = static_cast<grpc_core::ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
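
With the get_peer entry removed above, the lame filter's only remaining per-call state is the call combiner it must hand to grpc_transport_stream_op_batch_finish_with_failure; it is captured once in init_call_elem from the element args. A minimal sketch of that wiring for any filter in the same position (names outside the APIs shown in this diff are illustrative):

/* Illustrative filter call data that only carries the call combiner. */
typedef struct {
  grpc_call_combiner *call_combiner;
} my_call_data;

static grpc_error *my_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     const grpc_call_element_args *args) {
  my_call_data *calld = (my_call_data *)elem->call_data;
  calld->call_combiner = args->call_combiner; /* provided by the call stack */
  return GRPC_ERROR_NONE;
}

static void my_fail_batch(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                          grpc_transport_stream_op_batch *op) {
  my_call_data *calld = (my_call_data *)elem->call_data;
  grpc_transport_stream_op_batch_finish_with_failure(
      exec_ctx, op,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("illustrative failure"),
      calld->call_combiner);
}
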
diff --git a/src/core/lib/surface/lame_client.h b/src/core/lib/surface/lame_client.h
index 3ce353f101..2f6f9cd046 100644
--- a/src/core/lib/surface/lame_client.h
+++ b/src/core/lib/surface/lame_client.h
@@ -21,6 +21,14 @@
#include "src/core/lib/channel/channel_stack.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_lame_filter;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H */
diff --git a/src/core/lib/surface/metadata_array.c b/src/core/lib/surface/metadata_array.cc
index 0afb8b4b87..0afb8b4b87 100644
--- a/src/core/lib/surface/metadata_array.c
+++ b/src/core/lib/surface/metadata_array.cc
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.cc
index 66dcc299aa..dd09cb91de 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.cc
@@ -29,6 +29,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
@@ -75,7 +76,7 @@ typedef struct requested_call {
grpc_call_details *details;
} batch;
struct {
- registered_method *registered_method;
+ registered_method *method;
gpr_timespec *deadline;
grpc_byte_buffer **optional_payload;
} registered;
@@ -136,7 +137,7 @@ struct call_data {
bool host_set;
grpc_slice path;
grpc_slice host;
- gpr_timespec deadline;
+ grpc_millis deadline;
grpc_completion_queue *cq_new;
@@ -144,7 +145,7 @@ struct call_data {
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
- request_matcher *request_matcher;
+ request_matcher *matcher;
grpc_byte_buffer *payload;
grpc_closure got_initial_metadata;
@@ -170,7 +171,7 @@ struct registered_method {
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
- request_matcher request_matcher;
+ request_matcher matcher;
registered_method *next;
};
@@ -250,7 +251,8 @@ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
count++;
}
cb->num_channels = count;
- cb->channels = gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ cb->channels =
+ (grpc_channel **)gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
count = 0;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
cb->channels[count++] = c->channel;
@@ -265,14 +267,15 @@ struct shutdown_cleanup_args {
static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- struct shutdown_cleanup_args *a = arg;
+ struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg;
grpc_slice_unref_internal(exec_ctx, a->slice);
gpr_free(a);
}
static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
bool send_goaway, grpc_error *send_disconnect) {
- struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
+ struct shutdown_cleanup_args *sc =
+ (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc));
GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
@@ -314,8 +317,8 @@ static void request_matcher_init(request_matcher *rm, size_t entries,
grpc_server *server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq =
- gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count);
+ rm->requests_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
}
@@ -331,7 +334,7 @@ static void request_matcher_destroy(request_matcher *rm) {
static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
grpc_error *error) {
- grpc_call_unref(grpc_call_from_top_element(elem));
+ grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
}
static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
@@ -384,7 +387,7 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
if (server->started) {
- request_matcher_destroy(&rm->request_matcher);
+ request_matcher_destroy(&rm->matcher);
}
gpr_free(rm->method);
gpr_free(rm->host);
@@ -426,7 +429,7 @@ static void orphan_channel(channel_data *chand) {
static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
@@ -459,7 +462,7 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
grpc_cq_completion *c) {
- requested_call *rc = req;
+ requested_call *rc = (requested_call *)req;
grpc_server *server = rc->server;
if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
@@ -489,11 +492,13 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
GPR_ASSERT(calld->path_set);
rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
- rc->data.batch.details->deadline = calld->deadline;
+ rc->data.batch.details->deadline =
+ grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
rc->data.batch.details->flags = calld->recv_initial_metadata_flags;
break;
case REGISTERED_CALL:
- *rc->data.registered.deadline = calld->deadline;
+ *rc->data.registered.deadline =
+ grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
if (rc->data.registered.optional_payload) {
*rc->data.registered.optional_payload = calld->payload;
calld->payload = NULL;
@@ -505,7 +510,7 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
grpc_call_element *elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
@@ -513,10 +518,10 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- grpc_call_element *call_elem = arg;
- call_data *calld = call_elem->call_data;
- channel_data *chand = call_elem->channel_data;
- request_matcher *rm = calld->request_matcher;
+ grpc_call_element *call_elem = (grpc_call_element *)arg;
+ call_data *calld = (call_data *)call_elem->call_data;
+ channel_data *chand = (channel_data *)call_elem->channel_data;
+ request_matcher *rm = calld->matcher;
grpc_server *server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
@@ -538,6 +543,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
if (request_id == -1) {
continue;
} else {
+ GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
@@ -548,6 +554,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
/* no cq to take the request found: queue it on the slow list */
+ GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
@@ -566,7 +573,7 @@ static void finish_start_new_rpc(
grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem,
request_matcher *rm,
grpc_server_register_method_payload_handling payload_handling) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_lock(&calld->mu_state);
@@ -578,7 +585,7 @@ static void finish_start_new_rpc(
return;
}
- calld->request_matcher = rm;
+ calld->matcher = rm;
switch (payload_handling) {
case GRPC_SRM_PAYLOAD_NONE:
@@ -599,8 +606,8 @@ static void finish_start_new_rpc(
}
static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_server *server = chand->server;
uint32_t i;
uint32_t hash;
@@ -624,7 +631,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -642,7 +649,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
continue;
}
finish_start_new_rpc(exec_ctx, server, elem,
- &rm->server_registered_method->request_matcher,
+ &rm->server_registered_method->matcher,
rm->server_registered_method->payload_handling);
return;
}
@@ -663,7 +670,7 @@ static int num_listeners(grpc_server *server) {
static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
grpc_cq_completion *completion) {
- server_unref(exec_ctx, server);
+ server_unref(exec_ctx, (grpc_server *)server);
}
static int num_channels(grpc_server *server) {
@@ -686,9 +693,9 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, &server->unregistered_request_matcher);
for (registered_method *rm = server->registered_methods; rm;
rm = rm->next) {
- request_matcher_kill_requests(exec_ctx, server, &rm->request_matcher,
+ request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
- request_matcher_zombify_all_pending_calls(exec_ctx, &rm->request_matcher);
+ request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
}
}
GRPC_ERROR_UNREF(error);
@@ -732,9 +739,9 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
- gpr_timespec op_deadline;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
+ grpc_millis op_deadline;
if (error == GRPC_ERROR_NONE) {
GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != NULL);
@@ -754,7 +761,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_ERROR_REF(error);
}
op_deadline = calld->recv_initial_metadata->deadline;
- if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) {
+ if (op_deadline != GRPC_MILLIS_INF_FUTURE) {
calld->deadline = op_deadline;
}
if (calld->host_set && calld->path_set) {
@@ -771,7 +778,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void server_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL);
@@ -789,15 +796,14 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
grpc_error *error) {
- grpc_call_element *elem = ptr;
- call_data *calld = elem->call_data;
+ grpc_call_element *elem = (grpc_call_element *)ptr;
+ call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
@@ -823,13 +829,13 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
grpc_transport *transport,
const void *transport_server_data) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
/* create a call */
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
- args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+ args.send_deadline = GRPC_MILLIS_INF_FUTURE;
grpc_call *call;
grpc_error *error = grpc_call_create(exec_ctx, &args, &call);
grpc_call_element *elem =
@@ -839,7 +845,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
GRPC_ERROR_UNREF(error);
return;
}
- call_data *calld = elem->call_data;
+ call_data *calld = (call_data *)elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
@@ -853,7 +859,7 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
grpc_error *error) {
- channel_data *chand = cd;
+ channel_data *chand = (channel_data *)cd;
grpc_server *server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
grpc_transport_op *op = grpc_make_transport_op(NULL);
@@ -874,10 +880,10 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
- call_data *calld = elem->call_data;
- channel_data *chand = elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
memset(calld, 0, sizeof(call_data));
- calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
@@ -892,8 +898,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
- channel_data *chand = elem->channel_data;
- call_data *calld = elem->call_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
+ call_data *calld = (call_data *)elem->call_data;
GPR_ASSERT(calld->state != PENDING);
@@ -914,7 +920,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
chand->server = NULL;
@@ -931,7 +937,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
size_t i;
- channel_data *chand = elem->channel_data;
+ channel_data *chand = (channel_data *)elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
@@ -962,7 +968,6 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server",
};
@@ -978,8 +983,8 @@ static void register_completion_queue(grpc_server *server,
GRPC_CQ_INTERNAL_REF(cq, "server");
n = server->cq_count++;
- server->cqs = gpr_realloc(server->cqs,
- server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs = (grpc_completion_queue **)gpr_realloc(
+ server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
server->cqs[n] = cq;
}
@@ -1004,7 +1009,7 @@ void grpc_server_register_completion_queue(grpc_server *server,
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- grpc_server *server = gpr_zalloc(sizeof(grpc_server));
+ grpc_server *server = (grpc_server *)gpr_zalloc(sizeof(grpc_server));
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
@@ -1055,7 +1060,7 @@ void *grpc_server_register_method(
flags);
return NULL;
}
- m = gpr_zalloc(sizeof(registered_method));
+ m = (registered_method *)gpr_zalloc(sizeof(registered_method));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -1067,7 +1072,7 @@ void *grpc_server_register_method(
static void start_listeners(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
for (listener *l = server->listeners; l; l = l->next) {
l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count);
}
@@ -1088,11 +1093,12 @@ void grpc_server_start(grpc_server *server) {
server->started = true;
server->pollset_count = 0;
- server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
- server->request_freelist_per_cq =
- gpr_malloc(sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq =
- gpr_malloc(sizeof(*server->requested_calls_per_cq) * server->cq_count);
+ server->pollsets =
+ (grpc_pollset **)gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
+ server->request_freelist_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ sizeof(*server->request_freelist_per_cq) * server->cq_count);
+ server->requested_calls_per_cq = (requested_call **)gpr_malloc(
+ sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
server->pollsets[server->pollset_count++] =
@@ -1103,22 +1109,24 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
- server->requested_calls_per_cq[i] =
- gpr_malloc((size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
+ server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
+ (size_t)server->max_requested_calls_per_cq *
+ sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
- request_matcher_init(&rm->request_matcher,
+ request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
server_ref(server);
server->starting = true;
- GRPC_CLOSURE_SCHED(&exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server,
- grpc_executor_scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ &exec_ctx,
+ GRPC_CLOSURE_CREATE(start_listeners, server,
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
+ GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1173,7 +1181,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
if (num_registered_methods > 0) {
slots = 2 * num_registered_methods;
alloc = sizeof(channel_registered_method) * slots;
- chand->registered_methods = gpr_zalloc(alloc);
+ chand->registered_methods = (channel_registered_method *)gpr_zalloc(alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
grpc_slice host;
bool has_host;
@@ -1234,7 +1242,7 @@ void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
grpc_error *error) {
- grpc_server *server = s;
+ grpc_server *server = (grpc_server *)s;
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
maybe_finish_shutdown(exec_ctx, server);
@@ -1261,14 +1269,15 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
- grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
- NULL, gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(
+ &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+ (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
- server->shutdown_tags =
- gpr_realloc(server->shutdown_tags,
- sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
+ server->shutdown_tags = (shutdown_tag *)gpr_realloc(
+ server->shutdown_tags,
+ sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
@@ -1351,7 +1360,7 @@ void grpc_server_add_listener(
grpc_pollset **pollsets, size_t pollset_count),
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
grpc_closure *on_done)) {
- listener *l = gpr_malloc(sizeof(listener));
+ listener *l = (listener *)gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
l->destroy = destroy;
@@ -1384,7 +1393,7 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
rm = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
- rm = &rc->data.registered.registered_method->request_matcher;
+ rm = &rc->data.registered.method->matcher;
break;
}
server->requested_calls_per_cq[cq_idx][request_id] = *rc;
@@ -1428,7 +1437,8 @@ grpc_call_error grpc_server_request_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1473,8 +1483,9 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_for_notification, void *tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = gpr_malloc(sizeof(*rc));
- registered_method *rm = rmp;
+ requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ registered_method *rm = (registered_method *)rmp;
+ GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1511,7 +1522,7 @@ grpc_call_error grpc_server_request_registered_call(
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->call = call;
- rc->data.registered.registered_method = rm;
+ rc->data.registered.method = rm;
rc->data.registered.deadline = deadline;
rc->initial_metadata = initial_metadata;
rc->data.registered.optional_payload = optional_payload;
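[editor's note] Across server.cc the deadline fields change from gpr_timespec to grpc_millis (a millisecond count), and the value is only converted back to a timespec at the API boundary via grpc_millis_to_timespec(..., GPR_CLOCK_MONOTONIC). A rough sketch of the arithmetic such a conversion involves, illustrative only and not the library's implementation (the real helper also accounts for the clock epoch and infinite-future sentinels):

    // hypothetical helper (C++), illustrative only
    #include <cstdint>
    struct timespec_parts { int64_t sec; int32_t nsec; };
    static timespec_parts millis_to_parts(int64_t millis) {
      timespec_parts p;
      p.sec = millis / 1000;                           // whole seconds
      p.nsec = (int32_t)((millis % 1000) * 1000000);   // remainder as nanoseconds
      return p;
    }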
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index dd5639d97a..375eab4a04 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -24,6 +24,10 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern const grpc_channel_filter grpc_server_top_filter;
/** Lightweight tracing of server channel state */
@@ -54,4 +58,8 @@ int grpc_server_has_open_connections(grpc_server *server);
void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets,
size_t *pollset_count);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_SERVER_H */
diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.cc
index 61209ae48d..81d07fae44 100644
--- a/src/core/lib/surface/validate_metadata.c
+++ b/src/core/lib/surface/validate_metadata.cc
@@ -26,6 +26,7 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/surface/validate_metadata.h"
static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
const char *err_desc) {
diff --git a/src/core/lib/surface/validate_metadata.h b/src/core/lib/surface/validate_metadata.h
index de869d89b4..afc8be6dfd 100644
--- a/src/core/lib/surface/validate_metadata.h
+++ b/src/core/lib/surface/validate_metadata.h
@@ -22,7 +22,15 @@
#include <grpc/slice.h>
#include "src/core/lib/iomgr/error.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice);
grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H */
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.cc
index 96c16105e7..6cb8e7e1a0 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.cc
@@ -21,6 +21,6 @@
#include <grpc/grpc.h>
-const char *grpc_version_string(void) { return "4.0.0-dev"; }
+const char *grpc_version_string(void) { return "5.0.0-dev"; }
-const char *grpc_g_stands_for(void) { return "gambit"; }
+const char *grpc_g_stands_for(void) { return "generous"; }
diff --git a/src/core/lib/transport/bdp_estimator.c b/src/core/lib/transport/bdp_estimator.c
deleted file mode 100644
index 8b57693413..0000000000
--- a/src/core/lib/transport/bdp_estimator.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/transport/bdp_estimator.h"
-
-#include <stdlib.h>
-
-#include <grpc/support/log.h>
-#include <grpc/support/useful.h>
-
-grpc_tracer_flag grpc_bdp_estimator_trace =
- GRPC_TRACER_INITIALIZER(false, "bdp_estimator");
-
-void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name) {
- estimator->estimate = 65536;
- estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED;
- estimator->name = name;
- estimator->bw_est = 0;
-}
-
-bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator,
- int64_t *estimate) {
- *estimate = estimator->estimate;
- return true;
-}
-
-bool grpc_bdp_estimator_get_bw(const grpc_bdp_estimator *estimator,
- double *bw) {
- *bw = estimator->bw_est;
- return true;
-}
-
-void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator,
- int64_t num_bytes) {
- estimator->accumulator += num_bytes;
-}
-
-bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator) {
- switch (estimator->ping_state) {
- case GRPC_BDP_PING_UNSCHEDULED:
- return true;
- case GRPC_BDP_PING_SCHEDULED:
- return false;
- case GRPC_BDP_PING_STARTED:
- return false;
- }
- GPR_UNREACHABLE_CODE(return false);
-}
-
-void grpc_bdp_estimator_schedule_ping(grpc_bdp_estimator *estimator) {
- if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64,
- estimator->name, estimator->accumulator, estimator->estimate);
- }
- GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_UNSCHEDULED);
- estimator->ping_state = GRPC_BDP_PING_SCHEDULED;
- estimator->accumulator = 0;
-}
-
-void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator) {
- if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64,
- estimator->name, estimator->accumulator, estimator->estimate);
- }
- GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_SCHEDULED);
- estimator->ping_state = GRPC_BDP_PING_STARTED;
- estimator->accumulator = 0;
- estimator->ping_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
-}
-
-void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) {
- gpr_timespec dt_ts =
- gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), estimator->ping_start_time);
- double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
- double bw = dt > 0 ? ((double)estimator->accumulator / dt) : 0;
- if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
- " dt=%lf bw=%lfMbs bw_est=%lfMbs",
- estimator->name, estimator->accumulator, estimator->estimate, dt,
- bw / 125000.0, estimator->bw_est / 125000.0);
- }
- GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_STARTED);
- if (estimator->accumulator > 2 * estimator->estimate / 3 &&
- bw > estimator->bw_est) {
- estimator->estimate =
- GPR_MAX(estimator->accumulator, estimator->estimate * 2);
- estimator->bw_est = bw;
- if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64,
- estimator->name, estimator->estimate);
- }
- }
- estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED;
- estimator->accumulator = 0;
-}
diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc
new file mode 100644
index 0000000000..f1597014b1
--- /dev/null
+++ b/src/core/lib/transport/bdp_estimator.cc
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/transport/bdp_estimator.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include <grpc/support/useful.h>
+
+grpc_tracer_flag grpc_bdp_estimator_trace =
+ GRPC_TRACER_INITIALIZER(false, "bdp_estimator");
+
+namespace grpc_core {
+
+BdpEstimator::BdpEstimator(const char *name)
+ : ping_state_(PingState::UNSCHEDULED),
+ accumulator_(0),
+ estimate_(65536),
+ ping_start_time_(gpr_time_0(GPR_CLOCK_MONOTONIC)),
+ inter_ping_delay_(100.0), // start at 100ms
+ stable_estimate_count_(0),
+ bw_est_(0),
+ name_(name) {}
+
+grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_);
+ double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
+ double bw = dt > 0 ? ((double)accumulator_ / dt) : 0;
+ int start_inter_ping_delay = inter_ping_delay_;
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
+ " dt=%lf bw=%lfMbs bw_est=%lfMbs",
+ name_, accumulator_, estimate_, dt, bw / 125000.0,
+ bw_est_ / 125000.0);
+ }
+ GPR_ASSERT(ping_state_ == PingState::STARTED);
+ if (accumulator_ > 2 * estimate_ / 3 && bw > bw_est_) {
+ estimate_ = GPR_MAX(accumulator_, estimate_ * 2);
+ bw_est_ = bw;
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, name_,
+ estimate_);
+ }
+ inter_ping_delay_ /= 2; // if the ping estimate changes,
+ // exponentially get faster at probing
+ } else if (inter_ping_delay_ < 10000) {
+ stable_estimate_count_++;
+ if (stable_estimate_count_ >= 2) {
+ inter_ping_delay_ +=
+ 100 +
+ (int)(rand() * 100.0 / RAND_MAX); // if the ping estimate is steady,
+ // slowly ramp down the probe time
+ }
+ }
+ if (start_inter_ping_delay != inter_ping_delay_) {
+ stable_estimate_count_ = 0;
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", name_,
+ inter_ping_delay_);
+ }
+ }
+ ping_state_ = PingState::UNSCHEDULED;
+ accumulator_ = 0;
+ return grpc_exec_ctx_now(exec_ctx) + inter_ping_delay_;
+}
+
+} // namespace grpc_core

diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h
index 1ef0dc99dd..750da39599 100644
--- a/src/core/lib/transport/bdp_estimator.h
+++ b/src/core/lib/transport/bdp_estimator.h
@@ -19,51 +19,76 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H
#define GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H
-#include <grpc/support/time.h>
+#include <grpc/support/port_platform.h>
+
+#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
-#include "src/core/lib/debug/trace.h"
-#define GRPC_BDP_SAMPLES 16
-#define GRPC_BDP_MIN_SAMPLES_FOR_ESTIMATE 3
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
extern grpc_tracer_flag grpc_bdp_estimator_trace;
-typedef enum {
- GRPC_BDP_PING_UNSCHEDULED,
- GRPC_BDP_PING_SCHEDULED,
- GRPC_BDP_PING_STARTED
-} grpc_bdp_estimator_ping_state;
-
-typedef struct grpc_bdp_estimator {
- grpc_bdp_estimator_ping_state ping_state;
- int64_t accumulator;
- int64_t estimate;
- gpr_timespec ping_start_time;
- double bw_est;
- const char *name;
-} grpc_bdp_estimator;
-
-void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name);
-
-// Returns true if a reasonable estimate could be obtained
-bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator,
- int64_t *estimate);
-// Tracks new bytes read.
-bool grpc_bdp_estimator_get_bw(const grpc_bdp_estimator *estimator, double *bw);
-// Returns true if the user should schedule a ping
-void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator,
- int64_t num_bytes);
-// Returns true if the user should schedule a ping
-bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator);
-// Schedule a ping: call in response to receiving a true from
-// grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a
-// transport (but not necessarily started)
-void grpc_bdp_estimator_schedule_ping(grpc_bdp_estimator *estimator);
-// Start a ping: call after calling grpc_bdp_estimator_schedule_ping and once
-// the ping is on the wire
-void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator);
-// Completes a previously started ping
-void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator);
+namespace grpc_core {
+
+class BdpEstimator {
+ public:
+ explicit BdpEstimator(const char *name);
+ ~BdpEstimator() {}
+
+ int64_t EstimateBdp() const { return estimate_; }
+ double EstimateBandwidth() const { return bw_est_; }
+
+ void AddIncomingBytes(int64_t num_bytes) { accumulator_ += num_bytes; }
+
+ // Schedule a ping: call in response to receiving a true from
+ // grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a
+ // transport (but not necessarily started)
+ void SchedulePing() {
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
+ accumulator_, estimate_);
+ }
+ GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED);
+ ping_state_ = PingState::SCHEDULED;
+ accumulator_ = 0;
+ }
+
+  // Start a ping: call after calling grpc_bdp_estimator_schedule_ping and
+  // once the ping is on the wire
+ void StartPing() {
+ if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
+ accumulator_, estimate_);
+ }
+ GPR_ASSERT(ping_state_ == PingState::SCHEDULED);
+ ping_state_ = PingState::STARTED;
+ accumulator_ = 0;
+ ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);
+ }
+
+ // Completes a previously started ping, returns when to schedule the next one
+ grpc_millis CompletePing(grpc_exec_ctx *exec_ctx);
+
+ private:
+ enum class PingState { UNSCHEDULED, SCHEDULED, STARTED };
+
+ PingState ping_state_;
+ int64_t accumulator_;
+ int64_t estimate_;
+ // when was the current ping started?
+ gpr_timespec ping_start_time_;
+ int inter_ping_delay_;
+ int stable_estimate_count_;
+ double bw_est_;
+ const char *name_;
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H */
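[editor's note] The old grpc_bdp_estimator struct-and-functions API is replaced by the grpc_core::BdpEstimator class above. A minimal usage sketch of the state machine it enforces (UNSCHEDULED -> SCHEDULED -> STARTED -> UNSCHEDULED), assuming a valid grpc_exec_ctx supplied by the surrounding transport code; in the real tree the HTTP/2 transport is the caller:

    // sketch only, not from this patch
    grpc_core::BdpEstimator est("tx");
    est.AddIncomingBytes(16384);   // account for bytes read off the wire
    est.SchedulePing();            // transport has queued a BDP ping
    est.StartPing();               // the ping is now on the wire
    // ... later, when the ping ack arrives:
    grpc_millis next_ping = est.CompletePing(exec_ctx);  // also says when to probe next
    int64_t bdp = est.EstimateBdp();   // current bandwidth-delay-product estimate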
diff --git a/src/core/lib/transport/byte_stream.c b/src/core/lib/transport/byte_stream.cc
index 08f61629a9..08f61629a9 100644
--- a/src/core/lib/transport/byte_stream.c
+++ b/src/core/lib/transport/byte_stream.cc
diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h
index be2a35213e..c1d8ee543f 100644
--- a/src/core/lib/transport/byte_stream.h
+++ b/src/core/lib/transport/byte_stream.h
@@ -28,6 +28,10 @@
/** Mask of all valid internal flags. */
#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_byte_stream grpc_byte_stream;
typedef struct {
@@ -135,4 +139,8 @@ void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream,
// Resets the byte stream to the start of the underlying stream.
void grpc_caching_byte_stream_reset(grpc_caching_byte_stream *stream);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H */
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.cc
index 73a9178ae2..652c26cf0a 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.cc
@@ -29,8 +29,6 @@ grpc_tracer_flag grpc_connectivity_state_trace =
const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
switch (state) {
- case GRPC_CHANNEL_INIT:
- return "INIT";
case GRPC_CHANNEL_IDLE:
return "IDLE";
case GRPC_CHANNEL_CONNECTING:
@@ -148,7 +146,8 @@ bool grpc_connectivity_state_notify_on_state_change(
GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
- grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
+ grpc_connectivity_state_watcher *w =
+ (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
w->current = current;
w->notify = notify;
w->next = tracker->watchers;
@@ -173,7 +172,6 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_name(state), reason, error, error_string);
}
switch (state) {
- case GRPC_CHANNEL_INIT:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index 2fece6cc21..c0ba188148 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -23,6 +23,10 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/exec_ctx.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_connectivity_state_watcher {
/** we keep watchers in a linked list */
struct grpc_connectivity_state_watcher *next;
@@ -84,4 +88,8 @@ bool grpc_connectivity_state_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
grpc_connectivity_state *current, grpc_closure *notify);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_CONNECTIVITY_STATE_H */
diff --git a/src/core/lib/transport/error_utils.c b/src/core/lib/transport/error_utils.cc
index 5e3920b627..2e3b61b7ab 100644
--- a/src/core/lib/transport/error_utils.c
+++ b/src/core/lib/transport/error_utils.cc
@@ -39,8 +39,9 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error,
return NULL;
}
-void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, grpc_slice *slice,
+void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
+ grpc_millis deadline, grpc_status_code *code,
+ grpc_slice *slice,
grpc_http2_error_code *http_error) {
// Start with the parent error and recurse through the tree of children
// until we find the first one that has a status code.
@@ -63,8 +64,8 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
status = (grpc_status_code)integer;
} else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR,
&integer)) {
- status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer,
- deadline);
+ status = grpc_http2_error_to_grpc_status(
+ exec_ctx, (grpc_http2_error_code)integer, deadline);
}
if (code != NULL) *code = status;
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
index e530884215..b4f9df4bf1 100644
--- a/src/core/lib/transport/error_utils.h
+++ b/src/core/lib/transport/error_utils.h
@@ -20,16 +20,22 @@
#define GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/http2_errors.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/// A utility function to get the status code and message to be returned
/// to the application. If not set in the top-level message, looks
/// through child errors until it finds the first one with these attributes.
/// All attributes are pulled from the same child error. If any of the
/// attributes (code, msg, http_status) are unneeded, they can be passed as
/// NULL.
-void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, grpc_slice *slice,
+void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
+ grpc_millis deadline, grpc_status_code *code,
+ grpc_slice *slice,
grpc_http2_error_code *http_status);
/// A utility function to check whether there is a clear status code that
@@ -38,4 +44,8 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
/// GRPC_ERROR_CANCELLED
bool grpc_error_has_clear_grpc_status(grpc_error *error);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H */
diff --git a/src/core/lib/transport/metadata.c b/src/core/lib/transport/metadata.cc
index 2fea366072..2392f26c0b 100644
--- a/src/core/lib/transport/metadata.c
+++ b/src/core/lib/transport/metadata.cc
@@ -19,6 +19,7 @@
#include "src/core/lib/transport/metadata.h"
#include <assert.h>
+#include <inttypes.h>
#include <stddef.h>
#include <string.h>
@@ -117,7 +118,8 @@ void grpc_mdctx_global_init(void) {
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->elems = gpr_zalloc(sizeof(*shard->elems) * shard->capacity);
+ shard->elems = (interned_metadata **)gpr_zalloc(sizeof(*shard->elems) *
+ shard->capacity);
}
}
@@ -204,7 +206,8 @@ static void grow_mdtab(mdtab_shard *shard) {
GPR_TIMER_BEGIN("grow_mdtab", 0);
- mdtab = gpr_zalloc(sizeof(interned_metadata *) * capacity);
+ mdtab =
+ (interned_metadata **)gpr_zalloc(sizeof(interned_metadata *) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
@@ -243,7 +246,8 @@ grpc_mdelem grpc_mdelem_create(
GRPC_MDELEM_STORAGE_EXTERNAL);
}
- allocated_metadata *allocated = gpr_malloc(sizeof(*allocated));
+ allocated_metadata *allocated =
+ (allocated_metadata *)gpr_malloc(sizeof(*allocated));
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
@@ -292,7 +296,7 @@ grpc_mdelem grpc_mdelem_create(
}
/* not found: create a new pair */
- md = gpr_malloc(sizeof(interned_metadata));
+ md = (interned_metadata *)gpr_malloc(sizeof(interned_metadata));
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
@@ -348,11 +352,14 @@ static size_t get_base64_encoded_size(size_t raw_length) {
return raw_length / 3 * 4 + tail_xtra[raw_length % 3];
}
-size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem) {
+size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem,
+ bool use_true_binary_metadata) {
size_t overhead_and_key = 32 + GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
size_t value_len = GRPC_SLICE_LENGTH(GRPC_MDVALUE(elem));
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
- return overhead_and_key + get_base64_encoded_size(value_len);
+ return overhead_and_key + (use_true_binary_metadata
+ ? value_len + 1
+ : get_base64_encoded_size(value_len));
} else {
return overhead_and_key + value_len;
}
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 974469e436..3f1032ab8a 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -102,8 +102,13 @@ struct grpc_mdelem {
((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3))
#define GRPC_MDELEM_STORAGE(md) \
((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
+#ifdef __cplusplus
+#define GRPC_MAKE_MDELEM(data, storage) \
+ (grpc_mdelem{((uintptr_t)(data)) | ((uintptr_t)storage)})
+#else
#define GRPC_MAKE_MDELEM(data, storage) \
((grpc_mdelem){((uintptr_t)(data)) | ((uintptr_t)storage)})
+#endif
#define GRPC_MDELEM_IS_INTERNED(md) \
((grpc_mdelem_data_storage)((md).payload & \
(uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
@@ -127,7 +132,8 @@ grpc_mdelem grpc_mdelem_create(
bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
-size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem);
+size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem,
+ bool use_true_binary_metadata);
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */
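[editor's note] A worked example of the new HPACK size accounting above, with illustrative numbers: for a binary header with a 13-byte key and a 12-byte value, the legacy base64 path charges 32 + 13 + (12 / 3 * 4) = 61 bytes against the HPACK table, while the true-binary path charges 32 + 13 + (12 + 1) = 58 bytes, because the value is counted at its raw length plus one instead of its base64-expanded length.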
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.cc
index a077052561..2df9c9189c 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.cc
@@ -74,7 +74,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
memset(batch, 0, sizeof(*batch));
- batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ batch->deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
@@ -233,32 +233,32 @@ void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem *storage,
grpc_slice value) {
- grpc_mdelem old = storage->md;
- grpc_mdelem new = grpc_mdelem_from_slices(
- exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old)), value);
- storage->md = new;
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ grpc_mdelem old_mdelem = storage->md;
+ grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
+ exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
+ storage->md = new_mdelem;
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
}
grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
grpc_metadata_batch *batch,
grpc_linked_mdelem *storage,
- grpc_mdelem new) {
+ grpc_mdelem new_mdelem) {
assert_valid_callouts(exec_ctx, batch);
grpc_error *error = GRPC_ERROR_NONE;
- grpc_mdelem old = storage->md;
- if (!grpc_slice_eq(GRPC_MDKEY(new), GRPC_MDKEY(old))) {
+ grpc_mdelem old_mdelem = storage->md;
+ if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
maybe_unlink_callout(batch, storage);
- storage->md = new;
+ storage->md = new_mdelem;
error = maybe_link_callout(batch, storage);
if (error != GRPC_ERROR_NONE) {
unlink_storage(&batch->list, storage);
GRPC_MDELEM_UNREF(exec_ctx, storage->md);
}
} else {
- storage->md = new;
+ storage->md = new_mdelem;
}
- GRPC_MDELEM_UNREF(exec_ctx, old);
+ GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
assert_valid_callouts(exec_ctx, batch);
return error;
}
@@ -270,9 +270,7 @@ void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx,
}
bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
- return batch->list.head == NULL &&
- gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type),
- batch->deadline) == 0;
+ return batch->list.head == NULL && batch->deadline == GRPC_MILLIS_INF_FUTURE;
}
size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) {
@@ -302,12 +300,12 @@ grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
grpc_error *error = GRPC_ERROR_NONE;
while (l) {
grpc_linked_mdelem *next = l->next;
- grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
- add_error(&error, new.error, composite_error_string);
- if (GRPC_MDISNULL(new.md)) {
+ grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
+ add_error(&error, new_mdelem.error, composite_error_string);
+ if (GRPC_MDISNULL(new_mdelem.md)) {
grpc_metadata_batch_remove(exec_ctx, batch, l);
- } else if (new.md.payload != l->md.payload) {
- grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
+ } else if (new_mdelem.md.payload != l->md.payload) {
+ grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md);
}
l = next;
}
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 57d298c75c..a2b4b92385 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -51,9 +51,9 @@ typedef struct grpc_metadata_batch {
grpc_mdelem_list list;
grpc_metadata_batch_callouts idx;
/** Used to calculate grpc-timeout at the point of sending,
- or gpr_inf_future if this batch does not need to send a
+ or GRPC_MILLIS_INF_FUTURE if this batch does not need to send a
grpc-timeout */
- gpr_timespec deadline;
+ grpc_millis deadline;
} grpc_metadata_batch;
void grpc_metadata_batch_init(grpc_metadata_batch *batch);
@@ -125,10 +125,11 @@ typedef struct {
} grpc_filtered_mdelem;
#define GRPC_FILTERED_ERROR(error) \
- ((grpc_filtered_mdelem){(error), GRPC_MDNULL})
-#define GRPC_FILTERED_MDELEM(md) ((grpc_filtered_mdelem){GRPC_ERROR_NONE, (md)})
+ { (error), GRPC_MDNULL }
+#define GRPC_FILTERED_MDELEM(md) \
+ { GRPC_ERROR_NONE, (md) }
#define GRPC_FILTERED_REMOVE() \
- ((grpc_filtered_mdelem){GRPC_ERROR_NONE, GRPC_MDNULL})
+ { GRPC_ERROR_NONE, GRPC_MDNULL }
typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)(
grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem);
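[editor's note] The GRPC_FILTERED_* macros lose their compound-literal form ((grpc_filtered_mdelem){...}) because compound literals are valid C but not C++; they now expand to bare brace initializers. A sketch of a filter callback using them after this change (hypothetical callback, not from this patch):

    // sketch only
    static grpc_filtered_mdelem keep_everything(grpc_exec_ctx *exec_ctx,
                                                void *user_data, grpc_mdelem md) {
      (void)exec_ctx;
      (void)user_data;
      return GRPC_FILTERED_MDELEM(md);  // expands to { GRPC_ERROR_NONE, (md) }
    }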
diff --git a/src/core/lib/transport/pid_controller.c b/src/core/lib/transport/pid_controller.c
deleted file mode 100644
index 4b304f17b2..0000000000
--- a/src/core/lib/transport/pid_controller.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/transport/pid_controller.h"
-#include <grpc/support/useful.h>
-
-void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
- grpc_pid_controller_args args) {
- pid_controller->args = args;
- pid_controller->last_control_value = args.initial_control_value;
- grpc_pid_controller_reset(pid_controller);
-}
-
-void grpc_pid_controller_reset(grpc_pid_controller *pid_controller) {
- pid_controller->last_error = 0.0;
- pid_controller->last_dc_dt = 0.0;
- pid_controller->error_integral = 0.0;
-}
-
-double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
- double error, double dt) {
- if (dt == 0) return pid_controller->last_control_value;
- /* integrate error using the trapezoid rule */
- pid_controller->error_integral +=
- dt * (pid_controller->last_error + error) * 0.5;
- pid_controller->error_integral = GPR_CLAMP(
- pid_controller->error_integral, -pid_controller->args.integral_range,
- pid_controller->args.integral_range);
- double diff_error = (error - pid_controller->last_error) / dt;
- /* calculate derivative of control value vs time */
- double dc_dt = pid_controller->args.gain_p * error +
- pid_controller->args.gain_i * pid_controller->error_integral +
- pid_controller->args.gain_d * diff_error;
- /* and perform trapezoidal integration */
- double new_control_value = pid_controller->last_control_value +
- dt * (pid_controller->last_dc_dt + dc_dt) * 0.5;
- new_control_value =
- GPR_CLAMP(new_control_value, pid_controller->args.min_control_value,
- pid_controller->args.max_control_value);
- pid_controller->last_error = error;
- pid_controller->last_dc_dt = dc_dt;
- pid_controller->last_control_value = new_control_value;
- return new_control_value;
-}
-
-double grpc_pid_controller_last(grpc_pid_controller *pid_controller) {
- return pid_controller->last_control_value;
-}
diff --git a/src/core/lib/transport/pid_controller.cc b/src/core/lib/transport/pid_controller.cc
new file mode 100644
index 0000000000..9f7750d693
--- /dev/null
+++ b/src/core/lib/transport/pid_controller.cc
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/transport/pid_controller.h"
+#include <grpc/support/useful.h>
+
+namespace grpc_core {
+
+PidController::PidController(const Args &args)
+ : last_control_value_(args.initial_control_value()), args_(args) {}
+
+double PidController::Update(double error, double dt) {
+ if (dt <= 0) return last_control_value_;
+ /* integrate error using the trapezoid rule */
+ error_integral_ += dt * (last_error_ + error) * 0.5;
+ error_integral_ = GPR_CLAMP(error_integral_, -args_.integral_range(),
+ args_.integral_range());
+ double diff_error = (error - last_error_) / dt;
+ /* calculate derivative of control value vs time */
+ double dc_dt = args_.gain_p() * error + args_.gain_i() * error_integral_ +
+ args_.gain_d() * diff_error;
+ /* and perform trapezoidal integration */
+ double new_control_value =
+ last_control_value_ + dt * (last_dc_dt_ + dc_dt) * 0.5;
+ new_control_value = GPR_CLAMP(new_control_value, args_.min_control_value(),
+ args_.max_control_value());
+ last_error_ = error;
+ last_dc_dt_ = dc_dt;
+ last_control_value_ = new_control_value;
+ return new_control_value;
+}
+
+} // namespace grpc_core
diff --git a/src/core/lib/transport/pid_controller.h b/src/core/lib/transport/pid_controller.h
index 9352b2643f..87e59a1a90 100644
--- a/src/core/lib/transport/pid_controller.h
+++ b/src/core/lib/transport/pid_controller.h
@@ -19,6 +19,8 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
#define GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
+#include <limits>
+
/* \file Simple PID controller.
Implements a proportional-integral-derivative controller.
Used when we want to iteratively control a variable to converge some other
@@ -26,37 +28,87 @@
Gains can be set to adjust sensitivity to current error (p), the integral
of error (i), and the derivative of error (d). */
-typedef struct {
- double gain_p;
- double gain_i;
- double gain_d;
- double initial_control_value;
- double min_control_value;
- double max_control_value;
- double integral_range;
-} grpc_pid_controller_args;
-
-typedef struct {
- double last_error;
- double error_integral;
- double last_control_value;
- double last_dc_dt;
- grpc_pid_controller_args args;
-} grpc_pid_controller;
-
-/** Initialize the controller */
-void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
- grpc_pid_controller_args args);
-
-/** Reset the controller: useful when things have changed significantly */
-void grpc_pid_controller_reset(grpc_pid_controller *pid_controller);
-
-/** Update the controller: given a current error estimate, and the time since
- the last update, returns a new control value */
-double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
- double error, double dt);
-
-/** Returns the last control value calculated */
-double grpc_pid_controller_last(grpc_pid_controller *pid_controller);
+namespace grpc_core {
+
+class PidController {
+ public:
+ class Args {
+ public:
+ double gain_p() const { return gain_p_; }
+ double gain_i() const { return gain_i_; }
+ double gain_d() const { return gain_d_; }
+ double initial_control_value() const { return initial_control_value_; }
+ double min_control_value() const { return min_control_value_; }
+ double max_control_value() const { return max_control_value_; }
+ double integral_range() const { return integral_range_; }
+
+ Args& set_gain_p(double gain_p) {
+ gain_p_ = gain_p;
+ return *this;
+ }
+ Args& set_gain_i(double gain_i) {
+ gain_i_ = gain_i;
+ return *this;
+ }
+ Args& set_gain_d(double gain_d) {
+ gain_d_ = gain_d;
+ return *this;
+ }
+ Args& set_initial_control_value(double initial_control_value) {
+ initial_control_value_ = initial_control_value;
+ return *this;
+ }
+ Args& set_min_control_value(double min_control_value) {
+ min_control_value_ = min_control_value;
+ return *this;
+ }
+ Args& set_max_control_value(double max_control_value) {
+ max_control_value_ = max_control_value;
+ return *this;
+ }
+ Args& set_integral_range(double integral_range) {
+ integral_range_ = integral_range;
+ return *this;
+ }
+
+ private:
+ double gain_p_ = 0.0;
+ double gain_i_ = 0.0;
+ double gain_d_ = 0.0;
+ double initial_control_value_ = 0.0;
+ double min_control_value_ = std::numeric_limits<double>::min();
+ double max_control_value_ = std::numeric_limits<double>::max();
+ double integral_range_ = std::numeric_limits<double>::max();
+ };
+
+ explicit PidController(const Args& args);
+
+ /// Reset the controller internal state: useful when the environment has
+ /// changed significantly
+ void Reset() {
+ last_error_ = 0.0;
+ last_dc_dt_ = 0.0;
+ error_integral_ = 0.0;
+ }
+
+ /// Update the controller: given a current error estimate, and the time since
+ /// the last update, returns a new control value
+ double Update(double error, double dt);
+
+ /// Returns the last control value calculated
+ double last_control_value() const { return last_control_value_; }
+
+ /// Returns the current error integral (mostly for testing)
+ double error_integral() const { return error_integral_; }
+
+ private:
+ double last_error_ = 0.0;
+ double error_integral_ = 0.0;
+ double last_control_value_;
+ double last_dc_dt_ = 0.0;
+ const Args args_;
+};
+
+} // namespace grpc_core
#endif /* GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H */
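[editor's note] A minimal sketch of driving the new PidController class through its fluent Args builder; the gain and range values below are illustrative, not tuned defaults from this patch:

    // sketch only
    grpc_core::PidController controller(
        grpc_core::PidController::Args()
            .set_gain_p(4.0)
            .set_gain_i(8.0)
            .set_gain_d(0.0)
            .set_initial_control_value(1.0)
            .set_min_control_value(0.0)
            .set_max_control_value(10.0)
            .set_integral_range(100.0));
    double setpoint = 5.0, measured = 3.0;
    double control = controller.Update(setpoint - measured, /*dt=*/0.1);
    // control now reflects one trapezoidal-integration step toward the setpoint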
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.cc
index 0379d0010d..070a13a2b4 100644
--- a/src/core/lib/transport/service_config.c
+++ b/src/core/lib/transport/service_config.cc
@@ -59,7 +59,8 @@ struct grpc_service_config {
};
grpc_service_config* grpc_service_config_create(const char* json_string) {
- grpc_service_config* service_config = gpr_malloc(sizeof(*service_config));
+ grpc_service_config* service_config =
+ (grpc_service_config*)gpr_malloc(sizeof(*service_config));
service_config->json_string = gpr_strdup(json_string);
service_config->json_tree =
grpc_json_parse_string(service_config->json_string);
@@ -198,7 +199,8 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
num_entries += count_names_in_method_config_json(method);
}
// Populate method config table entries.
- entries = gpr_malloc(num_entries * sizeof(grpc_slice_hash_table_entry));
+ entries = (grpc_slice_hash_table_entry*)gpr_malloc(
+ num_entries * sizeof(grpc_slice_hash_table_entry));
size_t idx = 0;
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
@@ -230,7 +232,7 @@ void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
- char* buf = gpr_malloc(len + 2); // '*' and NUL
+ char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
index 84110abc36..9c43093627 100644
--- a/src/core/lib/transport/service_config.h
+++ b/src/core/lib/transport/service_config.h
@@ -22,6 +22,10 @@
#include "src/core/lib/json/json.h"
#include "src/core/lib/slice/slice_hash_table.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_service_config grpc_service_config;
grpc_service_config* grpc_service_config_create(const char* json_string);
@@ -59,4 +63,8 @@ void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
const grpc_slice_hash_table* table,
grpc_slice path);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */
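The extern "C" guards added above keep these declarations on C linkage even when the header is pulled into the now-C++ implementation, so C and C++ translation units agree on symbol names. The same pattern in isolation, using a hypothetical thing_create() that is not part of gRPC:

/* thing.h -- consumable from both C and C++ translation units */
#ifdef __cplusplus
extern "C" {
#endif

typedef struct thing thing;
thing* thing_create(void);    /* hypothetical API, for illustration only */
void thing_destroy(thing* t);

#ifdef __cplusplus
}
#endif

A definition compiled as C++ that sees these declarations emits unmangled symbols, so existing C callers keep linking unchanged.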
diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c
deleted file mode 100644
index b20d94aeac..0000000000
--- a/src/core/lib/transport/static_metadata.c
+++ /dev/null
@@ -1,854 +0,0 @@
-/*
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * WARNING: Auto-generated code.
- *
- * To make changes to this file, change
- * tools/codegen/core/gen_static_metadata.py, and then re-run it.
- *
- * See metadata.h for an explanation of the interface here, and metadata.c for
- * an explanation of what's going on.
- */
-
-#include "src/core/lib/transport/static_metadata.h"
-
-#include "src/core/lib/slice/slice_internal.h"
-
-static uint8_t g_bytes[] = {
- 58, 112, 97, 116, 104, 58, 109, 101, 116, 104, 111, 100, 58, 115, 116,
- 97, 116, 117, 115, 58, 97, 117, 116, 104, 111, 114, 105, 116, 121, 58,
- 115, 99, 104, 101, 109, 101, 116, 101, 103, 114, 112, 99, 45, 109, 101,
- 115, 115, 97, 103, 101, 103, 114, 112, 99, 45, 115, 116, 97, 116, 117,
- 115, 103, 114, 112, 99, 45, 112, 97, 121, 108, 111, 97, 100, 45, 98,
- 105, 110, 103, 114, 112, 99, 45, 101, 110, 99, 111, 100, 105, 110, 103,
- 103, 114, 112, 99, 45, 97, 99, 99, 101, 112, 116, 45, 101, 110, 99,
- 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 115, 101, 114, 118, 101,
- 114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99,
- 45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116,
- 114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116,
- 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
- 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110,
- 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101,
- 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114,
- 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101,
- 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99,
- 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115,
- 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45,
- 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
- 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
- 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
- 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
- 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
- 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
- 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
- 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
- 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
- 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
- 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
- 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
- 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
- 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
- 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
- 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
- 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
- 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102,
- 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
- 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
- 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
- 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
- 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
- 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
- 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
- 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
- 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99,
- 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111,
- 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99,
- 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111,
- 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101,
- 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102,
- 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105,
- 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101,
- 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105,
- 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105,
- 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101,
- 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110,
- 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111,
- 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116,
- 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45,
- 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97,
- 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101,
- 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101,
- 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115,
- 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116,
- 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102,
- 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121,
- 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105,
- 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
- 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103,
- 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112,
- 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
- 101, 44, 103, 122, 105, 112};
-
-static void static_ref(void *unused) {}
-static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
-static const grpc_slice_refcount_vtable static_sub_vtable = {
- static_ref, static_unref, grpc_slice_default_eq_impl,
- grpc_slice_default_hash_impl};
-const grpc_slice_refcount_vtable grpc_static_metadata_vtable = {
- static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash};
-static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable,
- &static_sub_refcnt};
-grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
- {&grpc_static_metadata_vtable, &static_sub_refcnt},
-};
-
-const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
- {.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[6],
- .data.refcounted = {g_bytes + 38, 12}},
- {.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[8],
- .data.refcounted = {g_bytes + 61, 16}},
- {.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[11],
- .data.refcounted = {g_bytes + 110, 21}},
- {.refcount = &grpc_static_metadata_refcounts[12],
- .data.refcounted = {g_bytes + 131, 13}},
- {.refcount = &grpc_static_metadata_refcounts[13],
- .data.refcounted = {g_bytes + 144, 14}},
- {.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[17],
- .data.refcounted = {g_bytes + 201, 30}},
- {.refcount = &grpc_static_metadata_refcounts[18],
- .data.refcounted = {g_bytes + 231, 37}},
- {.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 268, 10}},
- {.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 278, 4}},
- {.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 282, 8}},
- {.refcount = &grpc_static_metadata_refcounts[22],
- .data.refcounted = {g_bytes + 290, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}},
- {.refcount = &grpc_static_metadata_refcounts[24],
- .data.refcounted = {g_bytes + 302, 19}},
- {.refcount = &grpc_static_metadata_refcounts[25],
- .data.refcounted = {g_bytes + 321, 12}},
- {.refcount = &grpc_static_metadata_refcounts[26],
- .data.refcounted = {g_bytes + 333, 30}},
- {.refcount = &grpc_static_metadata_refcounts[27],
- .data.refcounted = {g_bytes + 363, 31}},
- {.refcount = &grpc_static_metadata_refcounts[28],
- .data.refcounted = {g_bytes + 394, 36}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 430, 1}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 431, 1}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 432, 1}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 452, 8}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 460, 16}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 476, 4}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 480, 3}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 483, 3}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 486, 4}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 490, 5}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 495, 4}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 499, 3}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 502, 3}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 505, 1}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 506, 11}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 517, 3}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 520, 3}},
- {.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 523, 3}},
- {.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 526, 3}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 529, 3}},
- {.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 532, 14}},
- {.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 546, 13}},
- {.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 559, 15}},
- {.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 574, 13}},
- {.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 587, 6}},
- {.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 593, 27}},
- {.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 620, 3}},
- {.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 623, 5}},
- {.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 628, 13}},
- {.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 641, 13}},
- {.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 654, 19}},
- {.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 673, 16}},
- {.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 689, 14}},
- {.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 703, 16}},
- {.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 719, 13}},
- {.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 732, 6}},
- {.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 738, 4}},
- {.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 742, 4}},
- {.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 746, 6}},
- {.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 752, 7}},
- {.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 759, 4}},
- {.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 763, 8}},
- {.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 771, 17}},
- {.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 788, 13}},
- {.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 801, 8}},
- {.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 809, 19}},
- {.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 828, 13}},
- {.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 841, 11}},
- {.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 852, 4}},
- {.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 856, 8}},
- {.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 864, 12}},
- {.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 876, 18}},
- {.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 894, 19}},
- {.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 913, 5}},
- {.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 918, 7}},
- {.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 925, 7}},
- {.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 932, 11}},
- {.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 943, 6}},
- {.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 949, 10}},
- {.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 959, 25}},
- {.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 984, 17}},
- {.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 1001, 4}},
- {.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 1005, 3}},
- {.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 1008, 16}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1024, 16}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1053, 12}},
- {.refcount = &grpc_static_metadata_refcounts[99],
- .data.refcounted = {g_bytes + 1065, 21}},
-};
-
-uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
-
-static const int8_t elems_r[] = {
- 11, 9, -3, 0, 10, 27, -74, 28, 0, 14, -7, 0, 0, 0, 18, 8, -2,
- 0, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, -50, 0, -33, -55, -56, -57, -58, -57, 0, 40, 39, 38, 37, 36, 35, 34,
- 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 22,
- 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 0};
-static uint32_t elems_phash(uint32_t i) {
- i -= 45;
- uint32_t x = i % 98;
- uint32_t y = i / 98;
- uint32_t h = x;
- if (y < GPR_ARRAY_SIZE(elems_r)) {
- uint32_t delta = (uint32_t)elems_r[y];
- h += delta;
- }
- return h;
-}
-
-static const uint16_t elem_keys[] = {
- 1032, 1033, 1034, 247, 248, 249, 250, 251, 1623, 143, 144, 45,
- 46, 440, 441, 442, 1523, 1632, 1633, 932, 933, 934, 729, 730,
- 1423, 1532, 1533, 535, 731, 1923, 2023, 2123, 5223, 5523, 5623, 5723,
- 5823, 1436, 1653, 5923, 6023, 6123, 6223, 6323, 6423, 6523, 6623, 6723,
- 6823, 6923, 7023, 7123, 7223, 5423, 7323, 7423, 7523, 7623, 7723, 7823,
- 7923, 8023, 8123, 8223, 1096, 1097, 1098, 1099, 8323, 8423, 8523, 8623,
- 8723, 8823, 8923, 9023, 9123, 9223, 9323, 323, 9423, 9523, 1697, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 137, 238, 239, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0};
-static const uint8_t elem_idxs[] = {
- 76, 79, 77, 19, 20, 21, 22, 23, 25, 15, 16, 17, 18, 11,
- 12, 13, 38, 83, 84, 3, 4, 5, 0, 1, 43, 36, 37, 6,
- 2, 72, 50, 57, 24, 28, 29, 30, 31, 7, 26, 32, 33, 34,
- 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 27, 51, 52,
- 53, 54, 55, 56, 58, 59, 60, 61, 78, 80, 81, 82, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 73, 14, 74, 75, 85, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 8, 9, 10};
-
-grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
- if (a == -1 || b == -1) return GRPC_MDNULL;
- uint32_t k = (uint32_t)(a * 100 + b);
- uint32_t h = elems_phash(k);
- return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
- elem_idxs[h] != 255
- ? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]],
- GRPC_MDELEM_STORAGE_STATIC)
- : GRPC_MDNULL;
-}
-
-grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[29],
- .data.refcounted = {g_bytes + 430, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[30],
- .data.refcounted = {g_bytes + 431, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[7],
- .data.refcounted = {g_bytes + 50, 11}},
- {.refcount = &grpc_static_metadata_refcounts[31],
- .data.refcounted = {g_bytes + 432, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[9],
- .data.refcounted = {g_bytes + 77, 13}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[5],
- .data.refcounted = {g_bytes + 36, 2}},
- {.refcount = &grpc_static_metadata_refcounts[35],
- .data.refcounted = {g_bytes + 452, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[36],
- .data.refcounted = {g_bytes + 460, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[37],
- .data.refcounted = {g_bytes + 476, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[38],
- .data.refcounted = {g_bytes + 480, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[39],
- .data.refcounted = {g_bytes + 483, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[40],
- .data.refcounted = {g_bytes + 486, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[41],
- .data.refcounted = {g_bytes + 490, 5}}},
- {{.refcount = &grpc_static_metadata_refcounts[4],
- .data.refcounted = {g_bytes + 29, 7}},
- {.refcount = &grpc_static_metadata_refcounts[42],
- .data.refcounted = {g_bytes + 495, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[3],
- .data.refcounted = {g_bytes + 19, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[43],
- .data.refcounted = {g_bytes + 499, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[1],
- .data.refcounted = {g_bytes + 5, 7}},
- {.refcount = &grpc_static_metadata_refcounts[44],
- .data.refcounted = {g_bytes + 502, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[45],
- .data.refcounted = {g_bytes + 505, 1}}},
- {{.refcount = &grpc_static_metadata_refcounts[0],
- .data.refcounted = {g_bytes + 0, 5}},
- {.refcount = &grpc_static_metadata_refcounts[46],
- .data.refcounted = {g_bytes + 506, 11}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[47],
- .data.refcounted = {g_bytes + 517, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[48],
- .data.refcounted = {g_bytes + 520, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[49],
- .data.refcounted = {g_bytes + 523, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[50],
- .data.refcounted = {g_bytes + 526, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[2],
- .data.refcounted = {g_bytes + 12, 7}},
- {.refcount = &grpc_static_metadata_refcounts[51],
- .data.refcounted = {g_bytes + 529, 3}}},
- {{.refcount = &grpc_static_metadata_refcounts[52],
- .data.refcounted = {g_bytes + 532, 14}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[53],
- .data.refcounted = {g_bytes + 546, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[54],
- .data.refcounted = {g_bytes + 559, 15}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[55],
- .data.refcounted = {g_bytes + 574, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[56],
- .data.refcounted = {g_bytes + 587, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[57],
- .data.refcounted = {g_bytes + 593, 27}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[58],
- .data.refcounted = {g_bytes + 620, 3}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[59],
- .data.refcounted = {g_bytes + 623, 5}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[60],
- .data.refcounted = {g_bytes + 628, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[61],
- .data.refcounted = {g_bytes + 641, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[62],
- .data.refcounted = {g_bytes + 654, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[15],
- .data.refcounted = {g_bytes + 170, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[63],
- .data.refcounted = {g_bytes + 673, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[64],
- .data.refcounted = {g_bytes + 689, 14}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[65],
- .data.refcounted = {g_bytes + 703, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[66],
- .data.refcounted = {g_bytes + 719, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[14],
- .data.refcounted = {g_bytes + 158, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[67],
- .data.refcounted = {g_bytes + 732, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[68],
- .data.refcounted = {g_bytes + 738, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[69],
- .data.refcounted = {g_bytes + 742, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[70],
- .data.refcounted = {g_bytes + 746, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[71],
- .data.refcounted = {g_bytes + 752, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[72],
- .data.refcounted = {g_bytes + 759, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[20],
- .data.refcounted = {g_bytes + 278, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[73],
- .data.refcounted = {g_bytes + 763, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[74],
- .data.refcounted = {g_bytes + 771, 17}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[75],
- .data.refcounted = {g_bytes + 788, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[76],
- .data.refcounted = {g_bytes + 801, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[77],
- .data.refcounted = {g_bytes + 809, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[78],
- .data.refcounted = {g_bytes + 828, 13}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[21],
- .data.refcounted = {g_bytes + 282, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[79],
- .data.refcounted = {g_bytes + 841, 11}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[80],
- .data.refcounted = {g_bytes + 852, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[81],
- .data.refcounted = {g_bytes + 856, 8}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[82],
- .data.refcounted = {g_bytes + 864, 12}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[83],
- .data.refcounted = {g_bytes + 876, 18}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[84],
- .data.refcounted = {g_bytes + 894, 19}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[85],
- .data.refcounted = {g_bytes + 913, 5}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[86],
- .data.refcounted = {g_bytes + 918, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[87],
- .data.refcounted = {g_bytes + 925, 7}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[88],
- .data.refcounted = {g_bytes + 932, 11}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[89],
- .data.refcounted = {g_bytes + 943, 6}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[90],
- .data.refcounted = {g_bytes + 949, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[91],
- .data.refcounted = {g_bytes + 959, 25}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[92],
- .data.refcounted = {g_bytes + 984, 17}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[19],
- .data.refcounted = {g_bytes + 268, 10}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[93],
- .data.refcounted = {g_bytes + 1001, 4}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[94],
- .data.refcounted = {g_bytes + 1005, 3}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[95],
- .data.refcounted = {g_bytes + 1008, 16}},
- {.refcount = &grpc_static_metadata_refcounts[23],
- .data.refcounted = {g_bytes + 302, 0}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[34],
- .data.refcounted = {g_bytes + 445, 7}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[96],
- .data.refcounted = {g_bytes + 1024, 16}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[98],
- .data.refcounted = {g_bytes + 1053, 12}}},
- {{.refcount = &grpc_static_metadata_refcounts[10],
- .data.refcounted = {g_bytes + 90, 20}},
- {.refcount = &grpc_static_metadata_refcounts[99],
- .data.refcounted = {g_bytes + 1065, 21}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[32],
- .data.refcounted = {g_bytes + 433, 8}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[33],
- .data.refcounted = {g_bytes + 441, 4}}},
- {{.refcount = &grpc_static_metadata_refcounts[16],
- .data.refcounted = {g_bytes + 186, 15}},
- {.refcount = &grpc_static_metadata_refcounts[97],
- .data.refcounted = {g_bytes + 1040, 13}}},
-};
-bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
- true, // :path
- true, // :method
- true, // :status
- true, // :authority
- true, // :scheme
- true, // te
- true, // grpc-message
- true, // grpc-status
- true, // grpc-payload-bin
- true, // grpc-encoding
- true, // grpc-accept-encoding
- true, // grpc-server-stats-bin
- true, // grpc-tags-bin
- true, // grpc-trace-bin
- true, // content-type
- true, // content-encoding
- true, // accept-encoding
- true, // grpc-internal-encoding-request
- true, // grpc-internal-stream-encoding-request
- true, // user-agent
- true, // host
- true, // lb-token
-};
-
-const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
- 79, 80, 81, 82};
-
-const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85};
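The deleted file above (and, presumably, its regenerated .cc replacement below) looks up metadata pairs with a displacement-based perfect hash: elems_phash() packs the two static-string indices into a*100 + b, applies a per-row offset from elems_r, and grpc_static_mdelem_for_static_strings() then confirms the slot by comparing elem_keys and rejecting the 255 sentinel in elem_idxs. A tiny standalone sketch of that scheme with made-up tables (the real ones come from gen_static_metadata.py; nothing below is gRPC API):

#include <cstdint>
#include <cstdio>

// Made-up displacement/keys/index tables, sized 8; 255 marks an empty slot.
static const int8_t kRowDelta[] = {0, 3, 0, 2};
static const uint16_t kKeys[] = {0, 105, 0, 25, 0, 10, 0, 0};
static const uint8_t kIdxs[] = {255, 0, 255, 2, 255, 1, 255, 255};

// Mirrors the shape of elems_phash + grpc_static_mdelem_for_static_strings:
// hash the packed key, displace by row, then verify the slot really holds it.
static int Lookup(uint32_t a, uint32_t b) {
  const uint32_t k = a * 100 + b;  // same key packing as the generated code
  const uint32_t x = k % 8;        // column
  const uint32_t y = k / 8;        // row selects a displacement
  uint32_t h = x;
  if (y < sizeof(kRowDelta) / sizeof(kRowDelta[0])) {
    h += (uint32_t)kRowDelta[y];
  }
  if (h < sizeof(kKeys) / sizeof(kKeys[0]) && kKeys[h] == k &&
      kIdxs[h] != 255) {
    return kIdxs[h];  // index into the mdelem table in the real code
  }
  return -1;  // the real code returns GRPC_MDNULL here
}

int main() {
  std::printf("lookup(1, 5)  -> %d\n", Lookup(1, 5));   // hit, prints 0
  std::printf("lookup(0, 25) -> %d\n", Lookup(0, 25));  // hit via displacement, prints 2
  std::printf("lookup(9, 9)  -> %d\n", Lookup(9, 9));   // miss, prints -1
  return 0;
}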
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
new file mode 100644
index 0000000000..472cf888ea
--- /dev/null
+++ b/src/core/lib/transport/static_metadata.cc
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * WARNING: Auto-generated code.
+ *
+ * To make changes to this file, change
+ * tools/codegen/core/gen_static_metadata.py, and then re-run it.
+ *
+ * See metadata.h for an explanation of the interface here, and metadata.c for
+ * an explanation of what's going on.
+ */
+
+#include "src/core/lib/transport/static_metadata.h"
+
+#include "src/core/lib/slice/slice_internal.h"
+
+static uint8_t g_bytes[] = {
+ 58, 112, 97, 116, 104, 58, 109, 101, 116, 104, 111, 100, 58, 115, 116,
+ 97, 116, 117, 115, 58, 97, 117, 116, 104, 111, 114, 105, 116, 121, 58,
+ 115, 99, 104, 101, 109, 101, 116, 101, 103, 114, 112, 99, 45, 109, 101,
+ 115, 115, 97, 103, 101, 103, 114, 112, 99, 45, 115, 116, 97, 116, 117,
+ 115, 103, 114, 112, 99, 45, 112, 97, 121, 108, 111, 97, 100, 45, 98,
+ 105, 110, 103, 114, 112, 99, 45, 101, 110, 99, 111, 100, 105, 110, 103,
+ 103, 114, 112, 99, 45, 97, 99, 99, 101, 112, 116, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 115, 101, 114, 118, 101,
+ 114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99,
+ 45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116,
+ 114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116,
+ 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110,
+ 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114,
+ 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101,
+ 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99,
+ 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115,
+ 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45,
+ 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111,
+ 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114,
+ 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101,
+ 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113,
+ 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121,
+ 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115,
+ 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98,
+ 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49,
+ 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66,
+ 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100,
+ 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97,
+ 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105,
+ 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84,
+ 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115,
+ 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100,
+ 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48,
+ 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99,
+ 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102,
+ 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103,
+ 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103,
+ 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45,
+ 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111,
+ 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117,
+ 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104,
+ 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110,
+ 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99,
+ 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111,
+ 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99,
+ 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111,
+ 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101,
+ 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105,
+ 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101,
+ 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105,
+ 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105,
+ 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101,
+ 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110,
+ 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111,
+ 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116,
+ 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45,
+ 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97,
+ 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101,
+ 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101,
+ 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115,
+ 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116,
+ 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102,
+ 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121,
+ 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105,
+ 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101,
+ 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103,
+ 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112,
+ 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
+ 101, 44, 103, 122, 105, 112};
+
+static void static_ref(void *unused) {}
+static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
+static const grpc_slice_refcount_vtable static_sub_vtable = {
+ static_ref, static_unref, grpc_slice_default_eq_impl,
+ grpc_slice_default_hash_impl};
+const grpc_slice_refcount_vtable grpc_static_metadata_vtable = {
+ static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash};
+static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable,
+ &static_sub_refcnt};
+grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+ {&grpc_static_metadata_vtable, &static_sub_refcnt},
+};
+
+const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
+ {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}},
+ {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}},
+ {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}},
+ {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}},
+ {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}},
+ {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}},
+ {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}},
+ {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}},
+ {&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}},
+ {&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}},
+ {&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}},
+ {&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}},
+ {&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}},
+ {&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}},
+ {&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}},
+};
+
+uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4};
+
+static const int8_t elems_r[] = {
+ 11, 9, -3, 0, 10, 27, -74, 28, 0, 14, -7, 0, 0, 0, 18, 8, -2,
+ 0, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, -50, 0, -33, -55, -56, -57, -58, -57, 0, 40, 39, 38, 37, 36, 35, 34,
+ 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 22,
+ 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 0};
+static uint32_t elems_phash(uint32_t i) {
+ i -= 45;
+ uint32_t x = i % 98;
+ uint32_t y = i / 98;
+ uint32_t h = x;
+ if (y < GPR_ARRAY_SIZE(elems_r)) {
+ uint32_t delta = (uint32_t)elems_r[y];
+ h += delta;
+ }
+ return h;
+}
+
+static const uint16_t elem_keys[] = {
+ 1032, 1033, 1034, 247, 248, 249, 250, 251, 1623, 143, 144, 45,
+ 46, 440, 441, 442, 1523, 1632, 1633, 932, 933, 934, 729, 730,
+ 1423, 1532, 1533, 535, 731, 1923, 2023, 2123, 5223, 5523, 5623, 5723,
+ 5823, 1436, 1653, 5923, 6023, 6123, 6223, 6323, 6423, 6523, 6623, 6723,
+ 6823, 6923, 7023, 7123, 7223, 5423, 7323, 7423, 7523, 7623, 7723, 7823,
+ 7923, 8023, 8123, 8223, 1096, 1097, 1098, 1099, 8323, 8423, 8523, 8623,
+ 8723, 8823, 8923, 9023, 9123, 9223, 9323, 323, 9423, 9523, 1697, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 137, 238, 239, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0};
+static const uint8_t elem_idxs[] = {
+ 76, 79, 77, 19, 20, 21, 22, 23, 25, 15, 16, 17, 18, 11,
+ 12, 13, 38, 83, 84, 3, 4, 5, 0, 1, 43, 36, 37, 6,
+ 2, 72, 50, 57, 24, 28, 29, 30, 31, 7, 26, 32, 33, 34,
+ 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 27, 51, 52,
+ 53, 54, 55, 56, 58, 59, 60, 61, 78, 80, 81, 82, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 73, 14, 74, 75, 85, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 8, 9, 10};
+
+grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
+ if (a == -1 || b == -1) return GRPC_MDNULL;
+ uint32_t k = (uint32_t)(a * 100 + b);
+ uint32_t h = elems_phash(k);
+ return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k &&
+ elem_idxs[h] != 255
+ ? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]],
+ GRPC_MDELEM_STORAGE_STATIC)
+ : GRPC_MDNULL;
+}
+
+grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}},
+ {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
+ {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
+ {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}},
+ {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
+ {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}}},
+ {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}},
+ {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
+ {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}},
+ {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
+ {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}},
+ {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
+ {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}},
+ {{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}},
+ {{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}},
+ {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}}},
+ {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
+ {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}},
+ {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
+ {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}},
+};
+bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {
+ true, // :path
+ true, // :method
+ true, // :status
+ true, // :authority
+ true, // :scheme
+ true, // te
+ true, // grpc-message
+ true, // grpc-status
+ true, // grpc-payload-bin
+ true, // grpc-encoding
+ true, // grpc-accept-encoding
+ true, // grpc-server-stats-bin
+ true, // grpc-tags-bin
+ true, // grpc-trace-bin
+ true, // content-type
+ true, // content-encoding
+ true, // accept-encoding
+ true, // grpc-internal-encoding-request
+ true, // grpc-internal-stream-encoding-request
+ true, // user-agent
+ true, // host
+ true, // lb-token
+};
+
+const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78,
+ 79, 80, 81, 82};
+
+const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85};
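
For orientation: the generated tables above describe every static metadata string as an (offset, length) pair into the g_bytes blob, and grpc_static_mdelem_for_static_strings() resolves a (key, value) pair of static-string indices through the elems_phash/elem_keys/elem_idxs perfect hash. A minimal lookup sketch, with indices chosen only for illustration:

    /* Sketch: the indices 7 and 29 are illustrative; real callers use the
       generated GRPC_MDSTR_* / GRPC_MDELEM_* constants instead. */
    grpc_mdelem md = grpc_static_mdelem_for_static_strings(7, 29);
    if (!GRPC_MDISNULL(md)) {
      /* md points into grpc_static_mdelem_table; its storage class is
         GRPC_MDELEM_STORAGE_STATIC, so no ref/unref bookkeeping is needed. */
    }
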
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index f03a9d23b1..299410f22c 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -27,6 +27,10 @@
#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H
#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "src/core/lib/transport/metadata.h"
#define GRPC_STATIC_MDSTR_COUNT 100
@@ -584,4 +588,7 @@ extern const uint8_t grpc_static_accept_stream_encoding_metadata[4];
(GRPC_MAKE_MDELEM(&grpc_static_mdelem_table \
[grpc_static_accept_stream_encoding_metadata[(algs)]], \
GRPC_MDELEM_STORAGE_STATIC))
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */
diff --git a/src/core/lib/transport/status_conversion.c b/src/core/lib/transport/status_conversion.cc
index 9a76977e4b..891c4427d7 100644
--- a/src/core/lib/transport/status_conversion.c
+++ b/src/core/lib/transport/status_conversion.cc
@@ -18,7 +18,7 @@
#include "src/core/lib/transport/status_conversion.h"
-int grpc_status_to_http2_error(grpc_status_code status) {
+grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
switch (status) {
case GRPC_STATUS_OK:
return GRPC_HTTP2_NO_ERROR;
@@ -37,8 +37,9 @@ int grpc_status_to_http2_error(grpc_status_code status) {
}
}
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
- gpr_timespec deadline) {
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+ grpc_http2_error_code error,
+ grpc_millis deadline) {
switch (error) {
case GRPC_HTTP2_NO_ERROR:
/* should never be received */
@@ -46,7 +47,7 @@ grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
case GRPC_HTTP2_CANCEL:
/* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
* exceeded */
- return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0
+ return grpc_exec_ctx_now(exec_ctx) > deadline
? GRPC_STATUS_DEADLINE_EXCEEDED
: GRPC_STATUS_CANCELLED;
case GRPC_HTTP2_ENHANCE_YOUR_CALM:
diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h
index e93f3dfd9b..8ef91aecfe 100644
--- a/src/core/lib/transport/status_conversion.h
+++ b/src/core/lib/transport/status_conversion.h
@@ -20,15 +20,25 @@
#define GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H
#include <grpc/grpc.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/transport/http2_errors.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */
grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status);
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
- gpr_timespec deadline);
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+ grpc_http2_error_code error,
+ grpc_millis deadline);
/* Conversion of HTTP status codes (:status) to grpc status codes */
grpc_status_code grpc_http2_status_to_grpc_status(int status);
int grpc_status_to_http2_status(grpc_status_code status);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H */
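
With this change the HTTP/2 error conversion is driven by the exec_ctx clock and grpc_millis deadlines rather than gpr_timespec. A sketch of how a transport might map an incoming RST_STREAM(CANCEL) under the new signature; the wrapper name is made up for illustration:

    /* Assumes status_conversion.h is included; map_rst_cancel is hypothetical. */
    static grpc_status_code map_rst_cancel(grpc_exec_ctx *exec_ctx,
                                           grpc_millis deadline) {
      /* Yields DEADLINE_EXCEEDED if the deadline had already passed when the
         cancel arrived, CANCELLED otherwise, per the rule above. */
      return grpc_http2_error_to_grpc_status(exec_ctx, GRPC_HTTP2_CANCEL,
                                             deadline);
    }
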
diff --git a/src/core/lib/transport/timeout_encoding.c b/src/core/lib/transport/timeout_encoding.cc
index 02f179d6a3..23a9ef308f 100644
--- a/src/core/lib/transport/timeout_encoding.c
+++ b/src/core/lib/transport/timeout_encoding.cc
@@ -59,60 +59,27 @@ static void enc_seconds(char *buffer, int64_t sec) {
}
}
-static void enc_nanos(char *buffer, int64_t x) {
+static void enc_millis(char *buffer, int64_t x) {
x = round_up_to_three_sig_figs(x);
- if (x < 100000) {
- if (x % 1000 == 0) {
- enc_ext(buffer, x / 1000, 'u');
- } else {
- enc_ext(buffer, x, 'n');
- }
- } else if (x < 100000000) {
- if (x % 1000000 == 0) {
- enc_ext(buffer, x / 1000000, 'm');
- } else {
- enc_ext(buffer, x / 1000, 'u');
- }
- } else if (x < 1000000000) {
- enc_ext(buffer, x / 1000000, 'm');
+ if (x < GPR_MS_PER_SEC) {
+ enc_ext(buffer, x, 'm');
} else {
- /* note that this is only ever called with times of less than one second,
- so if we reach here the time must have been rounded up to a whole second
- (and no more) */
- memcpy(buffer, "1S", 3);
- }
-}
-
-static void enc_micros(char *buffer, int64_t x) {
- x = round_up_to_three_sig_figs(x);
- if (x < 100000) {
- if (x % 1000 == 0) {
- enc_ext(buffer, x / 1000, 'm');
+ if (x % GPR_MS_PER_SEC == 0) {
+ enc_seconds(buffer, x / GPR_MS_PER_SEC);
} else {
- enc_ext(buffer, x, 'u');
+ enc_ext(buffer, x, 'm');
}
- } else if (x < 100000000) {
- if (x % 1000000 == 0) {
- enc_ext(buffer, x / 1000000, 'S');
- } else {
- enc_ext(buffer, x / 1000, 'm');
- }
- } else {
- enc_ext(buffer, x / 1000000, 'S');
}
}
-void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer) {
- if (timeout.tv_sec < 0) {
+void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer) {
+ if (timeout <= 0) {
enc_tiny(buffer);
- } else if (timeout.tv_sec == 0) {
- enc_nanos(buffer, timeout.tv_nsec);
- } else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) {
- enc_micros(buffer,
- (int64_t)(timeout.tv_sec * 1000000) +
- (timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0)));
+ } else if (timeout < 1000 * GPR_MS_PER_SEC) {
+ enc_millis(buffer, timeout);
} else {
- enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0));
+ enc_seconds(buffer,
+ timeout / GPR_MS_PER_SEC + (timeout % GPR_MS_PER_SEC != 0));
}
}
@@ -121,8 +88,8 @@ static int is_all_whitespace(const char *p, const char *end) {
return p == end;
}
-int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
- int32_t x = 0;
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout) {
+ grpc_millis x = 0;
const uint8_t *p = GRPC_SLICE_START_PTR(text);
const uint8_t *end = GRPC_SLICE_END_PTR(text);
int have_digit = 0;
@@ -136,7 +103,7 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
/* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */
if (x >= (100 * 1000 * 1000)) {
if (x != (100 * 1000 * 1000) || digit != 0) {
- *timeout = gpr_inf_future(GPR_TIMESPAN);
+ *timeout = GRPC_MILLIS_INF_FUTURE;
return 1;
}
}
@@ -150,22 +117,22 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) {
/* decode unit specifier */
switch (*p) {
case 'n':
- *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN);
+ *timeout = x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0);
break;
case 'u':
- *timeout = gpr_time_from_micros(x, GPR_TIMESPAN);
+ *timeout = x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0);
break;
case 'm':
- *timeout = gpr_time_from_millis(x, GPR_TIMESPAN);
+ *timeout = x;
break;
case 'S':
- *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN);
+ *timeout = x * GPR_MS_PER_SEC;
break;
case 'M':
- *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN);
+ *timeout = x * 60 * GPR_MS_PER_SEC;
break;
case 'H':
- *timeout = gpr_time_from_hours(x, GPR_TIMESPAN);
+ *timeout = x * 60 * 60 * GPR_MS_PER_SEC;
break;
default:
return 0;
diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h
index 7ff35c4083..91cdf0f728 100644
--- a/src/core/lib/transport/timeout_encoding.h
+++ b/src/core/lib/transport/timeout_encoding.h
@@ -22,13 +22,22 @@
#include <grpc/slice.h>
#include <grpc/support/time.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/string.h"
#define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
-void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer);
-int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout);
+void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer);
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */
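
The encoder now works directly in integer milliseconds: values below 1000 seconds are emitted with the 'm' unit (or folded into seconds when they divide evenly), larger values are rounded up to whole seconds, and decoding converts the n/u/m/S/M/H units back to grpc_millis, rounding sub-millisecond amounts up. A small usage sketch with illustrative values:

    char buf[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
    grpc_http2_encode_timeout(250, buf);                  /* -> "250m" */
    grpc_http2_encode_timeout(90 * GPR_MS_PER_SEC, buf);  /* -> "90S"  */

    grpc_millis timeout;
    grpc_slice s = grpc_slice_from_static_string("100u");
    if (grpc_http2_decode_timeout(s, &timeout)) {
      /* 100 microseconds rounds up to timeout == 1 under the millis model. */
    }
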
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.cc
index 6c61f4b8d9..ab4f938e7b 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.cc
@@ -72,7 +72,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
cope with.
Throw this over to the executor (on a core-owned thread) and process it
there. */
- refcount->destroy.scheduler = grpc_executor_scheduler;
+ refcount->destroy.scheduler =
+ grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
}
@@ -101,15 +102,19 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
void *buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
- return (grpc_slice){.refcount = &refcount->slice_refcount,
- .data.refcounted = {.bytes = buffer, .length = length}};
+ grpc_slice res;
+ res.refcount = &refcount->slice_refcount,
+ res.data.refcounted.bytes = (uint8_t *)buffer;
+ res.data.refcounted.length = length;
+ return res;
}
static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
- .ref = slice_stream_ref,
- .unref = slice_stream_unref,
- .eq = grpc_slice_default_eq_impl,
- .hash = grpc_slice_default_hash_impl};
+ slice_stream_ref, /* ref */
+ slice_stream_unref, /* unref */
+ grpc_slice_default_eq_impl, /* eq */
+ grpc_slice_default_hash_impl /* hash */
+};
#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
@@ -197,11 +202,6 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
then_schedule_closure);
}
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
- return transport->vtable->get_peer(exec_ctx, transport);
-}
-
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
@@ -214,24 +214,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// grpc_transport_stream_op_batch_finish_with_failure is a function that
// must always unref cancel_error; though it lives in lib, it handles
// transport stream ops.
-
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
- grpc_error *error) {
+ grpc_error *error, grpc_call_combiner *call_combiner) {
if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message);
}
if (batch->recv_message) {
- GRPC_CLOSURE_SCHED(exec_ctx,
- batch->payload->recv_message.recv_message_ready,
- GRPC_ERROR_REF(error));
+ GRPC_CALL_COMBINER_START(exec_ctx, call_combiner,
+ batch->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_REF(error),
+ "failing recv_message_ready");
}
if (batch->recv_initial_metadata) {
- GRPC_CLOSURE_SCHED(
- exec_ctx,
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, call_combiner,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
- GRPC_ERROR_REF(error));
+ GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
}
GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
if (batch->cancel_stream) {
@@ -247,13 +247,13 @@ typedef struct {
static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_op *op = arg;
+ made_transport_op *op = (made_transport_op *)arg;
GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
- made_transport_op *op = gpr_malloc(sizeof(*op));
+ made_transport_op *op = (made_transport_op *)gpr_malloc(sizeof(*op));
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
@@ -271,7 +271,7 @@ typedef struct {
static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
- made_transport_stream_op *op = arg;
+ made_transport_stream_op *op = (made_transport_stream_op *)arg;
grpc_closure *c = op->inner_on_complete;
gpr_free(op);
GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
@@ -279,7 +279,8 @@ static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
grpc_closure *on_complete) {
- made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
+ made_transport_stream_op *op =
+ (made_transport_stream_op *)gpr_zalloc(sizeof(*op));
op->op.payload = &op->payload;
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);
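
Failing a batch now requires the call's grpc_call_combiner, since the recv_* callbacks are rescheduled through GRPC_CALL_COMBINER_START rather than GRPC_CLOSURE_SCHED. A hedged sketch of how a filter might use the updated helper; the wrapper and error text are illustrative only:

    static void fail_batch(grpc_exec_ctx *exec_ctx,
                           grpc_transport_stream_op_batch *batch,
                           grpc_call_combiner *call_combiner) {
      grpc_transport_stream_op_batch_finish_with_failure(
          exec_ctx, batch,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("batch rejected"), call_combiner);
    }
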
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 099138ea14..fbf5dcb8b5 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -22,6 +22,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
+#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
@@ -152,6 +153,9 @@ struct grpc_transport_stream_op_batch_payload {
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} send_initial_metadata;
struct {
@@ -176,6 +180,9 @@ struct grpc_transport_stream_op_batch_payload {
// immediately available. This may be a signal that we received a
// Trailers-Only response.
bool *trailing_metadata_available;
+ // If non-NULL, will be set by the transport to the peer string
+ // (a char*, which the caller takes ownership of).
+ gpr_atm *peer_string;
} recv_initial_metadata;
struct {
@@ -293,7 +300,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_error *error);
+ grpc_error *error, grpc_call_combiner *call_combiner);
char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
char *grpc_transport_op_string(grpc_transport_op *op);
@@ -332,10 +339,6 @@ void grpc_transport_close(grpc_transport *transport);
/* Destroy the transport */
void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
-/* Get the transports peer */
-char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport);
-
/* Get the endpoint used by \a transport */
grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
grpc_transport *transport);
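
The peer string is now reported through the batch payload instead of a get_peer vtable hook: a caller stores the address of a gpr_atm in the peer_string field of send_initial_metadata or recv_initial_metadata, and the transport publishes a heap-allocated char* into it. A sketch of the consuming side, assuming calld->peer_string is a gpr_atm owned by hypothetical call data:

    batch->payload->recv_initial_metadata.peer_string = &calld->peer_string;
    /* ...later, once initial metadata has been received... */
    char *peer = (char *)gpr_atm_acq_load(&calld->peer_string);
    if (peer != NULL) {
      /* The caller takes ownership per the comment above, so it must
         eventually gpr_free(peer). */
    }
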
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index fc772c6dd1..445fb41ab1 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -21,6 +21,10 @@
#include "src/core/lib/transport/transport.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct grpc_transport_vtable {
/* Memory required for a single stream element - this is allocated by upper
layers and initialized by the transport */
@@ -59,9 +63,6 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
- /* implementation of grpc_transport_get_peer */
- char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
-
/* implementation of grpc_transport_get_endpoint */
grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
} grpc_transport_vtable;
@@ -72,4 +73,8 @@ struct grpc_transport {
const grpc_transport_vtable *vtable;
};
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H */
diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.cc
index 7b18229ba6..cc11b0cc49 100644
--- a/src/core/lib/transport/transport_op_string.c
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -16,8 +16,11 @@
*
*/
+#include <grpc/support/port_platform.h>
+
#include "src/core/lib/channel/channel_stack.h"
+#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
@@ -48,10 +51,9 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
- if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
+ if (md.deadline != GRPC_MILLIS_INF_FUTURE) {
char *tmp;
- gpr_asprintf(&tmp, " deadline=%" PRId64 ".%09d", md.deadline.tv_sec,
- md.deadline.tv_nsec);
+ gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline);
gpr_strvec_add(b, tmp);
}
}
@@ -112,6 +114,13 @@ char *grpc_transport_stream_op_batch_string(
gpr_strvec_add(&b, tmp);
}
+ if (op->collect_stats) {
+ gpr_strvec_add(&b, gpr_strdup(" "));
+ gpr_asprintf(&tmp, "COLLECT_STATS:%p",
+ op->payload->collect_stats.collect_stats);
+ gpr_strvec_add(&b, tmp);
+ }
+
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
@@ -190,7 +199,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
return out;
}
-void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
+void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
char *str = grpc_transport_stream_op_batch_string(op);
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.c b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
index 322ebea111..e0422f6750 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
@@ -18,18 +18,18 @@
#include <grpc/grpc.h>
-extern void grpc_http_filters_init(void);
-extern void grpc_http_filters_shutdown(void);
-extern void grpc_chttp2_plugin_init(void);
-extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_deadline_filter_init(void);
-extern void grpc_deadline_filter_shutdown(void);
-extern void grpc_client_channel_init(void);
-extern void grpc_client_channel_shutdown(void);
-extern void grpc_tsi_gts_init(void);
-extern void grpc_tsi_gts_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
+extern "C" void grpc_http_filters_init(void);
+extern "C" void grpc_http_filters_shutdown(void);
+extern "C" void grpc_chttp2_plugin_init(void);
+extern "C" void grpc_chttp2_plugin_shutdown(void);
+extern "C" void grpc_deadline_filter_init(void);
+extern "C" void grpc_deadline_filter_shutdown(void);
+extern "C" void grpc_client_channel_init(void);
+extern "C" void grpc_client_channel_shutdown(void);
+extern "C" void grpc_tsi_gts_init(void);
+extern "C" void grpc_tsi_gts_shutdown(void);
+extern "C" void grpc_server_load_reporting_plugin_init(void);
+extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_http_filters_init,
@@ -42,6 +42,6 @@ void grpc_register_built_in_plugins(void) {
grpc_client_channel_shutdown);
grpc_register_plugin(grpc_tsi_gts_init,
grpc_tsi_gts_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
}
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.cc
index fa9974952c..339c9bb367 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.cc
@@ -18,42 +18,40 @@
#include <grpc/grpc.h>
-extern void grpc_http_filters_init(void);
-extern void grpc_http_filters_shutdown(void);
-extern void grpc_chttp2_plugin_init(void);
-extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_tsi_gts_init(void);
-extern void grpc_tsi_gts_shutdown(void);
-extern void grpc_deadline_filter_init(void);
-extern void grpc_deadline_filter_shutdown(void);
-extern void grpc_client_channel_init(void);
-extern void grpc_client_channel_shutdown(void);
-extern void grpc_inproc_plugin_init(void);
-extern void grpc_inproc_plugin_shutdown(void);
-extern void grpc_resolver_fake_init(void);
-extern void grpc_resolver_fake_shutdown(void);
-extern void grpc_lb_policy_grpclb_init(void);
-extern void grpc_lb_policy_grpclb_shutdown(void);
-extern void grpc_lb_policy_pick_first_init(void);
-extern void grpc_lb_policy_pick_first_shutdown(void);
-extern void grpc_lb_policy_round_robin_init(void);
-extern void grpc_lb_policy_round_robin_shutdown(void);
-extern void grpc_resolver_dns_ares_init(void);
-extern void grpc_resolver_dns_ares_shutdown(void);
-extern void grpc_resolver_dns_native_init(void);
-extern void grpc_resolver_dns_native_shutdown(void);
-extern void grpc_resolver_sockaddr_init(void);
-extern void grpc_resolver_sockaddr_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
-extern void census_grpc_plugin_init(void);
-extern void census_grpc_plugin_shutdown(void);
-extern void grpc_max_age_filter_init(void);
-extern void grpc_max_age_filter_shutdown(void);
-extern void grpc_message_size_filter_init(void);
-extern void grpc_message_size_filter_shutdown(void);
-extern void grpc_workaround_cronet_compression_filter_init(void);
-extern void grpc_workaround_cronet_compression_filter_shutdown(void);
+extern "C" void grpc_http_filters_init(void);
+extern "C" void grpc_http_filters_shutdown(void);
+extern "C" void grpc_chttp2_plugin_init(void);
+extern "C" void grpc_chttp2_plugin_shutdown(void);
+extern "C" void grpc_tsi_gts_init(void);
+extern "C" void grpc_tsi_gts_shutdown(void);
+extern "C" void grpc_deadline_filter_init(void);
+extern "C" void grpc_deadline_filter_shutdown(void);
+extern "C" void grpc_client_channel_init(void);
+extern "C" void grpc_client_channel_shutdown(void);
+extern "C" void grpc_inproc_plugin_init(void);
+extern "C" void grpc_inproc_plugin_shutdown(void);
+extern "C" void grpc_resolver_fake_init(void);
+extern "C" void grpc_resolver_fake_shutdown(void);
+extern "C" void grpc_lb_policy_grpclb_init(void);
+extern "C" void grpc_lb_policy_grpclb_shutdown(void);
+extern "C" void grpc_lb_policy_pick_first_init(void);
+extern "C" void grpc_lb_policy_pick_first_shutdown(void);
+extern "C" void grpc_lb_policy_round_robin_init(void);
+extern "C" void grpc_lb_policy_round_robin_shutdown(void);
+extern "C" void grpc_resolver_dns_ares_init(void);
+extern "C" void grpc_resolver_dns_ares_shutdown(void);
+extern "C" void grpc_resolver_dns_native_init(void);
+extern "C" void grpc_resolver_dns_native_shutdown(void);
+extern "C" void grpc_resolver_sockaddr_init(void);
+extern "C" void grpc_resolver_sockaddr_shutdown(void);
+extern "C" void grpc_server_load_reporting_plugin_init(void);
+extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
+extern "C" void grpc_max_age_filter_init(void);
+extern "C" void grpc_max_age_filter_shutdown(void);
+extern "C" void grpc_message_size_filter_init(void);
+extern "C" void grpc_message_size_filter_shutdown(void);
+extern "C" void grpc_workaround_cronet_compression_filter_init(void);
+extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_http_filters_init,
@@ -82,10 +80,8 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
grpc_resolver_sockaddr_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
- grpc_register_plugin(census_grpc_plugin_init,
- census_grpc_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
grpc_register_plugin(grpc_max_age_filter_init,
grpc_max_age_filter_shutdown);
grpc_register_plugin(grpc_message_size_filter_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
index 7eb599d81a..c9fc17d34d 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
@@ -18,40 +18,38 @@
#include <grpc/grpc.h>
-extern void grpc_http_filters_init(void);
-extern void grpc_http_filters_shutdown(void);
-extern void grpc_chttp2_plugin_init(void);
-extern void grpc_chttp2_plugin_shutdown(void);
-extern void grpc_deadline_filter_init(void);
-extern void grpc_deadline_filter_shutdown(void);
-extern void grpc_client_channel_init(void);
-extern void grpc_client_channel_shutdown(void);
-extern void grpc_inproc_plugin_init(void);
-extern void grpc_inproc_plugin_shutdown(void);
-extern void grpc_resolver_dns_ares_init(void);
-extern void grpc_resolver_dns_ares_shutdown(void);
-extern void grpc_resolver_dns_native_init(void);
-extern void grpc_resolver_dns_native_shutdown(void);
-extern void grpc_resolver_sockaddr_init(void);
-extern void grpc_resolver_sockaddr_shutdown(void);
-extern void grpc_resolver_fake_init(void);
-extern void grpc_resolver_fake_shutdown(void);
-extern void grpc_load_reporting_plugin_init(void);
-extern void grpc_load_reporting_plugin_shutdown(void);
-extern void grpc_lb_policy_grpclb_init(void);
-extern void grpc_lb_policy_grpclb_shutdown(void);
-extern void grpc_lb_policy_pick_first_init(void);
-extern void grpc_lb_policy_pick_first_shutdown(void);
-extern void grpc_lb_policy_round_robin_init(void);
-extern void grpc_lb_policy_round_robin_shutdown(void);
-extern void census_grpc_plugin_init(void);
-extern void census_grpc_plugin_shutdown(void);
-extern void grpc_max_age_filter_init(void);
-extern void grpc_max_age_filter_shutdown(void);
-extern void grpc_message_size_filter_init(void);
-extern void grpc_message_size_filter_shutdown(void);
-extern void grpc_workaround_cronet_compression_filter_init(void);
-extern void grpc_workaround_cronet_compression_filter_shutdown(void);
+extern "C" void grpc_http_filters_init(void);
+extern "C" void grpc_http_filters_shutdown(void);
+extern "C" void grpc_chttp2_plugin_init(void);
+extern "C" void grpc_chttp2_plugin_shutdown(void);
+extern "C" void grpc_deadline_filter_init(void);
+extern "C" void grpc_deadline_filter_shutdown(void);
+extern "C" void grpc_client_channel_init(void);
+extern "C" void grpc_client_channel_shutdown(void);
+extern "C" void grpc_inproc_plugin_init(void);
+extern "C" void grpc_inproc_plugin_shutdown(void);
+extern "C" void grpc_resolver_dns_ares_init(void);
+extern "C" void grpc_resolver_dns_ares_shutdown(void);
+extern "C" void grpc_resolver_dns_native_init(void);
+extern "C" void grpc_resolver_dns_native_shutdown(void);
+extern "C" void grpc_resolver_sockaddr_init(void);
+extern "C" void grpc_resolver_sockaddr_shutdown(void);
+extern "C" void grpc_resolver_fake_init(void);
+extern "C" void grpc_resolver_fake_shutdown(void);
+extern "C" void grpc_server_load_reporting_plugin_init(void);
+extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
+extern "C" void grpc_lb_policy_grpclb_init(void);
+extern "C" void grpc_lb_policy_grpclb_shutdown(void);
+extern "C" void grpc_lb_policy_pick_first_init(void);
+extern "C" void grpc_lb_policy_pick_first_shutdown(void);
+extern "C" void grpc_lb_policy_round_robin_init(void);
+extern "C" void grpc_lb_policy_round_robin_shutdown(void);
+extern "C" void grpc_max_age_filter_init(void);
+extern "C" void grpc_max_age_filter_shutdown(void);
+extern "C" void grpc_message_size_filter_init(void);
+extern "C" void grpc_message_size_filter_shutdown(void);
+extern "C" void grpc_workaround_cronet_compression_filter_init(void);
+extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void);
void grpc_register_built_in_plugins(void) {
grpc_register_plugin(grpc_http_filters_init,
@@ -72,16 +70,14 @@ void grpc_register_built_in_plugins(void) {
grpc_resolver_sockaddr_shutdown);
grpc_register_plugin(grpc_resolver_fake_init,
grpc_resolver_fake_shutdown);
- grpc_register_plugin(grpc_load_reporting_plugin_init,
- grpc_load_reporting_plugin_shutdown);
+ grpc_register_plugin(grpc_server_load_reporting_plugin_init,
+ grpc_server_load_reporting_plugin_shutdown);
grpc_register_plugin(grpc_lb_policy_grpclb_init,
grpc_lb_policy_grpclb_shutdown);
grpc_register_plugin(grpc_lb_policy_pick_first_init,
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,
grpc_lb_policy_round_robin_shutdown);
- grpc_register_plugin(census_grpc_plugin_init,
- census_grpc_plugin_shutdown);
grpc_register_plugin(grpc_max_age_filter_init,
grpc_max_age_filter_shutdown);
grpc_register_plugin(grpc_message_size_filter_init,
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.cc
index 967126ecee..349dcf5cb8 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.cc
@@ -25,7 +25,8 @@
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
-#include "src/core/tsi/transport_security.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/tsi/transport_security_grpc.h"
/* --- Constants. ---*/
#define TSI_FAKE_FRAME_HEADER_SIZE 4
@@ -74,6 +75,14 @@ typedef struct {
size_t max_frame_size;
} tsi_fake_frame_protector;
+typedef struct {
+ tsi_zero_copy_grpc_protector base;
+ grpc_slice_buffer header_sb;
+ grpc_slice_buffer protected_sb;
+ size_t max_frame_size;
+ size_t parsed_frame_size;
+} tsi_fake_zero_copy_grpc_protector;
+
/* --- Utils. ---*/
static const char *tsi_fake_handshake_message_strings[] = {
@@ -89,11 +98,10 @@ static const char *tsi_fake_handshake_message_to_string(int msg) {
static tsi_result tsi_fake_handshake_message_from_string(
const char *msg_string, tsi_fake_handshake_message *msg) {
- tsi_fake_handshake_message i;
- for (i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) {
+ for (int i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) {
if (strncmp(msg_string, tsi_fake_handshake_message_strings[i],
strlen(tsi_fake_handshake_message_strings[i])) == 0) {
- *msg = i;
+ *msg = (tsi_fake_handshake_message)i;
return TSI_OK;
}
}
@@ -113,6 +121,28 @@ static void store32_little_endian(uint32_t value, unsigned char *buf) {
buf[0] = (unsigned char)((value)&0xFF);
}
+static uint32_t read_frame_size(const grpc_slice_buffer *sb) {
+ GPR_ASSERT(sb != NULL && sb->length >= TSI_FAKE_FRAME_HEADER_SIZE);
+ uint8_t frame_size_buffer[TSI_FAKE_FRAME_HEADER_SIZE];
+ uint8_t *buf = frame_size_buffer;
+ /* Copies the first 4 bytes to a temporary buffer. */
+ size_t remaining = TSI_FAKE_FRAME_HEADER_SIZE;
+ for (size_t i = 0; i < sb->count; i++) {
+ size_t slice_length = GRPC_SLICE_LENGTH(sb->slices[i]);
+ if (remaining <= slice_length) {
+ memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), remaining);
+ remaining = 0;
+ break;
+ } else {
+ memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), slice_length);
+ buf += slice_length;
+ remaining -= slice_length;
+ }
+ }
+ GPR_ASSERT(remaining == 0);
+ return load32_little_endian(frame_size_buffer);
+}
+
static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
frame->offset = 0;
frame->needs_draining = needs_draining;
@@ -124,9 +154,10 @@ static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
static void tsi_fake_frame_ensure_size(tsi_fake_frame *frame) {
if (frame->data == NULL) {
frame->allocated_size = frame->size;
- frame->data = gpr_malloc(frame->allocated_size);
+ frame->data = (unsigned char *)gpr_malloc(frame->allocated_size);
} else if (frame->size > frame->allocated_size) {
- unsigned char *new_data = gpr_realloc(frame->data, frame->size);
+ unsigned char *new_data =
+ (unsigned char *)gpr_realloc(frame->data, frame->size);
frame->data = new_data;
frame->allocated_size = frame->size;
}
@@ -145,7 +176,7 @@ static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes,
if (frame->needs_draining) return TSI_INTERNAL_ERROR;
if (frame->data == NULL) {
frame->allocated_size = TSI_FAKE_FRAME_INITIAL_ALLOCATED_SIZE;
- frame->data = gpr_malloc(frame->allocated_size);
+ frame->data = (unsigned char *)gpr_malloc(frame->allocated_size);
}
if (frame->offset < TSI_FAKE_FRAME_HEADER_SIZE) {
@@ -363,6 +394,84 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
fake_protector_unprotect, fake_protector_destroy,
};
+/* --- tsi_zero_copy_grpc_protector methods implementation. ---*/
+
+static tsi_result fake_zero_copy_grpc_protector_protect(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices,
+ grpc_slice_buffer *protected_slices) {
+ if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ /* Protects each frame. */
+ while (unprotected_slices->length > 0) {
+ size_t frame_length =
+ GPR_MIN(impl->max_frame_size,
+ unprotected_slices->length + TSI_FAKE_FRAME_HEADER_SIZE);
+ grpc_slice slice = GRPC_SLICE_MALLOC(TSI_FAKE_FRAME_HEADER_SIZE);
+ store32_little_endian((uint32_t)frame_length, GRPC_SLICE_START_PTR(slice));
+ grpc_slice_buffer_add(protected_slices, slice);
+ size_t data_length = frame_length - TSI_FAKE_FRAME_HEADER_SIZE;
+ grpc_slice_buffer_move_first(unprotected_slices, data_length,
+ protected_slices);
+ }
+ return TSI_OK;
+}
+
+static tsi_result fake_zero_copy_grpc_protector_unprotect(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices,
+ grpc_slice_buffer *unprotected_slices) {
+ if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
+ return TSI_INVALID_ARGUMENT;
+ }
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ grpc_slice_buffer_move_into(protected_slices, &impl->protected_sb);
+ /* Unprotect each frame, if we get a full frame. */
+ while (impl->protected_sb.length >= TSI_FAKE_FRAME_HEADER_SIZE) {
+ if (impl->parsed_frame_size == 0) {
+ impl->parsed_frame_size = read_frame_size(&impl->protected_sb);
+ if (impl->parsed_frame_size <= 4) {
+ gpr_log(GPR_ERROR, "Invalid frame size.");
+ return TSI_DATA_CORRUPTED;
+ }
+ }
+ /* If we do not have a full frame, return with OK status. */
+ if (impl->protected_sb.length < impl->parsed_frame_size) break;
+ /* Strips header bytes. */
+ grpc_slice_buffer_move_first(&impl->protected_sb,
+ TSI_FAKE_FRAME_HEADER_SIZE, &impl->header_sb);
+ /* Moves data to unprotected slices. */
+ grpc_slice_buffer_move_first(
+ &impl->protected_sb,
+ impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE,
+ unprotected_slices);
+ impl->parsed_frame_size = 0;
+ grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &impl->header_sb);
+ }
+ return TSI_OK;
+}
+
+static void fake_zero_copy_grpc_protector_destroy(
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self) {
+ if (self == NULL) return;
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)self;
+ grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb);
+ grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb);
+ gpr_free(impl);
+}
+
+static const tsi_zero_copy_grpc_protector_vtable
+ zero_copy_grpc_protector_vtable = {
+ fake_zero_copy_grpc_protector_protect,
+ fake_zero_copy_grpc_protector_unprotect,
+ fake_zero_copy_grpc_protector_destroy,
+};
+
/* --- tsi_handshaker_result methods implementation. ---*/
typedef struct {
@@ -383,6 +492,15 @@ static tsi_result fake_handshaker_result_extract_peer(
return result;
}
+static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector(
+ void *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector **protector) {
+ *protector =
+ tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size);
+ return TSI_OK;
+}
+
static tsi_result fake_handshaker_result_create_frame_protector(
const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
tsi_frame_protector **protector) {
@@ -407,7 +525,7 @@ static void fake_handshaker_result_destroy(tsi_handshaker_result *self) {
static const tsi_handshaker_result_vtable handshaker_result_vtable = {
fake_handshaker_result_extract_peer,
- NULL, /* create_zero_copy_grpc_protector */
+ fake_handshaker_result_create_zero_copy_grpc_protector,
fake_handshaker_result_create_frame_protector,
fake_handshaker_result_get_unused_bytes,
fake_handshaker_result_destroy,
@@ -420,10 +538,11 @@ static tsi_result fake_handshaker_result_create(
handshaker_result == NULL) {
return TSI_INVALID_ARGUMENT;
}
- fake_handshaker_result *result = gpr_zalloc(sizeof(*result));
+ fake_handshaker_result *result =
+ (fake_handshaker_result *)gpr_zalloc(sizeof(*result));
result->base.vtable = &handshaker_result_vtable;
if (unused_bytes_size > 0) {
- result->unused_bytes = gpr_malloc(unused_bytes_size);
+ result->unused_bytes = (unsigned char *)gpr_malloc(unused_bytes_size);
memcpy(result->unused_bytes, unused_bytes, unused_bytes_size);
}
result->unused_bytes_size = unused_bytes_size;
@@ -443,7 +562,7 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
}
if (!impl->outgoing_frame.needs_draining) {
tsi_fake_handshake_message next_message_to_send =
- impl->next_message_to_send + 2;
+ (tsi_fake_handshake_message)(impl->next_message_to_send + 2);
const char *msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
result = tsi_fake_frame_set_data((unsigned char *)msg_string,
@@ -478,7 +597,8 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
tsi_result result = TSI_OK;
tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
- tsi_fake_handshake_message expected_msg = impl->next_message_to_send - 1;
+ tsi_fake_handshake_message expected_msg =
+ (tsi_fake_handshake_message)(impl->next_message_to_send - 1);
tsi_fake_handshake_message received_msg;
if (!impl->needs_incoming_message || impl->result == TSI_OK) {
@@ -563,8 +683,8 @@ static tsi_result fake_handshaker_next(
if (result == TSI_INCOMPLETE_DATA) {
handshaker->outgoing_bytes_buffer_size *= 2;
handshaker->outgoing_bytes_buffer =
- gpr_realloc(handshaker->outgoing_bytes_buffer,
- handshaker->outgoing_bytes_buffer_size);
+ (unsigned char *)gpr_realloc(handshaker->outgoing_bytes_buffer,
+ handshaker->outgoing_bytes_buffer_size);
}
} while (result == TSI_INCOMPLETE_DATA);
if (result != TSI_OK) return result;
@@ -605,13 +725,14 @@ static const tsi_handshaker_vtable handshaker_vtable = {
};
tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
- tsi_fake_handshaker *impl = gpr_zalloc(sizeof(*impl));
+ tsi_fake_handshaker *impl = (tsi_fake_handshaker *)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &handshaker_vtable;
impl->is_client = is_client;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->outgoing_bytes_buffer_size =
TSI_FAKE_HANDSHAKER_OUTGOING_BUFFER_INITIAL_SIZE;
- impl->outgoing_bytes_buffer = gpr_malloc(impl->outgoing_bytes_buffer_size);
+ impl->outgoing_bytes_buffer =
+ (unsigned char *)gpr_malloc(impl->outgoing_bytes_buffer_size);
if (is_client) {
impl->needs_incoming_message = 0;
impl->next_message_to_send = TSI_FAKE_CLIENT_INIT;
@@ -624,10 +745,25 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
tsi_frame_protector *tsi_create_fake_frame_protector(
size_t *max_protected_frame_size) {
- tsi_fake_frame_protector *impl = gpr_zalloc(sizeof(*impl));
+ tsi_fake_frame_protector *impl =
+ (tsi_fake_frame_protector *)gpr_zalloc(sizeof(*impl));
impl->max_frame_size = (max_protected_frame_size == NULL)
? TSI_FAKE_DEFAULT_FRAME_SIZE
: *max_protected_frame_size;
impl->base.vtable = &frame_protector_vtable;
return &impl->base;
}
+
+tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
+ size_t *max_protected_frame_size) {
+ tsi_fake_zero_copy_grpc_protector *impl =
+ (tsi_fake_zero_copy_grpc_protector *)gpr_zalloc(sizeof(*impl));
+ grpc_slice_buffer_init(&impl->header_sb);
+ grpc_slice_buffer_init(&impl->protected_sb);
+ impl->max_frame_size = (max_protected_frame_size == NULL)
+ ? TSI_FAKE_DEFAULT_FRAME_SIZE
+ : *max_protected_frame_size;
+ impl->parsed_frame_size = 0;
+ impl->base.vtable = &zero_copy_grpc_protector_vtable;
+ return &impl->base;
+}
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index 934b3cbeb2..6159708a84 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -39,6 +39,11 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client);
tsi_frame_protector *tsi_create_fake_frame_protector(
size_t *max_protected_frame_size);
+/* Creates a zero-copy protector directly without going through the handshake
+ * phase. */
+tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
+ size_t *max_protected_frame_size);
+
#ifdef __cplusplus
}
#endif
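
The fake zero-copy protector declared above can be exercised without running a handshake. Below is a minimal test sketch, not part of this change, that uses only the entry points whose signatures appear in this diff plus the standard slice helpers grpc_slice_buffer_add and grpc_slice_from_copied_string (assumed from grpc/slice_buffer.h and grpc/slice.h):

/* Sketch only: round-trips a payload through the fake zero-copy protector.
   The slice helpers are assumptions of this sketch, not part of the change. */
static void roundtrip_with_fake_zero_copy_protector(grpc_exec_ctx *exec_ctx) {
  tsi_zero_copy_grpc_protector *protector =
      tsi_create_fake_zero_copy_grpc_protector(NULL /* default frame size */);
  grpc_slice_buffer original, protected_sb, unprotected_sb;
  grpc_slice_buffer_init(&original);
  grpc_slice_buffer_init(&protected_sb);
  grpc_slice_buffer_init(&unprotected_sb);
  grpc_slice_buffer_add(&original, grpc_slice_from_copied_string("hello"));
  /* Frames the payload with the fake frame header... */
  GPR_ASSERT(tsi_zero_copy_grpc_protector_protect(
                 exec_ctx, protector, &original, &protected_sb) == TSI_OK);
  /* ...and strips the header again, leaving the payload in unprotected_sb. */
  GPR_ASSERT(tsi_zero_copy_grpc_protector_unprotect(
                 exec_ctx, protector, &protected_sb, &unprotected_sb) == TSI_OK);
  tsi_zero_copy_grpc_protector_destroy(exec_ctx, protector);
  grpc_slice_buffer_destroy_internal(exec_ctx, &original);
  grpc_slice_buffer_destroy_internal(exec_ctx, &protected_sb);
  grpc_slice_buffer_destroy_internal(exec_ctx, &unprotected_sb);
}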
diff --git a/src/core/tsi/gts_transport_security.c b/src/core/tsi/gts_transport_security.cc
index e2ac685e44..d37f3bf8f6 100644
--- a/src/core/tsi/gts_transport_security.c
+++ b/src/core/tsi/gts_transport_security.cc
@@ -24,12 +24,12 @@ static gts_shared_resource g_gts_resource;
gts_shared_resource *gts_get_shared_resource(void) { return &g_gts_resource; }
-void grpc_tsi_gts_init() {
+extern "C" void grpc_tsi_gts_init() {
memset(&g_gts_resource, 0, sizeof(gts_shared_resource));
gpr_mu_init(&g_gts_resource.mu);
}
-void grpc_tsi_gts_shutdown() {
+extern "C" void grpc_tsi_gts_shutdown() {
gpr_mu_destroy(&g_gts_resource.mu);
if (g_gts_resource.cq == NULL) {
return;
diff --git a/src/core/tsi/gts_transport_security.h b/src/core/tsi/gts_transport_security.h
index 538e1030bc..9590038ed0 100644
--- a/src/core/tsi/gts_transport_security.h
+++ b/src/core/tsi/gts_transport_security.h
@@ -23,6 +23,10 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef struct gts_shared_resource {
gpr_thd_id thread_id;
grpc_channel *channel;
@@ -34,4 +38,8 @@ typedef struct gts_shared_resource {
* TSI handshakes. */
gts_shared_resource *gts_get_shared_resource(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_TSI_GTS_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.cc
index 1fd65928f9..b1c69e9c7b 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.cc
@@ -39,12 +39,14 @@
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
+extern "C" {
#include <openssl/bio.h>
#include <openssl/crypto.h> /* For OPENSSL_free */
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
+}
#include "src/core/tsi/ssl_types.h"
#include "src/core/tsi/transport_security.h"
@@ -67,7 +69,13 @@
/* --- Structure definitions. ---*/
+struct tsi_ssl_handshaker_factory {
+ const tsi_ssl_handshaker_factory_vtable *vtable;
+ gpr_refcount refcount;
+};
+
struct tsi_ssl_client_handshaker_factory {
+ tsi_ssl_handshaker_factory base;
SSL_CTX *ssl_context;
unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
@@ -77,6 +85,7 @@ struct tsi_ssl_server_handshaker_factory {
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
+ tsi_ssl_handshaker_factory base;
SSL_CTX **ssl_contexts;
tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
@@ -90,6 +99,7 @@ typedef struct {
BIO *into_ssl;
BIO *from_ssl;
tsi_result result;
+ tsi_ssl_handshaker_factory *factory_ref;
} tsi_ssl_handshaker;
typedef struct {
@@ -127,7 +137,7 @@ static void init_openssl(void) {
OpenSSL_add_all_algorithms();
num_locks = CRYPTO_num_locks();
GPR_ASSERT(num_locks > 0);
- openssl_mutexes = gpr_malloc((size_t)num_locks * sizeof(gpr_mu));
+ openssl_mutexes = (gpr_mu *)gpr_malloc((size_t)num_locks * sizeof(gpr_mu));
for (i = 0; i < CRYPTO_num_locks(); i++) {
gpr_mu_init(&openssl_mutexes[i]);
}
@@ -353,7 +363,7 @@ static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
tsi_peer *peer) {
/* TODO(jboeuf): Maybe add more properties. */
GENERAL_NAMES *subject_alt_names =
- X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
+ (GENERAL_NAMES *)X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
int subject_alt_name_count = (subject_alt_names != NULL)
? (int)sk_GENERAL_NAME_num(subject_alt_names)
: 0;
@@ -471,7 +481,7 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context,
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
- certificate = PEM_read_bio_X509_AUX(pem, NULL, NULL, "");
+ certificate = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void *)"");
if (certificate == NULL) {
result = TSI_INVALID_ARGUMENT;
break;
@@ -481,7 +491,8 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context,
break;
}
while (1) {
- X509 *certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, "");
+ X509 *certificate_authority =
+ PEM_read_bio_X509(pem, NULL, NULL, (void *)"");
if (certificate_authority == NULL) {
ERR_clear_error();
break; /* Done reading. */
@@ -512,7 +523,7 @@ static tsi_result ssl_ctx_use_private_key(SSL_CTX *context, const char *pem_key,
pem = BIO_new_mem_buf((void *)pem_key, (int)pem_key_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
- private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, "");
+ private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, (void *)"");
if (private_key == NULL) {
result = TSI_INVALID_ARGUMENT;
break;
@@ -551,7 +562,7 @@ static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context,
}
while (1) {
- root = PEM_read_bio_X509_AUX(pem, NULL, NULL, "");
+ root = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void *)"");
if (root == NULL) {
ERR_clear_error();
break; /* We're at the end of stream. */
@@ -647,7 +658,7 @@ static tsi_result extract_x509_subject_names_from_pem_cert(const char *pem_cert,
pem = BIO_new_mem_buf((void *)pem_cert, (int)strlen(pem_cert));
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
- cert = PEM_read_bio_X509(pem, NULL, NULL, "");
+ cert = PEM_read_bio_X509(pem, NULL, NULL, (void *)"");
if (cert == NULL) {
gpr_log(GPR_ERROR, "Invalid certificate");
result = TSI_INVALID_ARGUMENT;
@@ -676,7 +687,7 @@ static tsi_result build_alpn_protocol_name_list(
}
*protocol_name_list_length += length + 1;
}
- *protocol_name_list = gpr_malloc(*protocol_name_list_length);
+ *protocol_name_list = (unsigned char *)gpr_malloc(*protocol_name_list_length);
if (*protocol_name_list == NULL) return TSI_OUT_OF_RESOURCES;
current = *protocol_name_list;
for (i = 0; i < num_alpn_protocols; i++) {
@@ -846,6 +857,47 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
ssl_protector_destroy,
};
+/* --- tsi_ssl_handshaker_factory methods implementation. --- */
+
+static void tsi_ssl_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (self->vtable != NULL && self->vtable->destroy != NULL) {
+ self->vtable->destroy(self);
+ }
+ /* Note: we don't free(self) here because this object is always directly
+ * embedded in another object. If tsi_ssl_handshaker_factory_init allocates
+ * any memory, it should be freed here. */
+}
+
+static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return NULL;
+ gpr_refn(&self->refcount, 1);
+ return self;
+}
+
+static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (gpr_unref(&self->refcount)) {
+ tsi_ssl_handshaker_factory_destroy(self);
+ }
+}
+
+static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL};
+
+/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for
+ * allocating memory for the factory. */
+static void tsi_ssl_handshaker_factory_init(
+ tsi_ssl_handshaker_factory *factory) {
+ GPR_ASSERT(factory != NULL);
+
+ factory->vtable = &handshaker_factory_vtable;
+ gpr_ref_init(&factory->refcount, 1);
+}
+
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
@@ -949,8 +1001,8 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
}
if (alpn_selected != NULL) {
size_t i;
- tsi_peer_property *new_properties =
- gpr_zalloc(sizeof(*new_properties) * (peer->property_count + 1));
+ tsi_peer_property *new_properties = (tsi_peer_property *)gpr_zalloc(
+ sizeof(*new_properties) * (peer->property_count + 1));
for (i = 0; i < peer->property_count; i++) {
new_properties[i] = peer->properties[i];
}
@@ -974,7 +1026,8 @@ static tsi_result ssl_handshaker_create_frame_protector(
size_t actual_max_output_protected_frame_size =
TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND;
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
- tsi_ssl_frame_protector *protector_impl = gpr_zalloc(sizeof(*protector_impl));
+ tsi_ssl_frame_protector *protector_impl =
+ (tsi_ssl_frame_protector *)gpr_zalloc(sizeof(*protector_impl));
if (max_output_protected_frame_size != NULL) {
if (*max_output_protected_frame_size >
@@ -990,7 +1043,8 @@ static tsi_result ssl_handshaker_create_frame_protector(
}
protector_impl->buffer_size =
actual_max_output_protected_frame_size - TSI_SSL_MAX_PROTECTION_OVERHEAD;
- protector_impl->buffer = gpr_malloc(protector_impl->buffer_size);
+ protector_impl->buffer =
+ (unsigned char *)gpr_malloc(protector_impl->buffer_size);
if (protector_impl->buffer == NULL) {
gpr_log(GPR_ERROR,
"Could not allocated buffer for tsi_ssl_frame_protector.");
@@ -1013,6 +1067,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
static void ssl_handshaker_destroy(tsi_handshaker *self) {
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
+ tsi_ssl_handshaker_factory_unref(impl->factory_ref);
gpr_free(impl);
}
@@ -1030,6 +1085,7 @@ static const tsi_handshaker_vtable handshaker_vtable = {
static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
const char *server_name_indication,
+ tsi_ssl_handshaker_factory *factory,
tsi_handshaker **handshaker) {
SSL *ssl = SSL_new(ctx);
BIO *into_ssl = NULL;
@@ -1079,12 +1135,14 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
SSL_set_accept_state(ssl);
}
- impl = gpr_zalloc(sizeof(*impl));
+ impl = (tsi_ssl_handshaker *)gpr_zalloc(sizeof(*impl));
impl->ssl = ssl;
impl->into_ssl = into_ssl;
impl->from_ssl = from_ssl;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->base.vtable = &handshaker_vtable;
+ impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory);
+
*handshaker = &impl->base;
return TSI_OK;
}
@@ -1121,11 +1179,20 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker) {
return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication,
- handshaker);
+ &self->base, handshaker);
}
-void tsi_ssl_client_handshaker_factory_destroy(
+void tsi_ssl_client_handshaker_factory_unref(
tsi_ssl_client_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_client_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_client_handshaker_factory *self =
+ (tsi_ssl_client_handshaker_factory *)factory;
if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context);
if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list);
gpr_free(self);
@@ -1150,11 +1217,21 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT;
/* Create the handshaker with the first context. We will switch if needed
because of SNI in ssl_server_handshaker_factory_servername_callback. */
- return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, handshaker);
+ return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base,
+ handshaker);
}
-void tsi_ssl_server_handshaker_factory_destroy(
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_server_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_server_handshaker_factory *self =
+ (tsi_ssl_server_handshaker_factory *)factory;
size_t i;
for (i = 0; i < self->ssl_context_count; i++) {
if (self->ssl_contexts[i] != NULL) {
@@ -1263,6 +1340,9 @@ static int server_handshaker_factory_npn_advertised_callback(
/* --- tsi_ssl_handshaker_factory constructors. --- */
+static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = {
+ tsi_ssl_client_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_client_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
const char *pem_root_certs, const char *cipher_suites,
@@ -1284,7 +1364,10 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_INVALID_ARGUMENT;
}
- impl = gpr_zalloc(sizeof(*impl));
+ impl = (tsi_ssl_client_handshaker_factory *)gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &client_handshaker_factory_vtable;
+
impl->ssl_context = ssl_context;
do {
@@ -1322,7 +1405,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
} while (0);
if (result != TSI_OK) {
- tsi_ssl_client_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL);
@@ -1332,6 +1415,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_OK;
}
+static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = {
+ tsi_ssl_server_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_server_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, const char *pem_client_root_certs,
@@ -1363,13 +1449,17 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
return TSI_INVALID_ARGUMENT;
}
- impl = gpr_zalloc(sizeof(*impl));
- impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
+ impl = (tsi_ssl_server_handshaker_factory *)gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &server_handshaker_factory_vtable;
+
+ impl->ssl_contexts =
+ (SSL_CTX **)gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
- gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
+ (tsi_peer *)gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return TSI_OUT_OF_RESOURCES;
}
impl->ssl_context_count = num_key_cert_pairs;
@@ -1379,7 +1469,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
&impl->alpn_protocol_list,
&impl->alpn_protocol_list_length);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
@@ -1451,10 +1541,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
} while (0);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
+
*factory = impl;
return TSI_OK;
}
@@ -1501,3 +1592,15 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
return 0; /* Not found. */
}
+
+/* --- Testing support. --- */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable) {
+ GPR_ASSERT(factory != NULL);
+ GPR_ASSERT(factory->vtable != NULL);
+
+ const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable;
+ factory->vtable = new_vtable;
+ return orig_vtable;
+}
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index 177599930b..3abfdf5ed8 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -96,10 +96,10 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_client_handshaker_factory_destroy(
- tsi_ssl_client_handshaker_factory *self);
+/* Decrements the reference count of the handshaker factory. The factory will
+ * be destroyed once no references remain. */
+void tsi_ssl_client_handshaker_factory_unref(
+ tsi_ssl_client_handshaker_factory *factory);
/* --- tsi_ssl_server_handshaker_factory object ---
@@ -158,9 +158,9 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_server_handshaker_factory_destroy(
+/* Decrements the reference count of the handshaker factory. The factory will
+ * be destroyed once no references remain. */
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
@@ -170,6 +170,29 @@ void tsi_ssl_server_handshaker_factory_destroy(
- handle public suffix wildchar more strictly (e.g. *.co.uk) */
int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
+/* --- Testing support. ---
+
+ These functions and typedefs are not intended to be used outside of testing.
+ */
+
+/* Base type of client and server handshaker factories. */
+typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
+
+/* Function pointer to handshaker_factory destructor. */
+typedef void (*tsi_ssl_handshaker_factory_destructor)(
+ tsi_ssl_handshaker_factory *factory);
+
+/* Virtual table for tsi_ssl_handshaker_factory. */
+typedef struct {
+ tsi_ssl_handshaker_factory_destructor destroy;
+} tsi_ssl_handshaker_factory_vtable;
+
+/* Replaces the vtable of handshaker_factory with new_vtable and returns the
+   previous vtable. */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable);
+
#ifdef __cplusplus
}
#endif
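
The swap hook above makes factory destruction observable from tests. A minimal sketch, not part of this change, using only the testing typedefs declared in this header; the names counting_destroy, counting_vtable and factory_destroyed are placeholders invented for the example:

/* Sketch only: records when the factory's destructor runs, then chains to the
   original vtable so the factory is still cleaned up normally. */
static bool factory_destroyed = false;
static const tsi_ssl_handshaker_factory_vtable *original_vtable = NULL;

static void counting_destroy(tsi_ssl_handshaker_factory *factory) {
  factory_destroyed = true;
  if (original_vtable != NULL && original_vtable->destroy != NULL) {
    original_vtable->destroy(factory);
  }
}

static tsi_ssl_handshaker_factory_vtable counting_vtable = {counting_destroy};

static void install_counting_vtable(tsi_ssl_handshaker_factory *factory) {
  original_vtable =
      tsi_ssl_handshaker_factory_swap_vtable(factory, &counting_vtable);
}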
diff --git a/src/core/tsi/ssl_types.h b/src/core/tsi/ssl_types.h
index 3788643355..e0e967034b 100644
--- a/src/core/tsi/ssl_types.h
+++ b/src/core/tsi/ssl_types.h
@@ -19,6 +19,10 @@
#ifndef GRPC_CORE_TSI_SSL_TYPES_H
#define GRPC_CORE_TSI_SSL_TYPES_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* A collection of macros to cast between various integer types that are
* used differently between BoringSSL and OpenSSL:
* TSI_INT_AS_SIZE(x): convert 'int x' to a length parameter for an OpenSSL
@@ -37,4 +41,8 @@
#define TSI_SIZE_AS_SIZE(x) ((int)(x))
#endif
+#ifdef __cplusplus
+}
+#endif
+
#endif /* GRPC_CORE_TSI_SSL_TYPES_H */
diff --git a/src/core/tsi/transport_security.c b/src/core/tsi/transport_security.cc
index 76213072a3..21bd8eba78 100644
--- a/src/core/tsi/transport_security.c
+++ b/src/core/tsi/transport_security.cc
@@ -282,7 +282,7 @@ tsi_result tsi_construct_allocated_string_peer_property(
*property = tsi_init_peer_property();
if (name != NULL) property->name = gpr_strdup(name);
if (value_length > 0) {
- property->value.data = gpr_zalloc(value_length);
+ property->value.data = (char *)gpr_zalloc(value_length);
property->value.length = value_length;
}
return TSI_OK;
@@ -310,7 +310,8 @@ tsi_result tsi_construct_string_peer_property(const char *name,
tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) {
memset(peer, 0, sizeof(tsi_peer));
if (property_count > 0) {
- peer->properties = gpr_zalloc(property_count * sizeof(tsi_peer_property));
+ peer->properties = (tsi_peer_property *)gpr_zalloc(
+ property_count * sizeof(tsi_peer_property));
peer->property_count = property_count;
}
return TSI_OK;
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index b0d7039850..3bba38149c 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -84,11 +84,17 @@ struct tsi_handshaker {
};
/* Base for tsi_handshaker_result implementations.
- See transport_security_interface.h for documentation. */
+ See transport_security_interface.h for documentation.
+ The exec_ctx parameter in create_zero_copy_grpc_protector is supposed to be
+ of type grpc_exec_ctx*, but we're using void* instead to avoid making the TSI
+ API depend on grpc. The create_zero_copy_grpc_protector() method is only used
+ in grpc, where we do need the exec_ctx passed through, but the API still
+ needs to compile in other applications, where grpc_exec_ctx is not defined.
+*/
typedef struct {
tsi_result (*extract_peer)(const tsi_handshaker_result *self, tsi_peer *peer);
tsi_result (*create_zero_copy_grpc_protector)(
- const tsi_handshaker_result *self,
+ void *exec_ctx, const tsi_handshaker_result *self,
size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
tsi_result (*create_frame_protector)(const tsi_handshaker_result *self,
diff --git a/src/core/tsi/transport_security_adapter.c b/src/core/tsi/transport_security_adapter.cc
index 1c2a57b3bd..e399e42758 100644
--- a/src/core/tsi/transport_security_adapter.c
+++ b/src/core/tsi/transport_security_adapter.cc
@@ -80,12 +80,13 @@ static tsi_result tsi_adapter_create_handshaker_result(
if (wrapped == NULL || (unused_bytes_size > 0 && unused_bytes == NULL)) {
return TSI_INVALID_ARGUMENT;
}
- tsi_adapter_handshaker_result *impl = gpr_zalloc(sizeof(*impl));
+ tsi_adapter_handshaker_result *impl =
+ (tsi_adapter_handshaker_result *)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &result_vtable;
impl->wrapped = wrapped;
impl->unused_bytes_size = unused_bytes_size;
if (unused_bytes_size > 0) {
- impl->unused_bytes = gpr_malloc(unused_bytes_size);
+ impl->unused_bytes = (unsigned char *)gpr_malloc(unused_bytes_size);
memcpy(impl->unused_bytes, unused_bytes, unused_bytes_size);
} else {
impl->unused_bytes = NULL;
@@ -172,8 +173,8 @@ static tsi_result adapter_next(
offset += to_send_size;
if (status == TSI_INCOMPLETE_DATA) {
impl->adapter_buffer_size *= 2;
- impl->adapter_buffer =
- gpr_realloc(impl->adapter_buffer, impl->adapter_buffer_size);
+ impl->adapter_buffer = (unsigned char *)gpr_realloc(
+ impl->adapter_buffer, impl->adapter_buffer_size);
}
} while (status == TSI_INCOMPLETE_DATA);
if (status != TSI_OK) return status;
@@ -209,11 +210,12 @@ static const tsi_handshaker_vtable handshaker_vtable = {
tsi_handshaker *tsi_create_adapter_handshaker(tsi_handshaker *wrapped) {
GPR_ASSERT(wrapped != NULL);
- tsi_adapter_handshaker *impl = gpr_zalloc(sizeof(*impl));
+ tsi_adapter_handshaker *impl =
+ (tsi_adapter_handshaker *)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &handshaker_vtable;
impl->wrapped = wrapped;
impl->adapter_buffer_size = TSI_ADAPTER_INITIAL_BUFFER_SIZE;
- impl->adapter_buffer = gpr_malloc(impl->adapter_buffer_size);
+ impl->adapter_buffer = (unsigned char *)gpr_malloc(impl->adapter_buffer_size);
return &impl->base;
}
diff --git a/src/core/tsi/transport_security_grpc.c b/src/core/tsi/transport_security_grpc.cc
index 5bcfdfa61f..affd995230 100644
--- a/src/core/tsi/transport_security_grpc.c
+++ b/src/core/tsi/transport_security_grpc.cc
@@ -20,16 +20,18 @@
/* This method creates a tsi_zero_copy_grpc_protector object. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
+ grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector) {
- if (self == NULL || self->vtable == NULL || protector == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ protector == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->create_zero_copy_grpc_protector == NULL) {
return TSI_UNIMPLEMENTED;
}
return self->vtable->create_zero_copy_grpc_protector(
- self, max_output_protected_frame_size, protector);
+ exec_ctx, self, max_output_protected_frame_size, protector);
}
/* --- tsi_zero_copy_grpc_protector common implementation. ---
@@ -37,28 +39,33 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
Calls specific implementation after state/input validation. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices,
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices,
grpc_slice_buffer *protected_slices) {
- if (self == NULL || self->vtable == NULL || unprotected_slices == NULL ||
- protected_slices == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ unprotected_slices == NULL || protected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->protect == NULL) return TSI_UNIMPLEMENTED;
- return self->vtable->protect(self, unprotected_slices, protected_slices);
+ return self->vtable->protect(exec_ctx, self, unprotected_slices,
+ protected_slices);
}
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices,
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices,
grpc_slice_buffer *unprotected_slices) {
- if (self == NULL || self->vtable == NULL || protected_slices == NULL ||
- unprotected_slices == NULL) {
+ if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
+ protected_slices == NULL || unprotected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
if (self->vtable->unprotect == NULL) return TSI_UNIMPLEMENTED;
- return self->vtable->unprotect(self, protected_slices, unprotected_slices);
+ return self->vtable->unprotect(exec_ctx, self, protected_slices,
+ unprotected_slices);
}
-void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self) {
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self) {
if (self == NULL) return;
- self->vtable->destroy(self);
+ self->vtable->destroy(exec_ctx, self);
}
diff --git a/src/core/tsi/transport_security_grpc.h b/src/core/tsi/transport_security_grpc.h
index 5ab5297cc4..ca6755c12f 100644
--- a/src/core/tsi/transport_security_grpc.h
+++ b/src/core/tsi/transport_security_grpc.h
@@ -30,7 +30,8 @@ extern "C" {
assuming there is no fatal error.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
+ grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
+ size_t *max_output_protected_frame_size,
tsi_zero_copy_grpc_protector **protector);
/* -- tsi_zero_copy_grpc_protector object -- */
@@ -42,8 +43,8 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- This method returns TSI_OK in case of success or a specific error code in
case of failure. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices,
- grpc_slice_buffer *protected_slices);
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices);
/* Outputs unprotected bytes.
- protected_slices is the bytes of protected frames.
@@ -52,21 +53,24 @@ tsi_result tsi_zero_copy_grpc_protector_protect(
there is not enough data to output in which case unprotected_slices has 0
bytes. */
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices,
- grpc_slice_buffer *unprotected_slices);
+ grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
+ grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices);
/* Destroys the tsi_zero_copy_grpc_protector object. */
-void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self);
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self);
/* Base for tsi_zero_copy_grpc_protector implementations. */
typedef struct {
- tsi_result (*protect)(tsi_zero_copy_grpc_protector *self,
+ tsi_result (*protect)(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self,
grpc_slice_buffer *unprotected_slices,
grpc_slice_buffer *protected_slices);
- tsi_result (*unprotect)(tsi_zero_copy_grpc_protector *self,
+ tsi_result (*unprotect)(grpc_exec_ctx *exec_ctx,
+ tsi_zero_copy_grpc_protector *self,
grpc_slice_buffer *protected_slices,
grpc_slice_buffer *unprotected_slices);
- void (*destroy)(tsi_zero_copy_grpc_protector *self);
+ void (*destroy)(grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self);
} tsi_zero_copy_grpc_protector_vtable;
struct tsi_zero_copy_grpc_protector {
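
On the caller side, the exec_ctx now has to be threaded through every zero-copy entry point. A minimal sketch, not part of this change, assuming a handshake has already produced a tsi_handshaker_result; the 16384-byte frame size is an arbitrary example value:

/* Sketch only: asks the handshaker result for a zero-copy protector and falls
   back to the copying frame protector when the result does not provide one. */
static tsi_result make_protector(grpc_exec_ctx *exec_ctx,
                                 const tsi_handshaker_result *result,
                                 tsi_zero_copy_grpc_protector **zero_copy,
                                 tsi_frame_protector **copying) {
  size_t max_frame_size = 16384; /* example value */
  tsi_result status = tsi_handshaker_result_create_zero_copy_grpc_protector(
      exec_ctx, result, &max_frame_size, zero_copy);
  if (status != TSI_UNIMPLEMENTED) return status;
  /* No zero-copy support in this result's vtable; use the classic API. */
  return tsi_handshaker_result_create_frame_protector(result, &max_frame_size,
                                                      copying);
}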
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc
index f2d9bb07c9..9df531066e 100644
--- a/src/cpp/client/channel_cc.cc
+++ b/src/cpp/client/channel_cc.cc
@@ -18,7 +18,10 @@
#include <grpc++/channel.h>
+#include <chrono>
+#include <condition_variable>
#include <memory>
+#include <mutex>
#include <grpc++/client_context.h>
#include <grpc++/completion_queue.h>
@@ -35,7 +38,13 @@
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
namespace grpc {
@@ -76,8 +85,9 @@ grpc::string Channel::GetServiceConfigJSON() const {
&channel_info.service_config_json);
}
-Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
- CompletionQueue* cq) {
+internal::Call Channel::CreateCall(const internal::RpcMethod& method,
+ ClientContext* context,
+ CompletionQueue* cq) {
const bool kRegistered = method.channel_tag() && context->authority().empty();
grpc_call* c_call = NULL;
if (kRegistered) {
@@ -109,10 +119,11 @@ Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
}
grpc_census_call_set_context(c_call, context->census_context());
context->set_call(c_call, shared_from_this());
- return Call(c_call, this, cq);
+ return internal::Call(c_call, this, cq);
}
-void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
+void Channel::PerformOpsOnCall(internal::CallOpSetInterface* ops,
+ internal::Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
@@ -131,7 +142,8 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) {
}
namespace {
-class TagSaver final : public CompletionQueueTag {
+
+class TagSaver final : public internal::CompletionQueueTag {
public:
explicit TagSaver(void* tag) : tag_(tag) {}
~TagSaver() override {}
diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc
index 3af8bdc11a..40e95f3c05 100644
--- a/src/cpp/client/client_context.cc
+++ b/src/cpp/client/client_context.cc
@@ -96,7 +96,7 @@ void ClientContext::set_call(grpc_call* call,
void ClientContext::set_compression_algorithm(
grpc_compression_algorithm algorithm) {
- char* algorithm_name = nullptr;
+ const char* algorithm_name = nullptr;
if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
algorithm);
diff --git a/src/cpp/client/create_channel.cc b/src/cpp/client/create_channel.cc
index e2893c8f3c..de67281dd4 100644
--- a/src/cpp/client/create_channel.cc
+++ b/src/cpp/client/create_channel.cc
@@ -38,8 +38,7 @@ std::shared_ptr<Channel> CreateCustomChannel(
const grpc::string& target,
const std::shared_ptr<ChannelCredentials>& creds,
const ChannelArguments& args) {
- internal::GrpcLibrary
- init_lib; // We need to call init in case of a bad creds.
+ GrpcLibraryCodegen init_lib; // We need to call init in case of bad creds.
return creds
? creds->CreateChannel(target, args)
: CreateChannelInternal("", grpc_lame_client_channel_create(
diff --git a/src/cpp/client/generic_stub.cc b/src/cpp/client/generic_stub.cc
index 66b1ef0e39..fc18ce9093 100644
--- a/src/cpp/client/generic_stub.cc
+++ b/src/cpp/client/generic_stub.cc
@@ -22,14 +22,41 @@
namespace grpc {
+namespace {
+std::unique_ptr<GenericClientAsyncReaderWriter> CallInternal(
+ ChannelInterface* channel, ClientContext* context,
+ const grpc::string& method, CompletionQueue* cq, bool start, void* tag) {
+ return std::unique_ptr<GenericClientAsyncReaderWriter>(
+ internal::ClientAsyncReaderWriterFactory<ByteBuffer, ByteBuffer>::Create(
+ channel, cq, internal::RpcMethod(method.c_str(),
+ internal::RpcMethod::BIDI_STREAMING),
+ context, start, tag));
+}
+
+} // namespace
+
// begin a call to a named method
std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::Call(
ClientContext* context, const grpc::string& method, CompletionQueue* cq,
void* tag) {
- return std::unique_ptr<GenericClientAsyncReaderWriter>(
- GenericClientAsyncReaderWriter::Create(
+ return CallInternal(channel_.get(), context, method, cq, true, tag);
+}
+
+// setup a call to a named method
+std::unique_ptr<GenericClientAsyncReaderWriter> GenericStub::PrepareCall(
+ ClientContext* context, const grpc::string& method, CompletionQueue* cq) {
+ return CallInternal(channel_.get(), context, method, cq, false, nullptr);
+}
+
+// setup a unary call to a named method
+std::unique_ptr<GenericClientAsyncResponseReader> GenericStub::PrepareUnaryCall(
+ ClientContext* context, const grpc::string& method,
+ const ByteBuffer& request, CompletionQueue* cq) {
+ return std::unique_ptr<GenericClientAsyncResponseReader>(
+ internal::ClientAsyncResponseReaderFactory<ByteBuffer>::Create(
channel_.get(), cq,
- RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), context, tag));
+ internal::RpcMethod(method.c_str(), internal::RpcMethod::NORMAL_RPC),
+ context, request, false));
}
} // namespace grpc
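
PrepareCall and PrepareUnaryCall return calls that are created but not yet started (the start=false path in CallInternal above), so the caller decides when the RPC actually goes out. A usage sketch, not part of this change, assuming StartCall() from the standard async streaming API and placeholder method and tag values:

// Sketch only: prepares a generic bidi call and starts it explicitly. The
// caller keeps ctx and the returned stream alive until the call finishes.
std::unique_ptr<grpc::GenericClientAsyncReaderWriter> StartGenericCall(
    grpc::GenericStub* stub, grpc::ClientContext* ctx,
    grpc::CompletionQueue* cq, void* start_tag) {
  auto stream = stub->PrepareCall(ctx, "/example.Service/Method", cq);
  stream->StartCall(start_tag);  // start_tag is delivered on cq once started
  // Write/Read/Finish are then driven through cq->Next() as usual.
  return stream;
}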
diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc
index 057a058a3f..13bbc3075d 100644
--- a/src/cpp/client/secure_credentials.cc
+++ b/src/cpp/client/secure_credentials.cc
@@ -21,6 +21,7 @@
#include <grpc++/impl/grpc_library.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/common/secure_auth_context.h"
@@ -150,6 +151,18 @@ std::shared_ptr<ChannelCredentials> CompositeChannelCredentials(
return nullptr;
}
+std::shared_ptr<CallCredentials> CompositeCallCredentials(
+ const std::shared_ptr<CallCredentials>& creds1,
+ const std::shared_ptr<CallCredentials>& creds2) {
+ SecureCallCredentials* s_creds1 = creds1->AsSecureCredentials();
+ SecureCallCredentials* s_creds2 = creds2->AsSecureCredentials();
+ if (s_creds1 != nullptr && s_creds2 != nullptr) {
+ return WrapCallCredentials(grpc_composite_call_credentials_create(
+ s_creds1->GetRawCreds(), s_creds2->GetRawCreds(), nullptr));
+ }
+ return nullptr;
+}
+
void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
if (wrapper == nullptr) return;
MetadataCredentialsPluginWrapper* w =
@@ -157,28 +170,50 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
delete w;
}
-void MetadataCredentialsPluginWrapper::GetMetadata(
+int MetadataCredentialsPluginWrapper::GetMetadata(
void* wrapper, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void* user_data) {
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status,
+ const char** error_details) {
GPR_ASSERT(wrapper);
MetadataCredentialsPluginWrapper* w =
reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
if (!w->plugin_) {
- cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL);
- return;
+ *num_creds_md = 0;
+ *status = GRPC_STATUS_OK;
+ *error_details = nullptr;
+ return true;
}
if (w->plugin_->IsBlocking()) {
+ // Asynchronous return.
w->thread_pool_->Add(
std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context,
- cb, user_data));
+ cb, user_data, nullptr, nullptr, nullptr, nullptr));
+ return 0;
} else {
- w->InvokePlugin(context, cb, user_data);
+ // Synchronous return.
+ w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status,
+ error_details);
+ return 1;
+ }
+}
+
+namespace {
+
+void UnrefMetadata(const std::vector<grpc_metadata>& md) {
+ for (auto it = md.begin(); it != md.end(); ++it) {
+ grpc_slice_unref(it->key);
+ grpc_slice_unref(it->value);
}
}
+} // namespace
+
void MetadataCredentialsPluginWrapper::InvokePlugin(
grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb,
- void* user_data) {
+ void* user_data, grpc_metadata creds_md[4], size_t* num_creds_md,
+ grpc_status_code* status_code, const char** error_details) {
std::multimap<grpc::string, grpc::string> metadata;
// const_cast is safe since the SecureAuthContext does not take ownership and
@@ -196,12 +231,31 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
md_entry.flags = 0;
md.push_back(md_entry);
}
- cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
- static_cast<grpc_status_code>(status.error_code()),
- status.error_message().c_str());
- for (auto it = md.begin(); it != md.end(); ++it) {
- grpc_slice_unref(it->key);
- grpc_slice_unref(it->value);
+ if (creds_md != nullptr) {
+ // Synchronous return.
+ if (md.size() > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
+ *num_creds_md = 0;
+ *status_code = GRPC_STATUS_INTERNAL;
+ *error_details = gpr_strdup(
+ "blocking plugin credentials returned too many metadata keys");
+ UnrefMetadata(md);
+ } else {
+ for (const auto& elem : md) {
+ creds_md[*num_creds_md].key = elem.key;
+ creds_md[*num_creds_md].value = elem.value;
+ creds_md[*num_creds_md].flags = elem.flags;
+ ++(*num_creds_md);
+ }
+ *status_code = static_cast<grpc_status_code>(status.error_code());
+ *error_details =
+ status.ok() ? nullptr : gpr_strdup(status.error_message().c_str());
+ }
+ } else {
+ // Asynchronous return.
+ cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
+ static_cast<grpc_status_code>(status.error_code()),
+ status.error_message().c_str());
+ UnrefMetadata(md);
}
}
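
The wrapper above now distinguishes a synchronous return (non-blocking plugin, return 1) from an asynchronous one (blocking plugin handed to the thread pool, return 0). A sketch of the C++ plugin side, not part of this change, assuming the public MetadataCredentialsPlugin and MetadataCredentialsFromPlugin APIs; the class name and header value are placeholders:

// Sketch only: a non-blocking plugin, which takes the synchronous path above
// as long as it emits no more than GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
// metadata entries.
class StaticTokenPlugin : public grpc::MetadataCredentialsPlugin {
 public:
  explicit StaticTokenPlugin(grpc::string token) : token_(std::move(token)) {}
  bool IsBlocking() const override { return false; }
  grpc::Status GetMetadata(
      grpc::string_ref /*service_url*/, grpc::string_ref /*method_name*/,
      const grpc::AuthContext& /*channel_auth_context*/,
      std::multimap<grpc::string, grpc::string>* metadata) override {
    metadata->insert(std::make_pair("authorization", token_));
    return grpc::Status::OK;
  }

 private:
  grpc::string token_;
};

// auto creds = grpc::MetadataCredentialsFromPlugin(
//     std::unique_ptr<grpc::MetadataCredentialsPlugin>(
//         new StaticTokenPlugin("Bearer <token>")));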
diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h
index 66547c736b..ed9afb37b0 100644
--- a/src/cpp/client/secure_credentials.h
+++ b/src/cpp/client/secure_credentials.h
@@ -58,16 +58,23 @@ class SecureCallCredentials final : public CallCredentials {
class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen {
public:
static void Destroy(void* wrapper);
- static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb,
- void* user_data);
+ static int GetMetadata(
+ void* wrapper, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status,
+ const char** error_details);
explicit MetadataCredentialsPluginWrapper(
std::unique_ptr<MetadataCredentialsPlugin> plugin);
private:
- void InvokePlugin(grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void* user_data);
+ void InvokePlugin(
+ grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void* user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t* num_creds_md, grpc_status_code* status_code,
+ const char** error_details);
std::unique_ptr<ThreadPoolInterface> thread_pool_;
std::unique_ptr<MetadataCredentialsPlugin> plugin_;
};
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index f130aecd4b..f89f5f1f03 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm(
SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm);
}
+void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) {
+ SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout);
+}
+
void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
if (!mutator) {
return;
diff --git a/src/cpp/common/channel_filter.cc b/src/cpp/common/channel_filter.cc
index 448d9fbcf2..ea44cff832 100644
--- a/src/cpp/common/channel_filter.cc
+++ b/src/cpp/common/channel_filter.cc
@@ -68,10 +68,6 @@ void CallData::SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent);
}
-char *CallData::GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return grpc_call_next_get_peer(exec_ctx, elem);
-}
-
// internal code used by RegisterChannelFilter()
namespace internal {
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index c3d187d7e1..c1aeb3f724 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -268,9 +268,6 @@ class CallData {
virtual void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent);
-
- /// Gets the peer name.
- virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
};
namespace internal {
@@ -349,11 +346,6 @@ class ChannelFilter final {
CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
}
-
- static char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
- return call_data->GetPeer(exec_ctx, elem);
- }
};
struct FilterRecord {
@@ -396,8 +388,7 @@ void RegisterChannelFilter(
FilterType::call_data_size, FilterType::InitCallElement,
FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement,
FilterType::channel_data_size, FilterType::InitChannelElement,
- FilterType::DestroyChannelElement, FilterType::GetPeer,
- FilterType::GetChannelInfo, name}};
+ FilterType::DestroyChannelElement, FilterType::GetChannelInfo, name}};
internal::channel_filters->push_back(filter_record);
}
diff --git a/src/cpp/common/completion_queue_cc.cc b/src/cpp/common/completion_queue_cc.cc
index f34b0f3d58..eb6dc8cc5f 100644
--- a/src/cpp/common/completion_queue_cc.cc
+++ b/src/cpp/common/completion_queue_cc.cc
@@ -60,7 +60,7 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
case GRPC_QUEUE_SHUTDOWN:
return SHUTDOWN;
case GRPC_OP_COMPLETE:
- auto cq_tag = static_cast<CompletionQueueTag*>(ev.tag);
+ auto cq_tag = static_cast<internal::CompletionQueueTag*>(ev.tag);
*ok = ev.success != 0;
*tag = cq_tag;
if (cq_tag->FinalizeResult(tag, ok)) {
@@ -71,4 +71,29 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
}
}
+CompletionQueue::CompletionQueueTLSCache::CompletionQueueTLSCache(
+ CompletionQueue* cq)
+ : cq_(cq), flushed_(false) {
+ grpc_completion_queue_thread_local_cache_init(cq_->cq_);
+}
+
+CompletionQueue::CompletionQueueTLSCache::~CompletionQueueTLSCache() {
+ GPR_ASSERT(flushed_);
+}
+
+bool CompletionQueue::CompletionQueueTLSCache::Flush(void** tag, bool* ok) {
+ int res = 0;
+ void* res_tag;
+ flushed_ = true;
+ if (grpc_completion_queue_thread_local_cache_flush(cq_->cq_, &res_tag,
+ &res)) {
+ auto cq_tag = static_cast<internal::CompletionQueueTag*>(res_tag);
+ *ok = res == 1;
+ if (cq_tag->FinalizeResult(tag, ok)) {
+ return true;
+ }
+ }
+ return false;
+}
+
} // namespace grpc
diff --git a/src/cpp/common/version_cc.cc b/src/cpp/common/version_cc.cc
index 2e9a51316d..8049cbe0c9 100644
--- a/src/cpp/common/version_cc.cc
+++ b/src/cpp/common/version_cc.cc
@@ -22,5 +22,5 @@
#include <grpc++/grpc++.h>
namespace grpc {
-grpc::string Version() { return "1.7.0-dev"; }
+grpc::string Version() { return "1.8.0-dev"; }
}
diff --git a/src/cpp/server/health/default_health_check_service.cc b/src/cpp/server/health/default_health_check_service.cc
index 815b607032..10dbd3c39a 100644
--- a/src/cpp/server/health/default_health_check_service.cc
+++ b/src/cpp/server/health/default_health_check_service.cc
@@ -20,6 +20,7 @@
#include <mutex>
#include <grpc++/impl/codegen/method_handler_impl.h>
+#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -36,11 +37,12 @@ const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
DefaultHealthCheckService* service)
: service_(service), method_(nullptr) {
- MethodHandler* handler =
- new RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer, ByteBuffer>(
+ internal::MethodHandler* handler =
+ new internal::RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer,
+ ByteBuffer>(
std::mem_fn(&HealthCheckServiceImpl::Check), this);
- method_ = new RpcServiceMethod(kHealthCheckMethodName, RpcMethod::NORMAL_RPC,
- handler);
+ method_ = new internal::RpcServiceMethod(
+ kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, handler);
AddMethod(method_);
}
diff --git a/src/cpp/server/health/default_health_check_service.h b/src/cpp/server/health/default_health_check_service.h
index 09d5cebe98..99d6680c50 100644
--- a/src/cpp/server/health/default_health_check_service.h
+++ b/src/cpp/server/health/default_health_check_service.h
@@ -41,7 +41,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
private:
const DefaultHealthCheckService* const service_;
- RpcServiceMethod* method_;
+ internal::RpcServiceMethod* method_;
};
DefaultHealthCheckService();
diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc
index 6bd3ecda32..6480482774 100644
--- a/src/cpp/server/server_cc.cc
+++ b/src/cpp/server/server_cc.cc
@@ -90,7 +90,8 @@ class Server::UnimplementedAsyncRequest final
ServerCompletionQueue* const cq_;
};
-typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
+typedef internal::SneakyCallOpSet<internal::CallOpSendInitialMetadata,
+ internal::CallOpServerSendStatus>
UnimplementedAsyncResponseOp;
class Server::UnimplementedAsyncResponse final
: public UnimplementedAsyncResponseOp {
@@ -108,12 +109,12 @@ class Server::UnimplementedAsyncResponse final
UnimplementedAsyncRequest* const request_;
};
-class ShutdownTag : public CompletionQueueTag {
+class ShutdownTag : public internal::CompletionQueueTag {
public:
bool FinalizeResult(void** tag, bool* status) { return false; }
};
-class DummyTag : public CompletionQueueTag {
+class DummyTag : public internal::CompletionQueueTag {
public:
bool FinalizeResult(void** tag, bool* status) {
*status = true;
@@ -121,15 +122,15 @@ class DummyTag : public CompletionQueueTag {
}
};
-class Server::SyncRequest final : public CompletionQueueTag {
+class Server::SyncRequest final : public internal::CompletionQueueTag {
public:
- SyncRequest(RpcServiceMethod* method, void* tag)
+ SyncRequest(internal::RpcServiceMethod* method, void* tag)
: method_(method),
tag_(tag),
in_flight_(false),
- has_request_payload_(method->method_type() == RpcMethod::NORMAL_RPC ||
- method->method_type() ==
- RpcMethod::SERVER_STREAMING),
+ has_request_payload_(
+ method->method_type() == internal::RpcMethod::NORMAL_RPC ||
+ method->method_type() == internal::RpcMethod::SERVER_STREAMING),
call_details_(nullptr),
cq_(nullptr) {
grpc_metadata_array_init(&request_metadata_);
@@ -212,14 +213,14 @@ class Server::SyncRequest final : public CompletionQueueTag {
void Run(std::shared_ptr<GlobalCallbacks> global_callbacks) {
ctx_.BeginCompletionOp(&call_);
global_callbacks->PreSynchronousRequest(&ctx_);
- method_->handler()->RunHandler(
- MethodHandler::HandlerParameter(&call_, &ctx_, request_payload_));
+ method_->handler()->RunHandler(internal::MethodHandler::HandlerParameter(
+ &call_, &ctx_, request_payload_));
global_callbacks->PostSynchronousRequest(&ctx_);
request_payload_ = nullptr;
cq_.Shutdown();
- CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
+ internal::CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
/* Ensure the cq_ is shutdown */
@@ -229,15 +230,15 @@ class Server::SyncRequest final : public CompletionQueueTag {
private:
CompletionQueue cq_;
- Call call_;
+ internal::Call call_;
ServerContext ctx_;
const bool has_request_payload_;
grpc_byte_buffer* request_payload_;
- RpcServiceMethod* const method_;
+ internal::RpcServiceMethod* const method_;
};
private:
- RpcServiceMethod* const method_;
+ internal::RpcServiceMethod* const method_;
void* const tag_;
bool in_flight_;
const bool has_request_payload_;
@@ -266,8 +267,11 @@ class Server::SyncRequestThreadManager : public ThreadManager {
WorkStatus PollForWork(void** tag, bool* ok) override {
*tag = nullptr;
+ // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
+ // right now
gpr_timespec deadline =
- gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN);
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));
switch (server_cq_->AsyncNext(tag, ok, deadline)) {
case CompletionQueue::TIMEOUT:
@@ -308,14 +312,15 @@ class Server::SyncRequestThreadManager : public ThreadManager {
// object
}
- void AddSyncMethod(RpcServiceMethod* method, void* tag) {
+ void AddSyncMethod(internal::RpcServiceMethod* method, void* tag) {
sync_requests_.emplace_back(new SyncRequest(method, tag));
}
void AddUnknownSyncMethod() {
if (!sync_requests_.empty()) {
- unknown_method_.reset(new RpcServiceMethod(
- "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
+ unknown_method_.reset(new internal::RpcServiceMethod(
+ "unknown", internal::RpcMethod::BIDI_STREAMING,
+ new internal::UnknownMethodHandler));
sync_requests_.emplace_back(
new SyncRequest(unknown_method_.get(), nullptr));
}
@@ -352,8 +357,8 @@ class Server::SyncRequestThreadManager : public ThreadManager {
CompletionQueue* server_cq_;
int cq_timeout_msec_;
std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
- std::unique_ptr<RpcServiceMethod> unknown_method_;
- std::unique_ptr<RpcServiceMethod> health_check_;
+ std::unique_ptr<internal::RpcServiceMethod> unknown_method_;
+ std::unique_ptr<internal::RpcServiceMethod> health_check_;
std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};
@@ -436,13 +441,13 @@ std::shared_ptr<Channel> Server::InProcessChannel(
}
static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
- RpcServiceMethod* method) {
+ internal::RpcServiceMethod* method) {
switch (method->method_type()) {
- case RpcMethod::NORMAL_RPC:
- case RpcMethod::SERVER_STREAMING:
+ case internal::RpcMethod::NORMAL_RPC:
+ case internal::RpcMethod::SERVER_STREAMING:
return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
- case RpcMethod::CLIENT_STREAMING:
- case RpcMethod::BIDI_STREAMING:
+ case internal::RpcMethod::CLIENT_STREAMING:
+ case internal::RpcMethod::BIDI_STREAMING:
return GRPC_SRM_PAYLOAD_NONE;
}
GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
@@ -463,7 +468,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
continue;
}
- RpcServiceMethod* method = it->get();
+ internal::RpcServiceMethod* method = it->get();
void* tag = grpc_server_register_method(
server_, method->name(), host ? host->c_str() : nullptr,
PayloadHandlingForMethod(method), 0);
@@ -603,7 +608,8 @@ void Server::Wait() {
}
}
-void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
+void Server::PerformOpsOnCall(internal::CallOpSetInterface* ops,
+ internal::Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
@@ -619,8 +625,8 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
ServerInterface* server, ServerContext* context,
- ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag,
- bool delete_on_finalize)
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ void* tag, bool delete_on_finalize)
: server_(server),
context_(context),
stream_(stream),
@@ -642,7 +648,8 @@ bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
}
context_->set_call(call_);
context_->cq_ = call_cq_;
- Call call(call_, server_, call_cq_, server_->max_receive_message_size());
+ internal::Call call(call_, server_, call_cq_,
+ server_->max_receive_message_size());
if (*status && call_) {
context_->BeginCompletionOp(&call);
}
@@ -657,7 +664,8 @@ bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
ServerInterface* server, ServerContext* context,
- ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ void* tag)
: BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}
void ServerInterface::RegisteredAsyncRequest::IssueRequest(
@@ -672,7 +680,7 @@ void ServerInterface::RegisteredAsyncRequest::IssueRequest(
ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
ServerInterface* server, GenericServerContext* context,
- ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
+ internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
: BaseAsyncRequest(server, context, stream, call_cq, tag,
delete_on_finalize) {
@@ -715,7 +723,7 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
UnimplementedAsyncRequest* request)
: request_(request) {
Status status(StatusCode::UNIMPLEMENTED, "");
- UnknownMethodHandler::FillOps(request_->context(), this);
+ internal::UnknownMethodHandler::FillOps(request_->context(), this);
request_->stream()->call_.PerformOps(this);
}
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index 4913682f1d..f2cb6363f5 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -37,7 +37,7 @@ namespace grpc {
// CompletionOp
-class ServerContext::CompletionOp final : public CallOpSetInterface {
+class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
public:
// initial refs: one in the server context, one in the cq
CompletionOp()
@@ -146,7 +146,7 @@ ServerContext::~ServerContext() {
}
}
-void ServerContext::BeginCompletionOp(Call* call) {
+void ServerContext::BeginCompletionOp(internal::Call* call) {
GPR_ASSERT(!completion_op_);
completion_op_ = new CompletionOp();
if (has_notify_when_done_tag_) {
@@ -155,8 +155,8 @@ void ServerContext::BeginCompletionOp(Call* call) {
call->PerformOps(completion_op_);
}
-CompletionQueueTag* ServerContext::GetCompletionOpTag() {
- return static_cast<CompletionQueueTag*>(completion_op_);
+internal::CompletionQueueTag* ServerContext::GetCompletionOpTag() {
+ return static_cast<internal::CompletionQueueTag*>(completion_op_);
}
void ServerContext::AddInitialMetadata(const grpc::string& key,
@@ -190,7 +190,7 @@ bool ServerContext::IsCancelled() const {
void ServerContext::set_compression_algorithm(
grpc_compression_algorithm algorithm) {
- char* algorithm_name = NULL;
+ const char* algorithm_name = NULL;
if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
algorithm);
diff --git a/src/cpp/util/byte_buffer_cc.cc b/src/cpp/util/byte_buffer_cc.cc
index b1ff25252a..180c813762 100644
--- a/src/cpp/util/byte_buffer_cc.cc
+++ b/src/cpp/util/byte_buffer_cc.cc
@@ -16,11 +16,15 @@
*
*/
+#include <grpc++/impl/grpc_library.h>
#include <grpc++/support/byte_buffer.h>
+#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
namespace grpc {
+static internal::GrpcLibraryInitializer g_gli_initializer;
+
ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
// The following assertions check that the representation of a grpc::Slice is
// identical to that of a grpc_slice: it has a grpc_slice field, and nothing
@@ -29,6 +33,16 @@ ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
"Slice must have same representation as grpc_slice");
static_assert(sizeof(Slice) == sizeof(grpc_slice),
"Slice must have same representation as grpc_slice");
+ // The following assertions check that the representation of a ByteBuffer is
+ // identical to grpc_byte_buffer*: it has a grpc_byte_buffer* field,
+ // and nothing else.
+ static_assert(std::is_same<decltype(buffer_), grpc_byte_buffer*>::value,
+ "ByteBuffer must have same representation as "
+ "grpc_byte_buffer*");
+ static_assert(sizeof(ByteBuffer) == sizeof(grpc_byte_buffer*),
+ "ByteBuffer must have same representation as "
+ "grpc_byte_buffer*");
+ g_gli_initializer.summon(); // Make sure the initializer is linked in
// The const_cast is legal if grpc_raw_byte_buffer_create() does no more
// than its advertised side effect of increasing the reference count of the
// slices it processes, and such an increase does not affect the semantics
@@ -37,19 +51,6 @@ ByteBuffer::ByteBuffer(const Slice* slices, size_t nslices) {
reinterpret_cast<grpc_slice*>(const_cast<Slice*>(slices)), nslices);
}
-ByteBuffer::~ByteBuffer() {
- if (buffer_) {
- grpc_byte_buffer_destroy(buffer_);
- }
-}
-
-void ByteBuffer::Clear() {
- if (buffer_) {
- grpc_byte_buffer_destroy(buffer_);
- buffer_ = nullptr;
- }
-}
-
Status ByteBuffer::Dump(std::vector<Slice>* slices) const {
slices->clear();
if (!buffer_) {
@@ -80,7 +81,9 @@ ByteBuffer::ByteBuffer(const ByteBuffer& buf)
: buffer_(grpc_byte_buffer_copy(buf.buffer_)) {}
ByteBuffer& ByteBuffer::operator=(const ByteBuffer& buf) {
- Clear(); // first remove existing data
+ if (this != &buf) {
+ Clear(); // first remove existing data
+ }
if (buf.buffer_) {
buffer_ = grpc_byte_buffer_copy(buf.buffer_); // then copy
}
diff --git a/src/cpp/util/core_stats.cc b/src/cpp/util/core_stats.cc
new file mode 100644
index 0000000000..edf0b1bb67
--- /dev/null
+++ b/src/cpp/util/core_stats.cc
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/cpp/util/core_stats.h"
+
+#include <grpc/support/log.h>
+
+using grpc::core::Bucket;
+using grpc::core::Histogram;
+using grpc::core::Metric;
+using grpc::core::Stats;
+
+namespace grpc {
+
+void CoreStatsToProto(const grpc_stats_data& core, Stats* proto) {
+ for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ Metric* m = proto->add_metrics();
+ m->set_name(grpc_stats_counter_name[i]);
+ m->set_count(core.counters[i]);
+ }
+ for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
+ Metric* m = proto->add_metrics();
+ m->set_name(grpc_stats_histogram_name[i]);
+ Histogram* h = m->mutable_histogram();
+ for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
+ Bucket* b = h->add_buckets();
+ b->set_start(grpc_stats_histo_bucket_boundaries[i][j]);
+ b->set_count(core.histograms[grpc_stats_histo_start[i] + j]);
+ }
+ }
+}
+
+void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core) {
+ memset(core, 0, sizeof(*core));
+ for (const auto& m : proto.metrics()) {
+ switch (m.value_case()) {
+ case Metric::VALUE_NOT_SET:
+ break;
+ case Metric::kCount:
+ for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
+ if (m.name() == grpc_stats_counter_name[i]) {
+ core->counters[i] = m.count();
+ break;
+ }
+ }
+ break;
+ case Metric::kHistogram:
+ for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
+ if (m.name() == grpc_stats_histogram_name[i]) {
+ const auto& h = m.histogram();
+ bool valid = true;
+ if (grpc_stats_histo_buckets[i] != h.buckets_size()) valid = false;
+ for (int j = 0; valid && j < h.buckets_size(); j++) {
+ if (grpc_stats_histo_bucket_boundaries[i][j] !=
+ h.buckets(j).start()) {
+ valid = false;
+ }
+ }
+ if (!valid) {
+ gpr_log(GPR_ERROR,
+ "Found histogram %s but shape is different from proto",
+ m.name().c_str());
+ }
+ for (int j = 0; valid && j < h.buckets_size(); j++) {
+ core->histograms[grpc_stats_histo_start[i] + j] =
+ h.buckets(j).count();
+ }
+ }
+ }
+ break;
+ }
+ }
+}
+
+} // namespace grpc
diff --git a/src/core/ext/census/trace_status.h b/src/cpp/util/core_stats.h
index dd83d3f729..00e38bf266 100644
--- a/src/core/ext/census/trace_status.h
+++ b/src/cpp/util/core_stats.h
@@ -16,15 +16,20 @@
*
*/
-#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
-#define GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H
+#ifndef GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H
+#define GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H
-#include "src/core/ext/census/trace_string.h"
+#include "src/proto/grpc/core/stats.pb.h"
-/* Stores a status code and status message for a trace. */
-typedef struct trace_status {
- int64_t errorCode;
- trace_string errorMessage;
-} trace_status;
+extern "C" {
+#include "src/core/lib/debug/stats.h"
+}
-#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H */
+namespace grpc {
+
+void CoreStatsToProto(const grpc_stats_data& core, grpc::core::Stats* proto);
+void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core);
+
+} // namespace grpc
+
+#endif // GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H
diff --git a/src/cpp/util/error_details.cc b/src/cpp/util/error_details.cc
index 44bc4d1648..f06b475683 100644
--- a/src/cpp/util/error_details.cc
+++ b/src/cpp/util/error_details.cc
@@ -37,7 +37,8 @@ Status SetErrorDetails(const ::google::rpc::Status& from, Status* to) {
return Status(StatusCode::FAILED_PRECONDITION, "");
}
StatusCode code = StatusCode::UNKNOWN;
- if (from.code() >= StatusCode::OK && from.code() <= StatusCode::DATA_LOSS) {
+ if (from.code() >= StatusCode::OK &&
+ from.code() <= StatusCode::UNAUTHENTICATED) {
code = static_cast<StatusCode>(from.code());
}
*to = Status(code, from.message(), from.SerializeAsString());
diff --git a/src/cpp/util/slice_cc.cc b/src/cpp/util/slice_cc.cc
index 486d0cdf0e..3ae17e8052 100644
--- a/src/cpp/util/slice_cc.cc
+++ b/src/cpp/util/slice_cc.cc
@@ -50,4 +50,6 @@ Slice::Slice(void* buf, size_t len, void (*destroy)(void*), void* user_data)
Slice::Slice(void* buf, size_t len, void (*destroy)(void*, size_t))
: slice_(grpc_slice_new_with_len(buf, len, destroy)) {}
+grpc_slice Slice::c_slice() const { return grpc_slice_ref(slice_); }
+
} // namespace grpc
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
index abf326459c..bbcbd95be5 100755
--- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
@@ -15,7 +15,6 @@
<PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
index 9ad6fd0c61..4d6767fa98 100755
--- a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
+++ b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
@@ -15,7 +15,6 @@
<PackageTags>gRPC test testing</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index 6df68fda58..18993a93e0 100755
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Core.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Core.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
@@ -21,7 +19,6 @@
<PackageReference Include="Newtonsoft.Json" Version="9.0.1" />
<PackageReference Include="NUnit" Version="3.6.0" />
<PackageReference Include="NUnitLite" Version="3.6.0" />
- <PackageReference Include="NUnit.ConsoleRunner" Version="3.6.0" />
<PackageReference Include="OpenCover" Version="4.6.519" />
<PackageReference Include="ReportGenerator" Version="2.4.4.0" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
index 087b685963..f59989655e 100644
--- a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
@@ -36,7 +36,21 @@ namespace Grpc.Core
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- internal AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> responseAsync, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+ /// <summary>
+ /// Creates a new AsyncClientStreamingCall object with the specified properties.
+ /// </summary>
+ /// <param name="requestStream">Stream of request values.</param>
+ /// <param name="responseAsync">The response of the asynchronous call.</param>
+ /// <param name="responseHeadersAsync">Response headers of the asynchronous call.</param>
+ /// <param name="getStatusFunc">Delegate returning the status of the call.</param>
+ /// <param name="getTrailersFunc">Delegate returning the trailing metadata of the call.</param>
+ /// <param name="disposeAction">Delegate to invoke when Dispose is called on the call object.</param>
+ public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream,
+ Task<TResponse> responseAsync,
+ Task<Metadata> responseHeadersAsync,
+ Func<Status> getStatusFunc,
+ Func<Metadata> getTrailersFunc,
+ Action disposeAction)
{
this.requestStream = requestStream;
this.responseAsync = responseAsync;
diff --git a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
index ce49fb1596..1cb1a91859 100644
--- a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
@@ -35,7 +35,21 @@ namespace Grpc.Core
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- internal AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+ /// <summary>
+ /// Creates a new AsyncDuplexStreamingCall object with the specified properties.
+ /// </summary>
+ /// <param name="requestStream">Stream of request values.</param>
+ /// <param name="responseStream">Stream of response values.</param>
+ /// <param name="responseHeadersAsync">Response headers of the asynchronous call.</param>
+ /// <param name="getStatusFunc">Delegate returning the status of the call.</param>
+ /// <param name="getTrailersFunc">Delegate returning the trailing metadata of the call.</param>
+ /// <param name="disposeAction">Delegate to invoke when Dispose is called on the call object.</param>
+ public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream,
+ IAsyncStreamReader<TResponse> responseStream,
+ Task<Metadata> responseHeadersAsync,
+ Func<Status> getStatusFunc,
+ Func<Metadata> getTrailersFunc,
+ Action disposeAction)
{
this.requestStream = requestStream;
this.responseStream = responseStream;
diff --git a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
index fbc97b8148..4303b0b1b0 100644
--- a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
@@ -33,7 +33,19 @@ namespace Grpc.Core
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- internal AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+ /// <summary>
+ /// Creates a new AsyncServerStreamingCall object with the specified properties.
+ /// </summary>
+ /// <param name="responseStream">Stream of response values.</param>
+ /// <param name="responseHeadersAsync">Response headers of the asynchronous call.</param>
+ /// <param name="getStatusFunc">Delegate returning the status of the call.</param>
+ /// <param name="getTrailersFunc">Delegate returning the trailing metadata of the call.</param>
+ /// <param name="disposeAction">Delegate to invoke when Dispose is called on the call object.</param>
+ public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream,
+ Task<Metadata> responseHeadersAsync,
+ Func<Status> getStatusFunc,
+ Func<Metadata> getTrailersFunc,
+ Action disposeAction)
{
this.responseStream = responseStream;
this.responseHeadersAsync = responseHeadersAsync;
diff --git a/src/csharp/Grpc.Core/AsyncUnaryCall.cs b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
index 6348f3c5fd..17747f86ca 100644
--- a/src/csharp/Grpc.Core/AsyncUnaryCall.cs
+++ b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
@@ -34,7 +34,20 @@ namespace Grpc.Core
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- internal AsyncUnaryCall(Task<TResponse> responseAsync, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+
+ /// <summary>
+ /// Creates a new AsyncUnaryCall object with the specified properties.
+ /// </summary>
+ /// <param name="responseAsync">The response of the asynchronous call.</param>
+ /// <param name="responseHeadersAsync">Response headers of the asynchronous call.</param>
+ /// <param name="getStatusFunc">Delegate returning the status of the call.</param>
+ /// <param name="getTrailersFunc">Delegate returning the trailing metadata of the call.</param>
+ /// <param name="disposeAction">Delegate to invoke when Dispose is called on the call object.</param>
+ public AsyncUnaryCall(Task<TResponse> responseAsync,
+ Task<Metadata> responseHeadersAsync,
+ Func<Status> getStatusFunc,
+ Func<Metadata> getTrailersFunc,
+ Action disposeAction)
{
this.responseAsync = responseAsync;
this.responseHeadersAsync = responseHeadersAsync;
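[Editor note] The four Async*Call constructors above are changed from internal to public, presumably so that callers (for example hand-written test doubles for generated clients) can construct call objects directly. A minimal sketch, assuming only the constructor signature shown in this diff; the helper name and usage are illustrative, not part of the change:

```csharp
// Illustrative sketch (not from the diff): building an AsyncUnaryCall<TResponse> by hand,
// e.g. to return a canned response from a hand-written client test double.
using System;
using System.Threading.Tasks;
using Grpc.Core;

static class FakeCalls
{
    public static AsyncUnaryCall<TResponse> Immediate<TResponse>(TResponse response)
    {
        return new AsyncUnaryCall<TResponse>(
            Task.FromResult(response),        // responseAsync
            Task.FromResult(new Metadata()),  // responseHeadersAsync
            () => Status.DefaultSuccess,      // getStatusFunc
            () => new Metadata(),             // getTrailersFunc
            () => { });                       // disposeAction
    }
}
```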
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index dde800aadd..d9950b2f20 100755
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
@@ -65,7 +64,7 @@
<ItemGroup Condition=" '$(TargetFramework)' == 'netstandard1.5' ">
<PackageReference Include="System.Runtime.Loader" Version="4.0.0" />
<PackageReference Include="System.Threading.Thread" Version="4.0.0" />
- <PackageReference Include="System.Threading.ThreadPool" Version="4.0.0" />
+ <PackageReference Include="System.Threading.ThreadPool" Version="4.0.10" />
</ItemGroup>
<Import Project="NativeDeps.csproj.include" />
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index 17109de587..09fb722c81 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -34,6 +34,9 @@ namespace Grpc.Core.Internal
readonly CallInvocationDetails<TRequest, TResponse> details;
readonly INativeCall injectedNativeCall; // for testing
+ // Dispose this to de-register the cancellation token registration
+ IDisposable cancellationTokenRegistration;
+
// Completion of a pending unary response if not null.
TaskCompletionSource<TResponse> unaryResponseTcs;
@@ -320,6 +323,7 @@ namespace Grpc.Core.Internal
protected override void OnAfterReleaseResources()
{
details.Channel.RemoveCallReference(this);
+ cancellationTokenRegistration?.Dispose();
}
protected override bool IsClient
@@ -405,7 +409,7 @@ namespace Grpc.Core.Internal
var token = details.Options.CancellationToken;
if (token.CanBeCanceled)
{
- token.Register(() => this.Cancel());
+ cancellationTokenRegistration = token.Register(() => this.Cancel());
}
}
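[Editor note] The hunk above keeps the IDisposable returned by CancellationToken.Register and disposes it once the call has released its resources, so a long-lived cancellation token does not keep callbacks for finished calls alive. A small sketch of the same general .NET pattern, independent of gRPC (type and member names are illustrative):

```csharp
// General .NET pattern (illustrative, not gRPC code): keep the registration returned by
// CancellationToken.Register and dispose it when the work completes, so long-lived tokens
// do not accumulate callbacks for operations that have already finished.
using System;
using System.Threading;

sealed class Operation : IDisposable
{
    readonly IDisposable cancellationTokenRegistration;

    public Operation(CancellationToken token)
    {
        if (token.CanBeCanceled)
        {
            cancellationTokenRegistration = token.Register(Cancel);
        }
    }

    void Cancel()
    {
        // Cancel the underlying work here.
    }

    public void Dispose() => cancellationTokenRegistration?.Dispose();
}
```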
diff --git a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
index b56bdbb23f..a8cb357181 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs
@@ -61,12 +61,9 @@ namespace Grpc.Core.Internal
try
{
- var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr),
- Marshal.PtrToStringAnsi(methodNamePtr));
- // Don't await, we are in a native callback and need to return.
- #pragma warning disable 4014
- GetMetadataAsync(context, callbackPtr, userDataPtr);
- #pragma warning restore 4014
+ var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr), Marshal.PtrToStringAnsi(methodNamePtr));
+ // Guarantee that credentials_notify_from_plugin is invoked asynchronously, as required by the c-core API.
+ ThreadPool.QueueUserWorkItem(async (stateInfo) => await GetMetadataAsync(context, callbackPtr, userDataPtr));
}
catch (Exception e)
{
diff --git a/src/csharp/Grpc.Core/Version.csproj.include b/src/csharp/Grpc.Core/Version.csproj.include
index 124ecab14c..b9ceaf8254 100755
--- a/src/csharp/Grpc.Core/Version.csproj.include
+++ b/src/csharp/Grpc.Core/Version.csproj.include
@@ -1,7 +1,7 @@
<!-- This file is generated -->
<Project>
<PropertyGroup>
- <GrpcCsharpVersion>1.7.0-dev</GrpcCsharpVersion>
+ <GrpcCsharpVersion>1.8.0-dev</GrpcCsharpVersion>
<GoogleProtobufVersion>3.3.0</GoogleProtobufVersion>
</PropertyGroup>
</Project>
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index 588cc84516..dab938821f 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -33,11 +33,11 @@ namespace Grpc.Core
/// <summary>
/// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
/// </summary>
- public const string CurrentAssemblyFileVersion = "1.7.0.0";
+ public const string CurrentAssemblyFileVersion = "1.8.0.0";
/// <summary>
/// Current version of gRPC C#
/// </summary>
- public const string CurrentVersion = "1.7.0-dev";
+ public const string CurrentVersion = "1.8.0-dev";
}
}
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
index 74deed6584..db4e3ef4e3 100755
--- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
@@ -8,7 +8,6 @@
<AssemblyName>Grpc.Examples.MathClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathClient</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
index 1abf261498..b12b418d01 100755
--- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
@@ -8,7 +8,6 @@
<AssemblyName>Grpc.Examples.MathServer</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathServer</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
index d2a13ed6e1..3ccc9adfaf 100755
--- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Examples.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
index 491d313f17..baa3b4ce6c 100755
--- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
@@ -7,7 +7,6 @@
<TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
<AssemblyName>Grpc.Examples</AssemblyName>
<PackageId>Grpc.Examples</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
index 2ccf46b9b9..9da0539dcb 100755
--- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.HealthCheck.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.HealthCheck.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
index 3eb90434f3..681719d124 100755
--- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC health check</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
index c67beea7cd..35713156ea 100755
--- a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.Client</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Client</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
index e452257b1b..3ecefe3bc4 100755
--- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
@@ -9,8 +9,6 @@
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.QpsWorker</PackageId>
<ServerGarbageCollection>true</ServerGarbageCollection>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
index a1fb316fdb..1092b2c21e 100755
--- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.Server</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Server</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
index f64bea3d2b..22272547f6 100755
--- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.StressClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.StressClient</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
index f5077fe0f7..c02c9844e3 100755
--- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
@@ -31,10 +29,6 @@
<Reference Include="Microsoft.CSharp" />
</ItemGroup>
- <ItemGroup Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">
- <PackageReference Include="System.Linq.Expressions" Version="4.1.1" />
- </ItemGroup>
-
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
index e81157cf97..eba6276a1f 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs
@@ -90,6 +90,54 @@ namespace Grpc.IntegrationTesting
}
[Test]
+ public async Task MetadataCredentials_Composed()
+ {
+ var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ // Attempt to exercise the case where async callback is inlineable/synchronously-runnable.
+ metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
+ CallCredentials.Compose(first, second, third));
+ channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
+ var client = new TestService.TestServiceClient(channel);
+ var call = client.StreamingOutputCall(new StreamingOutputCallRequest { });
+ Assert.IsTrue(await call.ResponseStream.MoveNext());
+ Assert.IsFalse(await call.ResponseStream.MoveNext());
+ }
+
+ [Test]
+ public async Task MetadataCredentials_ComposedPerCall()
+ {
+ channel = new Channel(Host, server.Ports.Single().BoundPort, TestCredentials.CreateSslCredentials(), options);
+ var client = new TestService.TestServiceClient(channel);
+ var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ metadata.Add("first_authorization", "FIRST_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ metadata.Add("second_authorization", "SECOND_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => {
+ metadata.Add("third_authorization", "THIRD_SECRET_TOKEN");
+ return TaskUtils.CompletedTask;
+ }));
+ var call = client.StreamingOutputCall(new StreamingOutputCallRequest{ },
+ new CallOptions(credentials: CallCredentials.Compose(first, second, third)));
+ Assert.IsTrue(await call.ResponseStream.MoveNext());
+ Assert.IsFalse(await call.ResponseStream.MoveNext());
+ }
+
+ [Test]
public void MetadataCredentials_InterceptorLeavesMetadataEmpty()
{
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(),
@@ -125,6 +173,17 @@ namespace Grpc.IntegrationTesting
Assert.AreEqual("SECRET_TOKEN", authToken);
return Task.FromResult(new SimpleResponse());
}
+
+ public override async Task StreamingOutputCall(StreamingOutputCallRequest request, IServerStreamWriter<StreamingOutputCallResponse> responseStream, ServerCallContext context)
+ {
+ var first = context.RequestHeaders.First((entry) => entry.Key == "first_authorization").Value;
+ Assert.AreEqual("FIRST_SECRET_TOKEN", first);
+ var second = context.RequestHeaders.First((entry) => entry.Key == "second_authorization").Value;
+ Assert.AreEqual("SECOND_SECRET_TOKEN", second);
+ var third = context.RequestHeaders.First((entry) => entry.Key == "third_authorization").Value;
+ Assert.AreEqual("THIRD_SECRET_TOKEN", third);
+ await responseStream.WriteAsync(new StreamingOutputCallResponse());
+ }
}
}
}
diff --git a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
index 17797e1e1e..108357e4eb 100644
--- a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
+++ b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Microbenchmarks</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Microbenchmarks</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
index cf756c68ad..d368697124 100755
--- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
+++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Reflection.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Reflection.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
index b77fd69aee..704eea5c17 100755
--- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
+++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC reflection</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/build_packages_dotnetcli.bat b/src/csharp/build_packages_dotnetcli.bat
index c419d87049..ff013d5680 100755
--- a/src/csharp/build_packages_dotnetcli.bat
+++ b/src/csharp/build_packages_dotnetcli.bat
@@ -13,7 +13,7 @@
@rem limitations under the License.
@rem Current package versions
-set VERSION=1.7.0-dev
+set VERSION=1.8.0-dev
@rem Adjust the location of nuget.exe
set NUGET=C:\nuget\nuget.exe
diff --git a/src/csharp/build_packages_dotnetcli.sh b/src/csharp/build_packages_dotnetcli.sh
index 124dfbb257..44a4791146 100755
--- a/src/csharp/build_packages_dotnetcli.sh
+++ b/src/csharp/build_packages_dotnetcli.sh
@@ -39,7 +39,7 @@ dotnet pack --configuration Release Grpc.Auth --output ../../../artifacts
dotnet pack --configuration Release Grpc.HealthCheck --output ../../../artifacts
dotnet pack --configuration Release Grpc.Reflection --output ../../../artifacts
-nuget pack Grpc.nuspec -Version "1.7.0-dev" -OutputDirectory ../../artifacts
-nuget pack Grpc.Tools.nuspec -Version "1.7.0-dev" -OutputDirectory ../../artifacts
+nuget pack Grpc.nuspec -Version "1.8.0-dev" -OutputDirectory ../../artifacts
+nuget pack Grpc.Tools.nuspec -Version "1.8.0-dev" -OutputDirectory ../../artifacts
(cd ../../artifacts && zip csharp_nugets_dotnetcli.zip *.nupkg)
diff --git a/src/csharp/doc/.gitignore b/src/csharp/doc/.gitignore
new file mode 100644
index 0000000000..09ee235efc
--- /dev/null
+++ b/src/csharp/doc/.gitignore
@@ -0,0 +1,2 @@
+html
+obj
diff --git a/src/csharp/doc/README.md b/src/csharp/doc/README.md
index 585500b5ca..46cce013a1 100644
--- a/src/csharp/doc/README.md
+++ b/src/csharp/doc/README.md
@@ -1,2 +1,9 @@
+DocFX-generated C# API Reference
+--------------------------------
-SandCastle project files to generate HTML reference documentation. \ No newline at end of file
+Install docfx by following the instructions at https://github.com/dotnet/docfx
+
+```
+# generate docfx documentation into ./html directory
+$ docfx
+```
diff --git a/src/csharp/doc/docfx.json b/src/csharp/doc/docfx.json
new file mode 100644
index 0000000000..7219d0e7a6
--- /dev/null
+++ b/src/csharp/doc/docfx.json
@@ -0,0 +1,37 @@
+{
+ "metadata": [
+ {
+ "src": [
+ {
+ "files": ["Grpc.Core/Grpc.Core.csproj",
+ "Grpc.Auth/Grpc.Auth.csproj",
+ "Grpc.Core.Testing/Grpc.Core.Testing.csproj",
+ "Grpc.HealthCheck/Grpc.HealthCheck.csproj",
+ "Grpc.Reflection/Grpc.HealthCheck.csproj"],
+ "exclude": [ "**/bin/**", "**/obj/**" ],
+ "cwd": ".."
+ }
+ ],
+ "properties": { "TargetFramework": "net45" },
+ "dest": "obj/api"
+ }
+ ],
+ "build": {
+ "content": [
+ {
+ "files": [ "**/*.yml" ],
+ "cwd": "obj/api",
+ "dest": "api"
+ },
+ {
+ "files": [ "toc.yml"],
+ }
+ ],
+ "globalMetadata": {
+ "_appTitle": "gRPC C#",
+ "_enableSearch": true,
+ "_disableContribution": true
+ },
+ "dest": "html"
+ }
+}
diff --git a/src/csharp/doc/grpc_csharp_public.shfbproj b/src/csharp/doc/grpc_csharp_public.shfbproj
deleted file mode 100644
index fab953da35..0000000000
--- a/src/csharp/doc/grpc_csharp_public.shfbproj
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <PropertyGroup>
- <!-- The configuration and platform will be used to determine which assemblies to include from solution and
- project documentation sources -->
- <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
- <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
- <SchemaVersion>2.0</SchemaVersion>
- <ProjectGuid>{77e3da09-fc92-486f-a90a-99ca788e8b59}</ProjectGuid>
- <SHFBSchemaVersion>2015.6.5.0</SHFBSchemaVersion>
- <!-- AssemblyName, Name, and RootNamespace are not used by SHFB but Visual Studio adds them anyway -->
- <AssemblyName>Documentation</AssemblyName>
- <RootNamespace>Documentation</RootNamespace>
- <Name>Documentation</Name>
- <!-- SHFB properties -->
- <FrameworkVersion>.NET Framework 4.5</FrameworkVersion>
- <OutputPath>..\..\..\doc\ref\csharp\html</OutputPath>
- <Language>en-US</Language>
- <DocumentationSources>
- <DocumentationSource sourceFile="..\Grpc.Auth\Grpc.Auth.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Core\Grpc.Core.csproj" />
-<DocumentationSource sourceFile="..\Grpc.HealthCheck\Grpc.HealthCheck.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Reflection\Grpc.Reflection.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Core.Testing\Grpc.Core.Testing.csproj" /></DocumentationSources>
- <BuildAssemblerVerbosity>OnlyWarningsAndErrors</BuildAssemblerVerbosity>
- <HelpFileFormat>Website</HelpFileFormat>
- <IndentHtml>False</IndentHtml>
- <KeepLogFile>True</KeepLogFile>
- <DisableCodeBlockComponent>False</DisableCodeBlockComponent>
- <CleanIntermediates>True</CleanIntermediates>
- <HelpFileVersion>1.0.0.0</HelpFileVersion>
- <MaximumGroupParts>2</MaximumGroupParts>
- <NamespaceGrouping>False</NamespaceGrouping>
- <SyntaxFilters>Standard</SyntaxFilters>
- <SdkLinkTarget>Blank</SdkLinkTarget>
- <RootNamespaceContainer>True</RootNamespaceContainer>
- <PresentationStyle>VS2013</PresentationStyle>
- <Preliminary>False</Preliminary>
- <NamingMethod>MemberName</NamingMethod>
- <HelpTitle>gRPC C#</HelpTitle>
- <ContentPlacement>AboveNamespaces</ContentPlacement>
- <HtmlHelpName>Documentation</HtmlHelpName>
- <NamespaceSummaries>
- <NamespaceSummaryItem name="Grpc.Auth" isDocumented="True">Provides OAuth2 based authentication for gRPC. &lt;c&gt;Grpc.Auth&lt;/c&gt; currently consists of a set of very lightweight wrappers and uses C# &lt;a href="https://www.nuget.org/packages/Google.Apis.Auth/"&gt;Google.Apis.Auth&lt;/a&gt; library.</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core" isDocumented="True">Main namespace for gRPC C# functionality. Contains concepts representing both client side and server side gRPC logic.
-
-&lt;seealso cref="Grpc.Core.Channel"/&gt;
-&lt;seealso cref="Grpc.Core.Server"/&gt;</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core.Logging" isDocumented="True">Provides functionality to redirect gRPC logs to application-specified destination.</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core.Utils" isDocumented="True">Various utilities for gRPC C#.</NamespaceSummaryItem>
- </NamespaceSummaries>
- <MissingTags>Summary, Parameter, AutoDocumentCtors, Namespace, TypeParameter, AutoDocumentDispose</MissingTags>
- </PropertyGroup>
- <!-- There are no properties for these groups. AnyCPU needs to appear in order for Visual Studio to perform
- the build. The others are optional common platform types that may appear. -->
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|Win32' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|Win32' ">
- </PropertyGroup>
- <!-- Import the SHFB build targets -->
- <Import Project="$(SHFBROOT)\SandcastleHelpFileBuilder.targets" />
- <!-- The pre-build and post-build event properties must appear *after* the targets file import in order to be
- evaluated correctly. -->
- <PropertyGroup>
- <PreBuildEvent>
- </PreBuildEvent>
- <PostBuildEvent>
- </PostBuildEvent>
- <RunPostBuildEvent>OnBuildSuccess</RunPostBuildEvent>
- </PropertyGroup>
-</Project> \ No newline at end of file
diff --git a/src/csharp/doc/toc.yml b/src/csharp/doc/toc.yml
new file mode 100644
index 0000000000..c3a1e415ab
--- /dev/null
+++ b/src/csharp/doc/toc.yml
@@ -0,0 +1,3 @@
+- name: API Documentation
+ href: obj/api/
+ homepage: obj/api/Grpc.Core.yml
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index aebce364c5..92291f9011 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -1023,13 +1023,17 @@ typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)(
grpc_credentials_plugin_metadata_cb cb, void *user_data,
int32_t is_destroy);
-static void grpcsharp_get_metadata_handler(
+static int grpcsharp_get_metadata_handler(
void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void *user_data) {
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) {
grpcsharp_metadata_interceptor_func interceptor =
(grpcsharp_metadata_interceptor_func)(intptr_t)state;
interceptor(state, context.service_url, context.method_name, cb, user_data,
0);
+ return 0; /* Asynchronous return. */
}
static void grpcsharp_metadata_credentials_destroy_handler(void *state) {
diff --git a/src/node/ext/call_credentials.cc b/src/node/ext/call_credentials.cc
index 4cf3e565ef..0644a812e9 100644
--- a/src/node/ext/call_credentials.cc
+++ b/src/node/ext/call_credentials.cc
@@ -238,9 +238,12 @@ NAUV_WORK_CB(SendPluginCallback) {
}
}
-void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb,
- void *user_data) {
+int plugin_get_metadata(
+ void *state, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) {
plugin_state *p_state = reinterpret_cast<plugin_state *>(state);
plugin_callback_data *data = new plugin_callback_data;
data->service_url = context.service_url;
@@ -252,6 +255,7 @@ void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
uv_mutex_unlock(&p_state->plugin_mutex);
uv_async_send(&p_state->plugin_async);
+ return 0; // Async processing.
}
void plugin_uv_close_cb(uv_handle_t *handle) {
diff --git a/src/node/ext/call_credentials.h b/src/node/ext/call_credentials.h
index adcff84573..3a54bbf0cf 100644
--- a/src/node/ext/call_credentials.h
+++ b/src/node/ext/call_credentials.h
@@ -75,9 +75,11 @@ typedef struct plugin_state {
uv_async_t plugin_async;
} plugin_state;
-void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb,
- void *user_data);
+int plugin_get_metadata(
+ void *state, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status, const char **error_details);
void plugin_destroy_state(void *state);
diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json
index 3c7d3707ee..6f09c8f9f0 100644
--- a/src/node/health_check/package.json
+++ b/src/node/health_check/package.json
@@ -1,6 +1,6 @@
{
"name": "grpc-health-check",
- "version": "1.7.0-dev",
+ "version": "1.8.0-dev",
"author": "Google Inc.",
"description": "Health check service for use with gRPC",
"repository": {
@@ -15,7 +15,7 @@
}
],
"dependencies": {
- "grpc": "^1.7.0-dev",
+ "grpc": "^1.8.0-dev",
"lodash": "^3.9.3",
"google-protobuf": "^3.0.0"
},
diff --git a/src/node/tools/package.json b/src/node/tools/package.json
index d9b1fb86c9..f88fc65cdf 100644
--- a/src/node/tools/package.json
+++ b/src/node/tools/package.json
@@ -1,6 +1,6 @@
{
"name": "grpc-tools",
- "version": "1.7.0-dev",
+ "version": "1.8.0-dev",
"author": "Google Inc.",
"description": "Tools for developing with gRPC on Node.js",
"homepage": "https://grpc.io/",
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
index 7d073c9a84..9065ab9f73 100644
--- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -42,7 +42,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler-gRPCPlugin'
- v = '1.7.0-dev'
+ v = '1.8.0-dev'
s.version = v
s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
s.description = <<-DESC
diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec
index 37798ec3c6..c61afc1a8f 100644
--- a/src/objective-c/BoringSSL.podspec
+++ b/src/objective-c/BoringSSL.podspec
@@ -31,7 +31,7 @@
Pod::Spec.new do |s|
s.name = 'BoringSSL'
- version = '9.0'
+ version = '9.1'
s.version = version
s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
# Adapted from the homepage:
@@ -67,9 +67,10 @@ Pod::Spec.new do |s|
# "The name and email addresses of the library maintainers, not the Podspec maintainer."
s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite'
+ major_version = version[0] + '.0'
s.source = {
:git => 'https://boringssl.googlesource.com/boringssl',
- :tag => "version_for_cocoapods_#{version}",
+ :tag => "version_for_cocoapods_#{major_version}",
}
name = 'openssl'
@@ -186,6 +187,7 @@ Pod::Spec.new do |s|
cat > include/openssl/BoringSSL.modulemap <<EOF
framework module openssl {
umbrella header "umbrella.h"
+ textual header "arm_arch.h"
export *
module * { export * }
}
diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h
index 4d90cfd384..df563ca36c 100644
--- a/src/objective-c/GRPCClient/GRPCCall.h
+++ b/src/objective-c/GRPCClient/GRPCCall.h
@@ -170,6 +170,13 @@ extern id const kGRPCTrailersKey;
@property (atomic, copy, readwrite) NSString *serverName;
/**
+ * The timeout for the RPC call in seconds. If set to 0, the call never times out. If set to a
+ * positive value, the call returns with status GRPCErrorCodeDeadlineExceeded if it does not
+ * complete within \a timeout seconds. Negative values are not allowed.
+ */
+@property NSTimeInterval timeout;
+
+/**
* The container of the request headers of an RPC conforms to this protocol, which is a subset of
* NSMutableDictionary's interface. It will become a NSMutableDictionary later on.
* The keys of this container are the header names, which per the HTTP standard are case-
diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m
index 436c19e354..d6c3a3c165 100644
--- a/src/objective-c/GRPCClient/GRPCCall.m
+++ b/src/objective-c/GRPCClient/GRPCCall.m
@@ -423,7 +423,8 @@ static NSString * const kBearerPrefix = @"Bearer ";
_wrappedCall = [[GRPCWrappedCall alloc] initWithHost:_host
serverName:_serverName
- path:_path];
+ path:_path
+ timeout:_timeout];
NSAssert(_wrappedCall, @"Error allocating RPC objects. Low memory?");
[self sendHeaders:_requestHeaders];
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.h b/src/objective-c/GRPCClient/private/GRPCChannel.h
index e2aa5bd036..d37182f754 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.h
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.h
@@ -63,5 +63,6 @@ struct grpc_channel_credentials;
- (nullable grpc_call *)unmanagedCallWithPath:(nonnull NSString *)path
serverName:(nonnull NSString *)serverName
+ timeout:(NSTimeInterval)timeout
completionQueue:(nonnull GRPCCompletionQueue *)queue;
@end
diff --git a/src/objective-c/GRPCClient/private/GRPCChannel.m b/src/objective-c/GRPCClient/private/GRPCChannel.m
index 52dbc70b99..b78b14f2af 100644
--- a/src/objective-c/GRPCClient/private/GRPCChannel.m
+++ b/src/objective-c/GRPCClient/private/GRPCChannel.m
@@ -182,18 +182,28 @@ static grpc_channel_args *BuildChannelArgs(NSDictionary *dictionary) {
- (grpc_call *)unmanagedCallWithPath:(NSString *)path
serverName:(NSString *)serverName
+ timeout:(NSTimeInterval)timeout
completionQueue:(GRPCCompletionQueue *)queue {
+ GPR_ASSERT(timeout >= 0);
+ if (timeout < 0) {
+ timeout = 0;
+ }
grpc_slice host_slice;
if (serverName) {
host_slice = grpc_slice_from_copied_string(serverName.UTF8String);
}
grpc_slice path_slice = grpc_slice_from_copied_string(path.UTF8String);
+ gpr_timespec deadline_ms = timeout == 0 ?
+ gpr_inf_future(GPR_CLOCK_REALTIME) :
+ gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis((int64_t)(timeout * 1000), GPR_TIMESPAN));
grpc_call *call = grpc_channel_create_call(_unmanagedChannel,
NULL, GRPC_PROPAGATE_DEFAULTS,
queue.unmanagedQueue,
path_slice,
serverName ? &host_slice : NULL,
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ deadline_ms, NULL);
if (serverName) {
grpc_slice_unref(host_slice);
}
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.h b/src/objective-c/GRPCClient/private/GRPCHost.h
index 0c1d715240..58171211b0 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.h
+++ b/src/objective-c/GRPCClient/private/GRPCHost.h
@@ -55,6 +55,7 @@ struct grpc_channel_credentials;
/** Create a grpc_call object to the provided path on this host. */
- (nullable struct grpc_call *)unmanagedCallWithPath:(NSString *)path
serverName:(NSString *)serverName
+ timeout:(NSTimeInterval)timeout
completionQueue:(GRPCCompletionQueue *)queue;
// TODO: There's a race when a new RPC is coming through just as an existing one is getting
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index 23794c1fed..f73e9cbc50 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -121,6 +121,7 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
- (nullable grpc_call *)unmanagedCallWithPath:(NSString *)path
serverName:(NSString *)serverName
+ timeout:(NSTimeInterval)timeout
completionQueue:(GRPCCompletionQueue *)queue {
GRPCChannel *channel;
// This is racing -[GRPCHost disconnect].
@@ -130,7 +131,10 @@ static GRPCConnectivityMonitor *connectivityMonitor = nil;
}
channel = _channel;
}
- return [channel unmanagedCallWithPath:path serverName:serverName completionQueue:queue];
+ return [channel unmanagedCallWithPath:path
+ serverName:serverName
+ timeout:timeout
+ completionQueue:queue];
}
- (BOOL)setTLSPEMRootCerts:(nullable NSString *)pemRootCerts
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
index 64075591a3..1cd9da8f3e 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h
@@ -76,7 +76,8 @@
- (instancetype)initWithHost:(NSString *)host
serverName:(NSString *)serverName
- path:(NSString *)path NS_DESIGNATED_INITIALIZER;
+ path:(NSString *)path
+ timeout:(NSTimeInterval)timeout NS_DESIGNATED_INITIALIZER;
- (void)startBatchWithOperations:(NSArray *)ops errorHandler:(void(^)())errorHandler;
diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
index 87dc33af88..b0b1223b64 100644
--- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
+++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m
@@ -238,12 +238,13 @@
}
- (instancetype)init {
- return [self initWithHost:nil serverName:nil path:nil];
+ return [self initWithHost:nil serverName:nil path:nil timeout:0];
}
- (instancetype)initWithHost:(NSString *)host
serverName:(NSString *)serverName
- path:(NSString *)path {
+ path:(NSString *)path
+ timeout:(NSTimeInterval)timeout {
if (!path || !host) {
[NSException raise:NSInvalidArgumentException
format:@"path and host cannot be nil."];
@@ -255,7 +256,10 @@
// queue. Currently we use a singleton queue.
_queue = [GRPCCompletionQueue completionQueue];
- _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path serverName:serverName completionQueue:_queue];
+ _call = [[GRPCHost hostWithAddress:host] unmanagedCallWithPath:path
+ serverName:serverName
+ timeout:timeout
+ completionQueue:_queue];
if (_call == NULL) {
return nil;
}
diff --git a/src/objective-c/GRPCClient/private/version.h b/src/objective-c/GRPCClient/private/version.h
index 843954e84a..db589d12de 100644
--- a/src/objective-c/GRPCClient/private/version.h
+++ b/src/objective-c/GRPCClient/private/version.h
@@ -23,4 +23,4 @@
// `tools/buildgen/generate_projects.sh`.
-#define GRPC_OBJC_VERSION_STRING @"1.7.0-dev"
+#define GRPC_OBJC_VERSION_STRING @"1.8.0-dev"
diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m
index 9afe507121..5672bdad4c 100644
--- a/src/objective-c/tests/GRPCClientTests.m
+++ b/src/objective-c/tests/GRPCClientTests.m
@@ -18,6 +18,7 @@
#import <UIKit/UIKit.h>
#import <XCTest/XCTest.h>
+#import <grpc/grpc.h>
#import <GRPCClient/GRPCCall.h>
#import <GRPCClient/GRPCCall+ChannelArg.h>
@@ -28,6 +29,9 @@
#import <RemoteTest/Messages.pbobjc.h>
#import <RxLibrary/GRXWriteable.h>
#import <RxLibrary/GRXWriter+Immediate.h>
+#import <RxLibrary/GRXBufferedPipe.h>
+
+#import "version.h"
#define TEST_TIMEOUT 16
@@ -39,6 +43,7 @@ static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.googleapis.com";
static GRPCProtoMethod *kInexistentMethod;
static GRPCProtoMethod *kEmptyCallMethod;
static GRPCProtoMethod *kUnaryCallMethod;
+static GRPCProtoMethod *kFullDuplexCallMethod;
/** Observer class for testing that responseMetadata is KVO-compliant */
@interface PassthroughObserver : NSObject
@@ -106,6 +111,9 @@ static GRPCProtoMethod *kUnaryCallMethod;
kUnaryCallMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
service:kService
method:@"UnaryCall"];
+ kFullDuplexCallMethod = [[GRPCProtoMethod alloc] initWithPackage:kPackage
+ service:kService
+ method:@"FullDuplexCall"];
}
- (void)testConnectionToRemoteServer {
@@ -261,12 +269,38 @@ static GRPCProtoMethod *kUnaryCallMethod;
id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
XCTAssertNotNil(value, @"nil value received as response.");
XCTAssertEqual([value length], 0, @"Non-empty response received: %@", value);
- /* This test needs to be more clever in regards to changing the version of the core.
- XCTAssertEqualObjects(call.responseHeaders[@"x-grpc-test-echo-useragent"],
- @"Foo grpc-objc/0.13.0 grpc-c/0.14.0-dev (ios)",
- @"Did not receive expected user agent %@",
- call.responseHeaders[@"x-grpc-test-echo-useragent"]);
- */
+
+ NSString *userAgent = call.responseHeaders[@"x-grpc-test-echo-useragent"];
+ NSError *error = nil;
+
+      // Check that the user agent string is exactly what we expect
+ NSString *expectedUserAgent = @"Foo grpc-objc/";
+ expectedUserAgent =
+ [expectedUserAgent stringByAppendingString:GRPC_OBJC_VERSION_STRING];
+ expectedUserAgent =
+ [expectedUserAgent stringByAppendingString:@" grpc-c/"];
+ expectedUserAgent =
+ [expectedUserAgent stringByAppendingString:GRPC_C_VERSION_STRING];
+ expectedUserAgent =
+ [expectedUserAgent stringByAppendingString:@" (ios; chttp2; "];
+ expectedUserAgent =
+ [expectedUserAgent stringByAppendingString:[NSString stringWithUTF8String:grpc_g_stands_for()]];
+ expectedUserAgent = [expectedUserAgent stringByAppendingString:@")"];
+ XCTAssertEqualObjects(userAgent, expectedUserAgent);
+
+      // A change to the user-agent format that no longer matches this regex will likely
+      // cause problems for certain gRPC users. For details, refer to internal doc https://goo.gl/c2diBc
+ NSRegularExpression *regex =
+ [NSRegularExpression regularExpressionWithPattern:@" grpc-[a-zA-Z0-9]+(-[a-zA-Z0-9]+)?/[^ ,]+( \\([^)]*\\))?"
+ options:0
+ error:&error];
+ NSString *customUserAgent =
+ [regex stringByReplacingMatchesInString:userAgent
+ options:0
+ range:NSMakeRange(0, [userAgent length])
+ withTemplate:@""];
+ XCTAssertEqualObjects(customUserAgent, @"Foo");
+
[response fulfill];
} completionHandler:^(NSError *errorOrNil) {
XCTAssertNil(errorOrNil, @"Finished with unexpected error: %@", errorOrNil);
@@ -422,4 +456,26 @@ static GRPCProtoMethod *kUnaryCallMethod;
[self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
}
+- (void)testTimeout {
+ __weak XCTestExpectation *completion = [self expectationWithDescription:@"RPC completed."];
+
+ GRXBufferedPipe *pipe = [GRXBufferedPipe pipe];
+ GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress
+ path:kFullDuplexCallMethod.HTTPPath
+ requestsWriter:pipe];
+
+ id<GRXWriteable> responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) {
+ XCTAssert(0, @"Failure: response received; Expect: no response received.");
+ } completionHandler:^(NSError *errorOrNil) {
+ XCTAssertNotNil(errorOrNil, @"Failure: no error received; Expect: receive deadline exceeded.");
+ XCTAssertEqual(errorOrNil.code, GRPCErrorCodeDeadlineExceeded);
+ [completion fulfill];
+ }];
+
+ call.timeout = 0.001;
+ [call startWithWriteable:responsesWriteable];
+
+ [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
+}
+
@end
diff --git a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
index 1796c6d746..ad83481595 100644
--- a/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
+++ b/src/objective-c/tests/RemoteTestClient/RemoteTest.podspec
@@ -47,7 +47,7 @@ Pod::Spec.new do |s|
s.pod_target_xcconfig = {
# This is needed by all pods that depend on Protobuf:
- 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1',
+ 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1 GPB_GRPC_FORWARD_DECLARE_MESSAGE_PROTO=1',
# This is needed by all pods that depend on gRPC-RxLibrary:
'CLANG_ALLOW_NON_MODULAR_INCLUDES_IN_FRAMEWORK_MODULES' => 'YES',
}
diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
index b01d5ffcea..2f0c8cfe18 100644
--- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
+++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
@@ -144,6 +144,7 @@
5EAD6D241E27047400002378 /* CronetUnitTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = CronetUnitTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
5EAD6D261E27047400002378 /* CronetUnitTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = CronetUnitTests.m; sourceTree = "<group>"; };
5EAD6D281E27047400002378 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
+ 5EAFE8271F8EFB87007F2189 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = "<group>"; };
5EE84BF11D4717E40050C6CC /* InteropTestsRemoteWithCronet.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = InteropTestsRemoteWithCronet.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
5EE84BF31D4717E40050C6CC /* InteropTestsRemoteWithCronet.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = InteropTestsRemoteWithCronet.m; sourceTree = "<group>"; };
5EE84BF51D4717E40050C6CC /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
@@ -398,6 +399,7 @@
635697C91B14FC11007A7283 /* Tests */ = {
isa = PBXGroup;
children = (
+ 5EAFE8271F8EFB87007F2189 /* version.h */,
6312AE4D1B1BF49B00341DEE /* GRPCClientTests.m */,
63E240CC1B6C4D3A005F3B0E /* InteropTests.h */,
635ED2EB1B1A3BC400FDE5C3 /* InteropTests.m */,
@@ -740,9 +742,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-CronetUnitTests/Pods-CronetUnitTests-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -755,9 +760,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet-frameworks.sh",
+ "${PODS_ROOT}/CronetFramework/Cronet.framework",
);
name = "[CP] Embed Pods Frameworks";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Cronet.framework",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -770,13 +778,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-InteropTestsRemote-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
4F5690DC0E6AD6663FE78B8B /* [CP] Embed Pods Frameworks */ = {
@@ -800,13 +811,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-InteropTestsLocalSSL-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
5F14F59509E10C2852014F9E /* [CP] Embed Pods Frameworks */ = {
@@ -830,9 +844,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalSSL/Pods-InteropTestsLocalSSL-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -845,9 +862,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -860,13 +880,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-InteropTestsLocalCleartext-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
796680C7599CB4ED736DD62A /* [CP] Check Pods Manifest.lock */ = {
@@ -875,13 +898,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-Tests-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
80E2DDD2EC04A4009F45E933 /* [CP] Check Pods Manifest.lock */ = {
@@ -890,13 +916,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-CronetUnitTests-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
8AD3130D3C58A0FB32FF2A36 /* [CP] Copy Pods Resources */ = {
@@ -905,9 +934,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsLocalCleartext/Pods-InteropTestsLocalCleartext-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -935,13 +967,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-AllTests-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
A441F71824DCB9D0CA297748 /* [CP] Copy Pods Resources */ = {
@@ -950,9 +985,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-AllTests/Pods-AllTests-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -965,9 +1003,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-CronetUnitTests/Pods-CronetUnitTests-frameworks.sh",
+ "${PODS_ROOT}/CronetFramework/Cronet.framework",
);
name = "[CP] Embed Pods Frameworks";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Cronet.framework",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -995,9 +1036,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-Tests/Pods-Tests-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -1010,13 +1054,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-RxLibraryUnitTests-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
C0F7B1FF6F88CC5FBF362F4C /* [CP] Check Pods Manifest.lock */ = {
@@ -1025,13 +1072,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-InteropTestsRemoteWithCronet-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
C2E09DC4BD239F71160F0CC1 /* [CP] Copy Pods Resources */ = {
@@ -1040,9 +1090,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemote/Pods-InteropTestsRemote-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -1070,9 +1123,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-RxLibraryUnitTests/Pods-RxLibraryUnitTests-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -1085,9 +1141,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-InteropTestsRemoteWithCronet/Pods-InteropTestsRemoteWithCronet-resources.sh",
+ $PODS_CONFIGURATION_BUILD_DIR/gRPC/gRPCCertificates.bundle,
);
name = "[CP] Copy Pods Resources";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -1100,9 +1159,12 @@
files = (
);
inputPaths = (
+ "${SRCROOT}/Pods/Target Support Files/Pods-CoreCronetEnd2EndTests/Pods-CoreCronetEnd2EndTests-frameworks.sh",
+ "${PODS_ROOT}/CronetFramework/Cronet.framework",
);
name = "[CP] Embed Pods Frameworks";
outputPaths = (
+ "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/Cronet.framework",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
@@ -1115,13 +1177,16 @@
files = (
);
inputPaths = (
+ "${PODS_PODFILE_DIR_PATH}/Podfile.lock",
+ "${PODS_ROOT}/Manifest.lock",
);
name = "[CP] Check Pods Manifest.lock";
outputPaths = (
+ "$(DERIVED_FILE_DIR)/Pods-CoreCronetEnd2EndTests-checkManifestLockResult.txt",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "diff \"${PODS_ROOT}/../Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n";
+ shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n";
showEnvVarsInLog = 0;
};
/* End PBXShellScriptBuildPhase section */
diff --git a/src/objective-c/tests/run_tests.sh b/src/objective-c/tests/run_tests.sh
index 5b7a2d104a..608ae6884b 100755
--- a/src/objective-c/tests/run_tests.sh
+++ b/src/objective-c/tests/run_tests.sh
@@ -49,7 +49,8 @@ xcodebuild \
HOST_PORT_REMOTE=grpc-test.sandbox.googleapis.com \
test \
| egrep -v "$XCODEBUILD_FILTER" \
- | egrep -v '^$' -
+ | egrep -v '^$' \
+ | egrep -v "(GPBDictionary|GPBArray)" -
echo "TIME: $(date)"
xcodebuild \
@@ -57,7 +58,8 @@ xcodebuild \
-scheme CoreCronetEnd2EndTests \
-destination name="iPhone 6" \
test \
- | egrep "$XCODEBUILD_FILTER" \
+ | egrep -v "$XCODEBUILD_FILTER" \
+ | egrep -v '^$' \
| egrep -v "(GPBDictionary|GPBArray)" -
echo "TIME: $(date)"
@@ -65,7 +67,10 @@ xcodebuild \
-workspace Tests.xcworkspace \
-scheme CronetUnitTests \
-destination name="iPhone 6" \
- test | xcpretty
+ test \
+ | egrep -v "$XCODEBUILD_FILTER" \
+ | egrep -v '^$' \
+ | egrep -v "(GPBDictionary|GPBArray)" -
echo "TIME: $(date)"
xcodebuild \
@@ -74,5 +79,6 @@ xcodebuild \
-destination name="iPhone 6" \
HOST_PORT_REMOTE=grpc-test.sandbox.googleapis.com \
test \
- | egrep "$XCODEBUILD_FILTER" \
+ | egrep -v "$XCODEBUILD_FILTER" \
+ | egrep -v '^$' \
| egrep -v "(GPBDictionary|GPBArray)" -
diff --git a/src/core/ext/census/census_init.c b/src/objective-c/tests/version.h
index d7f719ff8c..02515063fa 100644
--- a/src/core/ext/census/census_init.c
+++ b/src/objective-c/tests/version.h
@@ -16,18 +16,12 @@
*
*/
-#include "src/core/ext/census/census_interface.h"
+// This file is autogenerated from a template file. Please make
+// modifications to
+// `templates/src/objective-c/GRPCClient/private/version.h.template`
+// instead. This file can be regenerated from the template by running
+// `tools/buildgen/generate_projects.sh`.
-#include <grpc/support/log.h>
-#include "src/core/ext/census/census_rpc_stats.h"
-#include "src/core/ext/census/census_tracing.h"
-void census_init(void) {
- census_tracing_init();
- census_stats_store_init();
-}
-
-void census_shutdown(void) {
- census_stats_store_shutdown();
- census_tracing_shutdown();
-}
+#define GRPC_OBJC_VERSION_STRING @"1.8.0-dev"
+#define GRPC_C_VERSION_STRING @"5.0.0-dev"
diff --git a/src/php/composer.json b/src/php/composer.json
index 3606a18f34..09471d23fe 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -2,7 +2,7 @@
"name": "grpc/grpc-dev",
"description": "gRPC library for PHP - for Developement use only",
"license": "Apache-2.0",
- "version": "1.7.0",
+ "version": "1.8.0",
"require": {
"php": ">=5.5.0",
"google/protobuf": "^v3.3.0"
diff --git a/src/php/ext/grpc/call_credentials.c b/src/php/ext/grpc/call_credentials.c
index 1eee8645df..a395d53614 100644
--- a/src/php/ext/grpc/call_credentials.c
+++ b/src/php/ext/grpc/call_credentials.c
@@ -35,6 +35,7 @@
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
+#include <grpc/support/string_util.h>
zend_class_entry *grpc_ce_call_credentials;
#if PHP_MAJOR_VERSION >= 7
@@ -143,9 +144,12 @@ PHP_METHOD(CallCredentials, createFromPlugin) {
}
/* Callback function for plugin creds API */
-void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb,
- void *user_data) {
+int plugin_get_metadata(
+ void *ptr, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) {
TSRMLS_FETCH();
plugin_state *state = (plugin_state *)ptr;
@@ -175,15 +179,19 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
/* call the user callback function */
zend_call_function(state->fci, state->fci_cache TSRMLS_CC);
- grpc_status_code code = GRPC_STATUS_OK;
+ *num_creds_md = 0;
+ *status = GRPC_STATUS_OK;
+ *error_details = NULL;
+
grpc_metadata_array metadata;
- bool cleanup = true;
if (retval == NULL || Z_TYPE_P(retval) != IS_ARRAY) {
- cleanup = false;
- code = GRPC_STATUS_INVALID_ARGUMENT;
- } else if (!create_metadata_array(retval, &metadata)) {
- code = GRPC_STATUS_INVALID_ARGUMENT;
+ *status = GRPC_STATUS_INVALID_ARGUMENT;
+ return true; // Synchronous return.
+ }
+ if (!create_metadata_array(retval, &metadata)) {
+ *status = GRPC_STATUS_INVALID_ARGUMENT;
+ return true; // Synchronous return.
}
if (retval != NULL) {
@@ -197,14 +205,24 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context,
#endif
}
- /* Pass control back to core */
- cb(user_data, metadata.metadata, metadata.count, code, NULL);
- if (cleanup) {
- for (int i = 0; i < metadata.count; i++) {
+ if (metadata.count > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) {
+ *status = GRPC_STATUS_INTERNAL;
+ *error_details = gpr_strdup(
+ "PHP plugin credentials returned too many metadata entries");
+ for (size_t i = 0; i < metadata.count; i++) {
+ // TODO(stanleycheung): Why don't we need to unref the key here?
grpc_slice_unref(metadata.metadata[i].value);
}
- grpc_metadata_array_destroy(&metadata);
+ } else {
+ // Return data to core.
+ *num_creds_md = metadata.count;
+ for (size_t i = 0; i < metadata.count; ++i) {
+ creds_md[i] = metadata.metadata[i];
+ }
}
+
+ grpc_metadata_array_destroy(&metadata);
+ return true; // Synchronous return.
}
/* Cleanup function for plugin creds API */
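
Note: the hunk above moves the PHP plugin from the old callback-based flow (invoking cb(...)) to the synchronous plugin-credentials path: the callback now returns nonzero to signal a synchronous result and reports its metadata through the creds_md/num_creds_md/status/error_details out-parameters, bounded by GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX. A minimal stand-alone C sketch of a plugin callback with the same signature, using made-up header contents, might look like the following; it is an illustration under those assumptions, not code from this patch.

    /* Sketch only; the header key and token value are illustrative. */
    #include <stddef.h>
    #include <grpc/grpc.h>
    #include <grpc/grpc_security.h>
    #include <grpc/slice.h>

    static int example_get_metadata(
        void *state, grpc_auth_metadata_context context,
        grpc_credentials_plugin_metadata_cb cb, void *user_data,
        grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
        size_t *num_creds_md, grpc_status_code *status,
        const char **error_details) {
      (void)state;
      (void)context;
      (void)cb;        /* unused: this plugin never takes the asynchronous path */
      (void)user_data;
      /* Return a single header pair synchronously. */
      creds_md[0].key = grpc_slice_from_static_string("authorization");
      creds_md[0].value = grpc_slice_from_static_string("Bearer example-token");
      creds_md[0].flags = 0;
      *num_creds_md = 1;
      *status = GRPC_STATUS_OK;
      *error_details = NULL;
      return 1; /* nonzero: metadata was produced synchronously */
    }
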
diff --git a/src/php/ext/grpc/call_credentials.h b/src/php/ext/grpc/call_credentials.h
index 9be8763278..663cc6858d 100755
--- a/src/php/ext/grpc/call_credentials.h
+++ b/src/php/ext/grpc/call_credentials.h
@@ -65,9 +65,12 @@ typedef struct plugin_state {
} plugin_state;
/* Callback function for plugin creds API */
-void plugin_get_metadata(void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb,
- void *user_data);
+int plugin_get_metadata(
+ void *ptr, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details);
/* Cleanup function for plugin creds API */
void plugin_destroy_state(void *ptr);
diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c
index e46037743d..a65d233017 100644
--- a/src/php/ext/grpc/server.c
+++ b/src/php/ext/grpc/server.c
@@ -169,7 +169,7 @@ PHP_METHOD(Server, requestCall) {
/**
* Add a http2 over tcp listener.
* @param string $addr The address to add
- * @return bool True on success, false on failure
+ * @return int Port on success, 0 on failure
*/
PHP_METHOD(Server, addHttp2Port) {
const char *addr;
@@ -190,7 +190,7 @@ PHP_METHOD(Server, addHttp2Port) {
* Add a secure http2 over tcp listener.
* @param string $addr The address to add
* @param ServerCredentials The ServerCredentials object
- * @return bool True on success, false on failure
+ * @return int Port on success, 0 on failure
*/
PHP_METHOD(Server, addSecureHttp2Port) {
const char *addr;
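
Note: the corrected docblocks reflect what the wrapped core listener functions actually return: the bound port number on success (useful when binding port 0 to let the OS pick a free port) and 0 on failure. The short C sketch below, written against the core API rather than the PHP extension, illustrates that convention; it is an example only.

    /* Sketch only; binds an ephemeral port on localhost and prints it. */
    #include <stdio.h>
    #include <grpc/grpc.h>

    int main(void) {
      grpc_init();
      grpc_server *server = grpc_server_create(NULL, NULL);
      /* Returns the bound port on success, 0 on failure. */
      int port = grpc_server_add_insecure_http2_port(server, "127.0.0.1:0");
      if (port == 0) {
        fprintf(stderr, "failed to add listener\n");
      } else {
        printf("listener bound to port %d\n", port);
      }
      grpc_server_destroy(server);
      grpc_shutdown();
      return 0;
    }
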
diff --git a/src/php/ext/grpc/version.h b/src/php/ext/grpc/version.h
index 07d8eee7fe..93dd563cff 100644
--- a/src/php/ext/grpc/version.h
+++ b/src/php/ext/grpc/version.h
@@ -20,6 +20,6 @@
#ifndef VERSION_H
#define VERSION_H
-#define PHP_GRPC_VERSION "1.7.0dev"
+#define PHP_GRPC_VERSION "1.8.0dev"
#endif /* VERSION_H */
diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php
index b62c2c2fa9..67378a34a8 100644
--- a/src/php/lib/Grpc/BaseStub.php
+++ b/src/php/lib/Grpc/BaseStub.php
@@ -218,7 +218,7 @@ class BaseStub
* (optional)
* @param array $options An array of options (optional)
*
- * @return SimpleSurfaceActiveCall The active call object
+ * @return UnaryCall The active call object
*/
protected function _simpleRequest($method,
$argument,
@@ -253,7 +253,7 @@ class BaseStub
* (optional)
* @param array $options An array of options (optional)
*
- * @return ClientStreamingSurfaceActiveCall The active call object
+ * @return ClientStreamingCall The active call object
*/
protected function _clientStreamRequest($method,
$deserialize,
@@ -288,7 +288,7 @@ class BaseStub
* (optional)
* @param array $options An array of options (optional)
*
- * @return ServerStreamingSurfaceActiveCall The active call object
+ * @return ServerStreamingCall The active call object
*/
protected function _serverStreamRequest($method,
$argument,
@@ -322,7 +322,7 @@ class BaseStub
* (optional)
* @param array $options An array of options (optional)
*
- * @return BidiStreamingSurfaceActiveCall The active call object
+ * @return BidiStreamingCall The active call object
*/
protected function _bidiRequest($method,
$deserialize,
diff --git a/src/php/tests/qps/client.php b/src/php/tests/qps/client.php
index a785d831b4..08904054eb 100644
--- a/src/php/tests/qps/client.php
+++ b/src/php/tests/qps/client.php
@@ -37,6 +37,7 @@
*/
require dirname(__FILE__).'/vendor/autoload.php';
+require dirname(__FILE__).'/histogram.php';
/**
* Assertion function that always exits with an error code if the assertion is
@@ -63,19 +64,19 @@ function hardAssertIfStatusOk($status)
}
/* Start the actual client */
-
-function qps_client_main($proxy_address) {
- echo "Initiating php client\n";
+function qps_client_main($proxy_address, $server_ind) {
+ echo "[php-client] Initiating php client\n";
$proxystubopts = [];
$proxystubopts['credentials'] = Grpc\ChannelCredentials::createInsecure();
$proxystub = new Grpc\Testing\ProxyClientServiceClient($proxy_address, $proxystubopts);
list($config, $status) = $proxystub->GetConfig(new Grpc\Testing\Void())->wait();
hardAssertIfStatusOk($status);
- hardAssert($config->getClientChannels() == 1, "Only 1 channel supported");
hardAssert($config->getOutstandingRpcsPerChannel() == 1, "Only 1 outstanding RPC supported");
- echo "Got configuration from proxy, target is " . $config->getServerTargets()[0] . "\n";
+    echo "[php-client] Got configuration from proxy, target is server #$server_ind: " . $config->getServerTargets()[$server_ind] . "\n";
+ $histres = $config->getHistogramParams()->getResolution();
+ $histmax = $config->getHistogramParams()->getMaxPossible();
$stubopts = [];
if ($config->getSecurityParams()) {
@@ -93,10 +94,10 @@ function qps_client_main($proxy_address) {
} else {
$stubopts['credentials'] = Grpc\ChannelCredentials::createInsecure();
}
- echo "Initiating php benchmarking client\n";
+ echo "[php-client] Initiating php benchmarking client\n";
$stub = new Grpc\Testing\BenchmarkServiceClient(
- $config->getServerTargets()[0], $stubopts);
+ $config->getServerTargets()[$server_ind], $stubopts);
$req = new Grpc\Testing\SimpleRequest();
$req->setResponseType(Grpc\Testing\PayloadType::COMPRESSABLE);
@@ -115,8 +116,11 @@ function qps_client_main($proxy_address) {
} else {
$poisson = false;
}
- $metric = new Grpc\Testing\ProxyStat;
- $telemetry = $proxystub->ReportTime();
+ $histogram = new Histogram($histres, $histmax);
+ $histogram->clean();
+ $count = 0;
+ $histogram_result = new Grpc\Testing\HistogramData;
+ $telehist = $proxystub->ReportHist();
if ($config->getRpcType() == Grpc\Testing\RpcType::UNARY) {
while (1) {
if ($poisson) {
@@ -126,8 +130,20 @@ function qps_client_main($proxy_address) {
$startreq = microtime(true);
list($resp,$status) = $stub->UnaryCall($req)->wait();
hardAssertIfStatusOk($status);
- $metric->setLatency(microtime(true)-$startreq);
- $telemetry->write($metric);
+ $histogram->add((microtime(true)-$startreq)*1e9);
+ $count += 1;
+ if ($count == 2000) {
+ $contents = $histogram->contents();
+ $histogram_result->setBucket($contents);
+ $histogram_result->setMinSeen($histogram->minimum());
+ $histogram_result->setMaxSeen($histogram->maximum());
+ $histogram_result->setSum($histogram->sum());
+ $histogram_result->setSumOfSquares($histogram->sum_of_squares());
+ $histogram_result->setCount($histogram->count());
+ $telehist->write($histogram_result);
+ $histogram->clean();
+ $count = 0;
+ }
}
} else {
$stream = $stub->StreamingCall();
@@ -139,8 +155,20 @@ function qps_client_main($proxy_address) {
$startreq = microtime(true);
$stream->write($req);
$resp = $stream->read();
- $metric->setLatency(microtime(true)-$startreq);
- $telemetry->write($metric);
+ $histogram->add((microtime(true)-$startreq)*1e9);
+ $count += 1;
+ if ($count == 2000) {
+ $contents = $histogram->contents();
+ $histogram_result->setBucket($contents);
+ $histogram_result->setMinSeen($histogram->minimum());
+ $histogram_result->setMaxSeen($histogram->maximum());
+ $histogram_result->setSum($histogram->sum());
+ $histogram_result->setSumOfSquares($histogram->sum_of_squares());
+ $histogram_result->setCount($histogram->count());
+ $telehist->write($histogram_result);
+ $histogram->clean();
+ $count = 0;
+ }
}
}
}
@@ -148,4 +176,4 @@ function qps_client_main($proxy_address) {
ini_set('display_startup_errors', 1);
ini_set('display_errors', 1);
error_reporting(-1);
-qps_client_main($argv[1]);
+qps_client_main($argv[1], $argv[2]);
diff --git a/src/php/tests/qps/composer.json b/src/php/tests/qps/composer.json
index 8c1e7b6c74..f8512648c4 100644
--- a/src/php/tests/qps/composer.json
+++ b/src/php/tests/qps/composer.json
@@ -1,8 +1,7 @@
{
- "minimum-stability": "dev",
"require": {
"grpc/grpc": "dev-master",
- "google/protobuf": "^v3.3.0"
+ "google/protobuf": "v3.4.1"
},
"autoload": {
"psr-4": {
diff --git a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Core/Stats.php b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Core/Stats.php
new file mode 100644
index 0000000000..f9c710cd4e
--- /dev/null
+++ b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Core/Stats.php
@@ -0,0 +1,33 @@
+<?php
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/core/stats.proto
+
+namespace GPBMetadata\Src\Proto\Grpc\Core;
+
+class Stats
+{
+ public static $is_initialized = false;
+
+ public static function initOnce() {
+ $pool = \Google\Protobuf\Internal\DescriptorPool::getGeneratedPool();
+
+ if (static::$is_initialized == true) {
+ return;
+ }
+ $pool->internalAddGeneratedFile(hex2bin(
+ "0a97020a1f7372632f70726f746f2f677270632f636f72652f7374617473" .
+ "2e70726f746f1209677270632e636f726522260a064275636b6574120d0a" .
+ "057374617274180120012801120d0a05636f756e74180220012804222f0a" .
+ "09486973746f6772616d12220a076275636b65747318012003280b32112e" .
+ "677270632e636f72652e4275636b6574225b0a064d6574726963120c0a04" .
+ "6e616d65180120012809120f0a05636f756e74180a20012804480012290a" .
+ "09686973746f6772616d180b2001280b32142e677270632e636f72652e48" .
+ "6973746f6772616d480042070a0576616c7565222b0a0553746174731222" .
+ "0a076d65747269637318012003280b32112e677270632e636f72652e4d65" .
+ "74726963620670726f746f33"
+ ));
+
+ static::$is_initialized = true;
+ }
+}
+
diff --git a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Control.php b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Control.php
index efca18a0cb..9b3a7529ec 100644
--- a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Control.php
+++ b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Control.php
@@ -17,108 +17,119 @@ class Control
\GPBMetadata\Src\Proto\Grpc\Testing\Payloads::initOnce();
\GPBMetadata\Src\Proto\Grpc\Testing\Stats::initOnce();
$pool->internalAddGeneratedFile(hex2bin(
- "0add170a247372632f70726f746f2f677270632f74657374696e672f636f" .
- "6e74726f6c2e70726f746f120c677270632e74657374696e671a25737263" .
- "2f70726f746f2f677270632f74657374696e672f7061796c6f6164732e70" .
- "726f746f1a227372632f70726f746f2f677270632f74657374696e672f73" .
- "746174732e70726f746f22250a0d506f6973736f6e506172616d7312140a" .
- "0c6f6666657265645f6c6f616418012001280122120a10436c6f7365644c" .
- "6f6f70506172616d73227b0a0a4c6f6164506172616d7312350a0b636c6f" .
- "7365645f6c6f6f7018012001280b321e2e677270632e74657374696e672e" .
- "436c6f7365644c6f6f70506172616d734800122e0a07706f6973736f6e18" .
- "022001280b321b2e677270632e74657374696e672e506f6973736f6e5061" .
- "72616d73480042060a046c6f616422430a0e536563757269747950617261" .
- "6d7312130a0b7573655f746573745f6361180120012808121c0a14736572" .
- "7665725f686f73745f6f76657272696465180220012809224d0a0a436861" .
- "6e6e656c417267120c0a046e616d6518012001280912130a097374725f76" .
- "616c7565180220012809480012130a09696e745f76616c75651803200128" .
- "05480042070a0576616c756522a0040a0c436c69656e74436f6e66696712" .
- "160a0e7365727665725f74617267657473180120032809122d0a0b636c69" .
- "656e745f7479706518022001280e32182e677270632e74657374696e672e" .
- "436c69656e745479706512350a0f73656375726974795f706172616d7318" .
- "032001280b321c2e677270632e74657374696e672e536563757269747950" .
- "6172616d7312240a1c6f75747374616e64696e675f727063735f7065725f" .
- "6368616e6e656c18042001280512170a0f636c69656e745f6368616e6e65" .
- "6c73180520012805121c0a146173796e635f636c69656e745f7468726561" .
- "647318072001280512270a087270635f7479706518082001280e32152e67" .
- "7270632e74657374696e672e52706354797065122d0a0b6c6f61645f7061" .
- "72616d73180a2001280b32182e677270632e74657374696e672e4c6f6164" .
- "506172616d7312330a0e7061796c6f61645f636f6e666967180b2001280b" .
- "321b2e677270632e74657374696e672e5061796c6f6164436f6e66696712" .
- "370a10686973746f6772616d5f706172616d73180c2001280b321d2e6772" .
- "70632e74657374696e672e486973746f6772616d506172616d7312110a09" .
- "636f72655f6c697374180d2003280512120a0a636f72655f6c696d697418" .
- "0e2001280512180a106f746865725f636c69656e745f617069180f200128" .
- "09122e0a0c6368616e6e656c5f6172677318102003280b32182e67727063" .
- "2e74657374696e672e4368616e6e656c41726722380a0c436c69656e7453" .
- "746174757312280a05737461747318012001280b32192e677270632e7465" .
- "7374696e672e436c69656e74537461747322150a044d61726b120d0a0572" .
- "6573657418012001280822680a0a436c69656e7441726773122b0a057365" .
- "74757018012001280b321a2e677270632e74657374696e672e436c69656e" .
- "74436f6e666967480012220a046d61726b18022001280b32122e67727063" .
- "2e74657374696e672e4d61726b480042090a076172677479706522b4020a" .
- "0c536572766572436f6e666967122d0a0b7365727665725f747970651801" .
- "2001280e32182e677270632e74657374696e672e53657276657254797065" .
- "12350a0f73656375726974795f706172616d7318022001280b321c2e6772" .
- "70632e74657374696e672e5365637572697479506172616d73120c0a0470" .
- "6f7274180420012805121c0a146173796e635f7365727665725f74687265" .
- "61647318072001280512120a0a636f72655f6c696d697418082001280512" .
- "330a0e7061796c6f61645f636f6e66696718092001280b321b2e67727063" .
- "2e74657374696e672e5061796c6f6164436f6e66696712110a09636f7265" .
- "5f6c697374180a2003280512180a106f746865725f7365727665725f6170" .
- "69180b20012809121c0a137265736f757263655f71756f74615f73697a65" .
- "18e9072001280522680a0a53657276657241726773122b0a057365747570" .
- "18012001280b321a2e677270632e74657374696e672e536572766572436f" .
- "6e666967480012220a046d61726b18022001280b32122e677270632e7465" .
- "7374696e672e4d61726b480042090a076172677479706522550a0c536572" .
- "76657253746174757312280a05737461747318012001280b32192e677270" .
- "632e74657374696e672e5365727665725374617473120c0a04706f727418" .
- "0220012805120d0a05636f726573180320012805220d0a0b436f72655265" .
- "7175657374221d0a0c436f7265526573706f6e7365120d0a05636f726573" .
- "18012001280522060a04566f696422fd010a085363656e6172696f120c0a" .
- "046e616d6518012001280912310a0d636c69656e745f636f6e6669671802" .
- "2001280b321a2e677270632e74657374696e672e436c69656e74436f6e66" .
- "696712130a0b6e756d5f636c69656e747318032001280512310a0d736572" .
- "7665725f636f6e66696718042001280b321a2e677270632e74657374696e" .
- "672e536572766572436f6e66696712130a0b6e756d5f7365727665727318" .
- "052001280512160a0e7761726d75705f7365636f6e647318062001280512" .
- "190a1162656e63686d61726b5f7365636f6e647318072001280512200a18" .
- "737061776e5f6c6f63616c5f776f726b65725f636f756e74180820012805" .
- "22360a095363656e6172696f7312290a097363656e6172696f7318012003" .
- "280b32162e677270632e74657374696e672e5363656e6172696f22f8020a" .
- "155363656e6172696f526573756c7453756d6d617279120b0a0371707318" .
- "0120012801121b0a137170735f7065725f7365727665725f636f72651802" .
- "20012801121a0a127365727665725f73797374656d5f74696d6518032001" .
- "280112180a107365727665725f757365725f74696d65180420012801121a" .
- "0a12636c69656e745f73797374656d5f74696d6518052001280112180a10" .
- "636c69656e745f757365725f74696d6518062001280112120a0a6c617465" .
- "6e63795f353018072001280112120a0a6c6174656e63795f393018082001" .
- "280112120a0a6c6174656e63795f393518092001280112120a0a6c617465" .
- "6e63795f3939180a2001280112130a0b6c6174656e63795f393939180b20" .
- "01280112180a107365727665725f6370755f7573616765180c2001280112" .
- "260a1e7375636365737366756c5f72657175657374735f7065725f736563" .
- "6f6e64180d2001280112220a1a6661696c65645f72657175657374735f70" .
- "65725f7365636f6e64180e200128012283030a0e5363656e6172696f5265" .
- "73756c7412280a087363656e6172696f18012001280b32162e677270632e" .
- "74657374696e672e5363656e6172696f122e0a096c6174656e6369657318" .
- "022001280b321b2e677270632e74657374696e672e486973746f6772616d" .
- "44617461122f0a0c636c69656e745f737461747318032003280b32192e67" .
- "7270632e74657374696e672e436c69656e745374617473122f0a0c736572" .
- "7665725f737461747318042003280b32192e677270632e74657374696e67" .
- "2e536572766572537461747312140a0c7365727665725f636f7265731805" .
- "2003280512340a0773756d6d61727918062001280b32232e677270632e74" .
- "657374696e672e5363656e6172696f526573756c7453756d6d6172791216" .
- "0a0e636c69656e745f7375636365737318072003280812160a0e73657276" .
- "65725f7375636365737318082003280812390a0f726571756573745f7265" .
- "73756c747318092003280b32202e677270632e74657374696e672e526571" .
- "75657374526573756c74436f756e742a410a0a436c69656e745479706512" .
- "0f0a0b53594e435f434c49454e54100012100a0c4153594e435f434c4945" .
- "4e54100112100a0c4f544845525f434c49454e5410022a5b0a0a53657276" .
- "657254797065120f0a0b53594e435f534552564552100012100a0c415359" .
- "4e435f534552564552100112180a144153594e435f47454e455249435f53" .
- "4552564552100212100a0c4f544845525f53455256455210032a230a0752" .
- "70635479706512090a05554e4152591000120d0a0953545245414d494e47" .
- "1001620670726f746f33"
+ "0aa21a0a247372632f70726f746f2f677270632f74657374696e672f636f" .
+ "6e74726f6c2e70726f746f120c677270632e74657374696e671a22737263" .
+ "2f70726f746f2f677270632f74657374696e672f73746174732e70726f74" .
+ "6f22250a0d506f6973736f6e506172616d7312140a0c6f6666657265645f" .
+ "6c6f616418012001280122120a10436c6f7365644c6f6f70506172616d73" .
+ "227b0a0a4c6f6164506172616d7312350a0b636c6f7365645f6c6f6f7018" .
+ "012001280b321e2e677270632e74657374696e672e436c6f7365644c6f6f" .
+ "70506172616d734800122e0a07706f6973736f6e18022001280b321b2e67" .
+ "7270632e74657374696e672e506f6973736f6e506172616d73480042060a" .
+ "046c6f616422560a0e5365637572697479506172616d7312130a0b757365" .
+ "5f746573745f6361180120012808121c0a147365727665725f686f73745f" .
+ "6f7665727269646518022001280912110a09637265645f74797065180320" .
+ "012809224d0a0a4368616e6e656c417267120c0a046e616d651801200128" .
+ "0912130a097374725f76616c7565180220012809480012130a09696e745f" .
+ "76616c7565180320012805480042070a0576616c756522d5040a0c436c69" .
+ "656e74436f6e66696712160a0e7365727665725f74617267657473180120" .
+ "032809122d0a0b636c69656e745f7479706518022001280e32182e677270" .
+ "632e74657374696e672e436c69656e745479706512350a0f736563757269" .
+ "74795f706172616d7318032001280b321c2e677270632e74657374696e67" .
+ "2e5365637572697479506172616d7312240a1c6f75747374616e64696e67" .
+ "5f727063735f7065725f6368616e6e656c18042001280512170a0f636c69" .
+ "656e745f6368616e6e656c73180520012805121c0a146173796e635f636c" .
+ "69656e745f7468726561647318072001280512270a087270635f74797065" .
+ "18082001280e32152e677270632e74657374696e672e5270635479706512" .
+ "2d0a0b6c6f61645f706172616d73180a2001280b32182e677270632e7465" .
+ "7374696e672e4c6f6164506172616d7312330a0e7061796c6f61645f636f" .
+ "6e666967180b2001280b321b2e677270632e74657374696e672e5061796c" .
+ "6f6164436f6e66696712370a10686973746f6772616d5f706172616d7318" .
+ "0c2001280b321d2e677270632e74657374696e672e486973746f6772616d" .
+ "506172616d7312110a09636f72655f6c697374180d2003280512120a0a63" .
+ "6f72655f6c696d6974180e2001280512180a106f746865725f636c69656e" .
+ "745f617069180f20012809122e0a0c6368616e6e656c5f61726773181020" .
+ "03280b32182e677270632e74657374696e672e4368616e6e656c41726712" .
+ "160a0e746872656164735f7065725f6371181120012805121b0a136d6573" .
+ "73616765735f7065725f73747265616d18122001280522380a0c436c6965" .
+ "6e7453746174757312280a05737461747318012001280b32192e67727063" .
+ "2e74657374696e672e436c69656e74537461747322150a044d61726b120d" .
+ "0a05726573657418012001280822680a0a436c69656e7441726773122b0a" .
+ "05736574757018012001280b321a2e677270632e74657374696e672e436c" .
+ "69656e74436f6e666967480012220a046d61726b18022001280b32122e67" .
+ "7270632e74657374696e672e4d61726b480042090a076172677479706522" .
+ "fd020a0c536572766572436f6e666967122d0a0b7365727665725f747970" .
+ "6518012001280e32182e677270632e74657374696e672e53657276657254" .
+ "79706512350a0f73656375726974795f706172616d7318022001280b321c" .
+ "2e677270632e74657374696e672e5365637572697479506172616d73120c" .
+ "0a04706f7274180420012805121c0a146173796e635f7365727665725f74" .
+ "68726561647318072001280512120a0a636f72655f6c696d697418082001" .
+ "280512330a0e7061796c6f61645f636f6e66696718092001280b321b2e67" .
+ "7270632e74657374696e672e5061796c6f6164436f6e66696712110a0963" .
+ "6f72655f6c697374180a2003280512180a106f746865725f736572766572" .
+ "5f617069180b2001280912160a0e746872656164735f7065725f6371180c" .
+ "20012805121c0a137265736f757263655f71756f74615f73697a6518e907" .
+ "20012805122f0a0c6368616e6e656c5f6172677318ea072003280b32182e" .
+ "677270632e74657374696e672e4368616e6e656c41726722680a0a536572" .
+ "76657241726773122b0a05736574757018012001280b321a2e677270632e" .
+ "74657374696e672e536572766572436f6e666967480012220a046d61726b" .
+ "18022001280b32122e677270632e74657374696e672e4d61726b48004209" .
+ "0a076172677479706522550a0c53657276657253746174757312280a0573" .
+ "7461747318012001280b32192e677270632e74657374696e672e53657276" .
+ "65725374617473120c0a04706f7274180220012805120d0a05636f726573" .
+ "180320012805220d0a0b436f726552657175657374221d0a0c436f726552" .
+ "6573706f6e7365120d0a05636f72657318012001280522060a04566f6964" .
+ "22fd010a085363656e6172696f120c0a046e616d6518012001280912310a" .
+ "0d636c69656e745f636f6e66696718022001280b321a2e677270632e7465" .
+ "7374696e672e436c69656e74436f6e66696712130a0b6e756d5f636c6965" .
+ "6e747318032001280512310a0d7365727665725f636f6e66696718042001" .
+ "280b321a2e677270632e74657374696e672e536572766572436f6e666967" .
+ "12130a0b6e756d5f7365727665727318052001280512160a0e7761726d75" .
+ "705f7365636f6e647318062001280512190a1162656e63686d61726b5f73" .
+ "65636f6e647318072001280512200a18737061776e5f6c6f63616c5f776f" .
+ "726b65725f636f756e7418082001280522360a095363656e6172696f7312" .
+ "290a097363656e6172696f7318012003280b32162e677270632e74657374" .
+ "696e672e5363656e6172696f2284040a155363656e6172696f526573756c" .
+ "7453756d6d617279120b0a03717073180120012801121b0a137170735f70" .
+ "65725f7365727665725f636f7265180220012801121a0a12736572766572" .
+ "5f73797374656d5f74696d6518032001280112180a107365727665725f75" .
+ "7365725f74696d65180420012801121a0a12636c69656e745f7379737465" .
+ "6d5f74696d6518052001280112180a10636c69656e745f757365725f7469" .
+ "6d6518062001280112120a0a6c6174656e63795f35301807200128011212" .
+ "0a0a6c6174656e63795f393018082001280112120a0a6c6174656e63795f" .
+ "393518092001280112120a0a6c6174656e63795f3939180a200128011213" .
+ "0a0b6c6174656e63795f393939180b2001280112180a107365727665725f" .
+ "6370755f7573616765180c2001280112260a1e7375636365737366756c5f" .
+ "72657175657374735f7065725f7365636f6e64180d2001280112220a1a66" .
+ "61696c65645f72657175657374735f7065725f7365636f6e64180e200128" .
+ "0112200a18636c69656e745f706f6c6c735f7065725f7265717565737418" .
+ "0f2001280112200a187365727665725f706f6c6c735f7065725f72657175" .
+ "65737418102001280112220a1a7365727665725f717565726965735f7065" .
+ "725f6370755f73656318112001280112220a1a636c69656e745f71756572" .
+ "6965735f7065725f6370755f7365631812200128012283030a0e5363656e" .
+ "6172696f526573756c7412280a087363656e6172696f18012001280b3216" .
+ "2e677270632e74657374696e672e5363656e6172696f122e0a096c617465" .
+ "6e6369657318022001280b321b2e677270632e74657374696e672e486973" .
+ "746f6772616d44617461122f0a0c636c69656e745f737461747318032003" .
+ "280b32192e677270632e74657374696e672e436c69656e74537461747312" .
+ "2f0a0c7365727665725f737461747318042003280b32192e677270632e74" .
+ "657374696e672e536572766572537461747312140a0c7365727665725f63" .
+ "6f72657318052003280512340a0773756d6d61727918062001280b32232e" .
+ "677270632e74657374696e672e5363656e6172696f526573756c7453756d" .
+ "6d61727912160a0e636c69656e745f737563636573731807200328081216" .
+ "0a0e7365727665725f7375636365737318082003280812390a0f72657175" .
+ "6573745f726573756c747318092003280b32202e677270632e7465737469" .
+ "6e672e52657175657374526573756c74436f756e742a410a0a436c69656e" .
+ "7454797065120f0a0b53594e435f434c49454e54100012100a0c4153594e" .
+ "435f434c49454e54100112100a0c4f544845525f434c49454e5410022a5b" .
+ "0a0a53657276657254797065120f0a0b53594e435f534552564552100012" .
+ "100a0c4153594e435f534552564552100112180a144153594e435f47454e" .
+ "455249435f534552564552100212100a0c4f544845525f53455256455210" .
+ "032a720a075270635479706512090a05554e4152591000120d0a09535452" .
+ "45414d494e47100112190a1553545245414d494e475f46524f4d5f434c49" .
+ "454e54100212190a1553545245414d494e475f46524f4d5f534552564552" .
+ "100312170a1353545245414d494e475f424f54485f574159531004620670" .
+ "726f746f33"
));
static::$is_initialized = true;
diff --git a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/ProxyService.php b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/ProxyService.php
index e35944e1d8..e07f73679e 100644
--- a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/ProxyService.php
+++ b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/ProxyService.php
@@ -15,17 +15,20 @@ class ProxyService
return;
}
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
+ \GPBMetadata\Src\Proto\Grpc\Testing\Stats::initOnce();
$pool->internalAddGeneratedFile(hex2bin(
- "0a97020a2a7372632f70726f746f2f677270632f74657374696e672f7072" .
+ "0ad6020a2a7372632f70726f746f2f677270632f74657374696e672f7072" .
"6f78792d736572766963652e70726f746f120c677270632e74657374696e" .
- "671a247372632f70726f746f2f677270632f74657374696e672f636f6e74" .
- "726f6c2e70726f746f221c0a0950726f787953746174120f0a076c617465" .
- "6e6379180120012801328e010a1250726f7879436c69656e745365727669" .
- "6365123b0a09476574436f6e66696712122e677270632e74657374696e67" .
- "2e566f69641a1a2e677270632e74657374696e672e436c69656e74436f6e" .
- "666967123b0a0a5265706f727454696d6512172e677270632e7465737469" .
- "6e672e50726f7879537461741a122e677270632e74657374696e672e566f" .
- "69642801620670726f746f33"
+ "671a227372632f70726f746f2f677270632f74657374696e672f73746174" .
+ "732e70726f746f221c0a0950726f787953746174120f0a076c6174656e63" .
+ "7918012001280132cf010a1250726f7879436c69656e7453657276696365" .
+ "123b0a09476574436f6e66696712122e677270632e74657374696e672e56" .
+ "6f69641a1a2e677270632e74657374696e672e436c69656e74436f6e6669" .
+ "67123b0a0a5265706f727454696d6512172e677270632e74657374696e67" .
+ "2e50726f7879537461741a122e677270632e74657374696e672e566f6964" .
+ "2801123f0a0a5265706f727448697374121b2e677270632e74657374696e" .
+ "672e486973746f6772616d446174611a122e677270632e74657374696e67" .
+ "2e566f69642801620670726f746f33"
));
static::$is_initialized = true;
diff --git a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Services.php b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Services.php
index 7a9439a5b9..e4029182c7 100644
--- a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Services.php
+++ b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Services.php
@@ -16,27 +16,40 @@ class Services
}
\GPBMetadata\Src\Proto\Grpc\Testing\Messages::initOnce();
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
+ \GPBMetadata\Src\Proto\Grpc\Testing\Stats::initOnce();
$pool->internalAddGeneratedFile(hex2bin(
- "0ad1040a257372632f70726f746f2f677270632f74657374696e672f7365" .
- "7276696365732e70726f746f120c677270632e74657374696e671a257372" .
- "632f70726f746f2f677270632f74657374696e672f6d657373616765732e" .
- "70726f746f1a247372632f70726f746f2f677270632f74657374696e672f" .
- "636f6e74726f6c2e70726f746f32aa010a1042656e63686d61726b536572" .
- "7669636512460a09556e61727943616c6c121b2e677270632e7465737469" .
- "6e672e53696d706c65526571756573741a1c2e677270632e74657374696e" .
- "672e53696d706c65526573706f6e7365124e0a0d53747265616d696e6743" .
- "616c6c121b2e677270632e74657374696e672e53696d706c655265717565" .
- "73741a1c2e677270632e74657374696e672e53696d706c65526573706f6e" .
- "7365280130013297020a0d576f726b65725365727669636512450a095275" .
- "6e53657276657212182e677270632e74657374696e672e53657276657241" .
- "7267731a1a2e677270632e74657374696e672e5365727665725374617475" .
- "732801300112450a0952756e436c69656e7412182e677270632e74657374" .
- "696e672e436c69656e74417267731a1a2e677270632e74657374696e672e" .
- "436c69656e745374617475732801300112420a09436f7265436f756e7412" .
- "192e677270632e74657374696e672e436f7265526571756573741a1a2e67" .
- "7270632e74657374696e672e436f7265526573706f6e736512340a0a5175" .
- "6974576f726b657212122e677270632e74657374696e672e566f69641a12" .
- "2e677270632e74657374696e672e566f6964620670726f746f33"
+ "0aaa070a257372632f70726f746f2f677270632f74657374696e672f7365" .
+ "7276696365732e70726f746f120c677270632e74657374696e671a247372" .
+ "632f70726f746f2f677270632f74657374696e672f636f6e74726f6c2e70" .
+ "726f746f1a227372632f70726f746f2f677270632f74657374696e672f73" .
+ "746174732e70726f746f32a6030a1042656e63686d61726b536572766963" .
+ "6512460a09556e61727943616c6c121b2e677270632e74657374696e672e" .
+ "53696d706c65526571756573741a1c2e677270632e74657374696e672e53" .
+ "696d706c65526573706f6e7365124e0a0d53747265616d696e6743616c6c" .
+ "121b2e677270632e74657374696e672e53696d706c65526571756573741a" .
+ "1c2e677270632e74657374696e672e53696d706c65526573706f6e736528" .
+ "01300112520a1353747265616d696e6746726f6d436c69656e74121b2e67" .
+ "7270632e74657374696e672e53696d706c65526571756573741a1c2e6772" .
+ "70632e74657374696e672e53696d706c65526573706f6e7365280112520a" .
+ "1353747265616d696e6746726f6d536572766572121b2e677270632e7465" .
+ "7374696e672e53696d706c65526571756573741a1c2e677270632e746573" .
+ "74696e672e53696d706c65526573706f6e7365300112520a115374726561" .
+ "6d696e67426f746857617973121b2e677270632e74657374696e672e5369" .
+ "6d706c65526571756573741a1c2e677270632e74657374696e672e53696d" .
+ "706c65526573706f6e7365280130013297020a0d576f726b657253657276" .
+ "69636512450a0952756e53657276657212182e677270632e74657374696e" .
+ "672e536572766572417267731a1a2e677270632e74657374696e672e5365" .
+ "727665725374617475732801300112450a0952756e436c69656e7412182e" .
+ "677270632e74657374696e672e436c69656e74417267731a1a2e67727063" .
+ "2e74657374696e672e436c69656e745374617475732801300112420a0943" .
+ "6f7265436f756e7412192e677270632e74657374696e672e436f72655265" .
+ "71756573741a1a2e677270632e74657374696e672e436f7265526573706f" .
+ "6e736512340a0a51756974576f726b657212122e677270632e7465737469" .
+ "6e672e566f69641a122e677270632e74657374696e672e566f6964325e0a" .
+ "185265706f72745170735363656e6172696f5365727669636512420a0e52" .
+ "65706f72745363656e6172696f121c2e677270632e74657374696e672e53" .
+ "63656e6172696f526573756c741a122e677270632e74657374696e672e56" .
+ "6f6964620670726f746f33"
));
static::$is_initialized = true;
diff --git a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Stats.php b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Stats.php
index 99c0000a52..3d23b75dfa 100644
--- a/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Stats.php
+++ b/src/php/tests/qps/generated_code/GPBMetadata/Src/Proto/Grpc/Testing/Stats.php
@@ -14,28 +14,33 @@ class Stats
if (static::$is_initialized == true) {
return;
}
+ \GPBMetadata\Src\Proto\Grpc\Core\Stats::initOnce();
$pool->internalAddGeneratedFile(hex2bin(
- "0adf040a227372632f70726f746f2f677270632f74657374696e672f7374" .
- "6174732e70726f746f120c677270632e74657374696e67227a0a0b536572" .
- "766572537461747312140a0c74696d655f656c6170736564180120012801" .
- "12110a0974696d655f7573657218022001280112130a0b74696d655f7379" .
- "7374656d18032001280112160a0e746f74616c5f6370755f74696d651804" .
- "2001280412150a0d69646c655f6370755f74696d65180520012804223b0a" .
- "0f486973746f6772616d506172616d7312120a0a7265736f6c7574696f6e" .
- "18012001280112140a0c6d61785f706f737369626c651802200128012277" .
- "0a0d486973746f6772616d44617461120e0a066275636b65741801200328" .
- "0d12100a086d696e5f7365656e18022001280112100a086d61785f736565" .
- "6e180320012801120b0a0373756d18042001280112160a0e73756d5f6f66" .
- "5f73717561726573180520012801120d0a05636f756e7418062001280122" .
- "380a1252657175657374526573756c74436f756e7412130a0b7374617475" .
- "735f636f6465180120012805120d0a05636f756e7418022001280322b601" .
- "0a0b436c69656e745374617473122e0a096c6174656e6369657318012001" .
- "280b321b2e677270632e74657374696e672e486973746f6772616d446174" .
- "6112140a0c74696d655f656c617073656418022001280112110a0974696d" .
- "655f7573657218032001280112130a0b74696d655f73797374656d180420" .
- "01280112390a0f726571756573745f726573756c747318052003280b3220" .
- "2e677270632e74657374696e672e52657175657374526573756c74436f75" .
- "6e74620670726f746f33"
+ "0ada050a227372632f70726f746f2f677270632f74657374696e672f7374" .
+ "6174732e70726f746f120c677270632e74657374696e6722b7010a0b5365" .
+ "72766572537461747312140a0c74696d655f656c61707365641801200128" .
+ "0112110a0974696d655f7573657218022001280112130a0b74696d655f73" .
+ "797374656d18032001280112160a0e746f74616c5f6370755f74696d6518" .
+ "042001280412150a0d69646c655f6370755f74696d651805200128041215" .
+ "0a0d63715f706f6c6c5f636f756e7418062001280412240a0a636f72655f" .
+ "737461747318072001280b32102e677270632e636f72652e537461747322" .
+ "3b0a0f486973746f6772616d506172616d7312120a0a7265736f6c757469" .
+ "6f6e18012001280112140a0c6d61785f706f737369626c65180220012801" .
+ "22770a0d486973746f6772616d44617461120e0a066275636b6574180120" .
+ "03280d12100a086d696e5f7365656e18022001280112100a086d61785f73" .
+ "65656e180320012801120b0a0373756d18042001280112160a0e73756d5f" .
+ "6f665f73717561726573180520012801120d0a05636f756e741806200128" .
+ "0122380a1252657175657374526573756c74436f756e7412130a0b737461" .
+ "7475735f636f6465180120012805120d0a05636f756e7418022001280322" .
+ "f3010a0b436c69656e745374617473122e0a096c6174656e636965731801" .
+ "2001280b321b2e677270632e74657374696e672e486973746f6772616d44" .
+ "61746112140a0c74696d655f656c617073656418022001280112110a0974" .
+ "696d655f7573657218032001280112130a0b74696d655f73797374656d18" .
+ "042001280112390a0f726571756573745f726573756c747318052003280b" .
+ "32202e677270632e74657374696e672e52657175657374526573756c7443" .
+ "6f756e7412150a0d63715f706f6c6c5f636f756e7418062001280412240a" .
+ "0a636f72655f737461747318072001280b32102e677270632e636f72652e" .
+ "5374617473620670726f746f33"
));
static::$is_initialized = true;
diff --git a/src/php/tests/qps/generated_code/Grpc/Core/Bucket.php b/src/php/tests/qps/generated_code/Grpc/Core/Bucket.php
new file mode 100644
index 0000000000..897d6271c2
--- /dev/null
+++ b/src/php/tests/qps/generated_code/Grpc/Core/Bucket.php
@@ -0,0 +1,75 @@
+<?php
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/core/stats.proto
+
+namespace Grpc\Core;
+
+use Google\Protobuf\Internal\GPBType;
+use Google\Protobuf\Internal\RepeatedField;
+use Google\Protobuf\Internal\GPBUtil;
+
+/**
+ * Generated from protobuf message <code>grpc.core.Bucket</code>
+ */
+class Bucket extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Generated from protobuf field <code>double start = 1;</code>
+ */
+ private $start = 0.0;
+ /**
+ * Generated from protobuf field <code>uint64 count = 2;</code>
+ */
+ private $count = 0;
+
+ public function __construct() {
+ \GPBMetadata\Src\Proto\Grpc\Core\Stats::initOnce();
+ parent::__construct();
+ }
+
+ /**
+ * Generated from protobuf field <code>double start = 1;</code>
+ * @return float
+ */
+ public function getStart()
+ {
+ return $this->start;
+ }
+
+ /**
+ * Generated from protobuf field <code>double start = 1;</code>
+ * @param float $var
+ * @return $this
+ */
+ public function setStart($var)
+ {
+ GPBUtil::checkDouble($var);
+ $this->start = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>uint64 count = 2;</code>
+ * @return int|string
+ */
+ public function getCount()
+ {
+ return $this->count;
+ }
+
+ /**
+ * Generated from protobuf field <code>uint64 count = 2;</code>
+ * @param int|string $var
+ * @return $this
+ */
+ public function setCount($var)
+ {
+ GPBUtil::checkUint64($var);
+ $this->count = $var;
+
+ return $this;
+ }
+
+}
+
diff --git a/src/php/tests/qps/generated_code/Grpc/Core/Histogram.php b/src/php/tests/qps/generated_code/Grpc/Core/Histogram.php
new file mode 100644
index 0000000000..1902be8e4a
--- /dev/null
+++ b/src/php/tests/qps/generated_code/Grpc/Core/Histogram.php
@@ -0,0 +1,49 @@
+<?php
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/core/stats.proto
+
+namespace Grpc\Core;
+
+use Google\Protobuf\Internal\GPBType;
+use Google\Protobuf\Internal\RepeatedField;
+use Google\Protobuf\Internal\GPBUtil;
+
+/**
+ * Generated from protobuf message <code>grpc.core.Histogram</code>
+ */
+class Histogram extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Bucket buckets = 1;</code>
+ */
+ private $buckets;
+
+ public function __construct() {
+ \GPBMetadata\Src\Proto\Grpc\Core\Stats::initOnce();
+ parent::__construct();
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Bucket buckets = 1;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getBuckets()
+ {
+ return $this->buckets;
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Bucket buckets = 1;</code>
+ * @param \Grpc\Core\Bucket[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setBuckets($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Core\Bucket::class);
+ $this->buckets = $arr;
+
+ return $this;
+ }
+
+}
+
diff --git a/src/php/tests/qps/generated_code/Grpc/Core/Metric.php b/src/php/tests/qps/generated_code/Grpc/Core/Metric.php
new file mode 100644
index 0000000000..c3581b7d21
--- /dev/null
+++ b/src/php/tests/qps/generated_code/Grpc/Core/Metric.php
@@ -0,0 +1,102 @@
+<?php
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/core/stats.proto
+
+namespace Grpc\Core;
+
+use Google\Protobuf\Internal\GPBType;
+use Google\Protobuf\Internal\RepeatedField;
+use Google\Protobuf\Internal\GPBUtil;
+
+/**
+ * Generated from protobuf message <code>grpc.core.Metric</code>
+ */
+class Metric extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Generated from protobuf field <code>string name = 1;</code>
+ */
+ private $name = '';
+ protected $value;
+
+ public function __construct() {
+ \GPBMetadata\Src\Proto\Grpc\Core\Stats::initOnce();
+ parent::__construct();
+ }
+
+ /**
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @return string
+ */
+ public function getName()
+ {
+ return $this->name;
+ }
+
+ /**
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @param string $var
+ * @return $this
+ */
+ public function setName($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->name = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>uint64 count = 10;</code>
+ * @return int|string
+ */
+ public function getCount()
+ {
+ return $this->readOneof(10);
+ }
+
+ /**
+ * Generated from protobuf field <code>uint64 count = 10;</code>
+ * @param int|string $var
+ * @return $this
+ */
+ public function setCount($var)
+ {
+ GPBUtil::checkUint64($var);
+ $this->writeOneof(10, $var);
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>.grpc.core.Histogram histogram = 11;</code>
+ * @return \Grpc\Core\Histogram
+ */
+ public function getHistogram()
+ {
+ return $this->readOneof(11);
+ }
+
+ /**
+ * Generated from protobuf field <code>.grpc.core.Histogram histogram = 11;</code>
+ * @param \Grpc\Core\Histogram $var
+ * @return $this
+ */
+ public function setHistogram($var)
+ {
+ GPBUtil::checkMessage($var, \Grpc\Core\Histogram::class);
+ $this->writeOneof(11, $var);
+
+ return $this;
+ }
+
+ /**
+ * @return string
+ */
+ public function getValue()
+ {
+ return $this->whichOneof("value");
+ }
+
+}
+
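For reference only (not part of the patch): a minimal sketch of how the oneof in the newly generated Grpc\Core\Metric class behaves from PHP. It assumes the generated classes above are autoloadable, e.g. through the qps test's composer autoloader; the path below is an assumption, not something this diff adds.

<?php
// Illustrative sketch, not part of the diff. Assumes the generated
// Grpc\Core classes are on the autoload path (hypothetical location).
require_once __DIR__ . '/vendor/autoload.php';

$metric = new \Grpc\Core\Metric();
$metric->setName('server_calls_created');

$metric->setCount(42);          // selects the "count" arm of the value oneof
echo $metric->getValue(), "\n"; // prints "count" (name of the field that is set)

$metric->setHistogram(new \Grpc\Core\Histogram()); // switching arms replaces the old value
echo $metric->getValue(), "\n";                    // now prints "histogram"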
diff --git a/src/php/tests/qps/generated_code/Grpc/Core/Stats.php b/src/php/tests/qps/generated_code/Grpc/Core/Stats.php
new file mode 100644
index 0000000000..e6f3fb0899
--- /dev/null
+++ b/src/php/tests/qps/generated_code/Grpc/Core/Stats.php
@@ -0,0 +1,49 @@
+<?php
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: src/proto/grpc/core/stats.proto
+
+namespace Grpc\Core;
+
+use Google\Protobuf\Internal\GPBType;
+use Google\Protobuf\Internal\RepeatedField;
+use Google\Protobuf\Internal\GPBUtil;
+
+/**
+ * Generated from protobuf message <code>grpc.core.Stats</code>
+ */
+class Stats extends \Google\Protobuf\Internal\Message
+{
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Metric metrics = 1;</code>
+ */
+ private $metrics;
+
+ public function __construct() {
+ \GPBMetadata\Src\Proto\Grpc\Core\Stats::initOnce();
+ parent::__construct();
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Metric metrics = 1;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getMetrics()
+ {
+ return $this->metrics;
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.core.Metric metrics = 1;</code>
+ * @param \Grpc\Core\Metric[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setMetrics($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Core\Metric::class);
+ $this->metrics = $arr;
+
+ return $this;
+ }
+
+}
+
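Again for reference only: a short sketch of assembling the full grpc.core.Stats tree (Stats -> Metric -> Histogram -> Bucket) with the classes introduced above. The autoload path is an assumption; repeated-field setters accept plain PHP arrays.

<?php
// Illustrative sketch, not part of the diff. Assumes the generated
// Grpc\Core classes are autoloadable (hypothetical path).
require_once __DIR__ . '/vendor/autoload.php';

use Grpc\Core\Bucket;
use Grpc\Core\Histogram;
use Grpc\Core\Metric;
use Grpc\Core\Stats;

// One histogram-valued metric...
$bucket = new Bucket();
$bucket->setStart(0.0);
$bucket->setCount(17);

$histogram = new Histogram();
$histogram->setBuckets([$bucket]);      // repeated field: plain array is accepted

$latency = new Metric();
$latency->setName('call_latency_us');
$latency->setHistogram($histogram);

// ...one counter-valued metric, both wrapped in a Stats message.
$calls = new Metric();
$calls->setName('client_calls_created');
$calls->setCount(1234);

$stats = new Stats();
$stats->setMetrics([$latency, $calls]);

echo count($stats->getMetrics()), " metrics\n"; // RepeatedField is Countable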
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/BenchmarkServiceClient.php b/src/php/tests/qps/generated_code/Grpc/Testing/BenchmarkServiceClient.php
index ddf750a94f..fa3e147909 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/BenchmarkServiceClient.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/BenchmarkServiceClient.php
@@ -18,17 +18,19 @@
//
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
-namespace Grpc\Testing {
+namespace Grpc\Testing;
- class BenchmarkServiceClient extends \Grpc\BaseStub {
+/**
+ */
+class BenchmarkServiceClient extends \Grpc\BaseStub {
/**
* @param string $hostname hostname
* @param array $opts channel options
- * @param Grpc\Channel $channel (optional) re-use channel object
+ * @param \Grpc\Channel $channel (optional) re-use channel object
*/
public function __construct($hostname, $opts, $channel = null) {
- parent::__construct($hostname, $opts, $channel);
+ parent::__construct($hostname, $opts, $channel);
}
/**
@@ -40,24 +42,62 @@ namespace Grpc\Testing {
*/
public function UnaryCall(\Grpc\Testing\SimpleRequest $argument,
$metadata = [], $options = []) {
- return $this->_simpleRequest('/grpc.testing.BenchmarkService/UnaryCall',
- $argument,
- ['\Grpc\Testing\SimpleResponse', 'decode'],
- $metadata, $options);
+ return $this->_simpleRequest('/grpc.testing.BenchmarkService/UnaryCall',
+ $argument,
+ ['\Grpc\Testing\SimpleResponse', 'decode'],
+ $metadata, $options);
}
/**
- * One request followed by one response.
- * The server returns the client payload as-is.
+ * Repeated sequence of one request followed by one response.
+ * Should be called streaming ping-pong
+ * The server returns the client payload as-is on each response
* @param array $metadata metadata
* @param array $options call options
*/
public function StreamingCall($metadata = [], $options = []) {
- return $this->_bidiRequest('/grpc.testing.BenchmarkService/StreamingCall',
- ['\Grpc\Testing\SimpleResponse','decode'],
- $metadata, $options);
+ return $this->_bidiRequest('/grpc.testing.BenchmarkService/StreamingCall',
+ ['\Grpc\Testing\SimpleResponse','decode'],
+ $metadata, $options);
}
- }
+ /**
+ * Single-sided unbounded streaming from client to server
+ * The server returns the client payload as-is once the client does WritesDone
+ * @param array $metadata metadata
+ * @param array $options call options
+ */
+ public function StreamingFromClient($metadata = [], $options = []) {
+ return $this->_clientStreamRequest('/grpc.testing.BenchmarkService/StreamingFromClient',
+ ['\Grpc\Testing\SimpleResponse','decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Single-sided unbounded streaming from server to client
+ * The server repeatedly returns the client payload as-is
+ * @param \Grpc\Testing\SimpleRequest $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ */
+ public function StreamingFromServer(\Grpc\Testing\SimpleRequest $argument,
+ $metadata = [], $options = []) {
+ return $this->_serverStreamRequest('/grpc.testing.BenchmarkService/StreamingFromServer',
+ $argument,
+ ['\Grpc\Testing\SimpleResponse', 'decode'],
+ $metadata, $options);
+ }
+
+ /**
+ * Two-sided unbounded streaming between server to client
+ * Both sides send the content of their own choice to the other
+ * @param array $metadata metadata
+ * @param array $options call options
+ */
+ public function StreamingBothWays($metadata = [], $options = []) {
+ return $this->_bidiRequest('/grpc.testing.BenchmarkService/StreamingBothWays',
+ ['\Grpc\Testing\SimpleResponse','decode'],
+ $metadata, $options);
+ }
}
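For reference only: a hedged sketch of calling the three streaming methods added to BenchmarkServiceClient above. It assumes the grpc PHP extension is loaded, the generated classes are autoloadable, and a BenchmarkService worker is listening on localhost:10000 (the address is an assumption for illustration).

<?php
// Illustrative sketch, not part of the diff.
require_once __DIR__ . '/vendor/autoload.php';

$client = new \Grpc\Testing\BenchmarkServiceClient(
    'localhost:10000',
    ['credentials' => \Grpc\ChannelCredentials::createInsecure()]);

$request = new \Grpc\Testing\SimpleRequest();
$request->setResponseSize(64);

// Client-side streaming: write several requests, then wait for the one response.
$call = $client->StreamingFromClient();
for ($i = 0; $i < 3; $i++) {
    $call->write($request);
}
list($response, $status) = $call->wait();

// Server-side streaming: one request in, a stream of responses out.
$call = $client->StreamingFromServer($request);
foreach ($call->responses() as $response) {
    // consume each SimpleResponse as it arrives
}

// Bidirectional streaming mirrors the existing StreamingCall usage.
$call = $client->StreamingBothWays();
$call->write($request);
$call->writesDone();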
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/BoolValue.php b/src/php/tests/qps/generated_code/Grpc/Testing/BoolValue.php
index f0497accfb..7eb364b7a0 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/BoolValue.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/BoolValue.php
@@ -9,22 +9,18 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* TODO(dgq): Go back to using well-known types once
* https://github.com/grpc/grpc/issues/6980 has been fixed.
* import "google/protobuf/wrappers.proto";
- * </pre>
*
- * Protobuf type <code>grpc.testing.BoolValue</code>
+ * Generated from protobuf message <code>grpc.testing.BoolValue</code>
*/
class BoolValue extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* The bool value.
- * </pre>
*
- * <code>bool value = 1;</code>
+ * Generated from protobuf field <code>bool value = 1;</code>
*/
private $value = false;
@@ -34,11 +30,10 @@ class BoolValue extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The bool value.
- * </pre>
*
- * <code>bool value = 1;</code>
+ * Generated from protobuf field <code>bool value = 1;</code>
+ * @return bool
*/
public function getValue()
{
@@ -46,16 +41,18 @@ class BoolValue extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The bool value.
- * </pre>
*
- * <code>bool value = 1;</code>
+ * Generated from protobuf field <code>bool value = 1;</code>
+ * @param bool $var
+ * @return $this
*/
public function setValue($var)
{
GPBUtil::checkBool($var);
$this->value = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ByteBufferParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/ByteBufferParams.php
index 0057d38748..0511026ba7 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ByteBufferParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ByteBufferParams.php
@@ -9,16 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ByteBufferParams</code>
+ * Generated from protobuf message <code>grpc.testing.ByteBufferParams</code>
*/
class ByteBufferParams extends \Google\Protobuf\Internal\Message
{
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
*/
private $req_size = 0;
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
*/
private $resp_size = 0;
@@ -28,7 +28,8 @@ class ByteBufferParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
+ * @return int
*/
public function getReqSize()
{
@@ -36,16 +37,21 @@ class ByteBufferParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setReqSize($var)
{
GPBUtil::checkInt32($var);
$this->req_size = $var;
+
+ return $this;
}
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
+ * @return int
*/
public function getRespSize()
{
@@ -53,12 +59,16 @@ class ByteBufferParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setRespSize($var)
{
GPBUtil::checkInt32($var);
$this->resp_size = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ChannelArg.php b/src/php/tests/qps/generated_code/Grpc/Testing/ChannelArg.php
index d2fe3ae5ff..5c5fb861a4 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ChannelArg.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ChannelArg.php
@@ -9,12 +9,12 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ChannelArg</code>
+ * Generated from protobuf message <code>grpc.testing.ChannelArg</code>
*/
class ChannelArg extends \Google\Protobuf\Internal\Message
{
/**
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
*/
private $name = '';
protected $value;
@@ -25,7 +25,8 @@ class ChannelArg extends \Google\Protobuf\Internal\Message
}
/**
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @return string
*/
public function getName()
{
@@ -33,16 +34,21 @@ class ChannelArg extends \Google\Protobuf\Internal\Message
}
/**
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @param string $var
+ * @return $this
*/
public function setName($var)
{
GPBUtil::checkString($var, True);
$this->name = $var;
+
+ return $this;
}
/**
- * <code>string str_value = 2;</code>
+ * Generated from protobuf field <code>string str_value = 2;</code>
+ * @return string
*/
public function getStrValue()
{
@@ -50,16 +56,21 @@ class ChannelArg extends \Google\Protobuf\Internal\Message
}
/**
- * <code>string str_value = 2;</code>
+ * Generated from protobuf field <code>string str_value = 2;</code>
+ * @param string $var
+ * @return $this
*/
public function setStrValue($var)
{
GPBUtil::checkString($var, True);
$this->writeOneof(2, $var);
+
+ return $this;
}
/**
- * <code>int32 int_value = 3;</code>
+ * Generated from protobuf field <code>int32 int_value = 3;</code>
+ * @return int
*/
public function getIntValue()
{
@@ -67,14 +78,21 @@ class ChannelArg extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 int_value = 3;</code>
+ * Generated from protobuf field <code>int32 int_value = 3;</code>
+ * @param int $var
+ * @return $this
*/
public function setIntValue($var)
{
GPBUtil::checkInt32($var);
$this->writeOneof(3, $var);
+
+ return $this;
}
+ /**
+ * @return string
+ */
public function getValue()
{
return $this->whichOneof("value");
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClientArgs.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClientArgs.php
index c878c5a7bc..ee3fd46f0f 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClientArgs.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClientArgs.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ClientArgs</code>
+ * Generated from protobuf message <code>grpc.testing.ClientArgs</code>
*/
class ClientArgs extends \Google\Protobuf\Internal\Message
{
@@ -21,7 +21,8 @@ class ClientArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClientConfig setup = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientConfig setup = 1;</code>
+ * @return \Grpc\Testing\ClientConfig
*/
public function getSetup()
{
@@ -29,16 +30,21 @@ class ClientArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClientConfig setup = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientConfig setup = 1;</code>
+ * @param \Grpc\Testing\ClientConfig $var
+ * @return $this
*/
- public function setSetup(&$var)
+ public function setSetup($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ClientConfig::class);
$this->writeOneof(1, $var);
+
+ return $this;
}
/**
- * <code>.grpc.testing.Mark mark = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.Mark mark = 2;</code>
+ * @return \Grpc\Testing\Mark
*/
public function getMark()
{
@@ -46,14 +52,21 @@ class ClientArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.Mark mark = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.Mark mark = 2;</code>
+ * @param \Grpc\Testing\Mark $var
+ * @return $this
*/
- public function setMark(&$var)
+ public function setMark($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Mark::class);
$this->writeOneof(2, $var);
+
+ return $this;
}
+ /**
+ * @return string
+ */
public function getArgtype()
{
return $this->whichOneof("argtype");
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClientConfig.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClientConfig.php
index 52d6a75fb0..f7bc21587c 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClientConfig.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClientConfig.php
@@ -9,96 +9,94 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ClientConfig</code>
+ * Generated from protobuf message <code>grpc.testing.ClientConfig</code>
*/
class ClientConfig extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* List of targets to connect to. At least one target needs to be specified.
- * </pre>
*
- * <code>repeated string server_targets = 1;</code>
+ * Generated from protobuf field <code>repeated string server_targets = 1;</code>
*/
private $server_targets;
/**
- * <code>.grpc.testing.ClientType client_type = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientType client_type = 2;</code>
*/
private $client_type = 0;
/**
- * <code>.grpc.testing.SecurityParams security_params = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 3;</code>
*/
private $security_params = null;
/**
- * <pre>
* How many concurrent RPCs to start for each channel.
* For synchronous client, use a separate thread for each outstanding RPC.
- * </pre>
*
- * <code>int32 outstanding_rpcs_per_channel = 4;</code>
+ * Generated from protobuf field <code>int32 outstanding_rpcs_per_channel = 4;</code>
*/
private $outstanding_rpcs_per_channel = 0;
/**
- * <pre>
* Number of independent client channels to create.
* i-th channel will connect to server_target[i % server_targets.size()]
- * </pre>
*
- * <code>int32 client_channels = 5;</code>
+ * Generated from protobuf field <code>int32 client_channels = 5;</code>
*/
private $client_channels = 0;
/**
- * <pre>
* Only for async client. Number of threads to use to start/manage RPCs.
- * </pre>
*
- * <code>int32 async_client_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_client_threads = 7;</code>
*/
private $async_client_threads = 0;
/**
- * <code>.grpc.testing.RpcType rpc_type = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.RpcType rpc_type = 8;</code>
*/
private $rpc_type = 0;
/**
- * <pre>
* The requested load for the entire client (aggregated over all the threads).
- * </pre>
*
- * <code>.grpc.testing.LoadParams load_params = 10;</code>
+ * Generated from protobuf field <code>.grpc.testing.LoadParams load_params = 10;</code>
*/
private $load_params = null;
/**
- * <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
*/
private $payload_config = null;
/**
- * <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
*/
private $histogram_params = null;
/**
- * <pre>
* Specify the cores we should run the client on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 13;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 13;</code>
*/
private $core_list;
/**
- * <code>int32 core_limit = 14;</code>
+ * Generated from protobuf field <code>int32 core_limit = 14;</code>
*/
private $core_limit = 0;
/**
- * <pre>
* If we use an OTHER_CLIENT client_type, this string gives more detail
- * </pre>
*
- * <code>string other_client_api = 15;</code>
+ * Generated from protobuf field <code>string other_client_api = 15;</code>
*/
private $other_client_api = '';
/**
- * <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
*/
private $channel_args;
+ /**
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 17;</code>
+ */
+ private $threads_per_cq = 0;
+ /**
+ * Number of messages on a stream before it gets finished/restarted
+ *
+ * Generated from protobuf field <code>int32 messages_per_stream = 18;</code>
+ */
+ private $messages_per_stream = 0;
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
@@ -106,11 +104,10 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* List of targets to connect to. At least one target needs to be specified.
- * </pre>
*
- * <code>repeated string server_targets = 1;</code>
+ * Generated from protobuf field <code>repeated string server_targets = 1;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getServerTargets()
{
@@ -118,20 +115,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* List of targets to connect to. At least one target needs to be specified.
- * </pre>
*
- * <code>repeated string server_targets = 1;</code>
+ * Generated from protobuf field <code>repeated string server_targets = 1;</code>
+ * @param string[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setServerTargets(&$var)
+ public function setServerTargets($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING);
- $this->server_targets = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING);
+ $this->server_targets = $arr;
+
+ return $this;
}
/**
- * <code>.grpc.testing.ClientType client_type = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientType client_type = 2;</code>
+ * @return int
*/
public function getClientType()
{
@@ -139,16 +139,21 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClientType client_type = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientType client_type = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setClientType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\ClientType::class);
$this->client_type = $var;
+
+ return $this;
}
/**
- * <code>.grpc.testing.SecurityParams security_params = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 3;</code>
+ * @return \Grpc\Testing\SecurityParams
*/
public function getSecurityParams()
{
@@ -156,21 +161,24 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.SecurityParams security_params = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 3;</code>
+ * @param \Grpc\Testing\SecurityParams $var
+ * @return $this
*/
- public function setSecurityParams(&$var)
+ public function setSecurityParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\SecurityParams::class);
$this->security_params = $var;
+
+ return $this;
}
/**
- * <pre>
* How many concurrent RPCs to start for each channel.
* For synchronous client, use a separate thread for each outstanding RPC.
- * </pre>
*
- * <code>int32 outstanding_rpcs_per_channel = 4;</code>
+ * Generated from protobuf field <code>int32 outstanding_rpcs_per_channel = 4;</code>
+ * @return int
*/
public function getOutstandingRpcsPerChannel()
{
@@ -178,26 +186,27 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* How many concurrent RPCs to start for each channel.
* For synchronous client, use a separate thread for each outstanding RPC.
- * </pre>
*
- * <code>int32 outstanding_rpcs_per_channel = 4;</code>
+ * Generated from protobuf field <code>int32 outstanding_rpcs_per_channel = 4;</code>
+ * @param int $var
+ * @return $this
*/
public function setOutstandingRpcsPerChannel($var)
{
GPBUtil::checkInt32($var);
$this->outstanding_rpcs_per_channel = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of independent client channels to create.
* i-th channel will connect to server_target[i % server_targets.size()]
- * </pre>
*
- * <code>int32 client_channels = 5;</code>
+ * Generated from protobuf field <code>int32 client_channels = 5;</code>
+ * @return int
*/
public function getClientChannels()
{
@@ -205,25 +214,26 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of independent client channels to create.
* i-th channel will connect to server_target[i % server_targets.size()]
- * </pre>
*
- * <code>int32 client_channels = 5;</code>
+ * Generated from protobuf field <code>int32 client_channels = 5;</code>
+ * @param int $var
+ * @return $this
*/
public function setClientChannels($var)
{
GPBUtil::checkInt32($var);
$this->client_channels = $var;
+
+ return $this;
}
/**
- * <pre>
* Only for async client. Number of threads to use to start/manage RPCs.
- * </pre>
*
- * <code>int32 async_client_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_client_threads = 7;</code>
+ * @return int
*/
public function getAsyncClientThreads()
{
@@ -231,20 +241,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Only for async client. Number of threads to use to start/manage RPCs.
- * </pre>
*
- * <code>int32 async_client_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_client_threads = 7;</code>
+ * @param int $var
+ * @return $this
*/
public function setAsyncClientThreads($var)
{
GPBUtil::checkInt32($var);
$this->async_client_threads = $var;
+
+ return $this;
}
/**
- * <code>.grpc.testing.RpcType rpc_type = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.RpcType rpc_type = 8;</code>
+ * @return int
*/
public function getRpcType()
{
@@ -252,20 +265,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.RpcType rpc_type = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.RpcType rpc_type = 8;</code>
+ * @param int $var
+ * @return $this
*/
public function setRpcType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\RpcType::class);
$this->rpc_type = $var;
+
+ return $this;
}
/**
- * <pre>
* The requested load for the entire client (aggregated over all the threads).
- * </pre>
*
- * <code>.grpc.testing.LoadParams load_params = 10;</code>
+ * Generated from protobuf field <code>.grpc.testing.LoadParams load_params = 10;</code>
+ * @return \Grpc\Testing\LoadParams
*/
public function getLoadParams()
{
@@ -273,20 +289,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The requested load for the entire client (aggregated over all the threads).
- * </pre>
*
- * <code>.grpc.testing.LoadParams load_params = 10;</code>
+ * Generated from protobuf field <code>.grpc.testing.LoadParams load_params = 10;</code>
+ * @param \Grpc\Testing\LoadParams $var
+ * @return $this
*/
- public function setLoadParams(&$var)
+ public function setLoadParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\LoadParams::class);
$this->load_params = $var;
+
+ return $this;
}
/**
- * <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
+ * @return \Grpc\Testing\PayloadConfig
*/
public function getPayloadConfig()
{
@@ -294,16 +313,21 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 11;</code>
+ * @param \Grpc\Testing\PayloadConfig $var
+ * @return $this
*/
- public function setPayloadConfig(&$var)
+ public function setPayloadConfig($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\PayloadConfig::class);
$this->payload_config = $var;
+
+ return $this;
}
/**
- * <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
+ * @return \Grpc\Testing\HistogramParams
*/
public function getHistogramParams()
{
@@ -311,20 +335,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramParams histogram_params = 12;</code>
+ * @param \Grpc\Testing\HistogramParams $var
+ * @return $this
*/
- public function setHistogramParams(&$var)
+ public function setHistogramParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\HistogramParams::class);
$this->histogram_params = $var;
+
+ return $this;
}
/**
- * <pre>
* Specify the cores we should run the client on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 13;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 13;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getCoreList()
{
@@ -332,20 +359,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Specify the cores we should run the client on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 13;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 13;</code>
+ * @param int[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setCoreList(&$var)
+ public function setCoreList($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
- $this->core_list = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
+ $this->core_list = $arr;
+
+ return $this;
}
/**
- * <code>int32 core_limit = 14;</code>
+ * Generated from protobuf field <code>int32 core_limit = 14;</code>
+ * @return int
*/
public function getCoreLimit()
{
@@ -353,20 +383,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 core_limit = 14;</code>
+ * Generated from protobuf field <code>int32 core_limit = 14;</code>
+ * @param int $var
+ * @return $this
*/
public function setCoreLimit($var)
{
GPBUtil::checkInt32($var);
$this->core_limit = $var;
+
+ return $this;
}
/**
- * <pre>
* If we use an OTHER_CLIENT client_type, this string gives more detail
- * </pre>
*
- * <code>string other_client_api = 15;</code>
+ * Generated from protobuf field <code>string other_client_api = 15;</code>
+ * @return string
*/
public function getOtherClientApi()
{
@@ -374,20 +407,23 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* If we use an OTHER_CLIENT client_type, this string gives more detail
- * </pre>
*
- * <code>string other_client_api = 15;</code>
+ * Generated from protobuf field <code>string other_client_api = 15;</code>
+ * @param string $var
+ * @return $this
*/
public function setOtherClientApi($var)
{
GPBUtil::checkString($var, True);
$this->other_client_api = $var;
+
+ return $this;
}
/**
- * <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getChannelArgs()
{
@@ -395,12 +431,68 @@ class ClientConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 16;</code>
+ * @param \Grpc\Testing\ChannelArg[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setChannelArgs($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ChannelArg::class);
+ $this->channel_args = $arr;
+
+ return $this;
+ }
+
+ /**
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 17;</code>
+ * @return int
+ */
+ public function getThreadsPerCq()
+ {
+ return $this->threads_per_cq;
+ }
+
+ /**
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 17;</code>
+ * @param int $var
+ * @return $this
+ */
+ public function setThreadsPerCq($var)
+ {
+ GPBUtil::checkInt32($var);
+ $this->threads_per_cq = $var;
+
+ return $this;
+ }
+
+ /**
+ * Number of messages on a stream before it gets finished/restarted
+ *
+ * Generated from protobuf field <code>int32 messages_per_stream = 18;</code>
+ * @return int
+ */
+ public function getMessagesPerStream()
+ {
+ return $this->messages_per_stream;
+ }
+
+ /**
+ * Number of messages on a stream before it gets finished/restarted
+ *
+ * Generated from protobuf field <code>int32 messages_per_stream = 18;</code>
+ * @param int $var
+ * @return $this
*/
- public function setChannelArgs(&$var)
+ public function setMessagesPerStream($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ChannelArg::class);
- $this->channel_args = $var;
+ GPBUtil::checkInt32($var);
+ $this->messages_per_stream = $var;
+
+ return $this;
}
}
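For reference only: since every setter in the regenerated ClientConfig now returns $this, configuration can be chained fluently, including the new threads_per_cq and messages_per_stream fields. A minimal sketch, assuming the generated classes are autoloadable; the target address is illustrative.

<?php
// Illustrative sketch, not part of the diff.
require_once __DIR__ . '/vendor/autoload.php';

$config = new \Grpc\Testing\ClientConfig();
$config->setServerTargets(['localhost:10000'])
       ->setClientChannels(4)
       ->setOutstandingRpcsPerChannel(10)
       ->setThreadsPerCq(2)           // new field 17
       ->setMessagesPerStream(100);   // new field 18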
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClientStats.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClientStats.php
index 8b9a0c33a4..f2a7621791 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClientStats.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClientStats.php
@@ -9,42 +9,48 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ClientStats</code>
+ * Generated from protobuf message <code>grpc.testing.ClientStats</code>
*/
class ClientStats extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Latency histogram. Data points are in nanoseconds.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 1;</code>
*/
private $latencies = null;
/**
- * <pre>
* See ServerStats for details.
- * </pre>
*
- * <code>double time_elapsed = 2;</code>
+ * Generated from protobuf field <code>double time_elapsed = 2;</code>
*/
private $time_elapsed = 0.0;
/**
- * <code>double time_user = 3;</code>
+ * Generated from protobuf field <code>double time_user = 3;</code>
*/
private $time_user = 0.0;
/**
- * <code>double time_system = 4;</code>
+ * Generated from protobuf field <code>double time_system = 4;</code>
*/
private $time_system = 0.0;
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
*/
private $request_results;
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ */
+ private $cq_poll_count = 0;
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ */
+ private $core_stats = null;
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Stats::initOnce();
@@ -52,11 +58,10 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Latency histogram. Data points are in nanoseconds.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 1;</code>
+ * @return \Grpc\Testing\HistogramData
*/
public function getLatencies()
{
@@ -64,24 +69,25 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Latency histogram. Data points are in nanoseconds.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 1;</code>
+ * @param \Grpc\Testing\HistogramData $var
+ * @return $this
*/
- public function setLatencies(&$var)
+ public function setLatencies($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\HistogramData::class);
$this->latencies = $var;
+
+ return $this;
}
/**
- * <pre>
* See ServerStats for details.
- * </pre>
*
- * <code>double time_elapsed = 2;</code>
+ * Generated from protobuf field <code>double time_elapsed = 2;</code>
+ * @return float
*/
public function getTimeElapsed()
{
@@ -89,20 +95,23 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* See ServerStats for details.
- * </pre>
*
- * <code>double time_elapsed = 2;</code>
+ * Generated from protobuf field <code>double time_elapsed = 2;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeElapsed($var)
{
GPBUtil::checkDouble($var);
$this->time_elapsed = $var;
+
+ return $this;
}
/**
- * <code>double time_user = 3;</code>
+ * Generated from protobuf field <code>double time_user = 3;</code>
+ * @return float
*/
public function getTimeUser()
{
@@ -110,16 +119,21 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double time_user = 3;</code>
+ * Generated from protobuf field <code>double time_user = 3;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeUser($var)
{
GPBUtil::checkDouble($var);
$this->time_user = $var;
+
+ return $this;
}
/**
- * <code>double time_system = 4;</code>
+ * Generated from protobuf field <code>double time_system = 4;</code>
+ * @return float
*/
public function getTimeSystem()
{
@@ -127,20 +141,23 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double time_system = 4;</code>
+ * Generated from protobuf field <code>double time_system = 4;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeSystem($var)
{
GPBUtil::checkDouble($var);
$this->time_system = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getRequestResults()
{
@@ -148,16 +165,70 @@ class ClientStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 5;</code>
+ * @param \Grpc\Testing\RequestResultCount[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setRequestResults(&$var)
+ public function setRequestResults($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\RequestResultCount::class);
- $this->request_results = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\RequestResultCount::class);
+ $this->request_results = $arr;
+
+ return $this;
+ }
+
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ * @return int|string
+ */
+ public function getCqPollCount()
+ {
+ return $this->cq_poll_count;
+ }
+
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ * @param int|string $var
+ * @return $this
+ */
+ public function setCqPollCount($var)
+ {
+ GPBUtil::checkUint64($var);
+ $this->cq_poll_count = $var;
+
+ return $this;
+ }
+
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ * @return \Grpc\Core\Stats
+ */
+ public function getCoreStats()
+ {
+ return $this->core_stats;
+ }
+
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ * @param \Grpc\Core\Stats $var
+ * @return $this
+ */
+ public function setCoreStats($var)
+ {
+ GPBUtil::checkMessage($var, \Grpc\Core\Stats::class);
+ $this->core_stats = $var;
+
+ return $this;
}
}
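For reference only: a short sketch of populating the two fields added to ClientStats above, cq_poll_count and the grpc.core.Stats-typed core_stats. Assumes the generated classes are autoloadable; values are placeholders.

<?php
// Illustrative sketch, not part of the diff.
require_once __DIR__ . '/vendor/autoload.php';

$clientStats = new \Grpc\Testing\ClientStats();
$clientStats->setTimeElapsed(1.5)
            ->setCqPollCount(100000)                  // new field 6
            ->setCoreStats(new \Grpc\Core\Stats());   // new field 7

echo $clientStats->getCqPollCount(), "\n";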
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClientStatus.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClientStatus.php
index a59f87a962..3ea40c4dfa 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClientStatus.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClientStatus.php
@@ -9,12 +9,12 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ClientStatus</code>
+ * Generated from protobuf message <code>grpc.testing.ClientStatus</code>
*/
class ClientStatus extends \Google\Protobuf\Internal\Message
{
/**
- * <code>.grpc.testing.ClientStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientStats stats = 1;</code>
*/
private $stats = null;
@@ -24,7 +24,8 @@ class ClientStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClientStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientStats stats = 1;</code>
+ * @return \Grpc\Testing\ClientStats
*/
public function getStats()
{
@@ -32,12 +33,16 @@ class ClientStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClientStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientStats stats = 1;</code>
+ * @param \Grpc\Testing\ClientStats $var
+ * @return $this
*/
- public function setStats(&$var)
+ public function setStats($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ClientStats::class);
$this->stats = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClientType.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClientType.php
index 4f59da992f..d1df4f1943 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClientType.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClientType.php
@@ -5,29 +5,25 @@
namespace Grpc\Testing;
/**
- * Protobuf enum <code>grpc.testing.ClientType</code>
+ * Protobuf enum <code>Grpc\Testing\ClientType</code>
*/
class ClientType
{
/**
- * <pre>
* Many languages support a basic distinction between using
* sync or async client, and this allows the specification
- * </pre>
*
- * <code>SYNC_CLIENT = 0;</code>
+ * Generated from protobuf enum <code>SYNC_CLIENT = 0;</code>
*/
const SYNC_CLIENT = 0;
/**
- * <code>ASYNC_CLIENT = 1;</code>
+ * Generated from protobuf enum <code>ASYNC_CLIENT = 1;</code>
*/
const ASYNC_CLIENT = 1;
/**
- * <pre>
* used for some language-specific variants
- * </pre>
*
- * <code>OTHER_CLIENT = 2;</code>
+ * Generated from protobuf enum <code>OTHER_CLIENT = 2;</code>
*/
const OTHER_CLIENT = 2;
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ClosedLoopParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/ClosedLoopParams.php
index 53f2948af2..2772836f13 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ClosedLoopParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ClosedLoopParams.php
@@ -9,12 +9,10 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Once an RPC finishes, immediately start a new one.
* No configuration parameters needed.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ClosedLoopParams</code>
+ * Generated from protobuf message <code>grpc.testing.ClosedLoopParams</code>
*/
class ClosedLoopParams extends \Google\Protobuf\Internal\Message
{
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ComplexProtoParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/ComplexProtoParams.php
index 6d990f1b06..b9013cdb30 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ComplexProtoParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ComplexProtoParams.php
@@ -9,12 +9,10 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* TODO (vpai): Fill this in once the details of complex, representative
* protos are decided
- * </pre>
*
- * Protobuf type <code>grpc.testing.ComplexProtoParams</code>
+ * Generated from protobuf message <code>grpc.testing.ComplexProtoParams</code>
*/
class ComplexProtoParams extends \Google\Protobuf\Internal\Message
{
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/CoreRequest.php b/src/php/tests/qps/generated_code/Grpc/Testing/CoreRequest.php
index 2e078b3fcd..7772572f1c 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/CoreRequest.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/CoreRequest.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.CoreRequest</code>
+ * Generated from protobuf message <code>grpc.testing.CoreRequest</code>
*/
class CoreRequest extends \Google\Protobuf\Internal\Message
{
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/CoreResponse.php b/src/php/tests/qps/generated_code/Grpc/Testing/CoreResponse.php
index 85cb3418ad..e0b40ee300 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/CoreResponse.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/CoreResponse.php
@@ -9,16 +9,14 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.CoreResponse</code>
+ * Generated from protobuf message <code>grpc.testing.CoreResponse</code>
*/
class CoreResponse extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Number of cores available on the server
- * </pre>
*
- * <code>int32 cores = 1;</code>
+ * Generated from protobuf field <code>int32 cores = 1;</code>
*/
private $cores = 0;
@@ -28,11 +26,10 @@ class CoreResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of cores available on the server
- * </pre>
*
- * <code>int32 cores = 1;</code>
+ * Generated from protobuf field <code>int32 cores = 1;</code>
+ * @return int
*/
public function getCores()
{
@@ -40,16 +37,18 @@ class CoreResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of cores available on the server
- * </pre>
*
- * <code>int32 cores = 1;</code>
+ * Generated from protobuf field <code>int32 cores = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setCores($var)
{
GPBUtil::checkInt32($var);
$this->cores = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/EchoStatus.php b/src/php/tests/qps/generated_code/Grpc/Testing/EchoStatus.php
index 27340fb0ef..6a6623a042 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/EchoStatus.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/EchoStatus.php
@@ -9,21 +9,19 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* A protobuf representation for grpc status. This is used by test
* clients to specify a status that the server should attempt to return.
- * </pre>
*
- * Protobuf type <code>grpc.testing.EchoStatus</code>
+ * Generated from protobuf message <code>grpc.testing.EchoStatus</code>
*/
class EchoStatus extends \Google\Protobuf\Internal\Message
{
/**
- * <code>int32 code = 1;</code>
+ * Generated from protobuf field <code>int32 code = 1;</code>
*/
private $code = 0;
/**
- * <code>string message = 2;</code>
+ * Generated from protobuf field <code>string message = 2;</code>
*/
private $message = '';
@@ -33,7 +31,8 @@ class EchoStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 code = 1;</code>
+ * Generated from protobuf field <code>int32 code = 1;</code>
+ * @return int
*/
public function getCode()
{
@@ -41,16 +40,21 @@ class EchoStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 code = 1;</code>
+ * Generated from protobuf field <code>int32 code = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setCode($var)
{
GPBUtil::checkInt32($var);
$this->code = $var;
+
+ return $this;
}
/**
- * <code>string message = 2;</code>
+ * Generated from protobuf field <code>string message = 2;</code>
+ * @return string
*/
public function getMessage()
{
@@ -58,12 +62,16 @@ class EchoStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>string message = 2;</code>
+ * Generated from protobuf field <code>string message = 2;</code>
+ * @param string $var
+ * @return $this
*/
public function setMessage($var)
{
GPBUtil::checkString($var, True);
$this->message = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/HistogramData.php b/src/php/tests/qps/generated_code/Grpc/Testing/HistogramData.php
index 056da6e5de..136eac75e2 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/HistogramData.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/HistogramData.php
@@ -9,36 +9,34 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Histogram data based on grpc/support/histogram.c
- * </pre>
*
- * Protobuf type <code>grpc.testing.HistogramData</code>
+ * Generated from protobuf message <code>grpc.testing.HistogramData</code>
*/
class HistogramData extends \Google\Protobuf\Internal\Message
{
/**
- * <code>repeated uint32 bucket = 1;</code>
+ * Generated from protobuf field <code>repeated uint32 bucket = 1;</code>
*/
private $bucket;
/**
- * <code>double min_seen = 2;</code>
+ * Generated from protobuf field <code>double min_seen = 2;</code>
*/
private $min_seen = 0.0;
/**
- * <code>double max_seen = 3;</code>
+ * Generated from protobuf field <code>double max_seen = 3;</code>
*/
private $max_seen = 0.0;
/**
- * <code>double sum = 4;</code>
+ * Generated from protobuf field <code>double sum = 4;</code>
*/
private $sum = 0.0;
/**
- * <code>double sum_of_squares = 5;</code>
+ * Generated from protobuf field <code>double sum_of_squares = 5;</code>
*/
private $sum_of_squares = 0.0;
/**
- * <code>double count = 6;</code>
+ * Generated from protobuf field <code>double count = 6;</code>
*/
private $count = 0.0;
@@ -48,7 +46,8 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated uint32 bucket = 1;</code>
+ * Generated from protobuf field <code>repeated uint32 bucket = 1;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getBucket()
{
@@ -56,16 +55,21 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated uint32 bucket = 1;</code>
+ * Generated from protobuf field <code>repeated uint32 bucket = 1;</code>
+ * @param int[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setBucket(&$var)
+ public function setBucket($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::UINT32);
- $this->bucket = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::UINT32);
+ $this->bucket = $arr;
+
+ return $this;
}
/**
- * <code>double min_seen = 2;</code>
+ * Generated from protobuf field <code>double min_seen = 2;</code>
+ * @return float
*/
public function getMinSeen()
{
@@ -73,16 +77,21 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double min_seen = 2;</code>
+ * Generated from protobuf field <code>double min_seen = 2;</code>
+ * @param float $var
+ * @return $this
*/
public function setMinSeen($var)
{
GPBUtil::checkDouble($var);
$this->min_seen = $var;
+
+ return $this;
}
/**
- * <code>double max_seen = 3;</code>
+ * Generated from protobuf field <code>double max_seen = 3;</code>
+ * @return float
*/
public function getMaxSeen()
{
@@ -90,16 +99,21 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double max_seen = 3;</code>
+ * Generated from protobuf field <code>double max_seen = 3;</code>
+ * @param float $var
+ * @return $this
*/
public function setMaxSeen($var)
{
GPBUtil::checkDouble($var);
$this->max_seen = $var;
+
+ return $this;
}
/**
- * <code>double sum = 4;</code>
+ * Generated from protobuf field <code>double sum = 4;</code>
+ * @return float
*/
public function getSum()
{
@@ -107,16 +121,21 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double sum = 4;</code>
+ * Generated from protobuf field <code>double sum = 4;</code>
+ * @param float $var
+ * @return $this
*/
public function setSum($var)
{
GPBUtil::checkDouble($var);
$this->sum = $var;
+
+ return $this;
}
/**
- * <code>double sum_of_squares = 5;</code>
+ * Generated from protobuf field <code>double sum_of_squares = 5;</code>
+ * @return float
*/
public function getSumOfSquares()
{
@@ -124,16 +143,21 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double sum_of_squares = 5;</code>
+ * Generated from protobuf field <code>double sum_of_squares = 5;</code>
+ * @param float $var
+ * @return $this
*/
public function setSumOfSquares($var)
{
GPBUtil::checkDouble($var);
$this->sum_of_squares = $var;
+
+ return $this;
}
/**
- * <code>double count = 6;</code>
+ * Generated from protobuf field <code>double count = 6;</code>
+ * @return float
*/
public function getCount()
{
@@ -141,12 +165,16 @@ class HistogramData extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double count = 6;</code>
+ * Generated from protobuf field <code>double count = 6;</code>
+ * @param float $var
+ * @return $this
*/
public function setCount($var)
{
GPBUtil::checkDouble($var);
$this->count = $var;
+
+ return $this;
}
}
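setBucket() above now takes its argument by value and stores the RepeatedField returned by GPBUtil::checkRepeatedField(), so callers may pass a plain PHP array. A hedged sketch of what that enables (same autoloading assumption as before):

<?php
// Sketch only: a plain int[] is checked and wrapped into a RepeatedField internally.
use Grpc\Testing\HistogramData;

$histogram = (new HistogramData())
    ->setBucket([0, 3, 7, 12])
    ->setMinSeen(0.5)
    ->setMaxSeen(42.0);

// getBucket() returns a \Google\Protobuf\Internal\RepeatedField, which is iterable.
foreach ($histogram->getBucket() as $count) {
    echo $count, PHP_EOL;
}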
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/HistogramParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/HistogramParams.php
index 836c94b01d..1a1b484f14 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/HistogramParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/HistogramParams.php
@@ -9,28 +9,22 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Histogram params based on grpc/support/histogram.c
- * </pre>
*
- * Protobuf type <code>grpc.testing.HistogramParams</code>
+ * Generated from protobuf message <code>grpc.testing.HistogramParams</code>
*/
class HistogramParams extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* first bucket is [0, 1 + resolution)
- * </pre>
*
- * <code>double resolution = 1;</code>
+ * Generated from protobuf field <code>double resolution = 1;</code>
*/
private $resolution = 0.0;
/**
- * <pre>
* use enough buckets to allow this value
- * </pre>
*
- * <code>double max_possible = 2;</code>
+ * Generated from protobuf field <code>double max_possible = 2;</code>
*/
private $max_possible = 0.0;
@@ -40,11 +34,10 @@ class HistogramParams extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* first bucket is [0, 1 + resolution)
- * </pre>
*
- * <code>double resolution = 1;</code>
+ * Generated from protobuf field <code>double resolution = 1;</code>
+ * @return float
*/
public function getResolution()
{
@@ -52,24 +45,25 @@ class HistogramParams extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* first bucket is [0, 1 + resolution)
- * </pre>
*
- * <code>double resolution = 1;</code>
+ * Generated from protobuf field <code>double resolution = 1;</code>
+ * @param float $var
+ * @return $this
*/
public function setResolution($var)
{
GPBUtil::checkDouble($var);
$this->resolution = $var;
+
+ return $this;
}
/**
- * <pre>
* use enough buckets to allow this value
- * </pre>
*
- * <code>double max_possible = 2;</code>
+ * Generated from protobuf field <code>double max_possible = 2;</code>
+ * @return float
*/
public function getMaxPossible()
{
@@ -77,16 +71,18 @@ class HistogramParams extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* use enough buckets to allow this value
- * </pre>
*
- * <code>double max_possible = 2;</code>
+ * Generated from protobuf field <code>double max_possible = 2;</code>
+ * @param float $var
+ * @return $this
*/
public function setMaxPossible($var)
{
GPBUtil::checkDouble($var);
$this->max_possible = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/LoadParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/LoadParams.php
index 1f32e49c8a..04c345f242 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/LoadParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/LoadParams.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.LoadParams</code>
+ * Generated from protobuf message <code>grpc.testing.LoadParams</code>
*/
class LoadParams extends \Google\Protobuf\Internal\Message
{
@@ -21,7 +21,8 @@ class LoadParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClosedLoopParams closed_loop = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClosedLoopParams closed_loop = 1;</code>
+ * @return \Grpc\Testing\ClosedLoopParams
*/
public function getClosedLoop()
{
@@ -29,16 +30,21 @@ class LoadParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ClosedLoopParams closed_loop = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClosedLoopParams closed_loop = 1;</code>
+ * @param \Grpc\Testing\ClosedLoopParams $var
+ * @return $this
*/
- public function setClosedLoop(&$var)
+ public function setClosedLoop($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ClosedLoopParams::class);
$this->writeOneof(1, $var);
+
+ return $this;
}
/**
- * <code>.grpc.testing.PoissonParams poisson = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.PoissonParams poisson = 2;</code>
+ * @return \Grpc\Testing\PoissonParams
*/
public function getPoisson()
{
@@ -46,14 +52,21 @@ class LoadParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.PoissonParams poisson = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.PoissonParams poisson = 2;</code>
+ * @param \Grpc\Testing\PoissonParams $var
+ * @return $this
*/
- public function setPoisson(&$var)
+ public function setPoisson($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\PoissonParams::class);
$this->writeOneof(2, $var);
+
+ return $this;
}
+ /**
+ * @return string
+ */
public function getLoad()
{
return $this->whichOneof("load");
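LoadParams keeps its load as a oneof: setClosedLoop()/setPoisson() write through writeOneof(), and the newly documented getLoad() reports which member is populated. A sketch of how that reads in caller code (classes assumed autoloadable):

<?php
// Sketch only: getLoad() returns the name of the oneof field that is currently set.
use Grpc\Testing\LoadParams;
use Grpc\Testing\PoissonParams;

$load = (new LoadParams())
    ->setPoisson((new PoissonParams())->setOfferedLoad(1000.0));

echo $load->getLoad(), PHP_EOL;              // "poisson"
echo $load->getPoisson()->getOfferedLoad();  // 1000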
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/Mark.php b/src/php/tests/qps/generated_code/Grpc/Testing/Mark.php
index ce006efacd..be058d51be 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/Mark.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/Mark.php
@@ -9,20 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Request current stats
- * </pre>
*
- * Protobuf type <code>grpc.testing.Mark</code>
+ * Generated from protobuf message <code>grpc.testing.Mark</code>
*/
class Mark extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* if true, the stats will be reset after taking their snapshot.
- * </pre>
*
- * <code>bool reset = 1;</code>
+ * Generated from protobuf field <code>bool reset = 1;</code>
*/
private $reset = false;
@@ -32,11 +28,10 @@ class Mark extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* if true, the stats will be reset after taking their snapshot.
- * </pre>
*
- * <code>bool reset = 1;</code>
+ * Generated from protobuf field <code>bool reset = 1;</code>
+ * @return bool
*/
public function getReset()
{
@@ -44,16 +39,18 @@ class Mark extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* if true, the stats will be reset after taking their snapshot.
- * </pre>
*
- * <code>bool reset = 1;</code>
+ * Generated from protobuf field <code>bool reset = 1;</code>
+ * @param bool $var
+ * @return $this
*/
public function setReset($var)
{
GPBUtil::checkBool($var);
$this->reset = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/Payload.php b/src/php/tests/qps/generated_code/Grpc/Testing/Payload.php
index d17c271af7..ad97890c93 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/Payload.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/Payload.php
@@ -9,29 +9,23 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* A block of data, to simply increase gRPC message size.
- * </pre>
*
- * Protobuf type <code>grpc.testing.Payload</code>
+ * Generated from protobuf message <code>grpc.testing.Payload</code>
*/
class Payload extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* The type of data in body.
- * </pre>
*
- * <code>.grpc.testing.PayloadType type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType type = 1;</code>
*/
private $type = 0;
/**
- * <pre>
* Primary contents of payload.
- * </pre>
*
- * <code>bytes body = 2;</code>
+ * Generated from protobuf field <code>bytes body = 2;</code>
*/
private $body = '';
@@ -41,12 +35,11 @@ class Payload extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* The type of data in body.
- * </pre>
*
- * <code>.grpc.testing.PayloadType type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType type = 1;</code>
+ * @return int
*/
public function getType()
{
@@ -54,25 +47,26 @@ class Payload extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* The type of data in body.
- * </pre>
*
- * <code>.grpc.testing.PayloadType type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType type = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\PayloadType::class);
$this->type = $var;
+
+ return $this;
}
/**
- * <pre>
* Primary contents of payload.
- * </pre>
*
- * <code>bytes body = 2;</code>
+ * Generated from protobuf field <code>bytes body = 2;</code>
+ * @return string
*/
public function getBody()
{
@@ -80,16 +74,18 @@ class Payload extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Primary contents of payload.
- * </pre>
*
- * <code>bytes body = 2;</code>
+ * Generated from protobuf field <code>bytes body = 2;</code>
+ * @param string $var
+ * @return $this
*/
public function setBody($var)
{
GPBUtil::checkString($var, False);
$this->body = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/PayloadConfig.php b/src/php/tests/qps/generated_code/Grpc/Testing/PayloadConfig.php
index a2fe7109ba..748f52da82 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/PayloadConfig.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/PayloadConfig.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.PayloadConfig</code>
+ * Generated from protobuf message <code>grpc.testing.PayloadConfig</code>
*/
class PayloadConfig extends \Google\Protobuf\Internal\Message
{
@@ -21,7 +21,8 @@ class PayloadConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ByteBufferParams bytebuf_params = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ByteBufferParams bytebuf_params = 1;</code>
+ * @return \Grpc\Testing\ByteBufferParams
*/
public function getBytebufParams()
{
@@ -29,16 +30,21 @@ class PayloadConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ByteBufferParams bytebuf_params = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ByteBufferParams bytebuf_params = 1;</code>
+ * @param \Grpc\Testing\ByteBufferParams $var
+ * @return $this
*/
- public function setBytebufParams(&$var)
+ public function setBytebufParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ByteBufferParams::class);
$this->writeOneof(1, $var);
+
+ return $this;
}
/**
- * <code>.grpc.testing.SimpleProtoParams simple_params = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.SimpleProtoParams simple_params = 2;</code>
+ * @return \Grpc\Testing\SimpleProtoParams
*/
public function getSimpleParams()
{
@@ -46,16 +52,21 @@ class PayloadConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.SimpleProtoParams simple_params = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.SimpleProtoParams simple_params = 2;</code>
+ * @param \Grpc\Testing\SimpleProtoParams $var
+ * @return $this
*/
- public function setSimpleParams(&$var)
+ public function setSimpleParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\SimpleProtoParams::class);
$this->writeOneof(2, $var);
+
+ return $this;
}
/**
- * <code>.grpc.testing.ComplexProtoParams complex_params = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.ComplexProtoParams complex_params = 3;</code>
+ * @return \Grpc\Testing\ComplexProtoParams
*/
public function getComplexParams()
{
@@ -63,14 +74,21 @@ class PayloadConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ComplexProtoParams complex_params = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.ComplexProtoParams complex_params = 3;</code>
+ * @param \Grpc\Testing\ComplexProtoParams $var
+ * @return $this
*/
- public function setComplexParams(&$var)
+ public function setComplexParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ComplexProtoParams::class);
$this->writeOneof(3, $var);
+
+ return $this;
}
+ /**
+ * @return string
+ */
public function getPayload()
{
return $this->whichOneof("payload");
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/PayloadType.php b/src/php/tests/qps/generated_code/Grpc/Testing/PayloadType.php
index 189ef034b4..d8df1af798 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/PayloadType.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/PayloadType.php
@@ -5,21 +5,17 @@
namespace Grpc\Testing;
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* The type of payload that should be returned.
- * </pre>
*
- * Protobuf enum <code>grpc.testing.PayloadType</code>
+ * Protobuf enum <code>Grpc\Testing\PayloadType</code>
*/
class PayloadType
{
/**
- * <pre>
* Compressable text format.
- * </pre>
*
- * <code>COMPRESSABLE = 0;</code>
+ * Generated from protobuf enum <code>COMPRESSABLE = 0;</code>
*/
const COMPRESSABLE = 0;
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/PoissonParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/PoissonParams.php
index d64edd45f0..6a4047f2ec 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/PoissonParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/PoissonParams.php
@@ -9,21 +9,17 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Parameters of poisson process distribution, which is a good representation
* of activity coming in from independent identical stationary sources.
- * </pre>
*
- * Protobuf type <code>grpc.testing.PoissonParams</code>
+ * Generated from protobuf message <code>grpc.testing.PoissonParams</code>
*/
class PoissonParams extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
- * </pre>
*
- * <code>double offered_load = 1;</code>
+ * Generated from protobuf field <code>double offered_load = 1;</code>
*/
private $offered_load = 0.0;
@@ -33,11 +29,10 @@ class PoissonParams extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
- * </pre>
*
- * <code>double offered_load = 1;</code>
+ * Generated from protobuf field <code>double offered_load = 1;</code>
+ * @return float
*/
public function getOfferedLoad()
{
@@ -45,16 +40,18 @@ class PoissonParams extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The rate of arrivals (a.k.a. lambda parameter of the exp distribution).
- * </pre>
*
- * <code>double offered_load = 1;</code>
+ * Generated from protobuf field <code>double offered_load = 1;</code>
+ * @param float $var
+ * @return $this
*/
public function setOfferedLoad($var)
{
GPBUtil::checkDouble($var);
$this->offered_load = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ProxyClientServiceClient.php b/src/php/tests/qps/generated_code/Grpc/Testing/ProxyClientServiceClient.php
index a6da2e7aef..5510b57064 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ProxyClientServiceClient.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ProxyClientServiceClient.php
@@ -16,17 +16,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-namespace Grpc\Testing {
+namespace Grpc\Testing;
- class ProxyClientServiceClient extends \Grpc\BaseStub {
+/**
+ */
+class ProxyClientServiceClient extends \Grpc\BaseStub {
/**
* @param string $hostname hostname
* @param array $opts channel options
- * @param Grpc\Channel $channel (optional) re-use channel object
+ * @param \Grpc\Channel $channel (optional) re-use channel object
*/
public function __construct($hostname, $opts, $channel = null) {
- parent::__construct($hostname, $opts, $channel);
+ parent::__construct($hostname, $opts, $channel);
}
/**
@@ -36,10 +38,10 @@ namespace Grpc\Testing {
*/
public function GetConfig(\Grpc\Testing\Void $argument,
$metadata = [], $options = []) {
- return $this->_simpleRequest('/grpc.testing.ProxyClientService/GetConfig',
- $argument,
- ['\Grpc\Testing\ClientConfig', 'decode'],
- $metadata, $options);
+ return $this->_simpleRequest('/grpc.testing.ProxyClientService/GetConfig',
+ $argument,
+ ['\Grpc\Testing\ClientConfig', 'decode'],
+ $metadata, $options);
}
/**
@@ -47,11 +49,19 @@ namespace Grpc\Testing {
* @param array $options call options
*/
public function ReportTime($metadata = [], $options = []) {
- return $this->_clientStreamRequest('/grpc.testing.ProxyClientService/ReportTime',
- ['\Grpc\Testing\Void','decode'],
- $metadata, $options);
+ return $this->_clientStreamRequest('/grpc.testing.ProxyClientService/ReportTime',
+ ['\Grpc\Testing\Void','decode'],
+ $metadata, $options);
}
- }
+ /**
+ * @param array $metadata metadata
+ * @param array $options call options
+ */
+ public function ReportHist($metadata = [], $options = []) {
+ return $this->_clientStreamRequest('/grpc.testing.ProxyClientService/ReportHist',
+ ['\Grpc\Testing\Void','decode'],
+ $metadata, $options);
+ }
}
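The reformatted ProxyClientServiceClient also gains a ReportHist client-streaming method alongside ReportTime. A hedged sketch of driving it from a worker; the endpoint and credentials are placeholders, and the call object behaviour assumed here is the standard \Grpc\BaseStub client-streaming surface (write() per message, wait() to close and collect the single reply):

<?php
// Sketch only: client-streaming call against the regenerated stub.
use Grpc\Testing\ProxyClientServiceClient;
use Grpc\Testing\HistogramData;

$client = new ProxyClientServiceClient('localhost:50052', [
    'credentials' => \Grpc\ChannelCredentials::createInsecure(),
]);

$call = $client->ReportHist();
$call->write((new HistogramData())->setCount(128.0));
list($reply, $status) = $call->wait();   // closes the write side, waits for the Void reply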
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ProxyStat.php b/src/php/tests/qps/generated_code/Grpc/Testing/ProxyStat.php
index ed43be99ce..6fab611534 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ProxyStat.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ProxyStat.php
@@ -9,12 +9,12 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ProxyStat</code>
+ * Generated from protobuf message <code>grpc.testing.ProxyStat</code>
*/
class ProxyStat extends \Google\Protobuf\Internal\Message
{
/**
- * <code>double latency = 1;</code>
+ * Generated from protobuf field <code>double latency = 1;</code>
*/
private $latency = 0.0;
@@ -24,7 +24,8 @@ class ProxyStat extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency = 1;</code>
+ * Generated from protobuf field <code>double latency = 1;</code>
+ * @return float
*/
public function getLatency()
{
@@ -32,12 +33,16 @@ class ProxyStat extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency = 1;</code>
+ * Generated from protobuf field <code>double latency = 1;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency($var)
{
GPBUtil::checkDouble($var);
$this->latency = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectInfo.php b/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectInfo.php
index dfaaa606c3..cd728705fa 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectInfo.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectInfo.php
@@ -9,22 +9,20 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* For reconnect interop test only.
* Server tells client whether its reconnects are following the spec and the
* reconnect backoffs it saw.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ReconnectInfo</code>
+ * Generated from protobuf message <code>grpc.testing.ReconnectInfo</code>
*/
class ReconnectInfo extends \Google\Protobuf\Internal\Message
{
/**
- * <code>bool passed = 1;</code>
+ * Generated from protobuf field <code>bool passed = 1;</code>
*/
private $passed = false;
/**
- * <code>repeated int32 backoff_ms = 2;</code>
+ * Generated from protobuf field <code>repeated int32 backoff_ms = 2;</code>
*/
private $backoff_ms;
@@ -34,7 +32,8 @@ class ReconnectInfo extends \Google\Protobuf\Internal\Message
}
/**
- * <code>bool passed = 1;</code>
+ * Generated from protobuf field <code>bool passed = 1;</code>
+ * @return bool
*/
public function getPassed()
{
@@ -42,16 +41,21 @@ class ReconnectInfo extends \Google\Protobuf\Internal\Message
}
/**
- * <code>bool passed = 1;</code>
+ * Generated from protobuf field <code>bool passed = 1;</code>
+ * @param bool $var
+ * @return $this
*/
public function setPassed($var)
{
GPBUtil::checkBool($var);
$this->passed = $var;
+
+ return $this;
}
/**
- * <code>repeated int32 backoff_ms = 2;</code>
+ * Generated from protobuf field <code>repeated int32 backoff_ms = 2;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getBackoffMs()
{
@@ -59,12 +63,16 @@ class ReconnectInfo extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated int32 backoff_ms = 2;</code>
+ * Generated from protobuf field <code>repeated int32 backoff_ms = 2;</code>
+ * @param int[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setBackoffMs(&$var)
+ public function setBackoffMs($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
- $this->backoff_ms = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
+ $this->backoff_ms = $arr;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectParams.php
index 9715855783..f91dc410cb 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ReconnectParams.php
@@ -9,17 +9,15 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* For reconnect interop test only.
* Client tells server what reconnection parameters it used.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ReconnectParams</code>
+ * Generated from protobuf message <code>grpc.testing.ReconnectParams</code>
*/
class ReconnectParams extends \Google\Protobuf\Internal\Message
{
/**
- * <code>int32 max_reconnect_backoff_ms = 1;</code>
+ * Generated from protobuf field <code>int32 max_reconnect_backoff_ms = 1;</code>
*/
private $max_reconnect_backoff_ms = 0;
@@ -29,7 +27,8 @@ class ReconnectParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 max_reconnect_backoff_ms = 1;</code>
+ * Generated from protobuf field <code>int32 max_reconnect_backoff_ms = 1;</code>
+ * @return int
*/
public function getMaxReconnectBackoffMs()
{
@@ -37,12 +36,16 @@ class ReconnectParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 max_reconnect_backoff_ms = 1;</code>
+ * Generated from protobuf field <code>int32 max_reconnect_backoff_ms = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setMaxReconnectBackoffMs($var)
{
GPBUtil::checkInt32($var);
$this->max_reconnect_backoff_ms = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ReportQpsScenarioServiceClient.php b/src/php/tests/qps/generated_code/Grpc/Testing/ReportQpsScenarioServiceClient.php
new file mode 100644
index 0000000000..72d44ffc66
--- /dev/null
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ReportQpsScenarioServiceClient.php
@@ -0,0 +1,50 @@
+<?php
+// GENERATED CODE -- DO NOT EDIT!
+
+// Original file comments:
+// Copyright 2015 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An integration test service that covers all the method signature permutations
+// of unary/streaming requests/responses.
+namespace Grpc\Testing;
+
+/**
+ */
+class ReportQpsScenarioServiceClient extends \Grpc\BaseStub {
+
+ /**
+ * @param string $hostname hostname
+ * @param array $opts channel options
+ * @param \Grpc\Channel $channel (optional) re-use channel object
+ */
+ public function __construct($hostname, $opts, $channel = null) {
+ parent::__construct($hostname, $opts, $channel);
+ }
+
+ /**
+ * Report results of a QPS test benchmark scenario.
+ * @param \Grpc\Testing\ScenarioResult $argument input argument
+ * @param array $metadata metadata
+ * @param array $options call options
+ */
+ public function ReportScenario(\Grpc\Testing\ScenarioResult $argument,
+ $metadata = [], $options = []) {
+ return $this->_simpleRequest('/grpc.testing.ReportQpsScenarioService/ReportScenario',
+ $argument,
+ ['\Grpc\Testing\Void', 'decode'],
+ $metadata, $options);
+ }
+
+}
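ReportQpsScenarioServiceClient is a new stub with a single unary ReportScenario method, so usage is a plain simple-request call. Sketch below; the endpoint, insecure credentials, and empty ScenarioResult are illustrative only:

<?php
// Sketch only: unary call on the new stub; wait() yields [response, status].
use Grpc\Testing\ReportQpsScenarioServiceClient;
use Grpc\Testing\ScenarioResult;

$client = new ReportQpsScenarioServiceClient('localhost:50052', [
    'credentials' => \Grpc\ChannelCredentials::createInsecure(),
]);

list($void, $status) = $client->ReportScenario(new ScenarioResult())->wait();
if ($status->code !== \Grpc\STATUS_OK) {
    error_log('ReportScenario failed: ' . $status->details);
}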
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/RequestResultCount.php b/src/php/tests/qps/generated_code/Grpc/Testing/RequestResultCount.php
index 1be42b2ac9..75fa6cafe2 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/RequestResultCount.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/RequestResultCount.php
@@ -9,16 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.RequestResultCount</code>
+ * Generated from protobuf message <code>grpc.testing.RequestResultCount</code>
*/
class RequestResultCount extends \Google\Protobuf\Internal\Message
{
/**
- * <code>int32 status_code = 1;</code>
+ * Generated from protobuf field <code>int32 status_code = 1;</code>
*/
private $status_code = 0;
/**
- * <code>int64 count = 2;</code>
+ * Generated from protobuf field <code>int64 count = 2;</code>
*/
private $count = 0;
@@ -28,7 +28,8 @@ class RequestResultCount extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 status_code = 1;</code>
+ * Generated from protobuf field <code>int32 status_code = 1;</code>
+ * @return int
*/
public function getStatusCode()
{
@@ -36,16 +37,21 @@ class RequestResultCount extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 status_code = 1;</code>
+ * Generated from protobuf field <code>int32 status_code = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setStatusCode($var)
{
GPBUtil::checkInt32($var);
$this->status_code = $var;
+
+ return $this;
}
/**
- * <code>int64 count = 2;</code>
+ * Generated from protobuf field <code>int64 count = 2;</code>
+ * @return int|string
*/
public function getCount()
{
@@ -53,12 +59,16 @@ class RequestResultCount extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int64 count = 2;</code>
+ * Generated from protobuf field <code>int64 count = 2;</code>
+ * @param int|string $var
+ * @return $this
*/
public function setCount($var)
{
GPBUtil::checkInt64($var);
$this->count = $var;
+
+ return $this;
}
}
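Note the @return int|string added on getCount(): int64 fields come back as a native PHP integer on 64-bit builds but as a decimal string where 64-bit integers are unavailable. A defensive sketch under that assumption:

<?php
// Sketch only: treat int64 getters as int|string before doing arithmetic.
use Grpc\Testing\RequestResultCount;

$row = (new RequestResultCount())
    ->setStatusCode(14)
    ->setCount(5);

$count = $row->getCount();
$total = is_string($count) ? (int) $count : $count;  // fine while values fit in PHP_INT_MAX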
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ResponseParameters.php b/src/php/tests/qps/generated_code/Grpc/Testing/ResponseParameters.php
index b7a8e5ece7..b2f0a827fe 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ResponseParameters.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ResponseParameters.php
@@ -9,40 +9,32 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Configuration for a particular response.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ResponseParameters</code>
+ * Generated from protobuf message <code>grpc.testing.ResponseParameters</code>
*/
class ResponseParameters extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Desired payload sizes in responses from the server.
- * </pre>
*
- * <code>int32 size = 1;</code>
+ * Generated from protobuf field <code>int32 size = 1;</code>
*/
private $size = 0;
/**
- * <pre>
* Desired interval between consecutive responses in the response stream in
* microseconds.
- * </pre>
*
- * <code>int32 interval_us = 2;</code>
+ * Generated from protobuf field <code>int32 interval_us = 2;</code>
*/
private $interval_us = 0;
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue compressed = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue compressed = 3;</code>
*/
private $compressed = null;
@@ -52,11 +44,10 @@ class ResponseParameters extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Desired payload sizes in responses from the server.
- * </pre>
*
- * <code>int32 size = 1;</code>
+ * Generated from protobuf field <code>int32 size = 1;</code>
+ * @return int
*/
public function getSize()
{
@@ -64,25 +55,26 @@ class ResponseParameters extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Desired payload sizes in responses from the server.
- * </pre>
*
- * <code>int32 size = 1;</code>
+ * Generated from protobuf field <code>int32 size = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setSize($var)
{
GPBUtil::checkInt32($var);
$this->size = $var;
+
+ return $this;
}
/**
- * <pre>
* Desired interval between consecutive responses in the response stream in
* microseconds.
- * </pre>
*
- * <code>int32 interval_us = 2;</code>
+ * Generated from protobuf field <code>int32 interval_us = 2;</code>
+ * @return int
*/
public function getIntervalUs()
{
@@ -90,28 +82,29 @@ class ResponseParameters extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Desired interval between consecutive responses in the response stream in
* microseconds.
- * </pre>
*
- * <code>int32 interval_us = 2;</code>
+ * Generated from protobuf field <code>int32 interval_us = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setIntervalUs($var)
{
GPBUtil::checkInt32($var);
$this->interval_us = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue compressed = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue compressed = 3;</code>
+ * @return \Grpc\Testing\BoolValue
*/
public function getCompressed()
{
@@ -119,19 +112,21 @@ class ResponseParameters extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue compressed = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue compressed = 3;</code>
+ * @param \Grpc\Testing\BoolValue $var
+ * @return $this
*/
- public function setCompressed(&$var)
+ public function setCompressed($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\BoolValue::class);
$this->compressed = $var;
+
+ return $this;
}
}
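setCompressed() above now takes the BoolValue by value, and the field defaults to null, so callers construct the wrapper explicitly and should null-check the getter. Sketch, assuming the generated Grpc\Testing\BoolValue wrapper exposes setValue()/getValue():

<?php
// Sketch only: nested message fields default to null until explicitly set.
use Grpc\Testing\ResponseParameters;
use Grpc\Testing\BoolValue;

$params = (new ResponseParameters())
    ->setSize(1024)
    ->setIntervalUs(0)
    ->setCompressed((new BoolValue())->setValue(true));

$compressed = $params->getCompressed();          // null if never set
echo ($compressed !== null && $compressed->getValue()) ? 'compressed' : 'plain';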
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/RpcType.php b/src/php/tests/qps/generated_code/Grpc/Testing/RpcType.php
index 2e664fff47..73a66490ea 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/RpcType.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/RpcType.php
@@ -5,17 +5,29 @@
namespace Grpc\Testing;
/**
- * Protobuf enum <code>grpc.testing.RpcType</code>
+ * Protobuf enum <code>Grpc\Testing\RpcType</code>
*/
class RpcType
{
/**
- * <code>UNARY = 0;</code>
+ * Generated from protobuf enum <code>UNARY = 0;</code>
*/
const UNARY = 0;
/**
- * <code>STREAMING = 1;</code>
+ * Generated from protobuf enum <code>STREAMING = 1;</code>
*/
const STREAMING = 1;
+ /**
+ * Generated from protobuf enum <code>STREAMING_FROM_CLIENT = 2;</code>
+ */
+ const STREAMING_FROM_CLIENT = 2;
+ /**
+ * Generated from protobuf enum <code>STREAMING_FROM_SERVER = 3;</code>
+ */
+ const STREAMING_FROM_SERVER = 3;
+ /**
+ * Generated from protobuf enum <code>STREAMING_BOTH_WAYS = 4;</code>
+ */
+ const STREAMING_BOTH_WAYS = 4;
}
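RpcType grows three streaming variants. The enum is generated as plain integer class constants, so dispatch stays an ordinary switch; a small sketch:

<?php
// Sketch only: the generated enum is a set of integer class constants.
use Grpc\Testing\RpcType;

function describeRpcType($type) {
    switch ($type) {
        case RpcType::UNARY:                 return 'unary';
        case RpcType::STREAMING:             return 'streaming';
        case RpcType::STREAMING_FROM_CLIENT: return 'client streaming';
        case RpcType::STREAMING_FROM_SERVER: return 'server streaming';
        case RpcType::STREAMING_BOTH_WAYS:   return 'both ways';
        default:                             return 'unknown';
    }
}

echo describeRpcType(RpcType::STREAMING_FROM_SERVER), PHP_EOL;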
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/Scenario.php b/src/php/tests/qps/generated_code/Grpc/Testing/Scenario.php
index 136ed299ea..9ec284b71f 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/Scenario.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/Scenario.php
@@ -9,76 +9,58 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* A single performance scenario: input to qps_json_driver
- * </pre>
*
- * Protobuf type <code>grpc.testing.Scenario</code>
+ * Generated from protobuf message <code>grpc.testing.Scenario</code>
*/
class Scenario extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Human readable name for this scenario
- * </pre>
*
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
*/
private $name = '';
/**
- * <pre>
* Client configuration
- * </pre>
*
- * <code>.grpc.testing.ClientConfig client_config = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientConfig client_config = 2;</code>
*/
private $client_config = null;
/**
- * <pre>
* Number of clients to start for the test
- * </pre>
*
- * <code>int32 num_clients = 3;</code>
+ * Generated from protobuf field <code>int32 num_clients = 3;</code>
*/
private $num_clients = 0;
/**
- * <pre>
* Server configuration
- * </pre>
*
- * <code>.grpc.testing.ServerConfig server_config = 4;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerConfig server_config = 4;</code>
*/
private $server_config = null;
/**
- * <pre>
* Number of servers to start for the test
- * </pre>
*
- * <code>int32 num_servers = 5;</code>
+ * Generated from protobuf field <code>int32 num_servers = 5;</code>
*/
private $num_servers = 0;
/**
- * <pre>
* Warmup period, in seconds
- * </pre>
*
- * <code>int32 warmup_seconds = 6;</code>
+ * Generated from protobuf field <code>int32 warmup_seconds = 6;</code>
*/
private $warmup_seconds = 0;
/**
- * <pre>
* Benchmark time, in seconds
- * </pre>
*
- * <code>int32 benchmark_seconds = 7;</code>
+ * Generated from protobuf field <code>int32 benchmark_seconds = 7;</code>
*/
private $benchmark_seconds = 0;
/**
- * <pre>
* Number of workers to spawn locally (usually zero)
- * </pre>
*
- * <code>int32 spawn_local_worker_count = 8;</code>
+ * Generated from protobuf field <code>int32 spawn_local_worker_count = 8;</code>
*/
private $spawn_local_worker_count = 0;
@@ -88,11 +70,10 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Human readable name for this scenario
- * </pre>
*
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @return string
*/
public function getName()
{
@@ -100,24 +81,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Human readable name for this scenario
- * </pre>
*
- * <code>string name = 1;</code>
+ * Generated from protobuf field <code>string name = 1;</code>
+ * @param string $var
+ * @return $this
*/
public function setName($var)
{
GPBUtil::checkString($var, True);
$this->name = $var;
+
+ return $this;
}
/**
- * <pre>
* Client configuration
- * </pre>
*
- * <code>.grpc.testing.ClientConfig client_config = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientConfig client_config = 2;</code>
+ * @return \Grpc\Testing\ClientConfig
*/
public function getClientConfig()
{
@@ -125,24 +107,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Client configuration
- * </pre>
*
- * <code>.grpc.testing.ClientConfig client_config = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.ClientConfig client_config = 2;</code>
+ * @param \Grpc\Testing\ClientConfig $var
+ * @return $this
*/
- public function setClientConfig(&$var)
+ public function setClientConfig($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ClientConfig::class);
$this->client_config = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of clients to start for the test
- * </pre>
*
- * <code>int32 num_clients = 3;</code>
+ * Generated from protobuf field <code>int32 num_clients = 3;</code>
+ * @return int
*/
public function getNumClients()
{
@@ -150,24 +133,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of clients to start for the test
- * </pre>
*
- * <code>int32 num_clients = 3;</code>
+ * Generated from protobuf field <code>int32 num_clients = 3;</code>
+ * @param int $var
+ * @return $this
*/
public function setNumClients($var)
{
GPBUtil::checkInt32($var);
$this->num_clients = $var;
+
+ return $this;
}
/**
- * <pre>
* Server configuration
- * </pre>
*
- * <code>.grpc.testing.ServerConfig server_config = 4;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerConfig server_config = 4;</code>
+ * @return \Grpc\Testing\ServerConfig
*/
public function getServerConfig()
{
@@ -175,24 +159,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Server configuration
- * </pre>
*
- * <code>.grpc.testing.ServerConfig server_config = 4;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerConfig server_config = 4;</code>
+ * @param \Grpc\Testing\ServerConfig $var
+ * @return $this
*/
- public function setServerConfig(&$var)
+ public function setServerConfig($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ServerConfig::class);
$this->server_config = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of servers to start for the test
- * </pre>
*
- * <code>int32 num_servers = 5;</code>
+ * Generated from protobuf field <code>int32 num_servers = 5;</code>
+ * @return int
*/
public function getNumServers()
{
@@ -200,24 +185,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of servers to start for the test
- * </pre>
*
- * <code>int32 num_servers = 5;</code>
+ * Generated from protobuf field <code>int32 num_servers = 5;</code>
+ * @param int $var
+ * @return $this
*/
public function setNumServers($var)
{
GPBUtil::checkInt32($var);
$this->num_servers = $var;
+
+ return $this;
}
/**
- * <pre>
* Warmup period, in seconds
- * </pre>
*
- * <code>int32 warmup_seconds = 6;</code>
+ * Generated from protobuf field <code>int32 warmup_seconds = 6;</code>
+ * @return int
*/
public function getWarmupSeconds()
{
@@ -225,24 +211,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Warmup period, in seconds
- * </pre>
*
- * <code>int32 warmup_seconds = 6;</code>
+ * Generated from protobuf field <code>int32 warmup_seconds = 6;</code>
+ * @param int $var
+ * @return $this
*/
public function setWarmupSeconds($var)
{
GPBUtil::checkInt32($var);
$this->warmup_seconds = $var;
+
+ return $this;
}
/**
- * <pre>
* Benchmark time, in seconds
- * </pre>
*
- * <code>int32 benchmark_seconds = 7;</code>
+ * Generated from protobuf field <code>int32 benchmark_seconds = 7;</code>
+ * @return int
*/
public function getBenchmarkSeconds()
{
@@ -250,24 +237,25 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Benchmark time, in seconds
- * </pre>
*
- * <code>int32 benchmark_seconds = 7;</code>
+ * Generated from protobuf field <code>int32 benchmark_seconds = 7;</code>
+ * @param int $var
+ * @return $this
*/
public function setBenchmarkSeconds($var)
{
GPBUtil::checkInt32($var);
$this->benchmark_seconds = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of workers to spawn locally (usually zero)
- * </pre>
*
- * <code>int32 spawn_local_worker_count = 8;</code>
+ * Generated from protobuf field <code>int32 spawn_local_worker_count = 8;</code>
+ * @return int
*/
public function getSpawnLocalWorkerCount()
{
@@ -275,16 +263,18 @@ class Scenario extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of workers to spawn locally (usually zero)
- * </pre>
*
- * <code>int32 spawn_local_worker_count = 8;</code>
+ * Generated from protobuf field <code>int32 spawn_local_worker_count = 8;</code>
+ * @param int $var
+ * @return $this
*/
public function setSpawnLocalWorkerCount($var)
{
GPBUtil::checkInt32($var);
$this->spawn_local_worker_count = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResult.php b/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResult.php
index 809cd96244..31d9a39a1f 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResult.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResult.php
@@ -9,80 +9,62 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Results of a single benchmark scenario.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ScenarioResult</code>
+ * Generated from protobuf message <code>grpc.testing.ScenarioResult</code>
*/
class ScenarioResult extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Inputs used to run the scenario.
- * </pre>
*
- * <code>.grpc.testing.Scenario scenario = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Scenario scenario = 1;</code>
*/
private $scenario = null;
/**
- * <pre>
* Histograms from all clients merged into one histogram.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 2;</code>
*/
private $latencies = null;
/**
- * <pre>
* Client stats for each client
- * </pre>
*
- * <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
*/
private $client_stats;
/**
- * <pre>
* Server stats for each server
- * </pre>
*
- * <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
*/
private $server_stats;
/**
- * <pre>
* Number of cores available to each server
- * </pre>
*
- * <code>repeated int32 server_cores = 5;</code>
+ * Generated from protobuf field <code>repeated int32 server_cores = 5;</code>
*/
private $server_cores;
/**
- * <pre>
* An after-the-fact computed summary
- * </pre>
*
- * <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
*/
private $summary = null;
/**
- * <pre>
* Information on success or failure of each worker
- * </pre>
*
- * <code>repeated bool client_success = 7;</code>
+ * Generated from protobuf field <code>repeated bool client_success = 7;</code>
*/
private $client_success;
/**
- * <code>repeated bool server_success = 8;</code>
+ * Generated from protobuf field <code>repeated bool server_success = 8;</code>
*/
private $server_success;
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
*/
private $request_results;
@@ -92,11 +74,10 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Inputs used to run the scenario.
- * </pre>
*
- * <code>.grpc.testing.Scenario scenario = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Scenario scenario = 1;</code>
+ * @return \Grpc\Testing\Scenario
*/
public function getScenario()
{
@@ -104,24 +85,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Inputs used to run the scenario.
- * </pre>
*
- * <code>.grpc.testing.Scenario scenario = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Scenario scenario = 1;</code>
+ * @param \Grpc\Testing\Scenario $var
+ * @return $this
*/
- public function setScenario(&$var)
+ public function setScenario($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Scenario::class);
$this->scenario = $var;
+
+ return $this;
}
/**
- * <pre>
* Histograms from all clients merged into one histogram.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 2;</code>
+ * @return \Grpc\Testing\HistogramData
*/
public function getLatencies()
{
@@ -129,24 +111,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Histograms from all clients merged into one histogram.
- * </pre>
*
- * <code>.grpc.testing.HistogramData latencies = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.HistogramData latencies = 2;</code>
+ * @param \Grpc\Testing\HistogramData $var
+ * @return $this
*/
- public function setLatencies(&$var)
+ public function setLatencies($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\HistogramData::class);
$this->latencies = $var;
+
+ return $this;
}
/**
- * <pre>
* Client stats for each client
- * </pre>
*
- * <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getClientStats()
{
@@ -154,24 +137,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Client stats for each client
- * </pre>
*
- * <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ClientStats client_stats = 3;</code>
+ * @param \Grpc\Testing\ClientStats[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setClientStats(&$var)
+ public function setClientStats($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ClientStats::class);
- $this->client_stats = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ClientStats::class);
+ $this->client_stats = $arr;
+
+ return $this;
}
/**
- * <pre>
* Server stats for each server
- * </pre>
*
- * <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getServerStats()
{
@@ -179,24 +163,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Server stats for each server
- * </pre>
*
- * <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ServerStats server_stats = 4;</code>
+ * @param \Grpc\Testing\ServerStats[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setServerStats(&$var)
+ public function setServerStats($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ServerStats::class);
- $this->server_stats = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ServerStats::class);
+ $this->server_stats = $arr;
+
+ return $this;
}
/**
- * <pre>
* Number of cores available to each server
- * </pre>
*
- * <code>repeated int32 server_cores = 5;</code>
+ * Generated from protobuf field <code>repeated int32 server_cores = 5;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getServerCores()
{
@@ -204,24 +189,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of cores available to each server
- * </pre>
*
- * <code>repeated int32 server_cores = 5;</code>
+ * Generated from protobuf field <code>repeated int32 server_cores = 5;</code>
+ * @param int[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setServerCores(&$var)
+ public function setServerCores($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
- $this->server_cores = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
+ $this->server_cores = $arr;
+
+ return $this;
}
/**
- * <pre>
* An after-the-fact computed summary
- * </pre>
*
- * <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
+ * @return \Grpc\Testing\ScenarioResultSummary
*/
public function getSummary()
{
@@ -229,24 +215,25 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* An after-the-fact computed summary
- * </pre>
*
- * <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.ScenarioResultSummary summary = 6;</code>
+ * @param \Grpc\Testing\ScenarioResultSummary $var
+ * @return $this
*/
- public function setSummary(&$var)
+ public function setSummary($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ScenarioResultSummary::class);
$this->summary = $var;
+
+ return $this;
}
/**
- * <pre>
* Information on success or failure of each worker
- * </pre>
*
- * <code>repeated bool client_success = 7;</code>
+ * Generated from protobuf field <code>repeated bool client_success = 7;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getClientSuccess()
{
@@ -254,20 +241,23 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Information on success or failure of each worker
- * </pre>
*
- * <code>repeated bool client_success = 7;</code>
+ * Generated from protobuf field <code>repeated bool client_success = 7;</code>
+ * @param bool[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setClientSuccess(&$var)
+ public function setClientSuccess($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::BOOL);
- $this->client_success = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::BOOL);
+ $this->client_success = $arr;
+
+ return $this;
}
/**
- * <code>repeated bool server_success = 8;</code>
+ * Generated from protobuf field <code>repeated bool server_success = 8;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getServerSuccess()
{
@@ -275,20 +265,23 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated bool server_success = 8;</code>
+ * Generated from protobuf field <code>repeated bool server_success = 8;</code>
+ * @param bool[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setServerSuccess(&$var)
+ public function setServerSuccess($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::BOOL);
- $this->server_success = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::BOOL);
+ $this->server_success = $arr;
+
+ return $this;
}
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getRequestResults()
{
@@ -296,16 +289,18 @@ class ScenarioResult extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of failed requests (one row per status code seen)
- * </pre>
*
- * <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.RequestResultCount request_results = 9;</code>
+ * @param \Grpc\Testing\RequestResultCount[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setRequestResults(&$var)
+ public function setRequestResults($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\RequestResultCount::class);
- $this->request_results = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\RequestResultCount::class);
+ $this->request_results = $arr;
+
+ return $this;
}
}
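The repeated message setters on ScenarioResult follow the same by-value pattern as the scalar ones: an array of message objects is accepted and replaced with the checked RepeatedField. Sketch (generated classes assumed autoloadable):

<?php
// Sketch only: repeated message fields accept a plain array of message objects.
use Grpc\Testing\ScenarioResult;
use Grpc\Testing\RequestResultCount;

$result = (new ScenarioResult())
    ->setServerCores([8, 8])
    ->setRequestResults([
        (new RequestResultCount())->setStatusCode(0)->setCount(100000),
        (new RequestResultCount())->setStatusCode(14)->setCount(3),
    ]);

echo count($result->getRequestResults()), PHP_EOL;  // RepeatedField implements Countable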
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResultSummary.php b/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResultSummary.php
index 7520cff78e..f7f1c987b5 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResultSummary.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ScenarioResultSummary.php
@@ -9,107 +9,107 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Basic summary that can be computed from ClientStats and ServerStats
* once the scenario has finished.
- * </pre>
*
- * Protobuf type <code>grpc.testing.ScenarioResultSummary</code>
+ * Generated from protobuf message <code>grpc.testing.ScenarioResultSummary</code>
*/
class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Total number of operations per second over all clients.
- * </pre>
*
- * <code>double qps = 1;</code>
+ * Generated from protobuf field <code>double qps = 1;</code>
*/
private $qps = 0.0;
/**
- * <pre>
* QPS per one server core.
- * </pre>
*
- * <code>double qps_per_server_core = 2;</code>
+ * Generated from protobuf field <code>double qps_per_server_core = 2;</code>
*/
private $qps_per_server_core = 0.0;
/**
- * <pre>
- * server load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on system_time (0.85 => 85%)
*
- * <code>double server_system_time = 3;</code>
+ * Generated from protobuf field <code>double server_system_time = 3;</code>
*/
private $server_system_time = 0.0;
/**
- * <pre>
- * server load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on user_time (0.85 => 85%)
*
- * <code>double server_user_time = 4;</code>
+ * Generated from protobuf field <code>double server_user_time = 4;</code>
*/
private $server_user_time = 0.0;
/**
- * <pre>
- * client load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on system_time (0.85 => 85%)
*
- * <code>double client_system_time = 5;</code>
+ * Generated from protobuf field <code>double client_system_time = 5;</code>
*/
private $client_system_time = 0.0;
/**
- * <pre>
- * client load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on user_time (0.85 => 85%)
*
- * <code>double client_user_time = 6;</code>
+ * Generated from protobuf field <code>double client_user_time = 6;</code>
*/
private $client_user_time = 0.0;
/**
- * <pre>
* X% latency percentiles (in nanoseconds)
- * </pre>
*
- * <code>double latency_50 = 7;</code>
+ * Generated from protobuf field <code>double latency_50 = 7;</code>
*/
private $latency_50 = 0.0;
/**
- * <code>double latency_90 = 8;</code>
+ * Generated from protobuf field <code>double latency_90 = 8;</code>
*/
private $latency_90 = 0.0;
/**
- * <code>double latency_95 = 9;</code>
+ * Generated from protobuf field <code>double latency_95 = 9;</code>
*/
private $latency_95 = 0.0;
/**
- * <code>double latency_99 = 10;</code>
+ * Generated from protobuf field <code>double latency_99 = 10;</code>
*/
private $latency_99 = 0.0;
/**
- * <code>double latency_999 = 11;</code>
+ * Generated from protobuf field <code>double latency_999 = 11;</code>
*/
private $latency_999 = 0.0;
/**
- * <pre>
* server cpu usage percentage
- * </pre>
*
- * <code>double server_cpu_usage = 12;</code>
+ * Generated from protobuf field <code>double server_cpu_usage = 12;</code>
*/
private $server_cpu_usage = 0.0;
/**
- * <pre>
* Number of requests that succeeded/failed
- * </pre>
*
- * <code>double successful_requests_per_second = 13;</code>
+ * Generated from protobuf field <code>double successful_requests_per_second = 13;</code>
*/
private $successful_requests_per_second = 0.0;
/**
- * <code>double failed_requests_per_second = 14;</code>
+ * Generated from protobuf field <code>double failed_requests_per_second = 14;</code>
*/
private $failed_requests_per_second = 0.0;
+ /**
+ * Number of polls called inside completion queue per request
+ *
+ * Generated from protobuf field <code>double client_polls_per_request = 15;</code>
+ */
+ private $client_polls_per_request = 0.0;
+ /**
+ * Generated from protobuf field <code>double server_polls_per_request = 16;</code>
+ */
+ private $server_polls_per_request = 0.0;
+ /**
+ * Queries per CPU-sec over all servers or clients
+ *
+ * Generated from protobuf field <code>double server_queries_per_cpu_sec = 17;</code>
+ */
+ private $server_queries_per_cpu_sec = 0.0;
+ /**
+ * Generated from protobuf field <code>double client_queries_per_cpu_sec = 18;</code>
+ */
+ private $client_queries_per_cpu_sec = 0.0;
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
@@ -117,11 +117,10 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Total number of operations per second over all clients.
- * </pre>
*
- * <code>double qps = 1;</code>
+ * Generated from protobuf field <code>double qps = 1;</code>
+ * @return float
*/
public function getQps()
{
@@ -129,24 +128,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Total number of operations per second over all clients.
- * </pre>
*
- * <code>double qps = 1;</code>
+ * Generated from protobuf field <code>double qps = 1;</code>
+ * @param float $var
+ * @return $this
*/
public function setQps($var)
{
GPBUtil::checkDouble($var);
$this->qps = $var;
+
+ return $this;
}
/**
- * <pre>
* QPS per one server core.
- * </pre>
*
- * <code>double qps_per_server_core = 2;</code>
+ * Generated from protobuf field <code>double qps_per_server_core = 2;</code>
+ * @return float
*/
public function getQpsPerServerCore()
{
@@ -154,24 +154,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* QPS per one server core.
- * </pre>
*
- * <code>double qps_per_server_core = 2;</code>
+ * Generated from protobuf field <code>double qps_per_server_core = 2;</code>
+ * @param float $var
+ * @return $this
*/
public function setQpsPerServerCore($var)
{
GPBUtil::checkDouble($var);
$this->qps_per_server_core = $var;
+
+ return $this;
}
/**
- * <pre>
- * server load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on system_time (0.85 => 85%)
*
- * <code>double server_system_time = 3;</code>
+ * Generated from protobuf field <code>double server_system_time = 3;</code>
+ * @return float
*/
public function getServerSystemTime()
{
@@ -179,24 +180,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
- * server load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on system_time (0.85 => 85%)
*
- * <code>double server_system_time = 3;</code>
+ * Generated from protobuf field <code>double server_system_time = 3;</code>
+ * @param float $var
+ * @return $this
*/
public function setServerSystemTime($var)
{
GPBUtil::checkDouble($var);
$this->server_system_time = $var;
+
+ return $this;
}
/**
- * <pre>
- * server load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on user_time (0.85 => 85%)
*
- * <code>double server_user_time = 4;</code>
+ * Generated from protobuf field <code>double server_user_time = 4;</code>
+ * @return float
*/
public function getServerUserTime()
{
@@ -204,24 +206,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
- * server load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * server load based on user_time (0.85 => 85%)
*
- * <code>double server_user_time = 4;</code>
+ * Generated from protobuf field <code>double server_user_time = 4;</code>
+ * @param float $var
+ * @return $this
*/
public function setServerUserTime($var)
{
GPBUtil::checkDouble($var);
$this->server_user_time = $var;
+
+ return $this;
}
/**
- * <pre>
- * client load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on system_time (0.85 => 85%)
*
- * <code>double client_system_time = 5;</code>
+ * Generated from protobuf field <code>double client_system_time = 5;</code>
+ * @return float
*/
public function getClientSystemTime()
{
@@ -229,24 +232,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
- * client load based on system_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on system_time (0.85 => 85%)
*
- * <code>double client_system_time = 5;</code>
+ * Generated from protobuf field <code>double client_system_time = 5;</code>
+ * @param float $var
+ * @return $this
*/
public function setClientSystemTime($var)
{
GPBUtil::checkDouble($var);
$this->client_system_time = $var;
+
+ return $this;
}
/**
- * <pre>
- * client load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on user_time (0.85 => 85%)
*
- * <code>double client_user_time = 6;</code>
+ * Generated from protobuf field <code>double client_user_time = 6;</code>
+ * @return float
*/
public function getClientUserTime()
{
@@ -254,24 +258,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
- * client load based on user_time (0.85 =&gt; 85%)
- * </pre>
+ * client load based on user_time (0.85 => 85%)
*
- * <code>double client_user_time = 6;</code>
+ * Generated from protobuf field <code>double client_user_time = 6;</code>
+ * @param float $var
+ * @return $this
*/
public function setClientUserTime($var)
{
GPBUtil::checkDouble($var);
$this->client_user_time = $var;
+
+ return $this;
}
/**
- * <pre>
* X% latency percentiles (in nanoseconds)
- * </pre>
*
- * <code>double latency_50 = 7;</code>
+ * Generated from protobuf field <code>double latency_50 = 7;</code>
+ * @return float
*/
public function getLatency50()
{
@@ -279,20 +284,23 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* X% latency percentiles (in nanoseconds)
- * </pre>
*
- * <code>double latency_50 = 7;</code>
+ * Generated from protobuf field <code>double latency_50 = 7;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency50($var)
{
GPBUtil::checkDouble($var);
$this->latency_50 = $var;
+
+ return $this;
}
/**
- * <code>double latency_90 = 8;</code>
+ * Generated from protobuf field <code>double latency_90 = 8;</code>
+ * @return float
*/
public function getLatency90()
{
@@ -300,16 +308,21 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency_90 = 8;</code>
+ * Generated from protobuf field <code>double latency_90 = 8;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency90($var)
{
GPBUtil::checkDouble($var);
$this->latency_90 = $var;
+
+ return $this;
}
/**
- * <code>double latency_95 = 9;</code>
+ * Generated from protobuf field <code>double latency_95 = 9;</code>
+ * @return float
*/
public function getLatency95()
{
@@ -317,16 +330,21 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency_95 = 9;</code>
+ * Generated from protobuf field <code>double latency_95 = 9;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency95($var)
{
GPBUtil::checkDouble($var);
$this->latency_95 = $var;
+
+ return $this;
}
/**
- * <code>double latency_99 = 10;</code>
+ * Generated from protobuf field <code>double latency_99 = 10;</code>
+ * @return float
*/
public function getLatency99()
{
@@ -334,16 +352,21 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency_99 = 10;</code>
+ * Generated from protobuf field <code>double latency_99 = 10;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency99($var)
{
GPBUtil::checkDouble($var);
$this->latency_99 = $var;
+
+ return $this;
}
/**
- * <code>double latency_999 = 11;</code>
+ * Generated from protobuf field <code>double latency_999 = 11;</code>
+ * @return float
*/
public function getLatency999()
{
@@ -351,20 +374,23 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double latency_999 = 11;</code>
+ * Generated from protobuf field <code>double latency_999 = 11;</code>
+ * @param float $var
+ * @return $this
*/
public function setLatency999($var)
{
GPBUtil::checkDouble($var);
$this->latency_999 = $var;
+
+ return $this;
}
/**
- * <pre>
* server cpu usage percentage
- * </pre>
*
- * <code>double server_cpu_usage = 12;</code>
+ * Generated from protobuf field <code>double server_cpu_usage = 12;</code>
+ * @return float
*/
public function getServerCpuUsage()
{
@@ -372,24 +398,25 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* server cpu usage percentage
- * </pre>
*
- * <code>double server_cpu_usage = 12;</code>
+ * Generated from protobuf field <code>double server_cpu_usage = 12;</code>
+ * @param float $var
+ * @return $this
*/
public function setServerCpuUsage($var)
{
GPBUtil::checkDouble($var);
$this->server_cpu_usage = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of requests that succeeded/failed
- * </pre>
*
- * <code>double successful_requests_per_second = 13;</code>
+ * Generated from protobuf field <code>double successful_requests_per_second = 13;</code>
+ * @return float
*/
public function getSuccessfulRequestsPerSecond()
{
@@ -397,20 +424,23 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of requests that succeeded/failed
- * </pre>
*
- * <code>double successful_requests_per_second = 13;</code>
+ * Generated from protobuf field <code>double successful_requests_per_second = 13;</code>
+ * @param float $var
+ * @return $this
*/
public function setSuccessfulRequestsPerSecond($var)
{
GPBUtil::checkDouble($var);
$this->successful_requests_per_second = $var;
+
+ return $this;
}
/**
- * <code>double failed_requests_per_second = 14;</code>
+ * Generated from protobuf field <code>double failed_requests_per_second = 14;</code>
+ * @return float
*/
public function getFailedRequestsPerSecond()
{
@@ -418,12 +448,112 @@ class ScenarioResultSummary extends \Google\Protobuf\Internal\Message
}
/**
- * <code>double failed_requests_per_second = 14;</code>
+ * Generated from protobuf field <code>double failed_requests_per_second = 14;</code>
+ * @param float $var
+ * @return $this
*/
public function setFailedRequestsPerSecond($var)
{
GPBUtil::checkDouble($var);
$this->failed_requests_per_second = $var;
+
+ return $this;
+ }
+
+ /**
+ * Number of polls called inside completion queue per request
+ *
+ * Generated from protobuf field <code>double client_polls_per_request = 15;</code>
+ * @return float
+ */
+ public function getClientPollsPerRequest()
+ {
+ return $this->client_polls_per_request;
+ }
+
+ /**
+ * Number of polls called inside completion queue per request
+ *
+ * Generated from protobuf field <code>double client_polls_per_request = 15;</code>
+ * @param float $var
+ * @return $this
+ */
+ public function setClientPollsPerRequest($var)
+ {
+ GPBUtil::checkDouble($var);
+ $this->client_polls_per_request = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>double server_polls_per_request = 16;</code>
+ * @return float
+ */
+ public function getServerPollsPerRequest()
+ {
+ return $this->server_polls_per_request;
+ }
+
+ /**
+ * Generated from protobuf field <code>double server_polls_per_request = 16;</code>
+ * @param float $var
+ * @return $this
+ */
+ public function setServerPollsPerRequest($var)
+ {
+ GPBUtil::checkDouble($var);
+ $this->server_polls_per_request = $var;
+
+ return $this;
+ }
+
+ /**
+ * Queries per CPU-sec over all servers or clients
+ *
+ * Generated from protobuf field <code>double server_queries_per_cpu_sec = 17;</code>
+ * @return float
+ */
+ public function getServerQueriesPerCpuSec()
+ {
+ return $this->server_queries_per_cpu_sec;
+ }
+
+ /**
+ * Queries per CPU-sec over all servers or clients
+ *
+ * Generated from protobuf field <code>double server_queries_per_cpu_sec = 17;</code>
+ * @param float $var
+ * @return $this
+ */
+ public function setServerQueriesPerCpuSec($var)
+ {
+ GPBUtil::checkDouble($var);
+ $this->server_queries_per_cpu_sec = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>double client_queries_per_cpu_sec = 18;</code>
+ * @return float
+ */
+ public function getClientQueriesPerCpuSec()
+ {
+ return $this->client_queries_per_cpu_sec;
+ }
+
+ /**
+ * Generated from protobuf field <code>double client_queries_per_cpu_sec = 18;</code>
+ * @param float $var
+ * @return $this
+ */
+ public function setClientQueriesPerCpuSec($var)
+ {
+ GPBUtil::checkDouble($var);
+ $this->client_queries_per_cpu_sec = $var;
+
+ return $this;
}
}
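
Note (not part of the generated diff): a minimal usage sketch of the setter change visible above — setters now validate, assign, and return $this instead of returning void and taking their argument by reference, so calls can be chained. The numeric values below are purely illustrative.

    // Assumes the generated Grpc\Testing classes are autoloaded (e.g. via Composer).
    $summary = new \Grpc\Testing\ScenarioResultSummary();
    $summary->setQps(12500.0)
            ->setLatency50(850000.0)              // latency percentile, in nanoseconds
            ->setClientPollsPerRequest(1.3)       // new field 15 added in this diff
            ->setServerQueriesPerCpuSec(4200.0);  // new field 17 added in this diff
    // Each setter calls GPBUtil::checkDouble($var) before assigning and returns $this.
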
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/Scenarios.php b/src/php/tests/qps/generated_code/Grpc/Testing/Scenarios.php
index 278f555b76..2146b4776e 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/Scenarios.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/Scenarios.php
@@ -9,16 +9,14 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* A set of scenarios to be run with qps_json_driver
- * </pre>
*
- * Protobuf type <code>grpc.testing.Scenarios</code>
+ * Generated from protobuf message <code>grpc.testing.Scenarios</code>
*/
class Scenarios extends \Google\Protobuf\Internal\Message
{
/**
- * <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
*/
private $scenarios;
@@ -28,7 +26,8 @@ class Scenarios extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getScenarios()
{
@@ -36,12 +35,16 @@ class Scenarios extends \Google\Protobuf\Internal\Message
}
/**
- * <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.Scenario scenarios = 1;</code>
+ * @param \Grpc\Testing\Scenario[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setScenarios(&$var)
+ public function setScenarios($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\Scenario::class);
- $this->scenarios = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\Scenario::class);
+ $this->scenarios = $arr;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/SecurityParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/SecurityParams.php
index 27a5b95cc9..8ce623a4bc 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/SecurityParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/SecurityParams.php
@@ -9,22 +9,24 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* presence of SecurityParams implies use of TLS
- * </pre>
*
- * Protobuf type <code>grpc.testing.SecurityParams</code>
+ * Generated from protobuf message <code>grpc.testing.SecurityParams</code>
*/
class SecurityParams extends \Google\Protobuf\Internal\Message
{
/**
- * <code>bool use_test_ca = 1;</code>
+ * Generated from protobuf field <code>bool use_test_ca = 1;</code>
*/
private $use_test_ca = false;
/**
- * <code>string server_host_override = 2;</code>
+ * Generated from protobuf field <code>string server_host_override = 2;</code>
*/
private $server_host_override = '';
+ /**
+ * Generated from protobuf field <code>string cred_type = 3;</code>
+ */
+ private $cred_type = '';
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
@@ -32,7 +34,8 @@ class SecurityParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>bool use_test_ca = 1;</code>
+ * Generated from protobuf field <code>bool use_test_ca = 1;</code>
+ * @return bool
*/
public function getUseTestCa()
{
@@ -40,16 +43,21 @@ class SecurityParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>bool use_test_ca = 1;</code>
+ * Generated from protobuf field <code>bool use_test_ca = 1;</code>
+ * @param bool $var
+ * @return $this
*/
public function setUseTestCa($var)
{
GPBUtil::checkBool($var);
$this->use_test_ca = $var;
+
+ return $this;
}
/**
- * <code>string server_host_override = 2;</code>
+ * Generated from protobuf field <code>string server_host_override = 2;</code>
+ * @return string
*/
public function getServerHostOverride()
{
@@ -57,12 +65,38 @@ class SecurityParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>string server_host_override = 2;</code>
+ * Generated from protobuf field <code>string server_host_override = 2;</code>
+ * @param string $var
+ * @return $this
*/
public function setServerHostOverride($var)
{
GPBUtil::checkString($var, True);
$this->server_host_override = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>string cred_type = 3;</code>
+ * @return string
+ */
+ public function getCredType()
+ {
+ return $this->cred_type;
+ }
+
+ /**
+ * Generated from protobuf field <code>string cred_type = 3;</code>
+ * @param string $var
+ * @return $this
+ */
+ public function setCredType($var)
+ {
+ GPBUtil::checkString($var, True);
+ $this->cred_type = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ServerArgs.php b/src/php/tests/qps/generated_code/Grpc/Testing/ServerArgs.php
index 0d84b80124..acf7e18b6d 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ServerArgs.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ServerArgs.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ServerArgs</code>
+ * Generated from protobuf message <code>grpc.testing.ServerArgs</code>
*/
class ServerArgs extends \Google\Protobuf\Internal\Message
{
@@ -21,7 +21,8 @@ class ServerArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerConfig setup = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerConfig setup = 1;</code>
+ * @return \Grpc\Testing\ServerConfig
*/
public function getSetup()
{
@@ -29,16 +30,21 @@ class ServerArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerConfig setup = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerConfig setup = 1;</code>
+ * @param \Grpc\Testing\ServerConfig $var
+ * @return $this
*/
- public function setSetup(&$var)
+ public function setSetup($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ServerConfig::class);
$this->writeOneof(1, $var);
+
+ return $this;
}
/**
- * <code>.grpc.testing.Mark mark = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.Mark mark = 2;</code>
+ * @return \Grpc\Testing\Mark
*/
public function getMark()
{
@@ -46,14 +52,21 @@ class ServerArgs extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.Mark mark = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.Mark mark = 2;</code>
+ * @param \Grpc\Testing\Mark $var
+ * @return $this
*/
- public function setMark(&$var)
+ public function setMark($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Mark::class);
$this->writeOneof(2, $var);
+
+ return $this;
}
+ /**
+ * @return string
+ */
public function getArgtype()
{
return $this->whichOneof("argtype");
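
Note (illustrative sketch, not generated output): the ServerArgs message keeps its oneof 'argtype'; message-typed setters now take their argument by value, and the newly documented getArgtype() reports which arm of the oneof is populated.

    $args = new \Grpc\Testing\ServerArgs();
    $args->setSetup(new \Grpc\Testing\ServerConfig());  // selects the 'setup' arm via writeOneof(1, ...)
    echo $args->getArgtype();                            // expected to report the populated arm, e.g. "setup"
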
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ServerConfig.php b/src/php/tests/qps/generated_code/Grpc/Testing/ServerConfig.php
index e2bcede48c..8bd4c69566 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ServerConfig.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ServerConfig.php
@@ -9,77 +9,73 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ServerConfig</code>
+ * Generated from protobuf message <code>grpc.testing.ServerConfig</code>
*/
class ServerConfig extends \Google\Protobuf\Internal\Message
{
/**
- * <code>.grpc.testing.ServerType server_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerType server_type = 1;</code>
*/
private $server_type = 0;
/**
- * <code>.grpc.testing.SecurityParams security_params = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 2;</code>
*/
private $security_params = null;
/**
- * <pre>
* Port on which to listen. Zero means pick unused port.
- * </pre>
*
- * <code>int32 port = 4;</code>
+ * Generated from protobuf field <code>int32 port = 4;</code>
*/
private $port = 0;
/**
- * <pre>
* Only for async server. Number of threads used to serve the requests.
- * </pre>
*
- * <code>int32 async_server_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_server_threads = 7;</code>
*/
private $async_server_threads = 0;
/**
- * <pre>
* Specify the number of cores to limit server to, if desired
- * </pre>
*
- * <code>int32 core_limit = 8;</code>
+ * Generated from protobuf field <code>int32 core_limit = 8;</code>
*/
private $core_limit = 0;
/**
- * <pre>
* payload config, used in generic server.
* Note this must NOT be used in proto (non-generic) servers. For proto servers,
* 'response sizes' must be configured from the 'response_size' field of the
* 'SimpleRequest' objects in RPC requests.
- * </pre>
*
- * <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
*/
private $payload_config = null;
/**
- * <pre>
* Specify the cores we should run the server on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 10;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 10;</code>
*/
private $core_list;
/**
- * <pre>
* If we use an OTHER_SERVER client_type, this string gives more detail
- * </pre>
*
- * <code>string other_server_api = 11;</code>
+ * Generated from protobuf field <code>string other_server_api = 11;</code>
*/
private $other_server_api = '';
/**
- * <pre>
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 12;</code>
+ */
+ private $threads_per_cq = 0;
+ /**
* Buffer pool size (no buffer pool specified if unset)
- * </pre>
*
- * <code>int32 resource_quota_size = 1001;</code>
+ * Generated from protobuf field <code>int32 resource_quota_size = 1001;</code>
*/
private $resource_quota_size = 0;
+ /**
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 1002;</code>
+ */
+ private $channel_args;
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Control::initOnce();
@@ -87,7 +83,8 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerType server_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerType server_type = 1;</code>
+ * @return int
*/
public function getServerType()
{
@@ -95,16 +92,21 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerType server_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerType server_type = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setServerType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\ServerType::class);
$this->server_type = $var;
+
+ return $this;
}
/**
- * <code>.grpc.testing.SecurityParams security_params = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 2;</code>
+ * @return \Grpc\Testing\SecurityParams
*/
public function getSecurityParams()
{
@@ -112,20 +114,23 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.SecurityParams security_params = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.SecurityParams security_params = 2;</code>
+ * @param \Grpc\Testing\SecurityParams $var
+ * @return $this
*/
- public function setSecurityParams(&$var)
+ public function setSecurityParams($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\SecurityParams::class);
$this->security_params = $var;
+
+ return $this;
}
/**
- * <pre>
* Port on which to listen. Zero means pick unused port.
- * </pre>
*
- * <code>int32 port = 4;</code>
+ * Generated from protobuf field <code>int32 port = 4;</code>
+ * @return int
*/
public function getPort()
{
@@ -133,24 +138,25 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Port on which to listen. Zero means pick unused port.
- * </pre>
*
- * <code>int32 port = 4;</code>
+ * Generated from protobuf field <code>int32 port = 4;</code>
+ * @param int $var
+ * @return $this
*/
public function setPort($var)
{
GPBUtil::checkInt32($var);
$this->port = $var;
+
+ return $this;
}
/**
- * <pre>
* Only for async server. Number of threads used to serve the requests.
- * </pre>
*
- * <code>int32 async_server_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_server_threads = 7;</code>
+ * @return int
*/
public function getAsyncServerThreads()
{
@@ -158,24 +164,25 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Only for async server. Number of threads used to serve the requests.
- * </pre>
*
- * <code>int32 async_server_threads = 7;</code>
+ * Generated from protobuf field <code>int32 async_server_threads = 7;</code>
+ * @param int $var
+ * @return $this
*/
public function setAsyncServerThreads($var)
{
GPBUtil::checkInt32($var);
$this->async_server_threads = $var;
+
+ return $this;
}
/**
- * <pre>
* Specify the number of cores to limit server to, if desired
- * </pre>
*
- * <code>int32 core_limit = 8;</code>
+ * Generated from protobuf field <code>int32 core_limit = 8;</code>
+ * @return int
*/
public function getCoreLimit()
{
@@ -183,27 +190,28 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Specify the number of cores to limit server to, if desired
- * </pre>
*
- * <code>int32 core_limit = 8;</code>
+ * Generated from protobuf field <code>int32 core_limit = 8;</code>
+ * @param int $var
+ * @return $this
*/
public function setCoreLimit($var)
{
GPBUtil::checkInt32($var);
$this->core_limit = $var;
+
+ return $this;
}
/**
- * <pre>
* payload config, used in generic server.
* Note this must NOT be used in proto (non-generic) servers. For proto servers,
* 'response sizes' must be configured from the 'response_size' field of the
* 'SimpleRequest' objects in RPC requests.
- * </pre>
*
- * <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
+ * @return \Grpc\Testing\PayloadConfig
*/
public function getPayloadConfig()
{
@@ -211,27 +219,28 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* payload config, used in generic server.
* Note this must NOT be used in proto (non-generic) servers. For proto servers,
* 'response sizes' must be configured from the 'response_size' field of the
* 'SimpleRequest' objects in RPC requests.
- * </pre>
*
- * <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadConfig payload_config = 9;</code>
+ * @param \Grpc\Testing\PayloadConfig $var
+ * @return $this
*/
- public function setPayloadConfig(&$var)
+ public function setPayloadConfig($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\PayloadConfig::class);
$this->payload_config = $var;
+
+ return $this;
}
/**
- * <pre>
* Specify the cores we should run the server on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 10;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 10;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getCoreList()
{
@@ -239,24 +248,25 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Specify the cores we should run the server on, if desired
- * </pre>
*
- * <code>repeated int32 core_list = 10;</code>
+ * Generated from protobuf field <code>repeated int32 core_list = 10;</code>
+ * @param int[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setCoreList(&$var)
+ public function setCoreList($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
- $this->core_list = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::INT32);
+ $this->core_list = $arr;
+
+ return $this;
}
/**
- * <pre>
* If we use an OTHER_SERVER client_type, this string gives more detail
- * </pre>
*
- * <code>string other_server_api = 11;</code>
+ * Generated from protobuf field <code>string other_server_api = 11;</code>
+ * @return string
*/
public function getOtherServerApi()
{
@@ -264,24 +274,51 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* If we use an OTHER_SERVER client_type, this string gives more detail
- * </pre>
*
- * <code>string other_server_api = 11;</code>
+ * Generated from protobuf field <code>string other_server_api = 11;</code>
+ * @param string $var
+ * @return $this
*/
public function setOtherServerApi($var)
{
GPBUtil::checkString($var, True);
$this->other_server_api = $var;
+
+ return $this;
+ }
+
+ /**
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 12;</code>
+ * @return int
+ */
+ public function getThreadsPerCq()
+ {
+ return $this->threads_per_cq;
+ }
+
+ /**
+ * Number of threads that share each completion queue
+ *
+ * Generated from protobuf field <code>int32 threads_per_cq = 12;</code>
+ * @param int $var
+ * @return $this
+ */
+ public function setThreadsPerCq($var)
+ {
+ GPBUtil::checkInt32($var);
+ $this->threads_per_cq = $var;
+
+ return $this;
}
/**
- * <pre>
* Buffer pool size (no buffer pool specified if unset)
- * </pre>
*
- * <code>int32 resource_quota_size = 1001;</code>
+ * Generated from protobuf field <code>int32 resource_quota_size = 1001;</code>
+ * @return int
*/
public function getResourceQuotaSize()
{
@@ -289,16 +326,40 @@ class ServerConfig extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Buffer pool size (no buffer pool specified if unset)
- * </pre>
*
- * <code>int32 resource_quota_size = 1001;</code>
+ * Generated from protobuf field <code>int32 resource_quota_size = 1001;</code>
+ * @param int $var
+ * @return $this
*/
public function setResourceQuotaSize($var)
{
GPBUtil::checkInt32($var);
$this->resource_quota_size = $var;
+
+ return $this;
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 1002;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
+ */
+ public function getChannelArgs()
+ {
+ return $this->channel_args;
+ }
+
+ /**
+ * Generated from protobuf field <code>repeated .grpc.testing.ChannelArg channel_args = 1002;</code>
+ * @param \Grpc\Testing\ChannelArg[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
+ */
+ public function setChannelArgs($var)
+ {
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ChannelArg::class);
+ $this->channel_args = $arr;
+
+ return $this;
}
}
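
Note (hedged sketch, not part of the diff): the ServerConfig surface shown above now accepts messages by value, wraps plain PHP arrays for repeated fields via GPBUtil::checkRepeatedField, and adds threads_per_cq. Host name and numeric values below are illustrative only.

    $security = new \Grpc\Testing\SecurityParams();
    $security->setUseTestCa(true)
             ->setServerHostOverride('foo.test.google.fr');  // illustrative override

    $config = new \Grpc\Testing\ServerConfig();
    $config->setServerType(\Grpc\Testing\ServerType::ASYNC_SERVER)
           ->setSecurityParams($security)   // passed by value now, no longer &$var
           ->setPort(0)                     // zero means pick an unused port
           ->setCoreList([0, 1, 2, 3])      // plain array is checked and stored as a RepeatedField
           ->setThreadsPerCq(2);            // new field 12 added in this diff
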
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ServerStats.php b/src/php/tests/qps/generated_code/Grpc/Testing/ServerStats.php
index 98b2af764c..aea2cb0fce 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ServerStats.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ServerStats.php
@@ -9,51 +9,53 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ServerStats</code>
+ * Generated from protobuf message <code>grpc.testing.ServerStats</code>
*/
class ServerStats extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* wall clock time change in seconds since last reset
- * </pre>
*
- * <code>double time_elapsed = 1;</code>
+ * Generated from protobuf field <code>double time_elapsed = 1;</code>
*/
private $time_elapsed = 0.0;
/**
- * <pre>
* change in user time (in seconds) used by the server since last reset
- * </pre>
*
- * <code>double time_user = 2;</code>
+ * Generated from protobuf field <code>double time_user = 2;</code>
*/
private $time_user = 0.0;
/**
- * <pre>
* change in server time (in seconds) used by the server process and all
* threads since last reset
- * </pre>
*
- * <code>double time_system = 3;</code>
+ * Generated from protobuf field <code>double time_system = 3;</code>
*/
private $time_system = 0.0;
/**
- * <pre>
* change in total cpu time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 total_cpu_time = 4;</code>
+ * Generated from protobuf field <code>uint64 total_cpu_time = 4;</code>
*/
private $total_cpu_time = 0;
/**
- * <pre>
* change in idle time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 idle_cpu_time = 5;</code>
+ * Generated from protobuf field <code>uint64 idle_cpu_time = 5;</code>
*/
private $idle_cpu_time = 0;
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ */
+ private $cq_poll_count = 0;
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ */
+ private $core_stats = null;
public function __construct() {
\GPBMetadata\Src\Proto\Grpc\Testing\Stats::initOnce();
@@ -61,11 +63,10 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* wall clock time change in seconds since last reset
- * </pre>
*
- * <code>double time_elapsed = 1;</code>
+ * Generated from protobuf field <code>double time_elapsed = 1;</code>
+ * @return float
*/
public function getTimeElapsed()
{
@@ -73,24 +74,25 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* wall clock time change in seconds since last reset
- * </pre>
*
- * <code>double time_elapsed = 1;</code>
+ * Generated from protobuf field <code>double time_elapsed = 1;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeElapsed($var)
{
GPBUtil::checkDouble($var);
$this->time_elapsed = $var;
+
+ return $this;
}
/**
- * <pre>
* change in user time (in seconds) used by the server since last reset
- * </pre>
*
- * <code>double time_user = 2;</code>
+ * Generated from protobuf field <code>double time_user = 2;</code>
+ * @return float
*/
public function getTimeUser()
{
@@ -98,25 +100,26 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* change in user time (in seconds) used by the server since last reset
- * </pre>
*
- * <code>double time_user = 2;</code>
+ * Generated from protobuf field <code>double time_user = 2;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeUser($var)
{
GPBUtil::checkDouble($var);
$this->time_user = $var;
+
+ return $this;
}
/**
- * <pre>
* change in server time (in seconds) used by the server process and all
* threads since last reset
- * </pre>
*
- * <code>double time_system = 3;</code>
+ * Generated from protobuf field <code>double time_system = 3;</code>
+ * @return float
*/
public function getTimeSystem()
{
@@ -124,25 +127,26 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* change in server time (in seconds) used by the server process and all
* threads since last reset
- * </pre>
*
- * <code>double time_system = 3;</code>
+ * Generated from protobuf field <code>double time_system = 3;</code>
+ * @param float $var
+ * @return $this
*/
public function setTimeSystem($var)
{
GPBUtil::checkDouble($var);
$this->time_system = $var;
+
+ return $this;
}
/**
- * <pre>
* change in total cpu time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 total_cpu_time = 4;</code>
+ * Generated from protobuf field <code>uint64 total_cpu_time = 4;</code>
+ * @return int|string
*/
public function getTotalCpuTime()
{
@@ -150,24 +154,25 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* change in total cpu time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 total_cpu_time = 4;</code>
+ * Generated from protobuf field <code>uint64 total_cpu_time = 4;</code>
+ * @param int|string $var
+ * @return $this
*/
public function setTotalCpuTime($var)
{
GPBUtil::checkUint64($var);
$this->total_cpu_time = $var;
+
+ return $this;
}
/**
- * <pre>
* change in idle time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 idle_cpu_time = 5;</code>
+ * Generated from protobuf field <code>uint64 idle_cpu_time = 5;</code>
+ * @return int|string
*/
public function getIdleCpuTime()
{
@@ -175,16 +180,70 @@ class ServerStats extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* change in idle time of the server (data from proc/stat)
- * </pre>
*
- * <code>uint64 idle_cpu_time = 5;</code>
+ * Generated from protobuf field <code>uint64 idle_cpu_time = 5;</code>
+ * @param int|string $var
+ * @return $this
*/
public function setIdleCpuTime($var)
{
GPBUtil::checkUint64($var);
$this->idle_cpu_time = $var;
+
+ return $this;
+ }
+
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ * @return int|string
+ */
+ public function getCqPollCount()
+ {
+ return $this->cq_poll_count;
+ }
+
+ /**
+ * Number of polls called inside completion queue
+ *
+ * Generated from protobuf field <code>uint64 cq_poll_count = 6;</code>
+ * @param int|string $var
+ * @return $this
+ */
+ public function setCqPollCount($var)
+ {
+ GPBUtil::checkUint64($var);
+ $this->cq_poll_count = $var;
+
+ return $this;
+ }
+
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ * @return \Grpc\Core\Stats
+ */
+ public function getCoreStats()
+ {
+ return $this->core_stats;
+ }
+
+ /**
+ * Core library stats
+ *
+ * Generated from protobuf field <code>.grpc.core.Stats core_stats = 7;</code>
+ * @param \Grpc\Core\Stats $var
+ * @return $this
+ */
+ public function setCoreStats($var)
+ {
+ GPBUtil::checkMessage($var, \Grpc\Core\Stats::class);
+ $this->core_stats = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ServerStatus.php b/src/php/tests/qps/generated_code/Grpc/Testing/ServerStatus.php
index d293f03fbd..04f2ca7c4a 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ServerStatus.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ServerStatus.php
@@ -9,28 +9,24 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.ServerStatus</code>
+ * Generated from protobuf message <code>grpc.testing.ServerStatus</code>
*/
class ServerStatus extends \Google\Protobuf\Internal\Message
{
/**
- * <code>.grpc.testing.ServerStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerStats stats = 1;</code>
*/
private $stats = null;
/**
- * <pre>
* the port bound by the server
- * </pre>
*
- * <code>int32 port = 2;</code>
+ * Generated from protobuf field <code>int32 port = 2;</code>
*/
private $port = 0;
/**
- * <pre>
* Number of cores available to the server
- * </pre>
*
- * <code>int32 cores = 3;</code>
+ * Generated from protobuf field <code>int32 cores = 3;</code>
*/
private $cores = 0;
@@ -40,7 +36,8 @@ class ServerStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerStats stats = 1;</code>
+ * @return \Grpc\Testing\ServerStats
*/
public function getStats()
{
@@ -48,20 +45,23 @@ class ServerStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <code>.grpc.testing.ServerStats stats = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.ServerStats stats = 1;</code>
+ * @param \Grpc\Testing\ServerStats $var
+ * @return $this
*/
- public function setStats(&$var)
+ public function setStats($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\ServerStats::class);
$this->stats = $var;
+
+ return $this;
}
/**
- * <pre>
* the port bound by the server
- * </pre>
*
- * <code>int32 port = 2;</code>
+ * Generated from protobuf field <code>int32 port = 2;</code>
+ * @return int
*/
public function getPort()
{
@@ -69,24 +69,25 @@ class ServerStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* the port bound by the server
- * </pre>
*
- * <code>int32 port = 2;</code>
+ * Generated from protobuf field <code>int32 port = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setPort($var)
{
GPBUtil::checkInt32($var);
$this->port = $var;
+
+ return $this;
}
/**
- * <pre>
* Number of cores available to the server
- * </pre>
*
- * <code>int32 cores = 3;</code>
+ * Generated from protobuf field <code>int32 cores = 3;</code>
+ * @return int
*/
public function getCores()
{
@@ -94,16 +95,18 @@ class ServerStatus extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Number of cores available to the server
- * </pre>
*
- * <code>int32 cores = 3;</code>
+ * Generated from protobuf field <code>int32 cores = 3;</code>
+ * @param int $var
+ * @return $this
*/
public function setCores($var)
{
GPBUtil::checkInt32($var);
$this->cores = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/ServerType.php b/src/php/tests/qps/generated_code/Grpc/Testing/ServerType.php
index 605c83c3f7..4110e91c18 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/ServerType.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/ServerType.php
@@ -5,28 +5,26 @@
namespace Grpc\Testing;
/**
- * Protobuf enum <code>grpc.testing.ServerType</code>
+ * Protobuf enum <code>Grpc\Testing\ServerType</code>
*/
class ServerType
{
/**
- * <code>SYNC_SERVER = 0;</code>
+ * Generated from protobuf enum <code>SYNC_SERVER = 0;</code>
*/
const SYNC_SERVER = 0;
/**
- * <code>ASYNC_SERVER = 1;</code>
+ * Generated from protobuf enum <code>ASYNC_SERVER = 1;</code>
*/
const ASYNC_SERVER = 1;
/**
- * <code>ASYNC_GENERIC_SERVER = 2;</code>
+ * Generated from protobuf enum <code>ASYNC_GENERIC_SERVER = 2;</code>
*/
const ASYNC_GENERIC_SERVER = 2;
/**
- * <pre>
* used for some language-specific variants
- * </pre>
*
- * <code>OTHER_SERVER = 3;</code>
+ * Generated from protobuf enum <code>OTHER_SERVER = 3;</code>
*/
const OTHER_SERVER = 3;
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleProtoParams.php b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleProtoParams.php
index 29834a3be7..507db598f0 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleProtoParams.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleProtoParams.php
@@ -9,16 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.SimpleProtoParams</code>
+ * Generated from protobuf message <code>grpc.testing.SimpleProtoParams</code>
*/
class SimpleProtoParams extends \Google\Protobuf\Internal\Message
{
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
*/
private $req_size = 0;
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
*/
private $resp_size = 0;
@@ -28,7 +28,8 @@ class SimpleProtoParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
+ * @return int
*/
public function getReqSize()
{
@@ -36,16 +37,21 @@ class SimpleProtoParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 req_size = 1;</code>
+ * Generated from protobuf field <code>int32 req_size = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setReqSize($var)
{
GPBUtil::checkInt32($var);
$this->req_size = $var;
+
+ return $this;
}
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
+ * @return int
*/
public function getRespSize()
{
@@ -53,12 +59,16 @@ class SimpleProtoParams extends \Google\Protobuf\Internal\Message
}
/**
- * <code>int32 resp_size = 2;</code>
+ * Generated from protobuf field <code>int32 resp_size = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setRespSize($var)
{
GPBUtil::checkInt32($var);
$this->resp_size = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleRequest.php b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleRequest.php
index f84c95319f..e0c2d2d94c 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleRequest.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleRequest.php
@@ -9,81 +9,63 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Unary request.
- * </pre>
*
- * Protobuf type <code>grpc.testing.SimpleRequest</code>
+ * Generated from protobuf message <code>grpc.testing.SimpleRequest</code>
*/
class SimpleRequest extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, server randomly chooses one from other formats.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
*/
private $response_type = 0;
/**
- * <pre>
* Desired payload size in the response from the server.
- * </pre>
*
- * <code>int32 response_size = 2;</code>
+ * Generated from protobuf field <code>int32 response_size = 2;</code>
*/
private $response_size = 0;
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
*/
private $payload = null;
/**
- * <pre>
* Whether SimpleResponse should include username.
- * </pre>
*
- * <code>bool fill_username = 4;</code>
+ * Generated from protobuf field <code>bool fill_username = 4;</code>
*/
private $fill_username = false;
/**
- * <pre>
* Whether SimpleResponse should include OAuth scope.
- * </pre>
*
- * <code>bool fill_oauth_scope = 5;</code>
+ * Generated from protobuf field <code>bool fill_oauth_scope = 5;</code>
*/
private $fill_oauth_scope = false;
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue response_compressed = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue response_compressed = 6;</code>
*/
private $response_compressed = null;
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
*/
private $response_status = null;
/**
- * <pre>
* Whether the server should expect this request to be compressed.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
*/
private $expect_compressed = null;
@@ -93,13 +75,12 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, server randomly chooses one from other formats.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * @return int
*/
public function getResponseType()
{
@@ -107,26 +88,27 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, server randomly chooses one from other formats.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setResponseType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\PayloadType::class);
$this->response_type = $var;
+
+ return $this;
}
/**
- * <pre>
* Desired payload size in the response from the server.
- * </pre>
*
- * <code>int32 response_size = 2;</code>
+ * Generated from protobuf field <code>int32 response_size = 2;</code>
+ * @return int
*/
public function getResponseSize()
{
@@ -134,24 +116,25 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Desired payload size in the response from the server.
- * </pre>
*
- * <code>int32 response_size = 2;</code>
+ * Generated from protobuf field <code>int32 response_size = 2;</code>
+ * @param int $var
+ * @return $this
*/
public function setResponseSize($var)
{
GPBUtil::checkInt32($var);
$this->response_size = $var;
+
+ return $this;
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
+ * @return \Grpc\Testing\Payload
*/
public function getPayload()
{
@@ -159,24 +142,25 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
+ * @param \Grpc\Testing\Payload $var
+ * @return $this
*/
- public function setPayload(&$var)
+ public function setPayload($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Payload::class);
$this->payload = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether SimpleResponse should include username.
- * </pre>
*
- * <code>bool fill_username = 4;</code>
+ * Generated from protobuf field <code>bool fill_username = 4;</code>
+ * @return bool
*/
public function getFillUsername()
{
@@ -184,24 +168,25 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether SimpleResponse should include username.
- * </pre>
*
- * <code>bool fill_username = 4;</code>
+ * Generated from protobuf field <code>bool fill_username = 4;</code>
+ * @param bool $var
+ * @return $this
*/
public function setFillUsername($var)
{
GPBUtil::checkBool($var);
$this->fill_username = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether SimpleResponse should include OAuth scope.
- * </pre>
*
- * <code>bool fill_oauth_scope = 5;</code>
+ * Generated from protobuf field <code>bool fill_oauth_scope = 5;</code>
+ * @return bool
*/
public function getFillOauthScope()
{
@@ -209,27 +194,28 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether SimpleResponse should include OAuth scope.
- * </pre>
*
- * <code>bool fill_oauth_scope = 5;</code>
+ * Generated from protobuf field <code>bool fill_oauth_scope = 5;</code>
+ * @param bool $var
+ * @return $this
*/
public function setFillOauthScope($var)
{
GPBUtil::checkBool($var);
$this->fill_oauth_scope = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue response_compressed = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue response_compressed = 6;</code>
+ * @return \Grpc\Testing\BoolValue
*/
public function getResponseCompressed()
{
@@ -237,27 +223,28 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether to request the server to compress the response. This field is
* "nullable" in order to interoperate seamlessly with clients not able to
* implement the full compression tests by introspecting the call to verify
* the response's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue response_compressed = 6;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue response_compressed = 6;</code>
+ * @param \Grpc\Testing\BoolValue $var
+ * @return $this
*/
- public function setResponseCompressed(&$var)
+ public function setResponseCompressed($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\BoolValue::class);
$this->response_compressed = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * @return \Grpc\Testing\EchoStatus
*/
public function getResponseStatus()
{
@@ -265,24 +252,25 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * @param \Grpc\Testing\EchoStatus $var
+ * @return $this
*/
- public function setResponseStatus(&$var)
+ public function setResponseStatus($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\EchoStatus::class);
$this->response_status = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether the server should expect this request to be compressed.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
+ * @return \Grpc\Testing\BoolValue
*/
public function getExpectCompressed()
{
@@ -290,16 +278,18 @@ class SimpleRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether the server should expect this request to be compressed.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 8;</code>
+ * @param \Grpc\Testing\BoolValue $var
+ * @return $this
*/
- public function setExpectCompressed(&$var)
+ public function setExpectCompressed($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\BoolValue::class);
$this->expect_compressed = $var;
+
+ return $this;
}
}
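The regenerated setters above no longer take their argument by reference and now return $this, so field assignments can be chained. A minimal usage sketch, assuming the regenerated Grpc\Testing classes are on the autoload path (the class and field names come from the diff above; everything else is illustrative):

<?php
// Hypothetical chained configuration of a SimpleRequest; chaining works only
// because each regenerated setter returns $this.
$request = new \Grpc\Testing\SimpleRequest();
$request->setFillOauthScope(true)
        ->setResponseCompressed(new \Grpc\Testing\BoolValue())
        ->setResponseStatus(new \Grpc\Testing\EchoStatus());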
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleResponse.php b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleResponse.php
index ccc628ec4c..d49f33746e 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/SimpleResponse.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/SimpleResponse.php
@@ -9,37 +9,29 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Unary response, as configured by the request.
- * </pre>
*
- * Protobuf type <code>grpc.testing.SimpleResponse</code>
+ * Generated from protobuf message <code>grpc.testing.SimpleResponse</code>
*/
class SimpleResponse extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Payload to increase message size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
*/
private $payload = null;
/**
- * <pre>
* The user the request came from, for verifying authentication was
* successful when the client expected it.
- * </pre>
*
- * <code>string username = 2;</code>
+ * Generated from protobuf field <code>string username = 2;</code>
*/
private $username = '';
/**
- * <pre>
* OAuth scope.
- * </pre>
*
- * <code>string oauth_scope = 3;</code>
+ * Generated from protobuf field <code>string oauth_scope = 3;</code>
*/
private $oauth_scope = '';
@@ -49,11 +41,10 @@ class SimpleResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Payload to increase message size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @return \Grpc\Testing\Payload
*/
public function getPayload()
{
@@ -61,25 +52,26 @@ class SimpleResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Payload to increase message size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @param \Grpc\Testing\Payload $var
+ * @return $this
*/
- public function setPayload(&$var)
+ public function setPayload($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Payload::class);
$this->payload = $var;
+
+ return $this;
}
/**
- * <pre>
* The user the request came from, for verifying authentication was
* successful when the client expected it.
- * </pre>
*
- * <code>string username = 2;</code>
+ * Generated from protobuf field <code>string username = 2;</code>
+ * @return string
*/
public function getUsername()
{
@@ -87,25 +79,26 @@ class SimpleResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* The user the request came from, for verifying authentication was
* successful when the client expected it.
- * </pre>
*
- * <code>string username = 2;</code>
+ * Generated from protobuf field <code>string username = 2;</code>
+ * @param string $var
+ * @return $this
*/
public function setUsername($var)
{
GPBUtil::checkString($var, True);
$this->username = $var;
+
+ return $this;
}
/**
- * <pre>
* OAuth scope.
- * </pre>
*
- * <code>string oauth_scope = 3;</code>
+ * Generated from protobuf field <code>string oauth_scope = 3;</code>
+ * @return string
*/
public function getOauthScope()
{
@@ -113,16 +106,18 @@ class SimpleResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* OAuth scope.
- * </pre>
*
- * <code>string oauth_scope = 3;</code>
+ * Generated from protobuf field <code>string oauth_scope = 3;</code>
+ * @param string $var
+ * @return $this
*/
public function setOauthScope($var)
{
GPBUtil::checkString($var, True);
$this->oauth_scope = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallRequest.php b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallRequest.php
index d7bbc70779..a7460af83a 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallRequest.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallRequest.php
@@ -9,31 +9,25 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Client-streaming request.
- * </pre>
*
- * Protobuf type <code>grpc.testing.StreamingInputCallRequest</code>
+ * Generated from protobuf message <code>grpc.testing.StreamingInputCallRequest</code>
*/
class StreamingInputCallRequest extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
*/
private $payload = null;
/**
- * <pre>
* Whether the server should expect this request to be compressed. This field
* is "nullable" in order to interoperate seamlessly with servers not able to
* implement the full compression tests by introspecting the call to verify
* the request's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
*/
private $expect_compressed = null;
@@ -43,11 +37,10 @@ class StreamingInputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @return \Grpc\Testing\Payload
*/
public function getPayload()
{
@@ -55,27 +48,28 @@ class StreamingInputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @param \Grpc\Testing\Payload $var
+ * @return $this
*/
- public function setPayload(&$var)
+ public function setPayload($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Payload::class);
$this->payload = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether the server should expect this request to be compressed. This field
* is "nullable" in order to interoperate seamlessly with servers not able to
* implement the full compression tests by introspecting the call to verify
* the request's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
+ * @return \Grpc\Testing\BoolValue
*/
public function getExpectCompressed()
{
@@ -83,19 +77,21 @@ class StreamingInputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether the server should expect this request to be compressed. This field
* is "nullable" in order to interoperate seamlessly with servers not able to
* implement the full compression tests by introspecting the call to verify
* the request's compression status.
- * </pre>
*
- * <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
+ * Generated from protobuf field <code>.grpc.testing.BoolValue expect_compressed = 2;</code>
+ * @param \Grpc\Testing\BoolValue $var
+ * @return $this
*/
- public function setExpectCompressed(&$var)
+ public function setExpectCompressed($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\BoolValue::class);
$this->expect_compressed = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallResponse.php b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallResponse.php
index fdd1d0dbf8..41f3893aa3 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallResponse.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingInputCallResponse.php
@@ -9,20 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Client-streaming response.
- * </pre>
*
- * Protobuf type <code>grpc.testing.StreamingInputCallResponse</code>
+ * Generated from protobuf message <code>grpc.testing.StreamingInputCallResponse</code>
*/
class StreamingInputCallResponse extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Aggregated size of payloads received from the client.
- * </pre>
*
- * <code>int32 aggregated_payload_size = 1;</code>
+ * Generated from protobuf field <code>int32 aggregated_payload_size = 1;</code>
*/
private $aggregated_payload_size = 0;
@@ -32,11 +28,10 @@ class StreamingInputCallResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Aggregated size of payloads received from the client.
- * </pre>
*
- * <code>int32 aggregated_payload_size = 1;</code>
+ * Generated from protobuf field <code>int32 aggregated_payload_size = 1;</code>
+ * @return int
*/
public function getAggregatedPayloadSize()
{
@@ -44,16 +39,18 @@ class StreamingInputCallResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Aggregated size of payloads received from the client.
- * </pre>
*
- * <code>int32 aggregated_payload_size = 1;</code>
+ * Generated from protobuf field <code>int32 aggregated_payload_size = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setAggregatedPayloadSize($var)
{
GPBUtil::checkInt32($var);
$this->aggregated_payload_size = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallRequest.php b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallRequest.php
index 2aab5fadad..69d9cecffa 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallRequest.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallRequest.php
@@ -9,48 +9,38 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Server-streaming request.
- * </pre>
*
- * Protobuf type <code>grpc.testing.StreamingOutputCallRequest</code>
+ * Generated from protobuf message <code>grpc.testing.StreamingOutputCallRequest</code>
*/
class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, the payload from each response in the stream
* might be of different types. This is to simulate a mixed type of payload
* stream.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
*/
private $response_type = 0;
/**
- * <pre>
* Configuration for each expected response message.
- * </pre>
*
- * <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
*/
private $response_parameters;
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
*/
private $payload = null;
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
*/
private $response_status = null;
@@ -60,15 +50,14 @@ class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, the payload from each response in the stream
* might be of different types. This is to simulate a mixed type of payload
* stream.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * @return int
*/
public function getResponseType()
{
@@ -76,28 +65,29 @@ class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* DEPRECATED, don't use. To be removed shortly.
* Desired payload type in the response from the server.
* If response_type is RANDOM, the payload from each response in the stream
* might be of different types. This is to simulate a mixed type of payload
* stream.
- * </pre>
*
- * <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.PayloadType response_type = 1;</code>
+ * @param int $var
+ * @return $this
*/
public function setResponseType($var)
{
GPBUtil::checkEnum($var, \Grpc\Testing\PayloadType::class);
$this->response_type = $var;
+
+ return $this;
}
/**
- * <pre>
* Configuration for each expected response message.
- * </pre>
*
- * <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
+ * @return \Google\Protobuf\Internal\RepeatedField
*/
public function getResponseParameters()
{
@@ -105,24 +95,25 @@ class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Configuration for each expected response message.
- * </pre>
*
- * <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
+ * Generated from protobuf field <code>repeated .grpc.testing.ResponseParameters response_parameters = 2;</code>
+ * @param \Grpc\Testing\ResponseParameters[]|\Google\Protobuf\Internal\RepeatedField $var
+ * @return $this
*/
- public function setResponseParameters(&$var)
+ public function setResponseParameters($var)
{
- GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ResponseParameters::class);
- $this->response_parameters = $var;
+ $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Grpc\Testing\ResponseParameters::class);
+ $this->response_parameters = $arr;
+
+ return $this;
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
+ * @return \Grpc\Testing\Payload
*/
public function getPayload()
{
@@ -130,24 +121,25 @@ class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Optional input payload sent along with the request.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 3;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 3;</code>
+ * @param \Grpc\Testing\Payload $var
+ * @return $this
*/
- public function setPayload(&$var)
+ public function setPayload($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Payload::class);
$this->payload = $var;
+
+ return $this;
}
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * @return \Grpc\Testing\EchoStatus
*/
public function getResponseStatus()
{
@@ -155,16 +147,18 @@ class StreamingOutputCallRequest extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Whether server should return a given status
- * </pre>
*
- * <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * Generated from protobuf field <code>.grpc.testing.EchoStatus response_status = 7;</code>
+ * @param \Grpc\Testing\EchoStatus $var
+ * @return $this
*/
- public function setResponseStatus(&$var)
+ public function setResponseStatus($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\EchoStatus::class);
$this->response_status = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallResponse.php b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallResponse.php
index c06c78c9d8..52315bb499 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallResponse.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/StreamingOutputCallResponse.php
@@ -9,20 +9,16 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * <pre>
* Server-streaming response, as configured by the request and parameters.
- * </pre>
*
- * Protobuf type <code>grpc.testing.StreamingOutputCallResponse</code>
+ * Generated from protobuf message <code>grpc.testing.StreamingOutputCallResponse</code>
*/
class StreamingOutputCallResponse extends \Google\Protobuf\Internal\Message
{
/**
- * <pre>
* Payload to increase response size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
*/
private $payload = null;
@@ -32,11 +28,10 @@ class StreamingOutputCallResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Payload to increase response size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @return \Grpc\Testing\Payload
*/
public function getPayload()
{
@@ -44,16 +39,18 @@ class StreamingOutputCallResponse extends \Google\Protobuf\Internal\Message
}
/**
- * <pre>
* Payload to increase response size.
- * </pre>
*
- * <code>.grpc.testing.Payload payload = 1;</code>
+ * Generated from protobuf field <code>.grpc.testing.Payload payload = 1;</code>
+ * @param \Grpc\Testing\Payload $var
+ * @return $this
*/
- public function setPayload(&$var)
+ public function setPayload($var)
{
GPBUtil::checkMessage($var, \Grpc\Testing\Payload::class);
$this->payload = $var;
+
+ return $this;
}
}
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/Void.php b/src/php/tests/qps/generated_code/Grpc/Testing/Void.php
index 38c100845a..623021d99b 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/Void.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/Void.php
@@ -9,7 +9,7 @@ use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
- * Protobuf type <code>grpc.testing.Void</code>
+ * Generated from protobuf message <code>grpc.testing.Void</code>
*/
class Void extends \Google\Protobuf\Internal\Message
{
diff --git a/src/php/tests/qps/generated_code/Grpc/Testing/WorkerServiceClient.php b/src/php/tests/qps/generated_code/Grpc/Testing/WorkerServiceClient.php
index 959d839c80..98c244ff9d 100644
--- a/src/php/tests/qps/generated_code/Grpc/Testing/WorkerServiceClient.php
+++ b/src/php/tests/qps/generated_code/Grpc/Testing/WorkerServiceClient.php
@@ -18,17 +18,19 @@
//
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
-namespace Grpc\Testing {
+namespace Grpc\Testing;
- class WorkerServiceClient extends \Grpc\BaseStub {
+/**
+ */
+class WorkerServiceClient extends \Grpc\BaseStub {
/**
* @param string $hostname hostname
* @param array $opts channel options
- * @param Grpc\Channel $channel (optional) re-use channel object
+ * @param \Grpc\Channel $channel (optional) re-use channel object
*/
public function __construct($hostname, $opts, $channel = null) {
- parent::__construct($hostname, $opts, $channel);
+ parent::__construct($hostname, $opts, $channel);
}
/**
@@ -42,9 +44,9 @@ namespace Grpc\Testing {
* @param array $options call options
*/
public function RunServer($metadata = [], $options = []) {
- return $this->_bidiRequest('/grpc.testing.WorkerService/RunServer',
- ['\Grpc\Testing\ServerStatus','decode'],
- $metadata, $options);
+ return $this->_bidiRequest('/grpc.testing.WorkerService/RunServer',
+ ['\Grpc\Testing\ServerStatus','decode'],
+ $metadata, $options);
}
/**
@@ -58,9 +60,9 @@ namespace Grpc\Testing {
* @param array $options call options
*/
public function RunClient($metadata = [], $options = []) {
- return $this->_bidiRequest('/grpc.testing.WorkerService/RunClient',
- ['\Grpc\Testing\ClientStatus','decode'],
- $metadata, $options);
+ return $this->_bidiRequest('/grpc.testing.WorkerService/RunClient',
+ ['\Grpc\Testing\ClientStatus','decode'],
+ $metadata, $options);
}
/**
@@ -71,10 +73,10 @@ namespace Grpc\Testing {
*/
public function CoreCount(\Grpc\Testing\CoreRequest $argument,
$metadata = [], $options = []) {
- return $this->_simpleRequest('/grpc.testing.WorkerService/CoreCount',
- $argument,
- ['\Grpc\Testing\CoreResponse', 'decode'],
- $metadata, $options);
+ return $this->_simpleRequest('/grpc.testing.WorkerService/CoreCount',
+ $argument,
+ ['\Grpc\Testing\CoreResponse', 'decode'],
+ $metadata, $options);
}
/**
@@ -85,12 +87,10 @@ namespace Grpc\Testing {
*/
public function QuitWorker(\Grpc\Testing\Void $argument,
$metadata = [], $options = []) {
- return $this->_simpleRequest('/grpc.testing.WorkerService/QuitWorker',
- $argument,
- ['\Grpc\Testing\Void', 'decode'],
- $metadata, $options);
+ return $this->_simpleRequest('/grpc.testing.WorkerService/QuitWorker',
+ $argument,
+ ['\Grpc\Testing\Void', 'decode'],
+ $metadata, $options);
}
- }
-
}
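With the stub now declared under a file-level `namespace Grpc\Testing;` instead of a namespace block, it is constructed the same way as before. A minimal sketch, assuming the grpc PHP extension is installed; the target address and the insecure channel credentials are illustrative only:

<?php
// Hypothetical construction of the regenerated worker stub. The constructor
// signature ($hostname, $opts, $channel = null) matches the diff above.
$client = new \Grpc\Testing\WorkerServiceClient(
    'localhost:50051',
    ['credentials' => \Grpc\ChannelCredentials::createInsecure()]);
// CoreCount is a unary call; wait() yields the response and the call status.
list($response, $status) = $client->CoreCount(new \Grpc\Testing\CoreRequest())->wait();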
diff --git a/src/php/tests/qps/histogram.php b/src/php/tests/qps/histogram.php
new file mode 100644
index 0000000000..c11a67c618
--- /dev/null
+++ b/src/php/tests/qps/histogram.php
@@ -0,0 +1,93 @@
+<?php
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Histogram class for use in performance testing and measurement
+class Histogram {
+ private $resolution;
+ private $max_possible;
+ private $sum;
+ private $sum_of_squares;
+ private $multiplier;
+ private $count;
+ private $min_seen;
+ private $max_seen;
+ private $buckets;
+
+ private function bucket_for($value) {
+ return (int)(log($value) / log($this->multiplier));
+ }
+
+ public function __construct($resolution, $max_possible) {
+ $this->resolution = $resolution;
+ $this->max_possible = $max_possible;
+ $this->sum = 0;
+ $this->sum_of_squares = 0;
+ $this->multiplier = 1+$resolution;
+ $this->count = 0;
+ $this->min_seen = $max_possible;
+ $this->max_seen = 0;
+ $this->buckets = array_fill(0, $this->bucket_for($max_possible)+1, 0);
+ }
+
+ public function add($value) {
+ $this->sum += $value;
+ $this->sum_of_squares += $value * $value;
+ $this->count += 1;
+ if ($value < $this->min_seen) {
+ $this->min_seen = $value;
+ }
+ if ($value > $this->max_seen) {
+ $this->max_seen = $value;
+ }
+ $this->buckets[$this->bucket_for($value)] += 1;
+ }
+
+ public function minimum() {
+ return $this->min_seen;
+ }
+
+ public function maximum() {
+ return $this->max_seen;
+ }
+
+ public function sum() {
+ return $this->sum;
+ }
+
+ public function sum_of_squares() {
+ return $this->sum_of_squares;
+ }
+
+ public function count() {
+ return $this->count;
+ }
+
+ public function contents() {
+ return $this->buckets;
+ }
+
+ public function clean() {
+ $this->sum = 0;
+ $this->sum_of_squares = 0;
+ $this->count = 0;
+ $this->min_seen = $this->max_possible;
+ $this->max_seen = 0;
+ $this->buckets = array_fill(0, $this->bucket_for($this->max_possible)+1, 0);
+ }
+}
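The new Histogram class added above buckets samples on a logarithmic scale with base (1 + $resolution). A minimal usage sketch with illustrative values (1% resolution, a maximum of 60 seconds expressed in nanoseconds):

<?php
// Hypothetical usage of the Histogram class from this diff.
require_once 'histogram.php';

$hist = new Histogram(0.01, 60e9);
foreach ([120.0, 2500.0, 98000.0] as $latency_ns) {
    $hist->add($latency_ns);   // values must be positive: bucket_for() takes log($value)
}
echo $hist->count() . "\n";    // 3
echo $hist->maximum() . "\n";  // 98000
$hist->clean();                // reset all counters between reporting intervals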
diff --git a/src/python/grpcio_tests/tests/unit/_sanity/__init__.py b/src/proto/grpc/core/BUILD
index 5772620b60..46de9fae18 100644
--- a/src/python/grpcio_tests/tests/unit/_sanity/__init__.py
+++ b/src/proto/grpc/core/BUILD
@@ -1,4 +1,4 @@
-# Copyright 2016 gRPC authors.
+# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,3 +11,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+licenses(["notice"]) # Apache v2
+
+load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package")
+
+grpc_package(name = "core", visibility = "public")
+
+grpc_proto_library(
+ name = "stats_proto",
+ srcs = ["stats.proto"],
+)
diff --git a/src/proto/grpc/core/stats.proto b/src/proto/grpc/core/stats.proto
new file mode 100644
index 0000000000..ac181b0439
--- /dev/null
+++ b/src/proto/grpc/core/stats.proto
@@ -0,0 +1,38 @@
+// Copyright 2017 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.core;
+
+message Bucket {
+ double start = 1;
+ uint64 count = 2;
+}
+
+message Histogram {
+ repeated Bucket buckets = 1;
+}
+
+message Metric {
+ string name = 1;
+ oneof value {
+ uint64 count = 10;
+ Histogram histogram = 11;
+ }
+}
+
+message Stats {
+ repeated Metric metrics = 1;
+}
diff --git a/src/proto/grpc/testing/BUILD b/src/proto/grpc/testing/BUILD
index 07e08117f0..36d3782262 100644
--- a/src/proto/grpc/testing/BUILD
+++ b/src/proto/grpc/testing/BUILD
@@ -84,6 +84,9 @@ grpc_proto_library(
name = "stats_proto",
srcs = ["stats.proto"],
has_services = False,
+ deps = [
+ "//src/proto/grpc/core:stats_proto",
+ ]
)
grpc_proto_library(
diff --git a/src/proto/grpc/testing/echo_messages.proto b/src/proto/grpc/testing/echo_messages.proto
index c5c5fdb3fc..5396a2fd39 100644
--- a/src/proto/grpc/testing/echo_messages.proto
+++ b/src/proto/grpc/testing/echo_messages.proto
@@ -45,6 +45,7 @@ message RequestParams {
bool server_die = 12; // Server should not see a request with this set.
string binary_error_details = 13;
ErrorStatus expected_error = 14;
+ int32 server_sleep_us = 15; // Amount to sleep when invoking server
}
message EchoRequest {
diff --git a/src/proto/grpc/testing/proxy-service.proto b/src/proto/grpc/testing/proxy-service.proto
index 8d0a9498c0..deaabd1365 100644
--- a/src/proto/grpc/testing/proxy-service.proto
+++ b/src/proto/grpc/testing/proxy-service.proto
@@ -15,6 +15,7 @@
syntax = "proto3";
import "src/proto/grpc/testing/control.proto";
+import "src/proto/grpc/testing/stats.proto";
package grpc.testing;
@@ -25,5 +26,6 @@ message ProxyStat {
service ProxyClientService {
rpc GetConfig(Void) returns (ClientConfig);
rpc ReportTime(stream ProxyStat) returns (Void);
+ rpc ReportHist(stream HistogramData) returns (Void);
}
diff --git a/src/proto/grpc/testing/stats.proto b/src/proto/grpc/testing/stats.proto
index c738c4f895..a0f84ddbce 100644
--- a/src/proto/grpc/testing/stats.proto
+++ b/src/proto/grpc/testing/stats.proto
@@ -16,6 +16,8 @@ syntax = "proto3";
package grpc.testing;
+import "src/proto/grpc/core/stats.proto";
+
message ServerStats {
// wall clock time change in seconds since last reset
double time_elapsed = 1;
@@ -35,6 +37,9 @@ message ServerStats {
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
+
+ // Core library stats
+ grpc.core.Stats core_stats = 7;
}
// Histogram params based on grpc/support/histogram.c
@@ -72,4 +77,7 @@ message ClientStats {
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
+
+ // Core library stats
+ grpc.core.Stats core_stats = 7;
}
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 28c30e5d35..237f430799 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -41,9 +41,8 @@ cdef class CompletionQueue:
cdef object user_tag = None
cdef Call operation_call = None
cdef CallDetails request_call_details = None
- cdef Metadata request_metadata = None
+ cdef object request_metadata = None
cdef Operations batch_operations = None
- cdef Operation batch_operation = None
if event.type == GRPC_QUEUE_TIMEOUT:
return Event(
event.type, False, None, None, None, None, False, None)
@@ -63,14 +62,8 @@ cdef class CompletionQueue:
operation_call = tag.operation_call
request_call_details = tag.request_call_details
if tag.request_metadata is not None:
- request_metadata = tag.request_metadata
- request_metadata._claim_slice_ownership()
+ request_metadata = tuple(tag.request_metadata)
batch_operations = tag.batch_operations
- if tag.batch_operations is not None:
- for op in batch_operations.operations:
- batch_operation = <Operation>op
- if batch_operation._received_metadata is not None:
- batch_operation._received_metadata._claim_slice_ownership()
if tag.is_new_request:
# Stuff in the tag not explicitly handled by us needs to live through
# the life of the call
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
index a0e69dd613..41975cbe9e 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi
@@ -41,7 +41,8 @@ cdef class CredentialsMetadataPlugin:
cdef object plugin_callback
cdef bytes plugin_name
- cdef grpc_metadata_credentials_plugin make_c_plugin(self)
+
+cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin)
cdef class AuthMetadataContext:
@@ -49,8 +50,11 @@ cdef class AuthMetadataContext:
cdef grpc_auth_metadata_context context
-cdef void plugin_get_metadata(
+cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) with gil
cdef void plugin_destroy_c_plugin_state(void *state) with gil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 98d7a9820d..0fabda19ce 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -14,6 +14,7 @@
cimport cpython
+import threading
import traceback
@@ -76,7 +77,7 @@ cdef class CredentialsMetadataPlugin:
"""
Args:
plugin_callback (callable): Callback accepting a service URL (str/bytes)
- and callback object (accepting a Metadata,
+ and callback object (accepting a MetadataArray,
grpc_status_code, and a str/bytes error message). This argument
when called should be non-blocking and eventually call the callback
object with the appropriate status code/details and metadata (if
@@ -89,20 +90,20 @@ cdef class CredentialsMetadataPlugin:
self.plugin_callback = plugin_callback
self.plugin_name = name
- @staticmethod
- cdef grpc_metadata_credentials_plugin make_c_plugin(self):
- cdef grpc_metadata_credentials_plugin result
- result.get_metadata = plugin_get_metadata
- result.destroy = plugin_destroy_c_plugin_state
- result.state = <void *>self
- result.type = self.plugin_name
- cpython.Py_INCREF(self)
- return result
-
def __dealloc__(self):
grpc_shutdown()
+cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin):
+ cdef grpc_metadata_credentials_plugin c_plugin
+ c_plugin.get_metadata = plugin_get_metadata
+ c_plugin.destroy = plugin_destroy_c_plugin_state
+ c_plugin.state = <void *>plugin
+ c_plugin.type = plugin.plugin_name
+ cpython.Py_INCREF(plugin)
+ return c_plugin
+
+
cdef class AuthMetadataContext:
def __cinit__(self):
@@ -122,25 +123,30 @@ cdef class AuthMetadataContext:
grpc_shutdown()
-cdef void plugin_get_metadata(
+cdef int plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil:
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) with gil:
called_flag = [False]
def python_callback(
Metadata metadata, grpc_status_code status,
bytes error_details):
- cb(user_data, metadata.c_metadata_array.metadata,
- metadata.c_metadata_array.count, status, error_details)
+ cb(user_data, metadata.c_metadata, metadata.c_count, status, error_details)
called_flag[0] = True
cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
cdef AuthMetadataContext cy_context = AuthMetadataContext()
cy_context.context = context
- try:
- self.plugin_callback(cy_context, python_callback)
- except Exception as error:
- if not called_flag[0]:
- cb(user_data, Metadata([]).c_metadata_array.metadata,
- 0, StatusCode.unknown, traceback.format_exc().encode())
+ def async_callback():
+ try:
+ self.plugin_callback(cy_context, python_callback)
+ except Exception as error:
+ if not called_flag[0]:
+ cb(user_data, NULL, 0, StatusCode.unknown,
+ traceback.format_exc().encode())
+ threading.Thread(group=None, target=async_callback).start()
+ return 0 # Asynchronous return
cdef void plugin_destroy_c_plugin_state(void *state) with gil:
cpython.Py_DECREF(<CredentialsMetadataPlugin>state)
@@ -240,7 +246,7 @@ def call_credentials_google_iam(authorization_token, authority_selector):
def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin):
cdef CallCredentials credentials = CallCredentials()
- cdef grpc_metadata_credentials_plugin c_plugin = plugin.make_c_plugin()
+ cdef grpc_metadata_credentials_plugin c_plugin = _c_plugin(plugin)
with nogil:
credentials.c_credentials = (
grpc_metadata_credentials_create_from_plugin(c_plugin, NULL))
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 5950bfa0e6..f115106e60 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -59,6 +59,7 @@ cdef extern from "grpc/grpc.h":
grpc_slice grpc_slice_malloc(size_t length) nogil
grpc_slice grpc_slice_from_copied_string(const char *source) nogil
grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len) nogil
+ grpc_slice grpc_slice_copy(grpc_slice s) nogil
# Declare functions for function-like macros (because Cython)...
void *grpc_slice_start_ptr "GRPC_SLICE_START_PTR" (grpc_slice s) nogil
@@ -374,6 +375,10 @@ cdef extern from "grpc/grpc.h":
cdef extern from "grpc/grpc_security.h":
+ # Declare this as an enum, this is the only way to make it a const in
+ # cython
+ enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
+
ctypedef enum grpc_ssl_roots_override_result:
GRPC_SSL_ROOTS_OVERRIDE_OK
GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY
@@ -461,9 +466,12 @@ cdef extern from "grpc/grpc_security.h":
grpc_status_code status, const char *error_details)
ctypedef struct grpc_metadata_credentials_plugin:
- void (*get_metadata)(
+ int (*get_metadata)(
void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void *user_data)
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details)
void (*destroy)(void *state)
void *state
const char *type
@@ -522,7 +530,7 @@ cdef extern from "grpc/compression.h":
int grpc_compression_algorithm_parse(
grpc_slice value, grpc_compression_algorithm *algorithm) nogil
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
- char **name) nogil
+ const char **name) nogil
grpc_compression_algorithm grpc_compression_algorithm_for_level(
grpc_compression_level level, uint32_t accepted_encodings) nogil
void grpc_compression_options_init(grpc_compression_options *opts) nogil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 8ace6aeb52..9c40ebf0c2 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -37,7 +37,7 @@ cdef class OperationTag:
cdef Server shutting_down_server
cdef Call operation_call
cdef CallDetails request_call_details
- cdef Metadata request_metadata
+ cdef MetadataArray request_metadata
cdef Operations batch_operations
cdef bint is_new_request
@@ -51,7 +51,7 @@ cdef class Event:
# For Server.request_call
cdef readonly bint is_new_request
cdef readonly CallDetails request_call_details
- cdef readonly Metadata request_metadata
+ cdef readonly object request_metadata
# For server calls
cdef readonly Call operation_call
@@ -92,15 +92,20 @@ cdef class Metadatum:
cdef class Metadata:
+ cdef grpc_metadata *c_metadata
+ cdef readonly size_t c_count
+
+
+cdef class MetadataArray:
+
cdef grpc_metadata_array c_metadata_array
- cdef void _claim_slice_ownership(self)
cdef class Operation:
cdef grpc_op c_op
cdef ByteBuffer _received_message
- cdef Metadata _received_metadata
+ cdef MetadataArray _received_metadata
cdef grpc_status_code _received_status_code
cdef grpc_slice _status_details
cdef int _received_cancelled
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 1b2ddd2469..4f87261e17 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -171,14 +171,6 @@ cdef class Timespec:
gpr_convert_clock_type(self.c_time, GPR_CLOCK_REALTIME))
return <double>real_time.seconds + <double>real_time.nanoseconds / 1e9
- @staticmethod
- def infinite_future():
- return Timespec(float("+inf"))
-
- @staticmethod
- def infinite_past():
- return Timespec(float("-inf"))
-
def __richcmp__(Timespec self not None, Timespec other not None, int op):
cdef gpr_timespec self_c_time = self.c_time
cdef gpr_timespec other_c_time = other.c_time
@@ -238,7 +230,7 @@ cdef class Event:
def __cinit__(self, grpc_completion_type type, bint success,
object tag, Call operation_call,
CallDetails request_call_details,
- Metadata request_metadata,
+ object request_metadata,
bint is_new_request,
Operations batch_operations):
self.type = type
@@ -437,48 +429,79 @@ cdef class Metadatum:
cdef class _MetadataIterator:
cdef size_t i
- cdef Metadata metadata
+ cdef size_t _length
+ cdef object _metadatum_indexable
- def __cinit__(self, Metadata metadata not None):
+ def __cinit__(self, length, metadatum_indexable):
+ self._length = length
+ self._metadatum_indexable = metadatum_indexable
self.i = 0
- self.metadata = metadata
def __iter__(self):
return self
def __next__(self):
- if self.i < len(self.metadata):
- result = self.metadata[self.i]
+ if self.i < self._length:
+ result = self._metadatum_indexable[self.i]
self.i = self.i + 1
return result
else:
- raise StopIteration
+ raise StopIteration()
+# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an
+# ordinary sequence of pairs of bytestrings all the way down to the
+# grpc_call_start_batch call.
cdef class Metadata:
+ """Metadata being passed from application to core."""
def __cinit__(self, metadata_iterable):
+ metadata_sequence = tuple(metadata_iterable)
+ cdef size_t count = len(metadata_sequence)
with nogil:
grpc_init()
- grpc_metadata_array_init(&self.c_metadata_array)
- metadata = list(metadata_iterable)
- for metadatum in metadata:
- if not isinstance(metadatum, Metadatum):
- raise TypeError("expected list of Metadatum")
- self.c_metadata_array.count = len(metadata)
- self.c_metadata_array.capacity = len(metadata)
+ self.c_metadata = <grpc_metadata *>gpr_malloc(
+ count * sizeof(grpc_metadata))
+ self.c_count = count
+ for index, metadatum in enumerate(metadata_sequence):
+ self.c_metadata[index].key = grpc_slice_copy(
+ (<Metadatum>metadatum).c_metadata.key)
+ self.c_metadata[index].value = grpc_slice_copy(
+ (<Metadatum>metadatum).c_metadata.value)
+
+ def __dealloc__(self):
+ with nogil:
+ for index in range(self.c_count):
+ grpc_slice_unref(self.c_metadata[index].key)
+ grpc_slice_unref(self.c_metadata[index].value)
+ gpr_free(self.c_metadata)
+ grpc_shutdown()
+
+ def __len__(self):
+ return self.c_count
+
+ def __getitem__(self, size_t index):
+ if index < self.c_count:
+ key = _slice_bytes(self.c_metadata[index].key)
+ value = _slice_bytes(self.c_metadata[index].value)
+ return Metadatum(key, value)
+ else:
+ raise IndexError()
+
+ def __iter__(self):
+ return _MetadataIterator(self.c_count, self)
+
+
+cdef class MetadataArray:
+ """Metadata being passed from core to application."""
+
+ def __cinit__(self):
with nogil:
- self.c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
- self.c_metadata_array.count*sizeof(grpc_metadata)
- )
- for i in range(self.c_metadata_array.count):
- (<Metadatum>metadata[i])._copy_metadatum(&self.c_metadata_array.metadata[i])
+ grpc_init()
+ grpc_metadata_array_init(&self.c_metadata_array)
def __dealloc__(self):
with nogil:
- # this frees the allocated memory for the grpc_metadata_array (although
- # it'd be nice if that were documented somewhere...)
- # TODO(atash): document this in the C core
grpc_metadata_array_destroy(&self.c_metadata_array)
grpc_shutdown()
@@ -487,27 +510,13 @@ cdef class Metadata:
def __getitem__(self, size_t i):
if i >= self.c_metadata_array.count:
- raise IndexError
+ raise IndexError()
key = _slice_bytes(self.c_metadata_array.metadata[i].key)
value = _slice_bytes(self.c_metadata_array.metadata[i].value)
return Metadatum(key=key, value=value)
def __iter__(self):
- return _MetadataIterator(self)
-
- cdef void _claim_slice_ownership(self):
- cdef grpc_metadata_array new_c_metadata_array
- grpc_metadata_array_init(&new_c_metadata_array)
- new_c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
- self.c_metadata_array.count*sizeof(grpc_metadata))
- new_c_metadata_array.count = self.c_metadata_array.count
- for i in range(self.c_metadata_array.count):
- new_c_metadata_array.metadata[i].key = _copy_slice(
- self.c_metadata_array.metadata[i].key)
- new_c_metadata_array.metadata[i].value = _copy_slice(
- self.c_metadata_array.metadata[i].value)
- grpc_metadata_array_destroy(&self.c_metadata_array)
- self.c_metadata_array = new_c_metadata_array
+ return _MetadataIterator(self.c_metadata_array.count, self)
cdef class Operation:
@@ -547,14 +556,13 @@ cdef class Operation:
if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
raise TypeError("self must be an operation receiving metadata")
- return self._received_metadata
-
- @property
- def received_metadata_or_none(self):
- if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
- self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
- return None
- return self._received_metadata
+ # TODO(https://github.com/grpc/grpc/issues/7950): Drop the "all Cython
+ # objects must be legitimate for use from Python at any time" policy in
+ # place today, shift the policy toward "Operation objects are only usable
+ # while their calls are active", and move this making-a-copy-because-this-
+ # data-needs-to-live-much-longer-than-the-call-from-which-it-arose to the
+ # lowest Python layer.
+ return tuple(self._received_metadata)
@property
def received_status_code(self):
@@ -601,9 +609,8 @@ def operation_send_initial_metadata(Metadata metadata, int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
op.c_op.flags = flags
- op.c_op.data.send_initial_metadata.count = metadata.c_metadata_array.count
- op.c_op.data.send_initial_metadata.metadata = (
- metadata.c_metadata_array.metadata)
+ op.c_op.data.send_initial_metadata.count = metadata.c_count
+ op.c_op.data.send_initial_metadata.metadata = metadata.c_metadata
op.references.append(metadata)
op.is_valid = True
return op
@@ -631,9 +638,8 @@ def operation_send_status_from_server(
op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
op.c_op.flags = flags
op.c_op.data.send_status_from_server.trailing_metadata_count = (
- metadata.c_metadata_array.count)
- op.c_op.data.send_status_from_server.trailing_metadata = (
- metadata.c_metadata_array.metadata)
+ metadata.c_count)
+ op.c_op.data.send_status_from_server.trailing_metadata = metadata.c_metadata
op.c_op.data.send_status_from_server.status = code
grpc_slice_unref(op._status_details)
op._status_details = _slice_from_bytes(details)
@@ -646,7 +652,7 @@ def operation_receive_initial_metadata(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
op.c_op.flags = flags
- op._received_metadata = Metadata([])
+ op._received_metadata = MetadataArray()
op.c_op.data.receive_initial_metadata.receive_initial_metadata = (
&op._received_metadata.c_metadata_array)
op.is_valid = True
@@ -669,7 +675,7 @@ def operation_receive_status_on_client(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
op.c_op.flags = flags
- op._received_metadata = Metadata([])
+ op._received_metadata = MetadataArray()
op.c_op.data.receive_status_on_client.trailing_metadata = (
&op._received_metadata.c_metadata_array)
op.c_op.data.receive_status_on_client.status = (
@@ -706,7 +712,7 @@ cdef class _OperationsIterator:
self.i = self.i + 1
return result
else:
- raise StopIteration
+ raise StopIteration()
cdef class Operations:
@@ -768,7 +774,7 @@ cdef class CompressionOptions:
def compression_algorithm_name(grpc_compression_algorithm algorithm):
- cdef char* name
+ cdef const char* name
with nogil:
grpc_compression_algorithm_name(algorithm, &name)
# Let Cython do the right thing with string casting
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index dd276fd57b..b8db27469f 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -44,7 +44,7 @@ cdef class Server:
cdef OperationTag operation_tag = OperationTag(tag)
operation_tag.operation_call = Call()
operation_tag.request_call_details = CallDetails()
- operation_tag.request_metadata = Metadata([])
+ operation_tag.request_metadata = MetadataArray()
operation_tag.references.extend([self, call_queue, server_queue])
operation_tag.is_new_request = True
operation_tag.batch_operations = Operations([])
diff --git a/src/python/grpcio/grpc/_grpcio_metadata.py b/src/python/grpcio/grpc/_grpcio_metadata.py
index a4eb358c4e..0887ac1722 100644
--- a/src/python/grpcio/grpc/_grpcio_metadata.py
+++ b/src/python/grpcio/grpc/_grpcio_metadata.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!!
-__version__ = """1.7.0.dev0"""
+__version__ = """1.8.0.dev0"""
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 1cbf345ab6..bb7d990078 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -15,312 +15,301 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
- 'src/core/lib/profiling/basic_timers.c',
- 'src/core/lib/profiling/stap_timers.c',
- 'src/core/lib/support/alloc.c',
- 'src/core/lib/support/arena.c',
- 'src/core/lib/support/atm.c',
- 'src/core/lib/support/avl.c',
- 'src/core/lib/support/backoff.c',
- 'src/core/lib/support/cmdline.c',
- 'src/core/lib/support/cpu_iphone.c',
- 'src/core/lib/support/cpu_linux.c',
- 'src/core/lib/support/cpu_posix.c',
- 'src/core/lib/support/cpu_windows.c',
- 'src/core/lib/support/env_linux.c',
- 'src/core/lib/support/env_posix.c',
- 'src/core/lib/support/env_windows.c',
- 'src/core/lib/support/histogram.c',
- 'src/core/lib/support/host_port.c',
- 'src/core/lib/support/log.c',
- 'src/core/lib/support/log_android.c',
- 'src/core/lib/support/log_linux.c',
- 'src/core/lib/support/log_posix.c',
- 'src/core/lib/support/log_windows.c',
- 'src/core/lib/support/mpscq.c',
- 'src/core/lib/support/murmur_hash.c',
- 'src/core/lib/support/stack_lockfree.c',
- 'src/core/lib/support/string.c',
- 'src/core/lib/support/string_posix.c',
- 'src/core/lib/support/string_util_windows.c',
- 'src/core/lib/support/string_windows.c',
- 'src/core/lib/support/subprocess_posix.c',
- 'src/core/lib/support/subprocess_windows.c',
- 'src/core/lib/support/sync.c',
- 'src/core/lib/support/sync_posix.c',
- 'src/core/lib/support/sync_windows.c',
- 'src/core/lib/support/thd.c',
- 'src/core/lib/support/thd_posix.c',
- 'src/core/lib/support/thd_windows.c',
- 'src/core/lib/support/time.c',
- 'src/core/lib/support/time_posix.c',
- 'src/core/lib/support/time_precise.c',
- 'src/core/lib/support/time_windows.c',
- 'src/core/lib/support/tls_pthread.c',
- 'src/core/lib/support/tmpfile_msys.c',
- 'src/core/lib/support/tmpfile_posix.c',
- 'src/core/lib/support/tmpfile_windows.c',
- 'src/core/lib/support/wrap_memcpy.c',
- 'src/core/lib/surface/init.c',
- 'src/core/lib/channel/channel_args.c',
- 'src/core/lib/channel/channel_stack.c',
- 'src/core/lib/channel/channel_stack_builder.c',
- 'src/core/lib/channel/connected_channel.c',
- 'src/core/lib/channel/handshaker.c',
- 'src/core/lib/channel/handshaker_factory.c',
- 'src/core/lib/channel/handshaker_registry.c',
- 'src/core/lib/compression/compression.c',
- 'src/core/lib/compression/message_compress.c',
- 'src/core/lib/compression/stream_compression.c',
- 'src/core/lib/debug/stats.c',
- 'src/core/lib/debug/stats_data.c',
- 'src/core/lib/http/format_request.c',
- 'src/core/lib/http/httpcli.c',
- 'src/core/lib/http/parser.c',
- 'src/core/lib/iomgr/closure.c',
- 'src/core/lib/iomgr/combiner.c',
- 'src/core/lib/iomgr/endpoint.c',
- 'src/core/lib/iomgr/endpoint_pair_posix.c',
- 'src/core/lib/iomgr/endpoint_pair_uv.c',
- 'src/core/lib/iomgr/endpoint_pair_windows.c',
- 'src/core/lib/iomgr/error.c',
- 'src/core/lib/iomgr/ev_epoll1_linux.c',
- 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c',
- 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c',
- 'src/core/lib/iomgr/ev_epollex_linux.c',
- 'src/core/lib/iomgr/ev_epollsig_linux.c',
- 'src/core/lib/iomgr/ev_poll_posix.c',
- 'src/core/lib/iomgr/ev_posix.c',
- 'src/core/lib/iomgr/ev_windows.c',
- 'src/core/lib/iomgr/exec_ctx.c',
- 'src/core/lib/iomgr/executor.c',
- 'src/core/lib/iomgr/gethostname_fallback.c',
- 'src/core/lib/iomgr/gethostname_host_name_max.c',
- 'src/core/lib/iomgr/gethostname_sysconf.c',
- 'src/core/lib/iomgr/iocp_windows.c',
- 'src/core/lib/iomgr/iomgr.c',
- 'src/core/lib/iomgr/iomgr_posix.c',
- 'src/core/lib/iomgr/iomgr_uv.c',
- 'src/core/lib/iomgr/iomgr_windows.c',
- 'src/core/lib/iomgr/is_epollexclusive_available.c',
- 'src/core/lib/iomgr/load_file.c',
- 'src/core/lib/iomgr/lockfree_event.c',
- 'src/core/lib/iomgr/network_status_tracker.c',
- 'src/core/lib/iomgr/polling_entity.c',
- 'src/core/lib/iomgr/pollset_set_uv.c',
- 'src/core/lib/iomgr/pollset_set_windows.c',
- 'src/core/lib/iomgr/pollset_uv.c',
- 'src/core/lib/iomgr/pollset_windows.c',
- 'src/core/lib/iomgr/resolve_address_posix.c',
- 'src/core/lib/iomgr/resolve_address_uv.c',
- 'src/core/lib/iomgr/resolve_address_windows.c',
- 'src/core/lib/iomgr/resource_quota.c',
- 'src/core/lib/iomgr/sockaddr_utils.c',
- 'src/core/lib/iomgr/socket_factory_posix.c',
- 'src/core/lib/iomgr/socket_mutator.c',
- 'src/core/lib/iomgr/socket_utils_common_posix.c',
- 'src/core/lib/iomgr/socket_utils_linux.c',
- 'src/core/lib/iomgr/socket_utils_posix.c',
- 'src/core/lib/iomgr/socket_utils_uv.c',
- 'src/core/lib/iomgr/socket_utils_windows.c',
- 'src/core/lib/iomgr/socket_windows.c',
- 'src/core/lib/iomgr/tcp_client_posix.c',
- 'src/core/lib/iomgr/tcp_client_uv.c',
- 'src/core/lib/iomgr/tcp_client_windows.c',
- 'src/core/lib/iomgr/tcp_posix.c',
- 'src/core/lib/iomgr/tcp_server_posix.c',
- 'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
- 'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
- 'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
- 'src/core/lib/iomgr/tcp_server_uv.c',
- 'src/core/lib/iomgr/tcp_server_windows.c',
- 'src/core/lib/iomgr/tcp_uv.c',
- 'src/core/lib/iomgr/tcp_windows.c',
- 'src/core/lib/iomgr/time_averaged_stats.c',
- 'src/core/lib/iomgr/timer_generic.c',
- 'src/core/lib/iomgr/timer_heap.c',
- 'src/core/lib/iomgr/timer_manager.c',
- 'src/core/lib/iomgr/timer_uv.c',
- 'src/core/lib/iomgr/udp_server.c',
- 'src/core/lib/iomgr/unix_sockets_posix.c',
- 'src/core/lib/iomgr/unix_sockets_posix_noop.c',
- 'src/core/lib/iomgr/wakeup_fd_cv.c',
- 'src/core/lib/iomgr/wakeup_fd_eventfd.c',
- 'src/core/lib/iomgr/wakeup_fd_nospecial.c',
- 'src/core/lib/iomgr/wakeup_fd_pipe.c',
- 'src/core/lib/iomgr/wakeup_fd_posix.c',
- 'src/core/lib/json/json.c',
- 'src/core/lib/json/json_reader.c',
- 'src/core/lib/json/json_string.c',
- 'src/core/lib/json/json_writer.c',
- 'src/core/lib/slice/b64.c',
- 'src/core/lib/slice/percent_encoding.c',
- 'src/core/lib/slice/slice.c',
- 'src/core/lib/slice/slice_buffer.c',
- 'src/core/lib/slice/slice_hash_table.c',
- 'src/core/lib/slice/slice_intern.c',
- 'src/core/lib/slice/slice_string_helpers.c',
- 'src/core/lib/surface/alarm.c',
- 'src/core/lib/surface/api_trace.c',
- 'src/core/lib/surface/byte_buffer.c',
- 'src/core/lib/surface/byte_buffer_reader.c',
- 'src/core/lib/surface/call.c',
- 'src/core/lib/surface/call_details.c',
- 'src/core/lib/surface/call_log_batch.c',
- 'src/core/lib/surface/channel.c',
- 'src/core/lib/surface/channel_init.c',
- 'src/core/lib/surface/channel_ping.c',
- 'src/core/lib/surface/channel_stack_type.c',
- 'src/core/lib/surface/completion_queue.c',
- 'src/core/lib/surface/completion_queue_factory.c',
- 'src/core/lib/surface/event_string.c',
+ 'src/core/lib/profiling/basic_timers.cc',
+ 'src/core/lib/profiling/stap_timers.cc',
+ 'src/core/lib/support/alloc.cc',
+ 'src/core/lib/support/arena.cc',
+ 'src/core/lib/support/atm.cc',
+ 'src/core/lib/support/avl.cc',
+ 'src/core/lib/support/cmdline.cc',
+ 'src/core/lib/support/cpu_iphone.cc',
+ 'src/core/lib/support/cpu_linux.cc',
+ 'src/core/lib/support/cpu_posix.cc',
+ 'src/core/lib/support/cpu_windows.cc',
+ 'src/core/lib/support/env_linux.cc',
+ 'src/core/lib/support/env_posix.cc',
+ 'src/core/lib/support/env_windows.cc',
+ 'src/core/lib/support/histogram.cc',
+ 'src/core/lib/support/host_port.cc',
+ 'src/core/lib/support/log.cc',
+ 'src/core/lib/support/log_android.cc',
+ 'src/core/lib/support/log_linux.cc',
+ 'src/core/lib/support/log_posix.cc',
+ 'src/core/lib/support/log_windows.cc',
+ 'src/core/lib/support/mpscq.cc',
+ 'src/core/lib/support/murmur_hash.cc',
+ 'src/core/lib/support/stack_lockfree.cc',
+ 'src/core/lib/support/string.cc',
+ 'src/core/lib/support/string_posix.cc',
+ 'src/core/lib/support/string_util_windows.cc',
+ 'src/core/lib/support/string_windows.cc',
+ 'src/core/lib/support/subprocess_posix.cc',
+ 'src/core/lib/support/subprocess_windows.cc',
+ 'src/core/lib/support/sync.cc',
+ 'src/core/lib/support/sync_posix.cc',
+ 'src/core/lib/support/sync_windows.cc',
+ 'src/core/lib/support/thd.cc',
+ 'src/core/lib/support/thd_posix.cc',
+ 'src/core/lib/support/thd_windows.cc',
+ 'src/core/lib/support/time.cc',
+ 'src/core/lib/support/time_posix.cc',
+ 'src/core/lib/support/time_precise.cc',
+ 'src/core/lib/support/time_windows.cc',
+ 'src/core/lib/support/tls_pthread.cc',
+ 'src/core/lib/support/tmpfile_msys.cc',
+ 'src/core/lib/support/tmpfile_posix.cc',
+ 'src/core/lib/support/tmpfile_windows.cc',
+ 'src/core/lib/support/wrap_memcpy.cc',
+ 'src/core/lib/surface/init.cc',
+ 'src/core/lib/backoff/backoff.cc',
+ 'src/core/lib/channel/channel_args.cc',
+ 'src/core/lib/channel/channel_stack.cc',
+ 'src/core/lib/channel/channel_stack_builder.cc',
+ 'src/core/lib/channel/connected_channel.cc',
+ 'src/core/lib/channel/handshaker.cc',
+ 'src/core/lib/channel/handshaker_factory.cc',
+ 'src/core/lib/channel/handshaker_registry.cc',
+ 'src/core/lib/compression/compression.cc',
+ 'src/core/lib/compression/message_compress.cc',
+ 'src/core/lib/compression/stream_compression.cc',
+ 'src/core/lib/compression/stream_compression_gzip.cc',
+ 'src/core/lib/compression/stream_compression_identity.cc',
+ 'src/core/lib/debug/stats.cc',
+ 'src/core/lib/debug/stats_data.cc',
+ 'src/core/lib/http/format_request.cc',
+ 'src/core/lib/http/httpcli.cc',
+ 'src/core/lib/http/parser.cc',
+ 'src/core/lib/iomgr/call_combiner.cc',
+ 'src/core/lib/iomgr/closure.cc',
+ 'src/core/lib/iomgr/combiner.cc',
+ 'src/core/lib/iomgr/endpoint.cc',
+ 'src/core/lib/iomgr/endpoint_pair_posix.cc',
+ 'src/core/lib/iomgr/endpoint_pair_uv.cc',
+ 'src/core/lib/iomgr/endpoint_pair_windows.cc',
+ 'src/core/lib/iomgr/error.cc',
+ 'src/core/lib/iomgr/ev_epoll1_linux.cc',
+ 'src/core/lib/iomgr/ev_epollex_linux.cc',
+ 'src/core/lib/iomgr/ev_epollsig_linux.cc',
+ 'src/core/lib/iomgr/ev_poll_posix.cc',
+ 'src/core/lib/iomgr/ev_posix.cc',
+ 'src/core/lib/iomgr/ev_windows.cc',
+ 'src/core/lib/iomgr/exec_ctx.cc',
+ 'src/core/lib/iomgr/executor.cc',
+ 'src/core/lib/iomgr/gethostname_fallback.cc',
+ 'src/core/lib/iomgr/gethostname_host_name_max.cc',
+ 'src/core/lib/iomgr/gethostname_sysconf.cc',
+ 'src/core/lib/iomgr/iocp_windows.cc',
+ 'src/core/lib/iomgr/iomgr.cc',
+ 'src/core/lib/iomgr/iomgr_posix.cc',
+ 'src/core/lib/iomgr/iomgr_uv.cc',
+ 'src/core/lib/iomgr/iomgr_windows.cc',
+ 'src/core/lib/iomgr/is_epollexclusive_available.cc',
+ 'src/core/lib/iomgr/load_file.cc',
+ 'src/core/lib/iomgr/lockfree_event.cc',
+ 'src/core/lib/iomgr/network_status_tracker.cc',
+ 'src/core/lib/iomgr/polling_entity.cc',
+ 'src/core/lib/iomgr/pollset_set_uv.cc',
+ 'src/core/lib/iomgr/pollset_set_windows.cc',
+ 'src/core/lib/iomgr/pollset_uv.cc',
+ 'src/core/lib/iomgr/pollset_windows.cc',
+ 'src/core/lib/iomgr/resolve_address_posix.cc',
+ 'src/core/lib/iomgr/resolve_address_uv.cc',
+ 'src/core/lib/iomgr/resolve_address_windows.cc',
+ 'src/core/lib/iomgr/resource_quota.cc',
+ 'src/core/lib/iomgr/sockaddr_utils.cc',
+ 'src/core/lib/iomgr/socket_factory_posix.cc',
+ 'src/core/lib/iomgr/socket_mutator.cc',
+ 'src/core/lib/iomgr/socket_utils_common_posix.cc',
+ 'src/core/lib/iomgr/socket_utils_linux.cc',
+ 'src/core/lib/iomgr/socket_utils_posix.cc',
+ 'src/core/lib/iomgr/socket_utils_uv.cc',
+ 'src/core/lib/iomgr/socket_utils_windows.cc',
+ 'src/core/lib/iomgr/socket_windows.cc',
+ 'src/core/lib/iomgr/tcp_client_posix.cc',
+ 'src/core/lib/iomgr/tcp_client_uv.cc',
+ 'src/core/lib/iomgr/tcp_client_windows.cc',
+ 'src/core/lib/iomgr/tcp_posix.cc',
+ 'src/core/lib/iomgr/tcp_server_posix.cc',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
+ 'src/core/lib/iomgr/tcp_server_uv.cc',
+ 'src/core/lib/iomgr/tcp_server_windows.cc',
+ 'src/core/lib/iomgr/tcp_uv.cc',
+ 'src/core/lib/iomgr/tcp_windows.cc',
+ 'src/core/lib/iomgr/time_averaged_stats.cc',
+ 'src/core/lib/iomgr/timer_generic.cc',
+ 'src/core/lib/iomgr/timer_heap.cc',
+ 'src/core/lib/iomgr/timer_manager.cc',
+ 'src/core/lib/iomgr/timer_uv.cc',
+ 'src/core/lib/iomgr/udp_server.cc',
+ 'src/core/lib/iomgr/unix_sockets_posix.cc',
+ 'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
+ 'src/core/lib/iomgr/wakeup_fd_cv.cc',
+ 'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
+ 'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
+ 'src/core/lib/iomgr/wakeup_fd_pipe.cc',
+ 'src/core/lib/iomgr/wakeup_fd_posix.cc',
+ 'src/core/lib/json/json.cc',
+ 'src/core/lib/json/json_reader.cc',
+ 'src/core/lib/json/json_string.cc',
+ 'src/core/lib/json/json_writer.cc',
+ 'src/core/lib/slice/b64.cc',
+ 'src/core/lib/slice/percent_encoding.cc',
+ 'src/core/lib/slice/slice.cc',
+ 'src/core/lib/slice/slice_buffer.cc',
+ 'src/core/lib/slice/slice_hash_table.cc',
+ 'src/core/lib/slice/slice_intern.cc',
+ 'src/core/lib/slice/slice_string_helpers.cc',
+ 'src/core/lib/surface/alarm.cc',
+ 'src/core/lib/surface/api_trace.cc',
+ 'src/core/lib/surface/byte_buffer.cc',
+ 'src/core/lib/surface/byte_buffer_reader.cc',
+ 'src/core/lib/surface/call.cc',
+ 'src/core/lib/surface/call_details.cc',
+ 'src/core/lib/surface/call_log_batch.cc',
+ 'src/core/lib/surface/channel.cc',
+ 'src/core/lib/surface/channel_init.cc',
+ 'src/core/lib/surface/channel_ping.cc',
+ 'src/core/lib/surface/channel_stack_type.cc',
+ 'src/core/lib/surface/completion_queue.cc',
+ 'src/core/lib/surface/completion_queue_factory.cc',
+ 'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/lame_client.cc',
- 'src/core/lib/surface/metadata_array.c',
- 'src/core/lib/surface/server.c',
- 'src/core/lib/surface/validate_metadata.c',
- 'src/core/lib/surface/version.c',
- 'src/core/lib/transport/bdp_estimator.c',
- 'src/core/lib/transport/byte_stream.c',
- 'src/core/lib/transport/connectivity_state.c',
- 'src/core/lib/transport/error_utils.c',
- 'src/core/lib/transport/metadata.c',
- 'src/core/lib/transport/metadata_batch.c',
- 'src/core/lib/transport/pid_controller.c',
- 'src/core/lib/transport/service_config.c',
- 'src/core/lib/transport/static_metadata.c',
- 'src/core/lib/transport/status_conversion.c',
- 'src/core/lib/transport/timeout_encoding.c',
- 'src/core/lib/transport/transport.c',
- 'src/core/lib/transport/transport_op_string.c',
- 'src/core/lib/debug/trace.c',
- 'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
- 'src/core/ext/transport/chttp2/transport/bin_decoder.c',
- 'src/core/ext/transport/chttp2/transport/bin_encoder.c',
- 'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
- 'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
- 'src/core/ext/transport/chttp2/transport/flow_control.c',
- 'src/core/ext/transport/chttp2/transport/frame_data.c',
- 'src/core/ext/transport/chttp2/transport/frame_goaway.c',
- 'src/core/ext/transport/chttp2/transport/frame_ping.c',
- 'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
- 'src/core/ext/transport/chttp2/transport/frame_settings.c',
- 'src/core/ext/transport/chttp2/transport/frame_window_update.c',
- 'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
- 'src/core/ext/transport/chttp2/transport/hpack_parser.c',
- 'src/core/ext/transport/chttp2/transport/hpack_table.c',
- 'src/core/ext/transport/chttp2/transport/http2_settings.c',
- 'src/core/ext/transport/chttp2/transport/huffsyms.c',
- 'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
- 'src/core/ext/transport/chttp2/transport/parsing.c',
- 'src/core/ext/transport/chttp2/transport/stream_lists.c',
- 'src/core/ext/transport/chttp2/transport/stream_map.c',
- 'src/core/ext/transport/chttp2/transport/varint.c',
- 'src/core/ext/transport/chttp2/transport/writing.c',
- 'src/core/ext/transport/chttp2/alpn/alpn.c',
- 'src/core/ext/filters/http/client/http_client_filter.c',
- 'src/core/ext/filters/http/http_filters_plugin.c',
- 'src/core/ext/filters/http/message_compress/message_compress_filter.c',
- 'src/core/ext/filters/http/server/http_server_filter.c',
- 'src/core/lib/http/httpcli_security_connector.c',
- 'src/core/lib/security/context/security_context.c',
- 'src/core/lib/security/credentials/composite/composite_credentials.c',
- 'src/core/lib/security/credentials/credentials.c',
- 'src/core/lib/security/credentials/credentials_metadata.c',
- 'src/core/lib/security/credentials/fake/fake_credentials.c',
- 'src/core/lib/security/credentials/google_default/credentials_generic.c',
- 'src/core/lib/security/credentials/google_default/google_default_credentials.c',
- 'src/core/lib/security/credentials/iam/iam_credentials.c',
- 'src/core/lib/security/credentials/jwt/json_token.c',
- 'src/core/lib/security/credentials/jwt/jwt_credentials.c',
- 'src/core/lib/security/credentials/jwt/jwt_verifier.c',
- 'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
- 'src/core/lib/security/credentials/plugin/plugin_credentials.c',
- 'src/core/lib/security/credentials/ssl/ssl_credentials.c',
- 'src/core/lib/security/transport/client_auth_filter.c',
- 'src/core/lib/security/transport/lb_targets_info.c',
- 'src/core/lib/security/transport/secure_endpoint.c',
- 'src/core/lib/security/transport/security_connector.c',
- 'src/core/lib/security/transport/security_handshaker.c',
- 'src/core/lib/security/transport/server_auth_filter.c',
- 'src/core/lib/security/transport/tsi_error.c',
- 'src/core/lib/security/util/json_util.c',
- 'src/core/lib/surface/init_secure.c',
- 'src/core/tsi/fake_transport_security.c',
- 'src/core/tsi/gts_transport_security.c',
- 'src/core/tsi/ssl_transport_security.c',
- 'src/core/tsi/transport_security_grpc.c',
- 'src/core/tsi/transport_security.c',
- 'src/core/tsi/transport_security_adapter.c',
- 'src/core/ext/transport/chttp2/server/chttp2_server.c',
- 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
- 'src/core/ext/filters/client_channel/channel_connectivity.c',
- 'src/core/ext/filters/client_channel/client_channel.c',
- 'src/core/ext/filters/client_channel/client_channel_factory.c',
- 'src/core/ext/filters/client_channel/client_channel_plugin.c',
- 'src/core/ext/filters/client_channel/connector.c',
- 'src/core/ext/filters/client_channel/http_connect_handshaker.c',
- 'src/core/ext/filters/client_channel/http_proxy.c',
- 'src/core/ext/filters/client_channel/lb_policy.c',
- 'src/core/ext/filters/client_channel/lb_policy_factory.c',
- 'src/core/ext/filters/client_channel/lb_policy_registry.c',
- 'src/core/ext/filters/client_channel/parse_address.c',
- 'src/core/ext/filters/client_channel/proxy_mapper.c',
- 'src/core/ext/filters/client_channel/proxy_mapper_registry.c',
- 'src/core/ext/filters/client_channel/resolver.c',
- 'src/core/ext/filters/client_channel/resolver_factory.c',
- 'src/core/ext/filters/client_channel/resolver_registry.c',
- 'src/core/ext/filters/client_channel/retry_throttle.c',
- 'src/core/ext/filters/client_channel/subchannel.c',
- 'src/core/ext/filters/client_channel/subchannel_index.c',
- 'src/core/ext/filters/client_channel/uri_parser.c',
- 'src/core/ext/filters/deadline/deadline_filter.c',
- 'src/core/ext/transport/chttp2/client/chttp2_connector.c',
- 'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
- 'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
- 'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
- 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
- 'src/core/ext/transport/inproc/inproc_plugin.c',
- 'src/core/ext/transport/inproc/inproc_transport.c',
- 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c',
- 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c',
- 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c',
- 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c',
- 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
+ 'src/core/lib/surface/metadata_array.cc',
+ 'src/core/lib/surface/server.cc',
+ 'src/core/lib/surface/validate_metadata.cc',
+ 'src/core/lib/surface/version.cc',
+ 'src/core/lib/transport/bdp_estimator.cc',
+ 'src/core/lib/transport/byte_stream.cc',
+ 'src/core/lib/transport/connectivity_state.cc',
+ 'src/core/lib/transport/error_utils.cc',
+ 'src/core/lib/transport/metadata.cc',
+ 'src/core/lib/transport/metadata_batch.cc',
+ 'src/core/lib/transport/pid_controller.cc',
+ 'src/core/lib/transport/service_config.cc',
+ 'src/core/lib/transport/static_metadata.cc',
+ 'src/core/lib/transport/status_conversion.cc',
+ 'src/core/lib/transport/timeout_encoding.cc',
+ 'src/core/lib/transport/transport.cc',
+ 'src/core/lib/transport/transport_op_string.cc',
+ 'src/core/lib/debug/trace.cc',
+ 'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
+ 'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
+ 'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
+ 'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
+ 'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
+ 'src/core/ext/transport/chttp2/transport/flow_control.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_data.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_ping.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_settings.cc',
+ 'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
+ 'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
+ 'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
+ 'src/core/ext/transport/chttp2/transport/hpack_table.cc',
+ 'src/core/ext/transport/chttp2/transport/http2_settings.cc',
+ 'src/core/ext/transport/chttp2/transport/huffsyms.cc',
+ 'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
+ 'src/core/ext/transport/chttp2/transport/parsing.cc',
+ 'src/core/ext/transport/chttp2/transport/stream_lists.cc',
+ 'src/core/ext/transport/chttp2/transport/stream_map.cc',
+ 'src/core/ext/transport/chttp2/transport/varint.cc',
+ 'src/core/ext/transport/chttp2/transport/writing.cc',
+ 'src/core/ext/transport/chttp2/alpn/alpn.cc',
+ 'src/core/ext/filters/http/client/http_client_filter.cc',
+ 'src/core/ext/filters/http/http_filters_plugin.cc',
+ 'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
+ 'src/core/ext/filters/http/server/http_server_filter.cc',
+ 'src/core/lib/http/httpcli_security_connector.cc',
+ 'src/core/lib/security/context/security_context.cc',
+ 'src/core/lib/security/credentials/composite/composite_credentials.cc',
+ 'src/core/lib/security/credentials/credentials.cc',
+ 'src/core/lib/security/credentials/credentials_metadata.cc',
+ 'src/core/lib/security/credentials/fake/fake_credentials.cc',
+ 'src/core/lib/security/credentials/google_default/credentials_generic.cc',
+ 'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
+ 'src/core/lib/security/credentials/iam/iam_credentials.cc',
+ 'src/core/lib/security/credentials/jwt/json_token.cc',
+ 'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
+ 'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
+ 'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
+ 'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
+ 'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
+ 'src/core/lib/security/transport/client_auth_filter.cc',
+ 'src/core/lib/security/transport/lb_targets_info.cc',
+ 'src/core/lib/security/transport/secure_endpoint.cc',
+ 'src/core/lib/security/transport/security_connector.cc',
+ 'src/core/lib/security/transport/security_handshaker.cc',
+ 'src/core/lib/security/transport/server_auth_filter.cc',
+ 'src/core/lib/security/transport/tsi_error.cc',
+ 'src/core/lib/security/util/json_util.cc',
+ 'src/core/lib/surface/init_secure.cc',
+ 'src/core/tsi/fake_transport_security.cc',
+ 'src/core/tsi/gts_transport_security.cc',
+ 'src/core/tsi/ssl_transport_security.cc',
+ 'src/core/tsi/transport_security_grpc.cc',
+ 'src/core/tsi/transport_security.cc',
+ 'src/core/tsi/transport_security_adapter.cc',
+ 'src/core/ext/transport/chttp2/server/chttp2_server.cc',
+ 'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
+ 'src/core/ext/filters/client_channel/backup_poller.cc',
+ 'src/core/ext/filters/client_channel/channel_connectivity.cc',
+ 'src/core/ext/filters/client_channel/client_channel.cc',
+ 'src/core/ext/filters/client_channel/client_channel_factory.cc',
+ 'src/core/ext/filters/client_channel/client_channel_plugin.cc',
+ 'src/core/ext/filters/client_channel/connector.cc',
+ 'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
+ 'src/core/ext/filters/client_channel/http_proxy.cc',
+ 'src/core/ext/filters/client_channel/lb_policy.cc',
+ 'src/core/ext/filters/client_channel/lb_policy_factory.cc',
+ 'src/core/ext/filters/client_channel/lb_policy_registry.cc',
+ 'src/core/ext/filters/client_channel/parse_address.cc',
+ 'src/core/ext/filters/client_channel/proxy_mapper.cc',
+ 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
+ 'src/core/ext/filters/client_channel/resolver.cc',
+ 'src/core/ext/filters/client_channel/resolver_factory.cc',
+ 'src/core/ext/filters/client_channel/resolver_registry.cc',
+ 'src/core/ext/filters/client_channel/retry_throttle.cc',
+ 'src/core/ext/filters/client_channel/subchannel.cc',
+ 'src/core/ext/filters/client_channel/subchannel_index.cc',
+ 'src/core/ext/filters/client_channel/uri_parser.cc',
+ 'src/core/ext/filters/deadline/deadline_filter.cc',
+ 'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
+ 'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
+ 'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
+ 'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
+ 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
+ 'src/core/ext/transport/inproc/inproc_plugin.cc',
+ 'src/core/ext/transport/inproc/inproc_transport.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
- 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c',
- 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c',
- 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c',
- 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c',
- 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
- 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c',
- 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c',
- 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c',
- 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c',
- 'src/core/ext/filters/load_reporting/load_reporting.c',
- 'src/core/ext/filters/load_reporting/load_reporting_filter.c',
- 'src/core/ext/census/base_resources.c',
- 'src/core/ext/census/context.c',
- 'src/core/ext/census/gen/census.pb.c',
- 'src/core/ext/census/gen/trace_context.pb.c',
- 'src/core/ext/census/grpc_context.c',
- 'src/core/ext/census/grpc_filter.c',
- 'src/core/ext/census/grpc_plugin.c',
- 'src/core/ext/census/initialize.c',
- 'src/core/ext/census/intrusive_hash_map.c',
- 'src/core/ext/census/mlog.c',
- 'src/core/ext/census/operation.c',
- 'src/core/ext/census/placeholders.c',
- 'src/core/ext/census/resource.c',
- 'src/core/ext/census/trace_context.c',
- 'src/core/ext/census/tracing.c',
- 'src/core/ext/filters/max_age/max_age_filter.c',
- 'src/core/ext/filters/message_size/message_size_filter.c',
- 'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c',
- 'src/core/ext/filters/workarounds/workaround_utils.c',
- 'src/core/plugin_registry/grpc_plugin_registry.c',
+ 'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
+ 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
+ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
+ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
+ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
+ 'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc',
+ 'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
+ 'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
+ 'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
+ 'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
+ 'src/core/ext/census/grpc_context.cc',
+ 'src/core/ext/filters/max_age/max_age_filter.cc',
+ 'src/core/ext/filters/message_size/message_size_filter.cc',
+ 'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
+ 'src/core/ext/filters/workarounds/workaround_utils.cc',
+ 'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
'third_party/boringssl/crypto/aes/key_wrap.c',
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index 3194a893d7..61c4157375 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
-VERSION='1.7.0.dev0'
+VERSION='1.8.0.dev0'
diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py
index 510bf422a0..f2395eb26c 100644
--- a/src/python/grpcio/support.py
+++ b/src/python/grpcio/support.py
@@ -94,7 +94,7 @@ def diagnose_attribute_error(build_ext, error):
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
- AttributeError: diagnose_attribute_error
+ AttributeError: diagnose_attribute_error,
}
@@ -102,8 +102,10 @@ def diagnose_build_ext_error(build_ext, error, formatted):
diagnostic = _ERROR_DIAGNOSES.get(type(error))
if diagnostic is None:
raise commands.CommandError(
- "\n\nWe could not diagnose your build failure. Please file an issue at "
- "http://www.github.com/grpc/grpc with `[Python install]` in the title."
- "\n\n{}".format(formatted))
+ "\n\nWe could not diagnose your build failure. If you are unable to "
+ "proceed, please file an issue at http://www.github.com/grpc/grpc "
+ "with `[Python install]` in the title; please attach the whole log "
+ "(including everything that may have appeared above the Python "
+ "backtrace).\n\n{}".format(formatted))
else:
diagnostic(build_ext, error)
diff --git a/src/python/grpcio_health_checking/grpc_version.py b/src/python/grpcio_health_checking/grpc_version.py
index ef68bad17a..889297f020 100644
--- a/src/python/grpcio_health_checking/grpc_version.py
+++ b/src/python/grpcio_health_checking/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!
-VERSION='1.7.0.dev0'
+VERSION='1.8.0.dev0'
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index 0299b4cca9..1f5e9c5130 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -34,7 +34,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
PACKAGE_DIRECTORIES = {
'': '.',
diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py
index 55ab959cc5..192f4cc217 100644
--- a/src/python/grpcio_reflection/grpc_version.py
+++ b/src/python/grpcio_reflection/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
-VERSION='1.7.0.dev0'
+VERSION='1.8.0.dev0'
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index bed2311b59..9360550afb 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -35,7 +35,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
PACKAGE_DIRECTORIES = {
'': '.',
diff --git a/src/python/grpcio_testing/grpc_testing/__init__.py b/src/python/grpcio_testing/grpc_testing/__init__.py
index 917e11808e..994274500c 100644
--- a/src/python/grpcio_testing/grpc_testing/__init__.py
+++ b/src/python/grpcio_testing/grpc_testing/__init__.py
@@ -213,7 +213,7 @@ class StreamStreamChannelRpc(six.with_metaclass(abc.ABCMeta)):
raise NotImplementedError()
-class Channel(six.with_metaclass(abc.ABCMeta), grpc.Channel):
+class Channel(six.with_metaclass(abc.ABCMeta, grpc.Channel)):
"""A grpc.Channel double with which to test a system that invokes RPCs."""
@abc.abstractmethod
diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py
index 41a75d46f6..83470c2825 100644
--- a/src/python/grpcio_testing/grpc_version.py
+++ b/src/python/grpcio_testing/grpc_version.py
@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
-VERSION = '1.5.0.dev0'
+VERSION='1.8.0.dev0'
diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py
index 162200112a..93f84572b7 100644
--- a/src/python/grpcio_tests/commands.py
+++ b/src/python/grpcio_tests/commands.py
@@ -67,55 +67,6 @@ class GatherProto(setuptools.Command):
open(path, 'a').close()
-class BuildProtoModules(setuptools.Command):
- """Command to generate project *_pb2.py modules from proto files."""
-
- description = 'build protobuf modules'
- user_options = [
- ('include=', None, 'path patterns to include in protobuf generation'),
- ('exclude=', None, 'path patterns to exclude from protobuf generation')
- ]
-
- def initialize_options(self):
- self.exclude = None
- self.include = r'.*\.proto$'
-
- def finalize_options(self):
- pass
-
- def run(self):
- import grpc_tools.protoc as protoc
-
- include_regex = re.compile(self.include)
- exclude_regex = re.compile(self.exclude) if self.exclude else None
- paths = []
- for walk_root, directories, filenames in os.walk(PROTO_STEM):
- for filename in filenames:
- path = os.path.join(walk_root, filename)
- if include_regex.match(path) and not (
- exclude_regex and exclude_regex.match(path)):
- paths.append(path)
-
- # TODO(kpayson): It would be nice to do this in a batch command,
- # but we currently have name conflicts in src/proto
- for path in paths:
- command = [
- 'grpc_tools.protoc',
- '-I {}'.format(PROTO_STEM),
- '--python_out={}'.format(PROTO_STEM),
- '--grpc_python_out={}'.format(PROTO_STEM),
- ] + [path]
- if protoc.main(command) != 0:
- sys.stderr.write(
- 'warning: Command:\n{}\nFailed'.format(command))
-
- # Generated proto directories dont include __init__.py, but
- # these are needed for python package resolution
- for walk_root, _, _ in os.walk(PROTO_STEM):
- path = os.path.join(walk_root, '__init__.py')
- open(path, 'a').close()
-
-
class BuildPy(build_py.build_py):
"""Custom project build command."""
diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py
index 9e54dc9f75..7065edd3bf 100644
--- a/src/python/grpcio_tests/grpc_version.py
+++ b/src/python/grpcio_tests/grpc_version.py
@@ -14,4 +14,4 @@
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
-VERSION='1.7.0.dev0'
+VERSION='1.8.0.dev0'
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py b/src/python/grpcio_tests/tests/_sanity/__init__.py
index 5772620b60..5772620b60 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py
+++ b/src/python/grpcio_tests/tests/_sanity/__init__.py
diff --git a/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py b/src/python/grpcio_tests/tests/_sanity/_sanity_test.py
index 19bc8801eb..b4079850ff 100644
--- a/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py
+++ b/src/python/grpcio_tests/tests/_sanity/_sanity_test.py
@@ -21,24 +21,25 @@ import six
import tests
-class Sanity(unittest.TestCase):
+class SanityTest(unittest.TestCase):
+
+ maxDiff = 32768
def testTestsJsonUpToDate(self):
"""Autodiscovers all test suites and checks that tests.json is up to date"""
loader = tests.Loader()
loader.loadTestsFromNames(['tests'])
- test_suite_names = [
+ test_suite_names = sorted({
test_case_class.id().rsplit('.', 1)[0]
for test_case_class in tests._loader.iterate_suite_cases(
loader.suite)
- ]
- test_suite_names = sorted(set(test_suite_names))
+ })
tests_json_string = pkg_resources.resource_string('tests', 'tests.json')
- if six.PY3:
- tests_json_string = tests_json_string.decode()
- tests_json = json.loads(tests_json_string)
- self.assertListEqual(test_suite_names, tests_json)
+ tests_json = json.loads(tests_json_string.decode()
+ if six.PY3 else tests_json_string)
+
+ self.assertSequenceEqual(tests_json, test_suite_names)
if __name__ == '__main__':
diff --git a/src/python/grpcio_tests/tests/http2/negative_http2_client.py b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
index 6d8a6bce77..8dab5b67f1 100644
--- a/src/python/grpcio_tests/tests/http2/negative_http2_client.py
+++ b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
@@ -17,7 +17,7 @@ import argparse
import grpc
import time
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from src.proto.grpc.testing import messages_pb2
@@ -147,7 +147,7 @@ def _stub(server_host, server_port):
target = '{}:{}'.format(server_host, server_port)
channel = grpc.insecure_channel(target)
grpc.channel_ready_future(channel).result()
- return test_pb2.TestServiceStub(channel)
+ return test_pb2_grpc.TestServiceStub(channel)
def main():
diff --git a/src/python/grpcio_tests/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py
index 47ae96472d..e520c08290 100644
--- a/src/python/grpcio_tests/tests/interop/client.py
+++ b/src/python/grpcio_tests/tests/interop/client.py
@@ -19,7 +19,7 @@ import os
from google import auth as google_auth
from google.auth import jwt as google_auth_jwt
import grpc
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import methods
from tests.interop import resources
@@ -106,9 +106,9 @@ def _stub(args):
else:
channel = grpc.insecure_channel(target)
if args.test_case == "unimplemented_service":
- return test_pb2.UnimplementedServiceStub(channel)
+ return test_pb2_grpc.UnimplementedServiceStub(channel)
else:
- return test_pb2.TestServiceStub(channel)
+ return test_pb2_grpc.TestServiceStub(channel)
def _test_case_from_arg(test_case_arg):
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
index 71493bfec6..5b84001aab 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
@@ -33,7 +33,7 @@ from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
-import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
+import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
@@ -138,7 +138,7 @@ def _CreateService():
"""
servicer_methods = _ServicerMethods()
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
@@ -157,11 +157,12 @@ def _CreateService():
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
+ server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
@@ -173,16 +174,17 @@ def _CreateIncompleteService():
servicer_methods implements none of the methods required of it.
"""
- class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
+ class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
pass
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
+ getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
+ server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
+ stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
@@ -223,10 +225,11 @@ class PythonPluginTest(unittest.TestCase):
def testImportAttributes(self):
# check that we can access the generated module and its members.
- self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
- self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
self.assertIsNotNone(
- getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
+ getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService()
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
index 1aeb62a7c5..7868cdbfb3 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
@@ -12,22 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import collections
+import abc
from concurrent import futures
import contextlib
-import distutils.spawn
-import errno
import importlib
import os
-import os.path
+from os import path
import pkgutil
+import platform
import shutil
-import subprocess
import sys
import tempfile
-import threading
import unittest
-import platform
+
+import six
import grpc
from grpc_tools import protoc
@@ -37,292 +35,285 @@ _MESSAGES_IMPORT = b'import "messages.proto";'
_SPLIT_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing.split;'
_COMMON_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing;'
+_RELATIVE_PROTO_PATH = 'relative_proto_path'
+_RELATIVE_PYTHON_OUT = 'relative_python_out'
+
@contextlib.contextmanager
-def _system_path(path):
+def _system_path(path_insertion):
old_system_path = sys.path[:]
- sys.path = sys.path[0:1] + path + sys.path[1:]
+ sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
yield
sys.path = old_system_path
-class DummySplitServicer(object):
+# NOTE(nathaniel): https://twitter.com/exoplaneteer/status/677259364256747520
+# Life lesson "just always default to idempotence" reinforced.
+def _create_directory_tree(root, path_components_sequence):
+ created = set()
+ for path_components in path_components_sequence:
+ thus_far = ''
+ for path_component in path_components:
+ relative_path = path.join(thus_far, path_component)
+ if relative_path not in created:
+ os.makedirs(path.join(root, relative_path))
+ created.add(relative_path)
+ thus_far = path.join(thus_far, path_component)
+
+
+def _massage_proto_content(proto_content, test_name_bytes,
+ messages_proto_relative_file_name_bytes):
+ package_substitution = (b'package grpc_protoc_plugin.invocation_testing.' +
+ test_name_bytes + b';')
+ common_namespace_substituted = proto_content.replace(_COMMON_NAMESPACE,
+ package_substitution)
+ split_namespace_substituted = common_namespace_substituted.replace(
+ _SPLIT_NAMESPACE, package_substitution)
+ message_import_replaced = split_namespace_substituted.replace(
+ _MESSAGES_IMPORT,
+ b'import "' + messages_proto_relative_file_name_bytes + b'";')
+ return message_import_replaced
+
+
+def _packagify(directory):
+ for subdirectory, _, _ in os.walk(directory):
+ init_file_name = path.join(subdirectory, '__init__.py')
+ with open(init_file_name, 'wb') as init_file:
+ init_file.write(b'')
- def __init__(self, request_class, response_class):
- self.request_class = request_class
- self.response_class = response_class
+
+class _Servicer(object):
+
+ def __init__(self, response_class):
+ self._response_class = response_class
def Call(self, request, context):
- return self.response_class()
+ return self._response_class()
-class SeparateTestMixin(object):
+def _protoc(proto_path, python_out, grpc_python_out_flag, grpc_python_out,
+ absolute_proto_file_names):
+ args = [
+ '',
+ '--proto_path={}'.format(proto_path),
+ ]
+ if python_out is not None:
+ args.append('--python_out={}'.format(python_out))
+ if grpc_python_out is not None:
+ args.append('--grpc_python_out={}:{}'.format(grpc_python_out_flag,
+ grpc_python_out))
+ args.extend(absolute_proto_file_names)
+ return protoc.main(args)
- def testImportAttributes(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- pb2.Request
- pb2.Response
- if self.should_find_services_in_pb2:
- pb2.TestServiceServicer
- else:
- with self.assertRaises(AttributeError):
- pb2.TestServiceServicer
-
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- pb2_grpc.TestServiceServicer
- with self.assertRaises(AttributeError):
- pb2_grpc.Request
- with self.assertRaises(AttributeError):
- pb2_grpc.Response
-
- def testCall(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- pb2_grpc.add_TestServiceServicer_to_server(
- DummySplitServicer(pb2.Request, pb2.Response), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = pb2_grpc.TestServiceStub(channel)
- request = pb2.Request()
- expected_response = pb2.Response()
- response = stub.Call(request)
- self.assertEqual(expected_response, response)
-
-
-class CommonTestMixin(object):
-
- def testImportAttributes(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- pb2.Request
- pb2.Response
- if self.should_find_services_in_pb2:
- pb2.TestServiceServicer
- else:
- with self.assertRaises(AttributeError):
- pb2.TestServiceServicer
-
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- pb2_grpc.TestServiceServicer
- with self.assertRaises(AttributeError):
- pb2_grpc.Request
- with self.assertRaises(AttributeError):
- pb2_grpc.Response
-
- def testCall(self):
- with _system_path([self.python_out_directory]):
- pb2 = importlib.import_module(self.pb2_import)
- with _system_path([self.grpc_python_out_directory]):
- pb2_grpc = importlib.import_module(self.pb2_grpc_import)
- server = grpc.server(
- futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
- pb2_grpc.add_TestServiceServicer_to_server(
- DummySplitServicer(pb2.Request, pb2.Response), server)
- port = server.add_insecure_port('[::]:0')
- server.start()
- channel = grpc.insecure_channel('localhost:{}'.format(port))
- stub = pb2_grpc.TestServiceStub(channel)
- request = pb2.Request()
- expected_response = pb2.Response()
- response = stub.Call(request)
- self.assertEqual(expected_response, response)
-
-
-@unittest.skipIf(platform.python_implementation() == "PyPy",
- "Skip test if run with PyPy")
-class SameSeparateTest(unittest.TestCase, SeparateTestMixin):
- def setUp(self):
- same_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
- self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = os.path.join(self.directory,
- 'grpc_python_out')
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- os.makedirs(self.grpc_python_out_directory)
- same_proto_file = os.path.join(self.proto_directory,
- 'same_separate.proto')
- open(same_proto_file, 'wb').write(
- same_proto_contents.replace(
- _COMMON_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.same_separate;'))
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out=grpc_2_0:{}'.format(
- self.grpc_python_out_directory),
- same_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.grpc_python_out_directory, '__init__.py'),
- 'w').write('')
- open(os.path.join(self.python_out_directory, '__init__.py'),
- 'w').write('')
- self.pb2_import = 'same_separate_pb2'
- self.pb2_grpc_import = 'same_separate_pb2_grpc'
- self.should_find_services_in_pb2 = False
+class _Mid2016ProtocStyle(object):
- def tearDown(self):
- shutil.rmtree(self.directory)
+ def name(self):
+ return 'Mid2016ProtocStyle'
+ def grpc_in_pb2_expected(self):
+ return True
-@unittest.skipIf(platform.python_implementation() == "PyPy",
- "Skip test if run with PyPy")
-class SameCommonTest(unittest.TestCase, CommonTestMixin):
+ def protoc(self, proto_path, python_out, absolute_proto_file_names):
+ return (_protoc(proto_path, python_out, 'grpc_1_0', python_out,
+ absolute_proto_file_names),)
- def setUp(self):
- same_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
- self.directory = tempfile.mkdtemp(suffix='same_common', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = self.python_out_directory
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- same_proto_file = os.path.join(self.proto_directory,
- 'same_common.proto')
- open(same_proto_file, 'wb').write(
- same_proto_contents.replace(
- _COMMON_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.same_common;'))
-
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out={}'.format(self.grpc_python_out_directory),
- same_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'),
- 'w').write('')
- self.pb2_import = 'same_common_pb2'
- self.pb2_grpc_import = 'same_common_pb2_grpc'
- self.should_find_services_in_pb2 = True
- def tearDown(self):
- shutil.rmtree(self.directory)
+class _SingleProtocExecutionProtocStyle(object):
+ def name(self):
+ return 'SingleProtocExecutionProtocStyle'
-@unittest.skipIf(platform.python_implementation() == "PyPy",
- "Skip test if run with PyPy")
-class SplitCommonTest(unittest.TestCase, CommonTestMixin):
+ def grpc_in_pb2_expected(self):
+ return False
- def setUp(self):
- services_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_services',
- 'services.proto')
- messages_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_messages',
- 'messages.proto')
- self.directory = tempfile.mkdtemp(suffix='split_common', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = self.python_out_directory
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- services_proto_file = os.path.join(self.proto_directory,
- 'split_common_services.proto')
- messages_proto_file = os.path.join(self.proto_directory,
- 'split_common_messages.proto')
- open(services_proto_file, 'wb').write(
- services_proto_contents.replace(
- _MESSAGES_IMPORT, b'import "split_common_messages.proto";')
- .replace(
- _SPLIT_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.split_common;'))
- open(messages_proto_file, 'wb').write(
- messages_proto_contents.replace(
- _SPLIT_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.split_common;'))
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out={}'.format(self.grpc_python_out_directory),
- services_proto_file,
- messages_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'),
- 'w').write('')
- self.pb2_import = 'split_common_messages_pb2'
- self.pb2_grpc_import = 'split_common_services_pb2_grpc'
- self.should_find_services_in_pb2 = False
+ def protoc(self, proto_path, python_out, absolute_proto_file_names):
+ return (_protoc(proto_path, python_out, 'grpc_2_0', python_out,
+ absolute_proto_file_names),)
+
+
+class _ProtoBeforeGrpcProtocStyle(object):
+
+ def name(self):
+ return 'ProtoBeforeGrpcProtocStyle'
+
+ def grpc_in_pb2_expected(self):
+ return False
+
+ def protoc(self, proto_path, python_out, absolute_proto_file_names):
+ pb2_protoc_exit_code = _protoc(proto_path, python_out, None, None,
+ absolute_proto_file_names)
+ pb2_grpc_protoc_exit_code = _protoc(
+ proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
+ return pb2_protoc_exit_code, pb2_grpc_protoc_exit_code,
- def tearDown(self):
- shutil.rmtree(self.directory)
+class _GrpcBeforeProtoProtocStyle(object):
-@unittest.skipIf(platform.python_implementation() == "PyPy",
- "Skip test if run with PyPy")
-class SplitSeparateTest(unittest.TestCase, SeparateTestMixin):
+ def name(self):
+ return 'GrpcBeforeProtoProtocStyle'
+
+ def grpc_in_pb2_expected(self):
+ return False
+
+ def protoc(self, proto_path, python_out, absolute_proto_file_names):
+ pb2_grpc_protoc_exit_code = _protoc(
+ proto_path, None, 'grpc_2_0', python_out, absolute_proto_file_names)
+ pb2_protoc_exit_code = _protoc(proto_path, python_out, None, None,
+ absolute_proto_file_names)
+ return pb2_grpc_protoc_exit_code, pb2_protoc_exit_code,
+
+
+_PROTOC_STYLES = (_Mid2016ProtocStyle(), _SingleProtocExecutionProtocStyle(),
+ _ProtoBeforeGrpcProtocStyle(), _GrpcBeforeProtoProtocStyle(),)
+
+
+@unittest.skipIf(platform.python_implementation() == 'PyPy',
+ 'Skip test if run with PyPy!')
+class _Test(six.with_metaclass(abc.ABCMeta, unittest.TestCase)):
def setUp(self):
- services_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_services',
- 'services.proto')
- messages_proto_contents = pkgutil.get_data(
- 'tests.protoc_plugin.protos.invocation_testing.split_messages',
- 'messages.proto')
- self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.')
- self.proto_directory = os.path.join(self.directory, 'proto_path')
- self.python_out_directory = os.path.join(self.directory, 'python_out')
- self.grpc_python_out_directory = os.path.join(self.directory,
- 'grpc_python_out')
- os.makedirs(self.proto_directory)
- os.makedirs(self.python_out_directory)
- os.makedirs(self.grpc_python_out_directory)
- services_proto_file = os.path.join(self.proto_directory,
- 'split_separate_services.proto')
- messages_proto_file = os.path.join(self.proto_directory,
- 'split_separate_messages.proto')
- open(services_proto_file, 'wb').write(
- services_proto_contents.replace(
- _MESSAGES_IMPORT, b'import "split_separate_messages.proto";')
- .replace(
- _SPLIT_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.split_separate;'
- ))
- open(messages_proto_file, 'wb').write(
- messages_proto_contents.replace(
- _SPLIT_NAMESPACE,
- b'package grpc_protoc_plugin.invocation_testing.split_separate;'
- ))
- protoc_result = protoc.main([
- '',
- '--proto_path={}'.format(self.proto_directory),
- '--python_out={}'.format(self.python_out_directory),
- '--grpc_python_out=grpc_2_0:{}'.format(
- self.grpc_python_out_directory),
- services_proto_file,
- messages_proto_file,
- ])
- if protoc_result != 0:
- raise Exception("unexpected protoc error")
- open(os.path.join(self.python_out_directory, '__init__.py'),
- 'w').write('')
- self.pb2_import = 'split_separate_messages_pb2'
- self.pb2_grpc_import = 'split_separate_services_pb2_grpc'
- self.should_find_services_in_pb2 = False
+ self._directory = tempfile.mkdtemp(suffix=self.NAME, dir='.')
+ self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
+ self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
+
+ os.makedirs(self._proto_path)
+ os.makedirs(self._python_out)
+
+ proto_directories_and_names = {
+ (self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES,
+ self.MESSAGES_PROTO_FILE_NAME,),
+ (self.SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES,
+ self.SERVICES_PROTO_FILE_NAME,),
+ }
+ messages_proto_relative_file_name_forward_slashes = '/'.join(
+ self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES + (
+ self.MESSAGES_PROTO_FILE_NAME,))
+ _create_directory_tree(self._proto_path, (
+ relative_proto_directory_names
+ for relative_proto_directory_names, _ in proto_directories_and_names
+ ))
+ self._absolute_proto_file_names = set()
+ for relative_directory_names, file_name in proto_directories_and_names:
+ absolute_proto_file_name = path.join(
+ self._proto_path, *relative_directory_names + (file_name,))
+ raw_proto_content = pkgutil.get_data(
+ 'tests.protoc_plugin.protos.invocation_testing',
+ path.join(*relative_directory_names + (file_name,)))
+ massaged_proto_content = _massage_proto_content(
+ raw_proto_content,
+ self.NAME.encode(),
+ messages_proto_relative_file_name_forward_slashes.encode())
+ with open(absolute_proto_file_name, 'wb') as proto_file:
+ proto_file.write(massaged_proto_content)
+ self._absolute_proto_file_names.add(absolute_proto_file_name)
def tearDown(self):
- shutil.rmtree(self.directory)
+ shutil.rmtree(self._directory)
+
+ def _protoc(self):
+ protoc_exit_codes = self.PROTOC_STYLE.protoc(
+ self._proto_path, self._python_out, self._absolute_proto_file_names)
+ for protoc_exit_code in protoc_exit_codes:
+ self.assertEqual(0, protoc_exit_code)
+
+ _packagify(self._python_out)
+
+ generated_modules = {}
+ expected_generated_full_module_names = {
+ self.EXPECTED_MESSAGES_PB2,
+ self.EXPECTED_SERVICES_PB2,
+ self.EXPECTED_SERVICES_PB2_GRPC,
+ }
+ with _system_path([self._python_out]):
+ for full_module_name in expected_generated_full_module_names:
+ module = importlib.import_module(full_module_name)
+ generated_modules[full_module_name] = module
+
+ self._messages_pb2 = generated_modules[self.EXPECTED_MESSAGES_PB2]
+ self._services_pb2 = generated_modules[self.EXPECTED_SERVICES_PB2]
+ self._services_pb2_grpc = generated_modules[
+ self.EXPECTED_SERVICES_PB2_GRPC]
+
+ def _services_modules(self):
+ if self.PROTOC_STYLE.grpc_in_pb2_expected():
+ return self._services_pb2, self._services_pb2_grpc,
+ else:
+ return self._services_pb2_grpc,
+
+ def test_imported_attributes(self):
+ self._protoc()
+
+ self._messages_pb2.Request
+ self._messages_pb2.Response
+ self._services_pb2.DESCRIPTOR.services_by_name['TestService']
+ for services_module in self._services_modules():
+ services_module.TestServiceStub
+ services_module.TestServiceServicer
+ services_module.add_TestServiceServicer_to_server
+
+ def test_call(self):
+ self._protoc()
+
+ for services_module in self._services_modules():
+ server = grpc.server(
+ futures.ThreadPoolExecutor(
+ max_workers=test_constants.POOL_SIZE))
+ services_module.add_TestServiceServicer_to_server(
+ _Servicer(self._messages_pb2.Response), server)
+ port = server.add_insecure_port('[::]:0')
+ server.start()
+ channel = grpc.insecure_channel('localhost:{}'.format(port))
+ stub = services_module.TestServiceStub(channel)
+ response = stub.Call(self._messages_pb2.Request())
+ self.assertEqual(self._messages_pb2.Response(), response)
+
+
+def _create_test_case_class(split_proto, protoc_style):
+ attributes = {}
+
+ name = '{}{}'.format('SplitProto' if split_proto else 'SameProto',
+ protoc_style.name())
+ attributes['NAME'] = name
+
+ if split_proto:
+ attributes['MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
+ 'split_messages', 'sub',)
+ attributes['MESSAGES_PROTO_FILE_NAME'] = 'messages.proto'
+ attributes['SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
+ 'split_services',)
+ attributes['SERVICES_PROTO_FILE_NAME'] = 'services.proto'
+ attributes['EXPECTED_MESSAGES_PB2'] = 'split_messages.sub.messages_pb2'
+ attributes['EXPECTED_SERVICES_PB2'] = 'split_services.services_pb2'
+ attributes['EXPECTED_SERVICES_PB2_GRPC'] = (
+ 'split_services.services_pb2_grpc')
+ else:
+ attributes['MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES'] = ()
+ attributes['MESSAGES_PROTO_FILE_NAME'] = 'same.proto'
+ attributes['SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES'] = ()
+ attributes['SERVICES_PROTO_FILE_NAME'] = 'same.proto'
+ attributes['EXPECTED_MESSAGES_PB2'] = 'same_pb2'
+ attributes['EXPECTED_SERVICES_PB2'] = 'same_pb2'
+ attributes['EXPECTED_SERVICES_PB2_GRPC'] = 'same_pb2_grpc'
+
+ attributes['PROTOC_STYLE'] = protoc_style
+
+ attributes['__module__'] = _Test.__module__
+
+ return type('{}Test'.format(name), (_Test,), attributes)
+
+
+def _create_test_case_classes():
+ for split_proto in (False, True,):
+ for protoc_style in _PROTOC_STYLES:
+ yield _create_test_case_class(split_proto, protoc_style)
+
+
+def load_tests(loader, tests, pattern):
+ tests = tuple(
+ loader.loadTestsFromTestCase(test_case_class)
+ for test_case_class in _create_test_case_classes())
+ return unittest.TestSuite(tests=tests)
if __name__ == '__main__':
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
index 83f21ecbbb..424b153ff8 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
@@ -12,19 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import argparse
import contextlib
-import distutils.spawn
-import errno
-import itertools
+import importlib
import os
-import pkg_resources
+from os import path
+import pkgutil
import shutil
-import subprocess
import sys
import tempfile
import threading
-import time
import unittest
from six import moves
@@ -33,12 +29,22 @@ from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
+from grpc_tools import protoc
from tests.unit.framework.common import test_constants
-import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
-import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
-import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
-import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
+_RELATIVE_PROTO_PATH = 'relative_proto_path'
+_RELATIVE_PYTHON_OUT = 'relative_python_out'
+
+_PROTO_FILES_PATH_COMPONENTS = (
+ ('beta_grpc_plugin_test', 'payload', 'test_payload.proto',),
+ ('beta_grpc_plugin_test', 'requests', 'r', 'test_requests.proto',),
+ ('beta_grpc_plugin_test', 'responses', 'test_responses.proto',),
+ ('beta_grpc_plugin_test', 'service', 'test_service.proto',),)
+
+_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
+_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
+_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
+_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
@@ -47,12 +53,50 @@ SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
+@contextlib.contextmanager
+def _system_path(path_insertion):
+ old_system_path = sys.path[:]
+ sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
+ yield
+ sys.path = old_system_path
+
+
+def _create_directory_tree(root, path_components_sequence):
+ created = set()
+ for path_components in path_components_sequence:
+ thus_far = ''
+ for path_component in path_components:
+ relative_path = path.join(thus_far, path_component)
+ if relative_path not in created:
+ os.makedirs(path.join(root, relative_path))
+ created.add(relative_path)
+ thus_far = path.join(thus_far, path_component)
+
+
+def _massage_proto_content(raw_proto_content):
+ imports_substituted = raw_proto_content.replace(
+ b'import "tests/protoc_plugin/protos/',
+ b'import "beta_grpc_plugin_test/')
+ package_statement_substituted = imports_substituted.replace(
+ b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
+ return package_statement_substituted
+
+
+def _packagify(directory):
+ for subdirectory, _, _ in os.walk(directory):
+ init_file_name = path.join(subdirectory, '__init__.py')
+ with open(init_file_name, 'wb') as init_file:
+ init_file.write(b'')
+
+
class _ServicerMethods(object):
- def __init__(self):
+ def __init__(self, payload_pb2, responses_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
+ self._payload_pb2 = payload_pb2
+ self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
@@ -79,22 +123,22 @@ class _ServicerMethods(object):
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
- response = response_pb2.SimpleResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.SimpleResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
- response = response_pb2.StreamingInputCallResponse()
+ response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
@@ -105,8 +149,8 @@ class _ServicerMethods(object):
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
@@ -115,8 +159,8 @@ class _ServicerMethods(object):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
@@ -125,7 +169,7 @@ class _ServicerMethods(object):
@contextlib.contextmanager
-def _CreateService():
+def _CreateService(payload_pb2, responses_pb2, service_pb2):
"""Provides a servicer backend and a stub.
The servicer is just the implementation of the actual servicer passed to the
@@ -136,7 +180,7 @@ def _CreateService():
 the service bound to the stub and stub is the stub on which to invoke
RPCs.
"""
- servicer_methods = _ServicerMethods()
+ servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
@@ -161,12 +205,12 @@ def _CreateService():
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
- yield (servicer_methods, stub)
+ yield servicer_methods, stub,
server.stop(0)
@contextlib.contextmanager
-def _CreateIncompleteService():
+def _CreateIncompleteService(service_pb2):
"""Provides a servicer backend that fails to implement methods and its stub.
The servicer is just the implementation of the actual servicer passed to the
@@ -192,16 +236,16 @@ def _CreateIncompleteService():
server.stop(0)
-def _streaming_input_request_iterator():
+def _streaming_input_request_iterator(payload_pb2, requests_pb2):
for _ in range(3):
- request = request_pb2.StreamingInputCallRequest()
+ request = requests_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
-def _streaming_output_request():
- request = request_pb2.StreamingOutputCallRequest()
+def _streaming_output_request(requests_pb2):
+ request = requests_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
@@ -209,11 +253,11 @@ def _streaming_output_request():
return request
-def _full_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+def _full_duplex_request_iterator(requests_pb2):
+ request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
- request = request_pb2.StreamingOutputCallRequest()
+ request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
@@ -227,22 +271,78 @@ class PythonPluginTest(unittest.TestCase):
methods and does not exist for response-streaming methods.
"""
+ def setUp(self):
+ self._directory = tempfile.mkdtemp(dir='.')
+ self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
+ self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
+
+ os.makedirs(self._proto_path)
+ os.makedirs(self._python_out)
+
+ directories_path_components = {
+ proto_file_path_components[:-1]
+ for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
+ }
+ _create_directory_tree(self._proto_path, directories_path_components)
+ self._proto_file_names = set()
+ for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
+ raw_proto_content = pkgutil.get_data(
+ 'tests.protoc_plugin.protos',
+ path.join(*proto_file_path_components[1:]))
+ massaged_proto_content = _massage_proto_content(raw_proto_content)
+ proto_file_name = path.join(self._proto_path,
+ *proto_file_path_components)
+ with open(proto_file_name, 'wb') as proto_file:
+ proto_file.write(massaged_proto_content)
+ self._proto_file_names.add(proto_file_name)
+
+ def tearDown(self):
+ shutil.rmtree(self._directory)
+
+ def _protoc(self):
+ args = [
+ '',
+ '--proto_path={}'.format(self._proto_path),
+ '--python_out={}'.format(self._python_out),
+ '--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
+ ] + list(self._proto_file_names)
+ protoc_exit_code = protoc.main(args)
+ self.assertEqual(0, protoc_exit_code)
+
+ _packagify(self._python_out)
+
+ with _system_path([
+ self._python_out,
+ ]):
+ self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
+ self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
+ self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
+ self._service_pb2 = importlib.import_module(_SERVICE_PB2)
+
def testImportAttributes(self):
+ self._protoc()
+
# check that we can access the generated module and its members.
- self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
- self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
- getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+ getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
- getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None))
+ getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
def testUpDown(self):
- with _CreateService():
- request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2):
+ self._requests_pb2.SimpleRequest(response_size=13)
def testIncompleteServicer(self):
- with _CreateIncompleteService() as (_, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateIncompleteService(self._service_pb2) as (_, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
try:
stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
except face.AbortionError as error:
@@ -250,15 +350,21 @@ class PythonPluginTest(unittest.TestCase):
error.code)
def testUnaryCall(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
expected_response = methods.UnaryCall(request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallFuture(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with methods.pause():
response_future = stub.UnaryCall.future(
@@ -268,8 +374,11 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testUnaryCallFutureExpired(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(
request, test_constants.SHORT_TIMEOUT)
@@ -277,24 +386,33 @@ class PythonPluginTest(unittest.TestCase):
response_future.result()
def testUnaryCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(request, 1)
response_future.cancel()
self.assertTrue(response_future.cancelled())
def testUnaryCallFutureFailed(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.fail():
response_future = stub.UnaryCall.future(
request, test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testStreamingOutputCall(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
expected_responses = methods.StreamingOutputCall(
@@ -304,8 +422,11 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
with methods.pause():
responses = stub.StreamingOutputCall(
request, test_constants.SHORT_TIMEOUT)
@@ -313,8 +434,11 @@ class PythonPluginTest(unittest.TestCase):
list(responses)
def testStreamingOutputCallCancelled(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
next(responses)
@@ -323,8 +447,11 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testStreamingOutputCallFailed(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
with methods.fail():
responses = stub.StreamingOutputCall(request, 1)
self.assertIsNotNone(responses)
@@ -332,30 +459,46 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testStreamingInputCall(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
response = stub.StreamingInputCall(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(), 'not a real RpcContext!')
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
+ 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFuture(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(), 'not a real RpcContext!')
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
+ 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFutureExpired(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
response_future.result()
@@ -363,10 +506,14 @@ class PythonPluginTest(unittest.TestCase):
face.ExpirationError)
def testStreamingInputCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
response_future.cancel()
self.assertTrue(response_future.cancelled())
@@ -374,26 +521,38 @@ class PythonPluginTest(unittest.TestCase):
response_future.result()
def testStreamingInputCallFutureFailed(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.fail():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
- with _CreateService() as (methods, stub):
- responses = stub.FullDuplexCall(_full_duplex_request_iterator(),
- test_constants.LONG_TIMEOUT)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ responses = stub.FullDuplexCall(
+ _full_duplex_request_iterator(self._requests_pb2),
+ test_constants.LONG_TIMEOUT)
expected_responses = methods.FullDuplexCall(
- _full_duplex_request_iterator(), 'not a real RpcContext!')
+ _full_duplex_request_iterator(self._requests_pb2),
+ 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
responses = stub.FullDuplexCall(request_iterator,
test_constants.SHORT_TIMEOUT)
@@ -401,8 +560,11 @@ class PythonPluginTest(unittest.TestCase):
list(responses)
def testFullDuplexCallCancelled(self):
- with _CreateService() as (methods, stub):
- request_iterator = _full_duplex_request_iterator()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
next(responses)
@@ -411,8 +573,11 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testFullDuplexCallFailed(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.fail():
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
@@ -421,13 +586,16 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testHalfDuplexCall(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
@@ -441,6 +609,8 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
+ self._protoc()
+
condition = threading.Condition()
wait_cell = [False]
@@ -455,14 +625,15 @@ class PythonPluginTest(unittest.TestCase):
condition.notify_all()
def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
- with _CreateService() as (methods, stub):
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with wait():
responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
test_constants.SHORT_TIMEOUT)
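
The rewritten beta_python_plugin_test no longer imports pre-generated _pb2 modules: setUp copies the .proto sources into a temporary tree, _protoc runs grpc_tools.protoc in-process, and the generated modules are loaded through importlib with a temporary sys.path insertion. A minimal sketch of that pattern, assuming a hypothetical hello.proto under protos/ (the directory names and proto file are assumptions, not part of the patch):

    import importlib
    import sys

    from grpc_tools import protoc

    PROTO_PATH = 'protos'      # hypothetical directory holding hello.proto
    PYTHON_OUT = 'generated'   # hypothetical output directory (must already exist)

    # Run protoc in-process; the leading '' is a placeholder for argv[0].
    exit_code = protoc.main([
        '',
        '--proto_path={}'.format(PROTO_PATH),
        '--python_out={}'.format(PYTHON_OUT),
        '--grpc_python_out={}'.format(PYTHON_OUT),
        '{}/hello.proto'.format(PROTO_PATH),
    ])
    assert exit_code == 0

    # Import the generated code from the output directory.
    sys.path.insert(1, PYTHON_OUT)
    hello_pb2 = importlib.import_module('hello_pb2')

The test itself additionally prefixes --grpc_python_out with grpc_1_0: to request the beta-style factory functions (beta_create_TestService_server / beta_create_TestService_stub) that testImportAttributes checks for.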
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/sub/messages.proto
index 1b780c69ba..1b780c69ba 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto
+++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/sub/messages.proto
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py
deleted file mode 100644
index 5772620b60..0000000000
--- a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/src/python/grpcio_tests/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py
index 5f4df79c5b..17fa61ea36 100644
--- a/src/python/grpcio_tests/tests/qps/benchmark_client.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py
@@ -22,7 +22,7 @@ from six.moves import queue
import grpc
from src.proto.grpc.testing import messages_pb2
-from src.proto.grpc.testing import services_pb2
+from src.proto.grpc.testing import services_pb2_grpc
from tests.unit import resources
from tests.unit import test_common
@@ -58,7 +58,7 @@ class BenchmarkClient:
if config.payload_config.WhichOneof('payload') == 'simple_params':
self._generic = False
- self._stub = services_pb2.BenchmarkServiceStub(channel)
+ self._stub = services_pb2_grpc.BenchmarkServiceStub(channel)
payload = messages_pb2.Payload(
body='\0' * config.payload_config.simple_params.req_size)
self._request = messages_pb2.SimpleRequest(
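
The qps, stress, and metrics changes in this patch switch stub and servicer lookups from *_pb2 to *_pb2_grpc, reflecting the split protoc output: message classes stay in *_pb2 while stubs, servicer base classes, and add_*_to_server helpers land in *_pb2_grpc. A small sketch of the split, using hypothetical helloworld modules and assuming a server is listening on the given port (none of these names come from this patch):

    import grpc

    import helloworld_pb2        # message classes only (e.g. HelloRequest)
    import helloworld_pb2_grpc   # GreeterStub, GreeterServicer, add_*_to_server

    channel = grpc.insecure_channel('localhost:50051')
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    reply = stub.SayHello(helloworld_pb2.HelloRequest(name='world'))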
diff --git a/src/python/grpcio_tests/tests/qps/benchmark_server.py b/src/python/grpcio_tests/tests/qps/benchmark_server.py
index 05101fdc6d..bb07844491 100644
--- a/src/python/grpcio_tests/tests/qps/benchmark_server.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_server.py
@@ -13,10 +13,10 @@
# limitations under the License.
from src.proto.grpc.testing import messages_pb2
-from src.proto.grpc.testing import services_pb2
+from src.proto.grpc.testing import services_pb2_grpc
-class BenchmarkServer(services_pb2.BenchmarkServiceServicer):
+class BenchmarkServer(services_pb2_grpc.BenchmarkServiceServicer):
"""Synchronous Server implementation for the Benchmark service."""
def UnaryCall(self, request, context):
@@ -29,7 +29,7 @@ class BenchmarkServer(services_pb2.BenchmarkServiceServicer):
yield messages_pb2.SimpleResponse(payload=payload)
-class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer):
+class GenericBenchmarkServer(services_pb2_grpc.BenchmarkServiceServicer):
"""Generic Server implementation for the Benchmark service."""
def __init__(self, resp_size):
diff --git a/src/python/grpcio_tests/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py
index d5ff0064fd..40caa3926a 100644
--- a/src/python/grpcio_tests/tests/stress/client.py
+++ b/src/python/grpcio_tests/tests/stress/client.py
@@ -20,7 +20,7 @@ import threading
import grpc
from six.moves import queue
from src.proto.grpc.testing import metrics_pb2_grpc
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import methods
from tests.interop import resources
@@ -133,7 +133,7 @@ def run_test(args):
for _ in xrange(args.num_channels_per_server):
channel = _get_channel(test_server_target, args)
for _ in xrange(args.num_stubs_per_channel):
- stub = test_pb2.TestServiceStub(channel)
+ stub = test_pb2_grpc.TestServiceStub(channel)
runner = test_runner.TestRunner(stub, test_cases, hist,
exception_queue, stop_event)
runners.append(runner)
diff --git a/src/python/grpcio_tests/tests/stress/metrics_server.py b/src/python/grpcio_tests/tests/stress/metrics_server.py
index 11ab6c3f4e..33a74b4a38 100644
--- a/src/python/grpcio_tests/tests/stress/metrics_server.py
+++ b/src/python/grpcio_tests/tests/stress/metrics_server.py
@@ -16,11 +16,12 @@
import time
from src.proto.grpc.testing import metrics_pb2
+from src.proto.grpc.testing import metrics_pb2_grpc
GAUGE_NAME = 'python_overall_qps'
-class MetricsServer(metrics_pb2.MetricsServiceServicer):
+class MetricsServer(metrics_pb2_grpc.MetricsServiceServicer):
def __init__(self, histogram):
self._start_time = time.time()
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index d61297b918..8512d5b96f 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -1,12 +1,17 @@
[
+ "_sanity._sanity_test.SanityTest",
"health_check._health_servicer_test.HealthServicerTest",
"interop._insecure_intraop_test.InsecureIntraopTest",
"interop._secure_intraop_test.SecureIntraopTest",
"protoc_plugin._python_plugin_test.PythonPluginTest",
- "protoc_plugin._split_definitions_test.SameCommonTest",
- "protoc_plugin._split_definitions_test.SameSeparateTest",
- "protoc_plugin._split_definitions_test.SplitCommonTest",
- "protoc_plugin._split_definitions_test.SplitSeparateTest",
+ "protoc_plugin._split_definitions_test.SameProtoGrpcBeforeProtoProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SameProtoMid2016ProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SameProtoProtoBeforeGrpcProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SameProtoSingleProtocExecutionProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SplitProtoGrpcBeforeProtoProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SplitProtoMid2016ProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SplitProtoProtoBeforeGrpcProtocStyleTest",
+ "protoc_plugin._split_definitions_test.SplitProtoSingleProtocExecutionProtocStyleTest",
"protoc_plugin.beta_python_plugin_test.PythonPluginTest",
"reflection._reflection_servicer_test.ReflectionServicerTest",
"testing._client_test.ClientTest",
@@ -26,6 +31,8 @@
"unit._credentials_test.CredentialsTest",
"unit._cython._cancel_many_calls_test.CancelManyCallsTest",
"unit._cython._channel_test.ChannelTest",
+ "unit._cython._no_messages_server_completion_queue_per_call_test.Test",
+ "unit._cython._no_messages_single_server_completion_queue_test.Test",
"unit._cython._read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest",
"unit._cython.cygrpc_test.InsecureServerInsecureClient",
"unit._cython.cygrpc_test.SecureServerSecureClient",
@@ -39,7 +46,6 @@
"unit._reconnect_test.ReconnectTest",
"unit._resource_exhausted_test.ResourceExhaustedTest",
"unit._rpc_test.RPCTest",
- "unit._sanity._sanity_test.Sanity",
"unit._thread_cleanup_test.CleanupThreadTest",
"unit.beta._beta_features_test.BetaFeaturesTest",
"unit.beta._beta_features_test.ContextManagementAndLifecycleTest",
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_common.py b/src/python/grpcio_tests/tests/unit/_cython/_common.py
new file mode 100644
index 0000000000..ac66d1db3d
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_cython/_common.py
@@ -0,0 +1,118 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Common utilities for tests of the Cython layer of gRPC Python."""
+
+import collections
+import threading
+
+from grpc._cython import cygrpc
+
+RPC_COUNT = 4000
+
+INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
+EMPTY_FLAGS = 0
+
+INVOCATION_METADATA = cygrpc.Metadata(
+ (cygrpc.Metadatum(b'client-md-key', b'client-md-key'),
+ cygrpc.Metadatum(b'client-md-key-bin', b'\x00\x01' * 3000),))
+
+INITIAL_METADATA = cygrpc.Metadata(
+ (cygrpc.Metadatum(b'server-initial-md-key', b'server-initial-md-value'),
+ cygrpc.Metadatum(b'server-initial-md-key-bin', b'\x00\x02' * 3000),))
+
+TRAILING_METADATA = cygrpc.Metadata(
+ (cygrpc.Metadatum(b'server-trailing-md-key', b'server-trailing-md-value'),
+ cygrpc.Metadatum(b'server-trailing-md-key-bin', b'\x00\x03' * 3000),))
+
+
+class QueueDriver(object):
+
+ def __init__(self, condition, completion_queue):
+ self._condition = condition
+ self._completion_queue = completion_queue
+ self._due = collections.defaultdict(int)
+ self._events = collections.defaultdict(list)
+
+ def add_due(self, tags):
+ if not self._due:
+
+ def in_thread():
+ while True:
+ event = self._completion_queue.poll()
+ with self._condition:
+ self._events[event.tag].append(event)
+ self._due[event.tag] -= 1
+ self._condition.notify_all()
+ if self._due[event.tag] <= 0:
+ self._due.pop(event.tag)
+ if not self._due:
+ return
+
+ thread = threading.Thread(target=in_thread)
+ thread.start()
+ for tag in tags:
+ self._due[tag] += 1
+
+ def event_with_tag(self, tag):
+ with self._condition:
+ while True:
+ if self._events[tag]:
+ return self._events[tag].pop(0)
+ else:
+ self._condition.wait()
+
+
+def execute_many_times(behavior):
+ return tuple(behavior() for _ in range(RPC_COUNT))
+
+
+class OperationResult(
+ collections.namedtuple('OperationResult', (
+ 'start_batch_result', 'completion_type', 'success',))):
+ pass
+
+
+SUCCESSFUL_OPERATION_RESULT = OperationResult(
+ cygrpc.CallError.ok, cygrpc.CompletionType.operation_complete, True)
+
+
+class RpcTest(object):
+
+ def setUp(self):
+ self.server_completion_queue = cygrpc.CompletionQueue()
+ self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
+ self.server.register_completion_queue(self.server_completion_queue)
+ port = self.server.add_http2_port(b'[::]:0')
+ self.server.start()
+ self.channel = cygrpc.Channel('localhost:{}'.format(port).encode(),
+ cygrpc.ChannelArgs([]))
+
+ self._server_shutdown_tag = 'server_shutdown_tag'
+ self.server_condition = threading.Condition()
+ self.server_driver = QueueDriver(self.server_condition,
+ self.server_completion_queue)
+ with self.server_condition:
+ self.server_driver.add_due({
+ self._server_shutdown_tag,
+ })
+
+ self.client_condition = threading.Condition()
+ self.client_completion_queue = cygrpc.CompletionQueue()
+ self.client_driver = QueueDriver(self.client_condition,
+ self.client_completion_queue)
+
+ def tearDown(self):
+ self.server.shutdown(self.server_completion_queue,
+ self._server_shutdown_tag)
+ self.server.cancel_all_calls()
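
The QueueDriver added above starts a background thread on the first add_due() call, polls the completion queue, and files each event under its tag; callers register the tags they expect while holding the shared condition and later block in event_with_tag(). A rough usage sketch mirroring the setUp/tearDown pattern of RpcTest (server shutdown is used here only because it is the smallest operation that yields a tagged event):

    import threading

    from grpc._cython import cygrpc

    from tests.unit._cython import _common

    completion_queue = cygrpc.CompletionQueue()
    condition = threading.Condition()
    driver = _common.QueueDriver(condition, completion_queue)

    server = cygrpc.Server(cygrpc.ChannelArgs([]))
    server.register_completion_queue(completion_queue)
    server.start()

    shutdown_tag = 'server_shutdown_tag'
    with condition:
        driver.add_due({shutdown_tag})               # starts the polling thread
    server.shutdown(completion_queue, shutdown_tag)  # completes with that tag
    event = driver.event_with_tag(shutdown_tag)      # blocks until delivered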
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
new file mode 100644
index 0000000000..14cc66675c
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
@@ -0,0 +1,131 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test a corner-case at the level of the Cython API."""
+
+import threading
+import unittest
+
+from grpc._cython import cygrpc
+
+from tests.unit._cython import _common
+
+
+class Test(_common.RpcTest, unittest.TestCase):
+
+ def _do_rpcs(self):
+ server_call_condition = threading.Condition()
+ server_call_completion_queue = cygrpc.CompletionQueue()
+ server_call_driver = _common.QueueDriver(server_call_condition,
+ server_call_completion_queue)
+
+ server_request_call_tag = 'server_request_call_tag'
+ server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
+ server_complete_rpc_tag = 'server_complete_rpc_tag'
+
+ with self.server_condition:
+ server_request_call_start_batch_result = self.server.request_call(
+ server_call_completion_queue, self.server_completion_queue,
+ server_request_call_tag)
+ self.server_driver.add_due({
+ server_request_call_tag,
+ })
+
+ client_call = self.channel.create_call(
+ None, _common.EMPTY_FLAGS, self.client_completion_queue,
+ b'/twinkies', None, _common.INFINITE_FUTURE)
+ client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
+ client_complete_rpc_tag = 'client_complete_rpc_tag'
+ with self.client_condition:
+ client_receive_initial_metadata_start_batch_result = (
+ client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_receive_initial_metadata(
+ _common.EMPTY_FLAGS),
+ ]), client_receive_initial_metadata_tag))
+ client_complete_rpc_start_batch_result = client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_send_initial_metadata(
+ _common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(
+ _common.EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(
+ _common.EMPTY_FLAGS),
+ ]), client_complete_rpc_tag)
+ self.client_driver.add_due({
+ client_receive_initial_metadata_tag,
+ client_complete_rpc_tag,
+ })
+
+ server_request_call_event = self.server_driver.event_with_tag(
+ server_request_call_tag)
+
+ with server_call_condition:
+ server_send_initial_metadata_start_batch_result = (
+ server_request_call_event.operation_call.start_server_batch([
+ cygrpc.operation_send_initial_metadata(
+ _common.INITIAL_METADATA, _common.EMPTY_FLAGS),
+ ], server_send_initial_metadata_tag))
+ server_call_driver.add_due({
+ server_send_initial_metadata_tag,
+ })
+ server_send_initial_metadata_event = server_call_driver.event_with_tag(
+ server_send_initial_metadata_tag)
+
+ with server_call_condition:
+ server_complete_rpc_start_batch_result = (
+ server_request_call_event.operation_call.start_server_batch([
+ cygrpc.operation_receive_close_on_server(
+ _common.EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _common.TRAILING_METADATA, cygrpc.StatusCode.ok,
+ b'test details', _common.EMPTY_FLAGS),
+ ], server_complete_rpc_tag))
+ server_call_driver.add_due({
+ server_complete_rpc_tag,
+ })
+ server_complete_rpc_event = server_call_driver.event_with_tag(
+ server_complete_rpc_tag)
+
+ client_receive_initial_metadata_event = self.client_driver.event_with_tag(
+ client_receive_initial_metadata_tag)
+ client_complete_rpc_event = self.client_driver.event_with_tag(
+ client_complete_rpc_tag)
+
+ return (_common.OperationResult(server_request_call_start_batch_result,
+ server_request_call_event.type,
+ server_request_call_event.success),
+ _common.OperationResult(
+ client_receive_initial_metadata_start_batch_result,
+ client_receive_initial_metadata_event.type,
+ client_receive_initial_metadata_event.success),
+ _common.OperationResult(client_complete_rpc_start_batch_result,
+ client_complete_rpc_event.type,
+ client_complete_rpc_event.success),
+ _common.OperationResult(
+ server_send_initial_metadata_start_batch_result,
+ server_send_initial_metadata_event.type,
+ server_send_initial_metadata_event.success),
+ _common.OperationResult(server_complete_rpc_start_batch_result,
+ server_complete_rpc_event.type,
+ server_complete_rpc_event.success),)
+
+ def test_rpcs(self):
+ expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
+ 5] * _common.RPC_COUNT
+ actuallys = _common.execute_many_times(self._do_rpcs)
+ self.assertSequenceEqual(expecteds, actuallys)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
new file mode 100644
index 0000000000..1e44bcc4dc
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
@@ -0,0 +1,126 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test a corner-case at the level of the Cython API."""
+
+import threading
+import unittest
+
+from grpc._cython import cygrpc
+
+from tests.unit._cython import _common
+
+
+class Test(_common.RpcTest, unittest.TestCase):
+
+ def _do_rpcs(self):
+ server_request_call_tag = 'server_request_call_tag'
+ server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
+ server_complete_rpc_tag = 'server_complete_rpc_tag'
+
+ with self.server_condition:
+ server_request_call_start_batch_result = self.server.request_call(
+ self.server_completion_queue, self.server_completion_queue,
+ server_request_call_tag)
+ self.server_driver.add_due({
+ server_request_call_tag,
+ })
+
+ client_call = self.channel.create_call(
+ None, _common.EMPTY_FLAGS, self.client_completion_queue,
+ b'/twinkies', None, _common.INFINITE_FUTURE)
+ client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
+ client_complete_rpc_tag = 'client_complete_rpc_tag'
+ with self.client_condition:
+ client_receive_initial_metadata_start_batch_result = (
+ client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_receive_initial_metadata(
+ _common.EMPTY_FLAGS),
+ ]), client_receive_initial_metadata_tag))
+ client_complete_rpc_start_batch_result = client_call.start_client_batch(
+ cygrpc.Operations([
+ cygrpc.operation_send_initial_metadata(
+ _common.INVOCATION_METADATA, _common.EMPTY_FLAGS),
+ cygrpc.operation_send_close_from_client(
+ _common.EMPTY_FLAGS),
+ cygrpc.operation_receive_status_on_client(
+ _common.EMPTY_FLAGS),
+ ]), client_complete_rpc_tag)
+ self.client_driver.add_due({
+ client_receive_initial_metadata_tag,
+ client_complete_rpc_tag,
+ })
+
+ server_request_call_event = self.server_driver.event_with_tag(
+ server_request_call_tag)
+
+ with self.server_condition:
+ server_send_initial_metadata_start_batch_result = (
+ server_request_call_event.operation_call.start_server_batch([
+ cygrpc.operation_send_initial_metadata(
+ _common.INITIAL_METADATA, _common.EMPTY_FLAGS),
+ ], server_send_initial_metadata_tag))
+ self.server_driver.add_due({
+ server_send_initial_metadata_tag,
+ })
+ server_send_initial_metadata_event = self.server_driver.event_with_tag(
+ server_send_initial_metadata_tag)
+
+ with self.server_condition:
+ server_complete_rpc_start_batch_result = (
+ server_request_call_event.operation_call.start_server_batch([
+ cygrpc.operation_receive_close_on_server(
+ _common.EMPTY_FLAGS),
+ cygrpc.operation_send_status_from_server(
+ _common.TRAILING_METADATA, cygrpc.StatusCode.ok,
+ b'test details', _common.EMPTY_FLAGS),
+ ], server_complete_rpc_tag))
+ self.server_driver.add_due({
+ server_complete_rpc_tag,
+ })
+ server_complete_rpc_event = self.server_driver.event_with_tag(
+ server_complete_rpc_tag)
+
+ client_receive_initial_metadata_event = self.client_driver.event_with_tag(
+ client_receive_initial_metadata_tag)
+ client_complete_rpc_event = self.client_driver.event_with_tag(
+ client_complete_rpc_tag)
+
+ return (_common.OperationResult(server_request_call_start_batch_result,
+ server_request_call_event.type,
+ server_request_call_event.success),
+ _common.OperationResult(
+ client_receive_initial_metadata_start_batch_result,
+ client_receive_initial_metadata_event.type,
+ client_receive_initial_metadata_event.success),
+ _common.OperationResult(client_complete_rpc_start_batch_result,
+ client_complete_rpc_event.type,
+ client_complete_rpc_event.success),
+ _common.OperationResult(
+ server_send_initial_metadata_start_batch_result,
+ server_send_initial_metadata_event.type,
+ server_send_initial_metadata_event.success),
+ _common.OperationResult(server_complete_rpc_start_batch_result,
+ server_complete_rpc_event.type,
+ server_complete_rpc_event.success),)
+
+ def test_rpcs(self):
+ expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
+ 5] * _common.RPC_COUNT
+ actuallys = _common.execute_many_times(self._do_rpcs)
+ self.assertSequenceEqual(expecteds, actuallys)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index 9f72b1fcb5..6faab94be6 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -29,7 +29,7 @@ _SERIALIZED_RESPONSE = b'\x49\x50\x51'
_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
-_RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse: object()
+_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object()
_SERVICE = 'test.TestService'
_UNARY_UNARY = 'UnaryUnary'
diff --git a/src/ruby/end2end/killed_client_thread_client.rb b/src/ruby/end2end/killed_client_thread_client.rb
index 7d6ed8c8d7..493c0eb56a 100755
--- a/src/ruby/end2end/killed_client_thread_client.rb
+++ b/src/ruby/end2end/killed_client_thread_client.rb
@@ -35,7 +35,7 @@ def main
:this_channel_is_insecure)
stub.echo(Echo::EchoRequest.new(request: 'hello'))
fail 'the clients rpc in this test shouldnt complete. ' \
- 'expecting SIGINT to happen in the middle of the call'
+ 'expecting SIGTERM to happen in the middle of the call'
end
thd.join
end
diff --git a/src/ruby/end2end/killed_client_thread_driver.rb b/src/ruby/end2end/killed_client_thread_driver.rb
index 09f05a4487..fce5d13e82 100755
--- a/src/ruby/end2end/killed_client_thread_driver.rb
+++ b/src/ruby/end2end/killed_client_thread_driver.rb
@@ -69,9 +69,9 @@ def main
call_started_cv.wait(call_started_mu) until call_started.val
end
- # SIGINT the child process now that it's
+ # SIGTERM the child process now that it's
# in the middle of an RPC (happening on a non-main thread)
- Process.kill('SIGINT', client_pid)
+ Process.kill('SIGTERM', client_pid)
STDERR.puts 'sent shutdown'
begin
@@ -88,8 +88,8 @@ def main
end
client_exit_code = $CHILD_STATUS
- if client_exit_code.termsig != 2 # SIGINT
- fail 'expected client exit from SIGINT ' \
+ if client_exit_code.termsig != 15 # SIGTERM
+ fail 'expected client exit from SIGTERM ' \
"but got child status: #{client_exit_code}"
end
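
The driver's termsig check moves from 2 to 15 because those are the conventional POSIX numbers for SIGINT and SIGTERM, matching the switch to Process.kill('SIGTERM', ...) above. A quick check in Python (POSIX platform assumed):

    import signal

    print(int(signal.SIGINT))   # 2  on POSIX platforms
    print(int(signal.SIGTERM))  # 15 on POSIX platforms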
diff --git a/src/ruby/end2end/load_grpc_with_gc_stress_driver.rb b/src/ruby/end2end/load_grpc_with_gc_stress_driver.rb
new file mode 100755
index 0000000000..3668b95a05
--- /dev/null
+++ b/src/ruby/end2end/load_grpc_with_gc_stress_driver.rb
@@ -0,0 +1,32 @@
+#!/usr/bin/env ruby
+#
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+this_dir = File.expand_path(File.dirname(__FILE__))
+protos_lib_dir = File.join(this_dir, 'lib')
+grpc_lib_dir = File.join(File.dirname(this_dir), 'lib')
+$LOAD_PATH.unshift(grpc_lib_dir) unless $LOAD_PATH.include?(grpc_lib_dir)
+$LOAD_PATH.unshift(protos_lib_dir) unless $LOAD_PATH.include?(protos_lib_dir)
+$LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir)
+
+GC.stress = 0x04
+
+require 'grpc'
+
+GRPC::Core::Channel.new('dummy_host', nil, :this_channel_is_insecure)
+GRPC::Core::Server.new({})
+GRPC::Core::ChannelCredentials.new
+GRPC::Core::CallCredentials.new(proc { |noop| noop })
+GRPC::Core::CompressionOptions.new
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index e5ab02b9fc..9d2cf2a08a 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -41,6 +41,7 @@ LIB_DIRS = [
]
windows = RUBY_PLATFORM =~ /mingw|mswin/
+bsd = RUBY_PLATFORM =~ /bsd/
grpc_root = File.expand_path(File.join(File.dirname(__FILE__), '../../../..'))
@@ -70,7 +71,8 @@ unless windows
puts 'Building internal gRPC into ' + grpc_lib_dir
nproc = 4
nproc = Etc.nprocessors * 2 if Etc.respond_to? :nprocessors
- system("make -j#{nproc} -C #{grpc_root} #{grpc_lib_dir}/libgrpc.a CONFIG=#{grpc_config} Q=")
+ make = bsd ? 'gmake' : 'make'
+ system("#{make} -j#{nproc} -C #{grpc_root} #{grpc_lib_dir}/libgrpc.a CONFIG=#{grpc_config} Q=")
exit 1 unless $? == 0
end
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 5550bb7d5f..e920fc86c5 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -101,6 +101,7 @@ static void grpc_rb_call_destroy(void *p) {
return;
}
destroy_call((grpc_rb_call *)p);
+ xfree(p);
}
static size_t md_ary_datasize(const void *p) {
@@ -220,6 +221,7 @@ static VALUE grpc_rb_call_close(VALUE self) {
TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
if (call != NULL) {
destroy_call(call);
+ xfree(RTYPEDDATA_DATA(self));
RTYPEDDATA_DATA(self) = NULL;
}
return Qnil;
diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c
index 049a869bdc..4214a0811b 100644
--- a/src/ruby/ext/grpc/rb_call_credentials.c
+++ b/src/ruby/ext/grpc/rb_call_credentials.c
@@ -112,9 +112,12 @@ static void grpc_rb_call_credentials_callback_with_gil(void *param) {
gpr_free(params);
}
-static void grpc_rb_call_credentials_plugin_get_metadata(
+static int grpc_rb_call_credentials_plugin_get_metadata(
void *state, grpc_auth_metadata_context context,
- grpc_credentials_plugin_metadata_cb cb, void *user_data) {
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) {
callback_params *params = gpr_malloc(sizeof(callback_params));
params->get_metadata = (VALUE)state;
params->context = context;
@@ -123,6 +126,7 @@ static void grpc_rb_call_credentials_plugin_get_metadata(
grpc_rb_event_queue_enqueue(grpc_rb_call_credentials_callback_with_gil,
(void *)(params));
+ return 0; // Async return.
}
static void grpc_rb_call_credentials_plugin_destroy(void *state) {
diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c
index 933a3ac152..b53f09edec 100644
--- a/src/ruby/ext/grpc/rb_grpc.c
+++ b/src/ruby/ext/grpc/rb_grpc.c
@@ -315,8 +315,8 @@ void Init_grpc_c() {
return;
}
- bg_thread_init_rb_mu = rb_mutex_new();
rb_global_variable(&bg_thread_init_rb_mu);
+ bg_thread_init_rb_mu = rb_mutex_new();
grpc_rb_mGRPC = rb_define_module("GRPC");
grpc_rb_mGrpcCore = rb_define_module_under(grpc_rb_mGRPC, "Core");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 9671d794c5..128e912e23 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -22,44 +22,6 @@
#include "rb_grpc_imports.generated.h"
-grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
-grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
-grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
-grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
-grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
-grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
-grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
-grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
-grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
-grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
-census_initialize_type census_initialize_import;
-census_shutdown_type census_shutdown_import;
-census_supported_type census_supported_import;
-census_enabled_type census_enabled_import;
-census_context_create_type census_context_create_import;
-census_context_destroy_type census_context_destroy_import;
-census_context_get_status_type census_context_get_status_import;
-census_context_initialize_iterator_type census_context_initialize_iterator_import;
-census_context_next_tag_type census_context_next_tag_import;
-census_context_get_tag_type census_context_get_tag_import;
-census_context_encode_type census_context_encode_import;
-census_context_decode_type census_context_decode_import;
-census_trace_mask_type census_trace_mask_import;
-census_set_trace_mask_type census_set_trace_mask_import;
-census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import;
-census_start_client_rpc_op_type census_start_client_rpc_op_import;
-census_set_rpc_client_peer_type census_set_rpc_client_peer_import;
-census_start_server_rpc_op_type census_start_server_rpc_op_import;
-census_start_op_type census_start_op_import;
-census_end_op_type census_end_op_import;
-census_trace_print_type census_trace_print_import;
-census_trace_scan_start_type census_trace_scan_start_import;
-census_get_trace_record_type census_get_trace_record_import;
-census_trace_scan_end_type census_trace_scan_end_import;
-census_define_resource_type census_define_resource_import;
-census_delete_resource_type census_delete_resource_import;
-census_resource_id_type census_resource_id_import;
-census_record_values_type census_record_values_import;
grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import;
grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import;
grpc_stream_compression_algorithm_name_type grpc_stream_compression_algorithm_name_import;
@@ -87,12 +49,16 @@ grpc_completion_queue_next_type grpc_completion_queue_next_import;
grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import;
grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import;
grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import;
+grpc_completion_queue_thread_local_cache_init_type grpc_completion_queue_thread_local_cache_init_import;
+grpc_completion_queue_thread_local_cache_flush_type grpc_completion_queue_thread_local_cache_flush_import;
grpc_alarm_create_type grpc_alarm_create_import;
+grpc_alarm_set_type grpc_alarm_set_import;
grpc_alarm_cancel_type grpc_alarm_cancel_import;
grpc_alarm_destroy_type grpc_alarm_destroy_import;
grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import;
grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_external_connectivity_watchers_import;
grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
+grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import;
grpc_channel_create_call_type grpc_channel_create_call_import;
grpc_channel_ping_type grpc_channel_ping_import;
grpc_channel_register_call_type grpc_channel_register_call_import;
@@ -161,11 +127,27 @@ grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_impor
grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import;
grpc_secure_channel_create_type grpc_secure_channel_create_import;
grpc_server_credentials_release_type grpc_server_credentials_release_import;
+grpc_ssl_server_certificate_config_create_type grpc_ssl_server_certificate_config_create_import;
+grpc_ssl_server_certificate_config_destroy_type grpc_ssl_server_certificate_config_destroy_import;
grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
+grpc_ssl_server_credentials_create_options_using_config_type grpc_ssl_server_credentials_create_options_using_config_import;
+grpc_ssl_server_credentials_create_options_using_config_fetcher_type grpc_ssl_server_credentials_create_options_using_config_fetcher_import;
+grpc_ssl_server_credentials_options_destroy_type grpc_ssl_server_credentials_options_destroy_import;
+grpc_ssl_server_credentials_create_with_options_type grpc_ssl_server_credentials_create_with_options_import;
grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
grpc_call_set_credentials_type grpc_call_set_credentials_import;
grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import;
+grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
+grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
+grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
+grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
+grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
+grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
+grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
+grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
+grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
+grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
grpc_slice_ref_type grpc_slice_ref_import;
grpc_slice_unref_type grpc_slice_unref_import;
grpc_slice_copy_type grpc_slice_copy_import;
@@ -328,44 +310,6 @@ gpr_sleep_until_type gpr_sleep_until_import;
gpr_timespec_to_micros_type gpr_timespec_to_micros_import;
void grpc_rb_load_imports(HMODULE library) {
- grpc_raw_byte_buffer_create_import = (grpc_raw_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_byte_buffer_create");
- grpc_raw_compressed_byte_buffer_create_import = (grpc_raw_compressed_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_compressed_byte_buffer_create");
- grpc_byte_buffer_copy_import = (grpc_byte_buffer_copy_type) GetProcAddress(library, "grpc_byte_buffer_copy");
- grpc_byte_buffer_length_import = (grpc_byte_buffer_length_type) GetProcAddress(library, "grpc_byte_buffer_length");
- grpc_byte_buffer_destroy_import = (grpc_byte_buffer_destroy_type) GetProcAddress(library, "grpc_byte_buffer_destroy");
- grpc_byte_buffer_reader_init_import = (grpc_byte_buffer_reader_init_type) GetProcAddress(library, "grpc_byte_buffer_reader_init");
- grpc_byte_buffer_reader_destroy_import = (grpc_byte_buffer_reader_destroy_type) GetProcAddress(library, "grpc_byte_buffer_reader_destroy");
- grpc_byte_buffer_reader_next_import = (grpc_byte_buffer_reader_next_type) GetProcAddress(library, "grpc_byte_buffer_reader_next");
- grpc_byte_buffer_reader_readall_import = (grpc_byte_buffer_reader_readall_type) GetProcAddress(library, "grpc_byte_buffer_reader_readall");
- grpc_raw_byte_buffer_from_reader_import = (grpc_raw_byte_buffer_from_reader_type) GetProcAddress(library, "grpc_raw_byte_buffer_from_reader");
- census_initialize_import = (census_initialize_type) GetProcAddress(library, "census_initialize");
- census_shutdown_import = (census_shutdown_type) GetProcAddress(library, "census_shutdown");
- census_supported_import = (census_supported_type) GetProcAddress(library, "census_supported");
- census_enabled_import = (census_enabled_type) GetProcAddress(library, "census_enabled");
- census_context_create_import = (census_context_create_type) GetProcAddress(library, "census_context_create");
- census_context_destroy_import = (census_context_destroy_type) GetProcAddress(library, "census_context_destroy");
- census_context_get_status_import = (census_context_get_status_type) GetProcAddress(library, "census_context_get_status");
- census_context_initialize_iterator_import = (census_context_initialize_iterator_type) GetProcAddress(library, "census_context_initialize_iterator");
- census_context_next_tag_import = (census_context_next_tag_type) GetProcAddress(library, "census_context_next_tag");
- census_context_get_tag_import = (census_context_get_tag_type) GetProcAddress(library, "census_context_get_tag");
- census_context_encode_import = (census_context_encode_type) GetProcAddress(library, "census_context_encode");
- census_context_decode_import = (census_context_decode_type) GetProcAddress(library, "census_context_decode");
- census_trace_mask_import = (census_trace_mask_type) GetProcAddress(library, "census_trace_mask");
- census_set_trace_mask_import = (census_set_trace_mask_type) GetProcAddress(library, "census_set_trace_mask");
- census_start_rpc_op_timestamp_import = (census_start_rpc_op_timestamp_type) GetProcAddress(library, "census_start_rpc_op_timestamp");
- census_start_client_rpc_op_import = (census_start_client_rpc_op_type) GetProcAddress(library, "census_start_client_rpc_op");
- census_set_rpc_client_peer_import = (census_set_rpc_client_peer_type) GetProcAddress(library, "census_set_rpc_client_peer");
- census_start_server_rpc_op_import = (census_start_server_rpc_op_type) GetProcAddress(library, "census_start_server_rpc_op");
- census_start_op_import = (census_start_op_type) GetProcAddress(library, "census_start_op");
- census_end_op_import = (census_end_op_type) GetProcAddress(library, "census_end_op");
- census_trace_print_import = (census_trace_print_type) GetProcAddress(library, "census_trace_print");
- census_trace_scan_start_import = (census_trace_scan_start_type) GetProcAddress(library, "census_trace_scan_start");
- census_get_trace_record_import = (census_get_trace_record_type) GetProcAddress(library, "census_get_trace_record");
- census_trace_scan_end_import = (census_trace_scan_end_type) GetProcAddress(library, "census_trace_scan_end");
- census_define_resource_import = (census_define_resource_type) GetProcAddress(library, "census_define_resource");
- census_delete_resource_import = (census_delete_resource_type) GetProcAddress(library, "census_delete_resource");
- census_resource_id_import = (census_resource_id_type) GetProcAddress(library, "census_resource_id");
- census_record_values_import = (census_record_values_type) GetProcAddress(library, "census_record_values");
grpc_compression_algorithm_parse_import = (grpc_compression_algorithm_parse_type) GetProcAddress(library, "grpc_compression_algorithm_parse");
grpc_compression_algorithm_name_import = (grpc_compression_algorithm_name_type) GetProcAddress(library, "grpc_compression_algorithm_name");
grpc_stream_compression_algorithm_name_import = (grpc_stream_compression_algorithm_name_type) GetProcAddress(library, "grpc_stream_compression_algorithm_name");
@@ -393,12 +337,16 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck");
grpc_completion_queue_shutdown_import = (grpc_completion_queue_shutdown_type) GetProcAddress(library, "grpc_completion_queue_shutdown");
grpc_completion_queue_destroy_import = (grpc_completion_queue_destroy_type) GetProcAddress(library, "grpc_completion_queue_destroy");
+ grpc_completion_queue_thread_local_cache_init_import = (grpc_completion_queue_thread_local_cache_init_type) GetProcAddress(library, "grpc_completion_queue_thread_local_cache_init");
+ grpc_completion_queue_thread_local_cache_flush_import = (grpc_completion_queue_thread_local_cache_flush_type) GetProcAddress(library, "grpc_completion_queue_thread_local_cache_flush");
grpc_alarm_create_import = (grpc_alarm_create_type) GetProcAddress(library, "grpc_alarm_create");
+ grpc_alarm_set_import = (grpc_alarm_set_type) GetProcAddress(library, "grpc_alarm_set");
grpc_alarm_cancel_import = (grpc_alarm_cancel_type) GetProcAddress(library, "grpc_alarm_cancel");
grpc_alarm_destroy_import = (grpc_alarm_destroy_type) GetProcAddress(library, "grpc_alarm_destroy");
grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state");
grpc_channel_num_external_connectivity_watchers_import = (grpc_channel_num_external_connectivity_watchers_type) GetProcAddress(library, "grpc_channel_num_external_connectivity_watchers");
grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state");
+ grpc_channel_support_connectivity_watcher_import = (grpc_channel_support_connectivity_watcher_type) GetProcAddress(library, "grpc_channel_support_connectivity_watcher");
grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call");
grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping");
grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call");
@@ -467,11 +415,27 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_metadata_credentials_create_from_plugin_import = (grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin");
grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create");
grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release");
+ grpc_ssl_server_certificate_config_create_import = (grpc_ssl_server_certificate_config_create_type) GetProcAddress(library, "grpc_ssl_server_certificate_config_create");
+ grpc_ssl_server_certificate_config_destroy_import = (grpc_ssl_server_certificate_config_destroy_type) GetProcAddress(library, "grpc_ssl_server_certificate_config_destroy");
grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create");
grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex");
+ grpc_ssl_server_credentials_create_options_using_config_import = (grpc_ssl_server_credentials_create_options_using_config_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_options_using_config");
+ grpc_ssl_server_credentials_create_options_using_config_fetcher_import = (grpc_ssl_server_credentials_create_options_using_config_fetcher_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_options_using_config_fetcher");
+ grpc_ssl_server_credentials_options_destroy_import = (grpc_ssl_server_credentials_options_destroy_type) GetProcAddress(library, "grpc_ssl_server_credentials_options_destroy");
+ grpc_ssl_server_credentials_create_with_options_import = (grpc_ssl_server_credentials_create_with_options_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_with_options");
grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port");
grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials");
grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor");
+ grpc_raw_byte_buffer_create_import = (grpc_raw_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_byte_buffer_create");
+ grpc_raw_compressed_byte_buffer_create_import = (grpc_raw_compressed_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_compressed_byte_buffer_create");
+ grpc_byte_buffer_copy_import = (grpc_byte_buffer_copy_type) GetProcAddress(library, "grpc_byte_buffer_copy");
+ grpc_byte_buffer_length_import = (grpc_byte_buffer_length_type) GetProcAddress(library, "grpc_byte_buffer_length");
+ grpc_byte_buffer_destroy_import = (grpc_byte_buffer_destroy_type) GetProcAddress(library, "grpc_byte_buffer_destroy");
+ grpc_byte_buffer_reader_init_import = (grpc_byte_buffer_reader_init_type) GetProcAddress(library, "grpc_byte_buffer_reader_init");
+ grpc_byte_buffer_reader_destroy_import = (grpc_byte_buffer_reader_destroy_type) GetProcAddress(library, "grpc_byte_buffer_reader_destroy");
+ grpc_byte_buffer_reader_next_import = (grpc_byte_buffer_reader_next_type) GetProcAddress(library, "grpc_byte_buffer_reader_next");
+ grpc_byte_buffer_reader_readall_import = (grpc_byte_buffer_reader_readall_type) GetProcAddress(library, "grpc_byte_buffer_reader_readall");
+ grpc_raw_byte_buffer_from_reader_import = (grpc_raw_byte_buffer_from_reader_type) GetProcAddress(library, "grpc_raw_byte_buffer_from_reader");
grpc_slice_ref_import = (grpc_slice_ref_type) GetProcAddress(library, "grpc_slice_ref");
grpc_slice_unref_import = (grpc_slice_unref_type) GetProcAddress(library, "grpc_slice_unref");
grpc_slice_copy_import = (grpc_slice_copy_type) GetProcAddress(library, "grpc_slice_copy");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index b64199be8e..b9b82e5e23 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -25,12 +25,11 @@
#include <windows.h>
-#include <grpc/byte_buffer.h>
-#include <grpc/census.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/grpc_posix.h>
#include <grpc/grpc_security.h>
+#include <grpc/impl/codegen/byte_buffer.h>
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
@@ -47,127 +46,13 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
-typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_create_type)(grpc_slice *slices, size_t nslices);
-extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
-#define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import
-typedef grpc_byte_buffer *(*grpc_raw_compressed_byte_buffer_create_type)(grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
-extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
-#define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import
-typedef grpc_byte_buffer *(*grpc_byte_buffer_copy_type)(grpc_byte_buffer *bb);
-extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
-#define grpc_byte_buffer_copy grpc_byte_buffer_copy_import
-typedef size_t(*grpc_byte_buffer_length_type)(grpc_byte_buffer *bb);
-extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
-#define grpc_byte_buffer_length grpc_byte_buffer_length_import
-typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer);
-extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
-#define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import
-typedef int(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
-extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
-#define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import
-typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader);
-extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
-#define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import
-typedef int(*grpc_byte_buffer_reader_next_type)(grpc_byte_buffer_reader *reader, grpc_slice *slice);
-extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
-#define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import
-typedef grpc_slice(*grpc_byte_buffer_reader_readall_type)(grpc_byte_buffer_reader *reader);
-extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
-#define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import
-typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader);
-extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
-#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import
-typedef int(*census_initialize_type)(int features);
-extern census_initialize_type census_initialize_import;
-#define census_initialize census_initialize_import
-typedef void(*census_shutdown_type)(void);
-extern census_shutdown_type census_shutdown_import;
-#define census_shutdown census_shutdown_import
-typedef int(*census_supported_type)(void);
-extern census_supported_type census_supported_import;
-#define census_supported census_supported_import
-typedef int(*census_enabled_type)(void);
-extern census_enabled_type census_enabled_import;
-#define census_enabled census_enabled_import
-typedef census_context *(*census_context_create_type)(const census_context *base, const census_tag *tags, int ntags, census_context_status const **status);
-extern census_context_create_type census_context_create_import;
-#define census_context_create census_context_create_import
-typedef void(*census_context_destroy_type)(census_context *context);
-extern census_context_destroy_type census_context_destroy_import;
-#define census_context_destroy census_context_destroy_import
-typedef const census_context_status *(*census_context_get_status_type)(const census_context *context);
-extern census_context_get_status_type census_context_get_status_import;
-#define census_context_get_status census_context_get_status_import
-typedef void(*census_context_initialize_iterator_type)(const census_context *context, census_context_iterator *iterator);
-extern census_context_initialize_iterator_type census_context_initialize_iterator_import;
-#define census_context_initialize_iterator census_context_initialize_iterator_import
-typedef int(*census_context_next_tag_type)(census_context_iterator *iterator, census_tag *tag);
-extern census_context_next_tag_type census_context_next_tag_import;
-#define census_context_next_tag census_context_next_tag_import
-typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag);
-extern census_context_get_tag_type census_context_get_tag_import;
-#define census_context_get_tag census_context_get_tag_import
-typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size);
-extern census_context_encode_type census_context_encode_import;
-#define census_context_encode census_context_encode_import
-typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size);
-extern census_context_decode_type census_context_decode_import;
-#define census_context_decode census_context_decode_import
-typedef int(*census_trace_mask_type)(const census_context *context);
-extern census_trace_mask_type census_trace_mask_import;
-#define census_trace_mask census_trace_mask_import
-typedef void(*census_set_trace_mask_type)(int trace_mask);
-extern census_set_trace_mask_type census_set_trace_mask_import;
-#define census_set_trace_mask census_set_trace_mask_import
-typedef census_timestamp(*census_start_rpc_op_timestamp_type)(void);
-extern census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import;
-#define census_start_rpc_op_timestamp census_start_rpc_op_timestamp_import
-typedef census_context *(*census_start_client_rpc_op_type)(const census_context *context, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, const census_timestamp *start_time);
-extern census_start_client_rpc_op_type census_start_client_rpc_op_import;
-#define census_start_client_rpc_op census_start_client_rpc_op_import
-typedef void(*census_set_rpc_client_peer_type)(census_context *context, const char *peer);
-extern census_set_rpc_client_peer_type census_set_rpc_client_peer_import;
-#define census_set_rpc_client_peer census_set_rpc_client_peer_import
-typedef census_context *(*census_start_server_rpc_op_type)(const char *buffer, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, census_timestamp *start_time);
-extern census_start_server_rpc_op_type census_start_server_rpc_op_import;
-#define census_start_server_rpc_op census_start_server_rpc_op_import
-typedef census_context *(*census_start_op_type)(census_context *context, const char *family, const char *name, int trace_mask);
-extern census_start_op_type census_start_op_import;
-#define census_start_op census_start_op_import
-typedef void(*census_end_op_type)(census_context *context, int status);
-extern census_end_op_type census_end_op_import;
-#define census_end_op census_end_op_import
-typedef void(*census_trace_print_type)(census_context *context, uint32_t type, const char *buffer, size_t n);
-extern census_trace_print_type census_trace_print_import;
-#define census_trace_print census_trace_print_import
-typedef int(*census_trace_scan_start_type)(int consume);
-extern census_trace_scan_start_type census_trace_scan_start_import;
-#define census_trace_scan_start census_trace_scan_start_import
-typedef int(*census_get_trace_record_type)(census_trace_record *trace_record);
-extern census_get_trace_record_type census_get_trace_record_import;
-#define census_get_trace_record census_get_trace_record_import
-typedef void(*census_trace_scan_end_type)();
-extern census_trace_scan_end_type census_trace_scan_end_import;
-#define census_trace_scan_end census_trace_scan_end_import
-typedef int32_t(*census_define_resource_type)(const uint8_t *resource_pb, size_t resource_pb_size);
-extern census_define_resource_type census_define_resource_import;
-#define census_define_resource census_define_resource_import
-typedef void(*census_delete_resource_type)(int32_t resource_id);
-extern census_delete_resource_type census_delete_resource_import;
-#define census_delete_resource census_delete_resource_import
-typedef int32_t(*census_resource_id_type)(const char *name);
-extern census_resource_id_type census_resource_id_import;
-#define census_resource_id census_resource_id_import
-typedef void(*census_record_values_type)(census_context *context, census_value *values, size_t nvalues);
-extern census_record_values_type census_record_values_import;
-#define census_record_values census_record_values_import
typedef int(*grpc_compression_algorithm_parse_type)(grpc_slice value, grpc_compression_algorithm *algorithm);
extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import;
#define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import
-typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name);
+typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, const char **name);
extern grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import;
#define grpc_compression_algorithm_name grpc_compression_algorithm_name_import
-typedef int(*grpc_stream_compression_algorithm_name_type)(grpc_stream_compression_algorithm algorithm, char **name);
+typedef int(*grpc_stream_compression_algorithm_name_type)(grpc_stream_compression_algorithm algorithm, const char **name);
extern grpc_stream_compression_algorithm_name_type grpc_stream_compression_algorithm_name_import;
#define grpc_stream_compression_algorithm_name grpc_stream_compression_algorithm_name_import
typedef grpc_compression_algorithm(*grpc_compression_algorithm_for_level_type)(grpc_compression_level level, uint32_t accepted_encodings);
@@ -242,13 +127,22 @@ extern grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import
typedef void(*grpc_completion_queue_destroy_type)(grpc_completion_queue *cq);
extern grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import;
#define grpc_completion_queue_destroy grpc_completion_queue_destroy_import
-typedef grpc_alarm *(*grpc_alarm_create_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *tag);
+typedef void(*grpc_completion_queue_thread_local_cache_init_type)(grpc_completion_queue *cq);
+extern grpc_completion_queue_thread_local_cache_init_type grpc_completion_queue_thread_local_cache_init_import;
+#define grpc_completion_queue_thread_local_cache_init grpc_completion_queue_thread_local_cache_init_import
+typedef int(*grpc_completion_queue_thread_local_cache_flush_type)(grpc_completion_queue *cq, void **tag, int *ok);
+extern grpc_completion_queue_thread_local_cache_flush_type grpc_completion_queue_thread_local_cache_flush_import;
+#define grpc_completion_queue_thread_local_cache_flush grpc_completion_queue_thread_local_cache_flush_import
+typedef grpc_alarm *(*grpc_alarm_create_type)(void *reserved);
extern grpc_alarm_create_type grpc_alarm_create_import;
#define grpc_alarm_create grpc_alarm_create_import
-typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm);
+typedef void(*grpc_alarm_set_type)(grpc_alarm *alarm, grpc_completion_queue *cq, gpr_timespec deadline, void *tag, void *reserved);
+extern grpc_alarm_set_type grpc_alarm_set_import;
+#define grpc_alarm_set grpc_alarm_set_import
+typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm, void *reserved);
extern grpc_alarm_cancel_type grpc_alarm_cancel_import;
#define grpc_alarm_cancel grpc_alarm_cancel_import
-typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm);
+typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm, void *reserved);
extern grpc_alarm_destroy_type grpc_alarm_destroy_import;
#define grpc_alarm_destroy grpc_alarm_destroy_import
typedef grpc_connectivity_state(*grpc_channel_check_connectivity_state_type)(grpc_channel *channel, int try_to_connect);
@@ -260,6 +154,9 @@ extern grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_ext
typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag);
extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import;
#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import
+typedef int(*grpc_channel_support_connectivity_watcher_type)(grpc_channel *channel);
+extern grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import;
+#define grpc_channel_support_connectivity_watcher grpc_channel_support_connectivity_watcher_import
typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, grpc_slice method, const grpc_slice *host, gpr_timespec deadline, void *reserved);
extern grpc_channel_create_call_type grpc_channel_create_call_import;
#define grpc_channel_create_call grpc_channel_create_call_import
@@ -464,12 +361,30 @@ extern grpc_secure_channel_create_type grpc_secure_channel_create_import;
typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds);
extern grpc_server_credentials_release_type grpc_server_credentials_release_import;
#define grpc_server_credentials_release grpc_server_credentials_release_import
+typedef grpc_ssl_server_certificate_config *(*grpc_ssl_server_certificate_config_create_type)(const char *pem_root_certs, const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs);
+extern grpc_ssl_server_certificate_config_create_type grpc_ssl_server_certificate_config_create_import;
+#define grpc_ssl_server_certificate_config_create grpc_ssl_server_certificate_config_create_import
+typedef void(*grpc_ssl_server_certificate_config_destroy_type)(grpc_ssl_server_certificate_config *config);
+extern grpc_ssl_server_certificate_config_destroy_type grpc_ssl_server_certificate_config_destroy_import;
+#define grpc_ssl_server_certificate_config_destroy grpc_ssl_server_certificate_config_destroy_import
typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved);
extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import;
#define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import
typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved);
extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import;
#define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import
+typedef grpc_ssl_server_credentials_options *(*grpc_ssl_server_credentials_create_options_using_config_type)(grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_server_certificate_config *certificate_config);
+extern grpc_ssl_server_credentials_create_options_using_config_type grpc_ssl_server_credentials_create_options_using_config_import;
+#define grpc_ssl_server_credentials_create_options_using_config grpc_ssl_server_credentials_create_options_using_config_import
+typedef grpc_ssl_server_credentials_options *(*grpc_ssl_server_credentials_create_options_using_config_fetcher_type)(grpc_ssl_client_certificate_request_type client_certificate_request, grpc_ssl_server_certificate_config_callback cb, void *user_data);
+extern grpc_ssl_server_credentials_create_options_using_config_fetcher_type grpc_ssl_server_credentials_create_options_using_config_fetcher_import;
+#define grpc_ssl_server_credentials_create_options_using_config_fetcher grpc_ssl_server_credentials_create_options_using_config_fetcher_import
+typedef void(*grpc_ssl_server_credentials_options_destroy_type)(grpc_ssl_server_credentials_options *options);
+extern grpc_ssl_server_credentials_options_destroy_type grpc_ssl_server_credentials_options_destroy_import;
+#define grpc_ssl_server_credentials_options_destroy grpc_ssl_server_credentials_options_destroy_import
+typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_with_options_type)(grpc_ssl_server_credentials_options *options);
+extern grpc_ssl_server_credentials_create_with_options_type grpc_ssl_server_credentials_create_with_options_import;
+#define grpc_ssl_server_credentials_create_with_options grpc_ssl_server_credentials_create_with_options_import
typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds);
extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import;
#define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import
@@ -479,6 +394,36 @@ extern grpc_call_set_credentials_type grpc_call_set_credentials_import;
typedef void(*grpc_server_credentials_set_auth_metadata_processor_type)(grpc_server_credentials *creds, grpc_auth_metadata_processor processor);
extern grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import;
#define grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor_import
+typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_create_type)(grpc_slice *slices, size_t nslices);
+extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import;
+#define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import
+typedef grpc_byte_buffer *(*grpc_raw_compressed_byte_buffer_create_type)(grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression);
+extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import;
+#define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import
+typedef grpc_byte_buffer *(*grpc_byte_buffer_copy_type)(grpc_byte_buffer *bb);
+extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import;
+#define grpc_byte_buffer_copy grpc_byte_buffer_copy_import
+typedef size_t(*grpc_byte_buffer_length_type)(grpc_byte_buffer *bb);
+extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import;
+#define grpc_byte_buffer_length grpc_byte_buffer_length_import
+typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer);
+extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import;
+#define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import
+typedef int(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer);
+extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import;
+#define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import
+typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader);
+extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import;
+#define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import
+typedef int(*grpc_byte_buffer_reader_next_type)(grpc_byte_buffer_reader *reader, grpc_slice *slice);
+extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import;
+#define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import
+typedef grpc_slice(*grpc_byte_buffer_reader_readall_type)(grpc_byte_buffer_reader *reader);
+extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import;
+#define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import
+typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader);
+extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import;
+#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import
typedef grpc_slice(*grpc_slice_ref_type)(grpc_slice s);
extern grpc_slice_ref_type grpc_slice_ref_import;
#define grpc_slice_ref grpc_slice_ref_import
diff --git a/src/ruby/lib/grpc.rb b/src/ruby/lib/grpc.rb
index 98bfc0a0fa..37b0392072 100644
--- a/src/ruby/lib/grpc.rb
+++ b/src/ruby/lib/grpc.rb
@@ -24,6 +24,7 @@ require_relative 'grpc/generic/active_call'
require_relative 'grpc/generic/client_stub'
require_relative 'grpc/generic/service'
require_relative 'grpc/generic/rpc_server'
+require_relative 'grpc/generic/interceptors'
begin
file = File.open(ssl_roots_path)
diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb
index 10eb70b4a7..8c3aa284aa 100644
--- a/src/ruby/lib/grpc/generic/active_call.rb
+++ b/src/ruby/lib/grpc/generic/active_call.rb
@@ -154,6 +154,15 @@ module GRPC
Operation.new(self)
end
+ ##
+ # Returns a restricted view of this ActiveCall for use in interceptors
+ #
+ # @return [InterceptableView]
+ #
+ def interceptable
+ InterceptableView.new(self)
+ end
+
def receive_and_check_status
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
set_input_stream_done
@@ -515,15 +524,27 @@ module GRPC
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
- # @param gen_each_reply [Proc] generates the BiDi stream replies
- def run_server_bidi(gen_each_reply)
- bd = BidiCall.new(@call,
- @marshal,
- @unmarshal,
- metadata_received: @metadata_received,
- req_view: MultiReqView.new(self))
-
- bd.run_on_server(gen_each_reply, proc { set_input_stream_done })
+ # @param mth [Proc] generates the BiDi stream replies
+ # @param interception_ctx [InterceptionContext]
+ #
+ def run_server_bidi(mth, interception_ctx)
+ view = multi_req_view
+ bidi_call = BidiCall.new(
+ @call,
+ @marshal,
+ @unmarshal,
+ metadata_received: @metadata_received,
+ req_view: view
+ )
+ requests = bidi_call.read_next_loop(proc { set_input_stream_done }, false)
+ interception_ctx.intercept!(
+ :bidi_streamer,
+ call: view,
+ method: mth,
+ requests: requests
+ ) do
+ bidi_call.run_on_server(mth, requests)
+ end
end
# Waits till an operation completes
@@ -645,5 +666,9 @@ module GRPC
Operation = view_class(:cancel, :cancelled?, :deadline, :execute,
:metadata, :status, :start_call, :wait, :write_flag,
:write_flag=, :trailing_metadata)
+
+ # InterceptableView further limits access to an ActiveCall's methods
+ # for use in interceptors on the client, exposing only the deadline
+ InterceptableView = view_class(:deadline)
end
end
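For context on how the new InterceptableView is meant to be used, a minimal sketch (the interceptor class below is illustrative, not part of this patch): inside a client interceptor the call: argument is the restricted view returned by ActiveCall#interceptable, so only #deadline is available.

  class DeadlineLogger < GRPC::ClientInterceptor
    def request_response(request:, call:, method:, metadata:)
      # `call` here is the InterceptableView built by ActiveCall#interceptable,
      # so #deadline is the only call method exposed to the interceptor.
      GRPC.logger.debug("deadline for #{method}: #{call.deadline}")
      yield  # continue to the next interceptor, or the actual RPC
    end
  end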
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index c2239d0178..3bdcc0062e 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -87,23 +87,32 @@ module GRPC
# This does not mean that must necessarily be one. E.g, the replies
# produced by gen_each_reply could ignore the received_msgs
#
- # @param gen_each_reply [Proc] generates the BiDi stream replies.
- # @param set_input_steam_done [Proc] call back to call when
- # the reads have been completely read through.
- def run_on_server(gen_each_reply, set_input_stream_done)
+ # @param [Proc] gen_each_reply generates the BiDi stream replies.
+ # @param [Enumerable] requests The enumerable of requests to run
+ def run_on_server(gen_each_reply, requests)
+ replies = nil
+
# Pass in the optional call object parameter if possible
if gen_each_reply.arity == 1
- replys = gen_each_reply.call(
- read_loop(set_input_stream_done, is_client: false))
+ replies = gen_each_reply.call(requests)
elsif gen_each_reply.arity == 2
- replys = gen_each_reply.call(
- read_loop(set_input_stream_done, is_client: false),
- @req_view)
+ replies = gen_each_reply.call(requests, @req_view)
else
fail 'Illegal arity of reply generator'
end
- write_loop(replys, is_client: false)
+ write_loop(replies, is_client: false)
+ end
+
+ ##
+ # Read the next stream iteration
+ #
+ # @param [Proc] finalize_stream callback to call when the reads have been
+ # completely read through.
+ # @param [Boolean] is_client If this is a client or server request
+ #
+ def read_next_loop(finalize_stream, is_client = false)
+ read_loop(finalize_stream, is_client: is_client)
end
private
diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb
index 75a95a4e94..9a50f8a99d 100644
--- a/src/ruby/lib/grpc/generic/client_stub.rb
+++ b/src/ruby/lib/grpc/generic/client_stub.rb
@@ -89,17 +89,23 @@ module GRPC
# used within a gRPC server.
# @param channel_args [Hash] the channel arguments. Note: this argument is
# ignored if the channel_override argument is provided.
+ # @param interceptors [Array<GRPC::ClientInterceptor>] An array of
+ # GRPC::ClientInterceptor objects that will be used for
+ # intercepting calls before they are executed
+ # Interceptors are an EXPERIMENTAL API.
def initialize(host, creds,
channel_override: nil,
timeout: nil,
propagate_mask: nil,
- channel_args: {})
+ channel_args: {},
+ interceptors: [])
@ch = ClientStub.setup_channel(channel_override, host, creds,
channel_args)
alt_host = channel_args[Core::Channel::SSL_TARGET]
@host = alt_host.nil? ? host : alt_host
@propagate_mask = propagate_mask
@timeout = timeout.nil? ? DEFAULT_TIMEOUT : timeout
+ @interceptors = InterceptorRegistry.new(interceptors)
end
# request_response sends a request to a GRPC server, and returns the
@@ -149,16 +155,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
- return c.request_response(req, metadata: metadata) unless return_op
-
- # return the operation view of the active_call; define #execute as a
- # new method for this instance that invokes #request_response.
- c.merge_metadata_to_send(metadata)
- op = c.operation
- op.define_singleton_method(:execute) do
- c.request_response(req, metadata: metadata)
+ interception_context = @interceptors.build_context
+ intercept_args = {
+ method: method,
+ request: req,
+ call: c.interceptable,
+ metadata: metadata
+ }
+ if return_op
+ # return the operation view of the active_call; define #execute as a
+ # new method for this instance that invokes #request_response.
+ c.merge_metadata_to_send(metadata)
+ op = c.operation
+ op.define_singleton_method(:execute) do
+ interception_context.intercept!(:request_response, intercept_args) do
+ c.request_response(req, metadata: metadata)
+ end
+ end
+ op
+ else
+ interception_context.intercept!(:request_response, intercept_args) do
+ c.request_response(req, metadata: metadata)
+ end
end
- op
end
# client_streamer sends a stream of requests to a GRPC server, and
@@ -213,16 +232,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
- return c.client_streamer(requests, metadata: metadata) unless return_op
-
- # return the operation view of the active_call; define #execute as a
- # new method for this instance that invokes #client_streamer.
- c.merge_metadata_to_send(metadata)
- op = c.operation
- op.define_singleton_method(:execute) do
- c.client_streamer(requests)
+ interception_context = @interceptors.build_context
+ intercept_args = {
+ method: method,
+ requests: requests,
+ call: c.interceptable,
+ metadata: metadata
+ }
+ if return_op
+ # return the operation view of the active_call; define #execute as a
+ # new method for this instance that invokes #client_streamer.
+ c.merge_metadata_to_send(metadata)
+ op = c.operation
+ op.define_singleton_method(:execute) do
+ interception_context.intercept!(:client_streamer, intercept_args) do
+ c.client_streamer(requests)
+ end
+ end
+ op
+ else
+ interception_context.intercept!(:client_streamer, intercept_args) do
+ c.client_streamer(requests, metadata: metadata)
+ end
end
- op
end
# server_streamer sends one request to the GRPC server, which yields a
@@ -292,16 +324,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
- return c.server_streamer(req, metadata: metadata, &blk) unless return_op
-
- # return the operation view of the active_call; define #execute
- # as a new method for this instance that invokes #server_streamer
- c.merge_metadata_to_send(metadata)
- op = c.operation
- op.define_singleton_method(:execute) do
- c.server_streamer(req, &blk)
+ interception_context = @interceptors.build_context
+ intercept_args = {
+ method: method,
+ request: req,
+ call: c.interceptable,
+ metadata: metadata
+ }
+ if return_op
+ # return the operation view of the active_call; define #execute
+ # as a new method for this instance that invokes #server_streamer
+ c.merge_metadata_to_send(metadata)
+ op = c.operation
+ op.define_singleton_method(:execute) do
+ interception_context.intercept!(:server_streamer, intercept_args) do
+ c.server_streamer(req, &blk)
+ end
+ end
+ op
+ else
+ interception_context.intercept!(:server_streamer, intercept_args) do
+ c.server_streamer(req, metadata: metadata, &blk)
+ end
end
- op
end
# bidi_streamer sends a stream of requests to the GRPC server, and yields
@@ -405,17 +450,29 @@ module GRPC
deadline: deadline,
parent: parent,
credentials: credentials)
- return c.bidi_streamer(requests, metadata: metadata,
- &blk) unless return_op
-
- # return the operation view of the active_call; define #execute
- # as a new method for this instance that invokes #bidi_streamer
- c.merge_metadata_to_send(metadata)
- op = c.operation
- op.define_singleton_method(:execute) do
- c.bidi_streamer(requests, &blk)
+ interception_context = @interceptors.build_context
+ intercept_args = {
+ method: method,
+ requests: requests,
+ call: c.interceptable,
+ metadata: metadata
+ }
+ if return_op
+ # return the operation view of the active_call; define #execute
+ # as a new method for this instance that invokes #bidi_streamer
+ c.merge_metadata_to_send(metadata)
+ op = c.operation
+ op.define_singleton_method(:execute) do
+ interception_context.intercept!(:bidi_streamer, intercept_args) do
+ c.bidi_streamer(requests, &blk)
+ end
+ end
+ op
+ else
+ interception_context.intercept!(:bidi_streamer, intercept_args) do
+ c.bidi_streamer(requests, metadata: metadata, &blk)
+ end
end
- op
end
private
diff --git a/src/ruby/lib/grpc/generic/interceptor_registry.rb b/src/ruby/lib/grpc/generic/interceptor_registry.rb
new file mode 100644
index 0000000000..b241eb9a86
--- /dev/null
+++ b/src/ruby/lib/grpc/generic/interceptor_registry.rb
@@ -0,0 +1,53 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# GRPC contains the General RPC module.
+module GRPC
+ ##
+ # Represents a registry of added interceptors available for enumeration.
+ # The registry can be used for both server and client interceptors.
+ # This class is internal to gRPC and not meant for public usage.
+ #
+ class InterceptorRegistry
+ ##
+ # An error raised when an interceptor is attempted to be added
+ # that does not extend GRPC::Interceptor
+ #
+ class DescendantError < StandardError; end
+
+ ##
+ # Initialize the registry with an empty interceptor list
+ # This is an EXPERIMENTAL API.
+ #
+ def initialize(interceptors = [])
+ @interceptors = []
+ interceptors.each do |i|
+ base = GRPC::Interceptor
+ unless i.class.ancestors.include?(base)
+ fail DescendantError, "Interceptors must descend from #{base}"
+ end
+ @interceptors << i
+ end
+ end
+
+ ##
+ # Builds an interception context from this registry
+ #
+ # @return [InterceptionContext]
+ #
+ def build_context
+ InterceptionContext.new(@interceptors)
+ end
+ end
+end
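A quick sketch of what the registry enforces (illustrative only; as the class comment above notes, the registry itself is internal): anything that does not descend from GRPC::Interceptor is rejected with DescendantError, and build_context snapshots the list into an InterceptionContext. MyInterceptor below is a placeholder name.

  registry = GRPC::InterceptorRegistry.new([MyInterceptor.new])  # MyInterceptor < GRPC::Interceptor
  ctx = registry.build_context                                   # => GRPC::InterceptionContext

  begin
    GRPC::InterceptorRegistry.new([Object.new])                  # not a GRPC::Interceptor
  rescue GRPC::InterceptorRegistry::DescendantError => e
    GRPC.logger.warn(e.message)                                  # "Interceptors must descend from GRPC::Interceptor"
  end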
diff --git a/src/ruby/lib/grpc/generic/interceptors.rb b/src/ruby/lib/grpc/generic/interceptors.rb
new file mode 100644
index 0000000000..73faec4b9c
--- /dev/null
+++ b/src/ruby/lib/grpc/generic/interceptors.rb
@@ -0,0 +1,186 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+require_relative 'interceptor_registry'
+
+# GRPC contains the General RPC module.
+module GRPC
+ ##
+ # Base class for interception in GRPC
+ #
+ class Interceptor
+ ##
+ # @param [Hash] options A hash of options that will be used
+ # by the interceptor. This is an EXPERIMENTAL API.
+ #
+ def initialize(options = {})
+ @options = options || {}
+ end
+ end
+
+ ##
+ # ClientInterceptor allows for wrapping outbound gRPC client stub requests.
+ # This is an EXPERIMENTAL API.
+ #
+ class ClientInterceptor < Interceptor
+ ##
+ # Intercept a unary request response call
+ #
+ # @param [Object] request
+ # @param [GRPC::ActiveCall] call
+ # @param [Method] method
+ # @param [Hash] metadata
+ #
+ def request_response(request:, call:, method:, metadata:)
+ GRPC.logger.debug "Intercepting request response method #{method}" \
+ " for request #{request} with call #{call} and metadata: #{metadata}"
+ yield
+ end
+
+ ##
+ # Intercept a client streaming call
+ #
+ # @param [Enumerable] requests
+ # @param [GRPC::ActiveCall] call
+ # @param [Method] method
+ # @param [Hash] metadata
+ #
+ def client_streamer(requests:, call:, method:, metadata:)
+ GRPC.logger.debug "Intercepting client streamer method #{method}" \
+ " for requests #{requests} with call #{call} and metadata: #{metadata}"
+ yield
+ end
+
+ ##
+ # Intercept a server streaming call
+ #
+ # @param [Object] request
+ # @param [GRPC::ActiveCall] call
+ # @param [Method] method
+ # @param [Hash] metadata
+ #
+ def server_streamer(request:, call:, method:, metadata:)
+ GRPC.logger.debug "Intercepting server streamer method #{method}" \
+ " for request #{request} with call #{call} and metadata: #{metadata}"
+ yield
+ end
+
+ ##
+ # Intercept a BiDi streaming call
+ #
+ # @param [Enumerable] requests
+ # @param [GRPC::ActiveCall] call
+ # @param [Method] method
+ # @param [Hash] metadata
+ #
+ def bidi_streamer(requests:, call:, method:, metadata:)
+ GRPC.logger.debug "Intercepting bidi streamer method #{method}" \
+ " for requests #{requests} with call #{call} and metadata: #{metadata}"
+ yield
+ end
+ end
+
+ ##
+ # ServerInterceptor allows for wrapping gRPC server execution handling.
+ # This is an EXPERIMENTAL API.
+ #
+ class ServerInterceptor < Interceptor
+ ##
+ # Intercept a unary request response call.
+ #
+ # @param [Object] request
+ # @param [GRPC::ActiveCall::SingleReqView] call
+ # @param [Method] method
+ #
+ def request_response(request:, call:, method:)
+ GRPC.logger.debug "Intercepting request response method #{method}" \
+ " for request #{request} with call #{call}"
+ yield
+ end
+
+ ##
+ # Intercept a client streaming call
+ #
+ # @param [GRPC::ActiveCall::MultiReqView] call
+ # @param [Method] method
+ #
+ def client_streamer(call:, method:)
+ GRPC.logger.debug "Intercepting client streamer method #{method}" \
+ " with call #{call}"
+ yield
+ end
+
+ ##
+ # Intercept a server streaming call
+ #
+ # @param [Object] request
+ # @param [GRPC::ActiveCall::SingleReqView] call
+ # @param [Method] method
+ #
+ def server_streamer(request:, call:, method:)
+ GRPC.logger.debug "Intercepting server streamer method #{method}" \
+ " for request #{request} with call #{call}"
+ yield
+ end
+
+ ##
+ # Intercept a BiDi streaming call
+ #
+ # @param [Enumerable<Object>] requests
+ # @param [GRPC::ActiveCall::MultiReqView] call
+ # @param [Method] method
+ #
+ def bidi_streamer(requests:, call:, method:)
+ GRPC.logger.debug "Intercepting bidi streamer method #{method}" \
+ " for requests #{requests} with call #{call}"
+ yield
+ end
+ end
+
+ ##
+ # Represents the context in which an interceptor runs. Used to provide an
+ # injectable mechanism for handling interception. This is an EXPERIMENTAL API.
+ #
+ class InterceptionContext
+ ##
+ # @param [Array<GRPC::Interceptor>] interceptors
+ #
+ def initialize(interceptors = [])
+ @interceptors = interceptors.dup
+ end
+
+ ##
+ # Intercept the call and fire out to interceptors in a FIFO execution.
+ # This is an EXPERIMENTAL API.
+ #
+ # @param [Symbol] type The request type
+ # @param [Hash] args The arguments for the call
+ #
+ def intercept!(type, args = {})
+ return yield if @interceptors.none?
+
+ i = @interceptors.pop
+ return yield unless i
+
+ i.send(type, args) do
+ if @interceptors.any?
+ intercept!(type, args) do
+ yield
+ end
+ else
+ yield
+ end
+ end
+ end
+ end
+end
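To show how these hooks are expected to be exercised from the client side, a hedged sketch (the Echo stub, request class, and address are placeholders, not part of this change): a ClientInterceptor subclass overrides the hook it cares about, yields to continue the chain, and is handed to the stub through the new interceptors: keyword added to ClientStub above.

  class TimingInterceptor < GRPC::ClientInterceptor
    def request_response(request:, call:, method:, metadata:)
      started = Time.now
      yield                                   # run the remaining interceptors, then the RPC
    ensure
      GRPC.logger.info("#{method} took #{Time.now - started}s")
    end
  end

  stub = Echo::Stub.new('localhost:50051', :this_channel_is_insecure,
                        interceptors: [TimingInterceptor.new])   # Echo::Stub is a placeholder service stub
  stub.echo(EchoRequest.new(message: 'hi'))                      # intercepted, then sent as usual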
diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb
index 6fb6c412fb..5fd1805aab 100644
--- a/src/ruby/lib/grpc/generic/rpc_desc.rb
+++ b/src/ruby/lib/grpc/generic/rpc_desc.rb
@@ -47,43 +47,85 @@ module GRPC
proc { |o| unmarshal_class.method(unmarshal_method).call(o) }
end
- def handle_request_response(active_call, mth)
+ def handle_request_response(active_call, mth, inter_ctx)
req = active_call.read_unary_request
- resp = mth.call(req, active_call.single_req_view)
- active_call.server_unary_response(
- resp, trailing_metadata: active_call.output_metadata)
+ call = active_call.single_req_view
+
+ inter_ctx.intercept!(
+ :request_response,
+ method: mth,
+ call: call,
+ request: req
+ ) do
+ resp = mth.call(req, call)
+ active_call.server_unary_response(
+ resp,
+ trailing_metadata: active_call.output_metadata
+ )
+ end
end
- def handle_client_streamer(active_call, mth)
- resp = mth.call(active_call.multi_req_view)
- active_call.server_unary_response(
- resp, trailing_metadata: active_call.output_metadata)
+ def handle_client_streamer(active_call, mth, inter_ctx)
+ call = active_call.multi_req_view
+
+ inter_ctx.intercept!(
+ :client_streamer,
+ method: mth,
+ call: call
+ ) do
+ resp = mth.call(call)
+ active_call.server_unary_response(
+ resp,
+ trailing_metadata: active_call.output_metadata
+ )
+ end
end
- def handle_server_streamer(active_call, mth)
+ def handle_server_streamer(active_call, mth, inter_ctx)
req = active_call.read_unary_request
- replys = mth.call(req, active_call.single_req_view)
- replys.each { |r| active_call.remote_send(r) }
- send_status(active_call, OK, 'OK', active_call.output_metadata)
+ call = active_call.single_req_view
+
+ inter_ctx.intercept!(
+ :server_streamer,
+ method: mth,
+ call: call,
+ request: req
+ ) do
+ replies = mth.call(req, call)
+ replies.each { |r| active_call.remote_send(r) }
+ send_status(active_call, OK, 'OK', active_call.output_metadata)
+ end
end
- def handle_bidi_streamer(active_call, mth)
- active_call.run_server_bidi(mth)
+ ##
+ # @param [GRPC::ActiveCall] active_call
+ # @param [Method] mth
+ # @param [GRPC::InterceptionContext] inter_ctx
+ #
+ def handle_bidi_streamer(active_call, mth, inter_ctx)
+ active_call.run_server_bidi(mth, inter_ctx)
send_status(active_call, OK, 'OK', active_call.output_metadata)
end
- def run_server_method(active_call, mth)
+ ##
+ # @param [GRPC::ActiveCall] active_call The current active call object
+ # for the request
+ # @param [Method] mth The current RPC method being called
+ # @param [GRPC::InterceptionContext] inter_ctx The interception context
+ # being executed
+ #
+ def run_server_method(active_call, mth, inter_ctx = InterceptionContext.new)
# While a server method is running, it might be cancelled, its deadline
# might be reached, the handler could throw an unknown error, or a
# well-behaved handler could throw a StatusError.
if request_response?
- handle_request_response(active_call, mth)
+ handle_request_response(active_call, mth, inter_ctx)
elsif client_streamer?
- handle_client_streamer(active_call, mth)
+ handle_client_streamer(active_call, mth, inter_ctx)
elsif server_streamer?
- handle_server_streamer(active_call, mth)
+ handle_server_streamer(active_call, mth, inter_ctx)
else # is a bidi_stream
- handle_bidi_streamer(active_call, mth)
+ handle_bidi_streamer(active_call, mth, inter_ctx)
end
rescue BadStatus => e
# this is raised by handlers that want GRPC to send an application error
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 33b3cea1fc..d5fc11dc1c 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -196,11 +196,18 @@ module GRPC
#
# * server_args:
# A server arguments hash to be passed down to the underlying core server
+ #
+ # * interceptors:
+ # An array of GRPC::ServerInterceptor objects that will be used for
+ # intercepting server handlers to provide extra functionality.
+ # Interceptors are an EXPERIMENTAL API.
+ #
def initialize(pool_size:DEFAULT_POOL_SIZE,
max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS,
poll_period:DEFAULT_POLL_PERIOD,
connect_md_proc:nil,
- server_args:{})
+ server_args:{},
+ interceptors:[])
@connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc)
@max_waiting_requests = max_waiting_requests
@poll_period = poll_period
@@ -212,6 +219,7 @@ module GRPC
# :stopped. State transitions can only proceed in that order.
@running_state = :not_started
@server = Core::Server.new(server_args)
+ @interceptors = InterceptorRegistry.new(interceptors)
end
# stops a running server
@@ -374,7 +382,11 @@ module GRPC
@pool.schedule(active_call) do |ac|
c, mth = ac
begin
- rpc_descs[mth].run_server_method(c, rpc_handlers[mth])
+ rpc_descs[mth].run_server_method(
+ c,
+ rpc_handlers[mth],
+ @interceptors.build_context
+ )
rescue StandardError
c.send_status(GRPC::Core::StatusCodes::INTERNAL,
'Server handler failed')
@@ -382,7 +394,7 @@ module GRPC
end
end
rescue Core::CallError, RuntimeError => e
- # these might happen for various reasonse. The correct behaviour of
+ # these might happen for various reasons. The correct behavior of
# the server is to log them and continue, if it's not shutting down.
if running_state == :running
GRPC.logger.warn("server call failed: #{e}")
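On the server side the analogous wiring, again as a hedged sketch (EchoService is a placeholder handler class): interceptors passed to RpcServer.new land in the InterceptorRegistry created above, and a fresh InterceptionContext is built for each dispatched call.

  class AuthInterceptor < GRPC::ServerInterceptor
    def request_response(request:, call:, method:)
      GRPC.logger.debug("authorizing #{method}")
      yield                                    # run the handler (or the next interceptor)
    end
  end

  server = GRPC::RpcServer.new(interceptors: [AuthInterceptor.new])
  server.add_http2_port('0.0.0.0:50051', :this_port_is_insecure)
  server.handle(EchoService.new)               # placeholder service implementation
  server.run_till_terminated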
diff --git a/src/ruby/lib/grpc/google_rpc_status_utils.rb b/src/ruby/lib/grpc/google_rpc_status_utils.rb
new file mode 100644
index 0000000000..f253b082b6
--- /dev/null
+++ b/src/ruby/lib/grpc/google_rpc_status_utils.rb
@@ -0,0 +1,35 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require_relative './grpc'
+require 'google/rpc/status_pb'
+
+# GRPC contains the General RPC module.
+module GRPC
+ # GoogleRpcStatusUtils provides utilities to convert between a
+ # GRPC::Core::Status and a deserialized Google::Rpc::Status proto
+ # Returns nil if the grpc-status-details-bin trailer could not be
+ # converted to a GoogleRpcStatus due to the server not providing
+ # the necessary trailers.
+ # Raises an error if the server did provide the necessary trailers
+ # but they fail to deserialize into a GoogleRpcStatus protobuf.
+ class GoogleRpcStatusUtils
+ def self.extract_google_rpc_status(status)
+ fail ArgumentError, 'bad type' unless status.is_a? Struct::Status
+ grpc_status_details_bin_trailer = 'grpc-status-details-bin'
+ return nil if status.metadata[grpc_status_details_bin_trailer].nil?
+ Google::Rpc::Status.decode(status.metadata[grpc_status_details_bin_trailer])
+ end
+ end
+end
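A hedged usage sketch for the new helper (it assumes GRPC::BadStatus#to_status yields the Struct::Status expected here, that the library file is required explicitly, and that the server actually attached a grpc-status-details-bin trailer; the stub and request are placeholders):

  require 'grpc/google_rpc_status_utils'

  begin
    stub.some_unary_call(request)              # placeholder call that fails with a rich status
  rescue GRPC::BadStatus => e
    rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(e.to_status)
    GRPC.logger.warn("rich status: #{rpc_status.inspect}") unless rpc_status.nil?
  end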
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 228c01a92c..3001579ce7 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -14,5 +14,5 @@
# GRPC contains the General RPC module.
module GRPC
- VERSION = '1.7.0.dev'
+ VERSION = '1.8.0.dev'
end
diff --git a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb
index 683370121e..ab50d9b3a5 100644
--- a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb
+++ b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb
@@ -34,6 +34,7 @@ module Grpc
self.service_name = 'grpc.testing.duplicate.EchoTestService'
rpc :Echo, Grpc::Testing::EchoRequest, Grpc::Testing::EchoResponse
+ rpc :ResponseStream, Grpc::Testing::EchoRequest, stream(Grpc::Testing::EchoResponse)
end
Stub = Service.rpc_stub_class
diff --git a/src/ruby/qps/histogram.rb b/src/ruby/qps/histogram.rb
index 1a27e17218..e48fb842be 100644
--- a/src/ruby/qps/histogram.rb
+++ b/src/ruby/qps/histogram.rb
@@ -16,6 +16,8 @@
# Histogram class for use in performance testing and measurement
+require 'thread'
+
class Histogram
# Determine the bucket index for a given value
# @param {number} value The value to check
@@ -27,6 +29,7 @@ class Histogram
# @param {number} resolution The resolution of the histogram
# @param {number} max_possible The maximum value for the histogram
def initialize(resolution, max_possible)
+ @lock = Mutex.new
@resolution=resolution
@max_possible=max_possible
@sum=0
@@ -70,4 +73,16 @@ class Histogram
def contents
@buckets
end
+
+ def merge(hist)
+ @lock.synchronize do
+ @min_seen = hist.min_seen
+ @max_seen = hist.max_seen
+ @sum += hist.sum
+ @sum_of_squares += hist.sum_of_squares
+ @count += hist.count
+ received_bucket = hist.bucket.to_a
+ @buckets = @buckets.map.with_index{ |m,i| m + received_bucket[i].to_i }
+ end
+ end
end
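The new merge is what the proxy worker's report_hist below relies on; a small sketch, under the assumption that the argument responds to the HistogramData-style readers used above (min_seen, max_seen, sum, sum_of_squares, count, bucket) and was built with the same resolution and max_possible:

  combined = Histogram.new(0.01, 60e9)         # resolution and max are illustrative
  worker_histograms.each do |hist_data|        # e.g. Grpc::Testing::HistogramData messages
    combined.merge(hist_data)                  # serialized by the new Mutex
  end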
diff --git a/src/ruby/qps/proxy-worker.rb b/src/ruby/qps/proxy-worker.rb
index d7a9f114bd..4c7c510fdb 100755
--- a/src/ruby/qps/proxy-worker.rb
+++ b/src/ruby/qps/proxy-worker.rb
@@ -31,8 +31,10 @@ require 'src/proto/grpc/testing/services_services_pb'
require 'src/proto/grpc/testing/proxy-service_services_pb'
class ProxyBenchmarkClientServiceImpl < Grpc::Testing::ProxyClientService::Service
- def initialize(port)
+ def initialize(port, c_ext, php_client_bin)
@mytarget = "localhost:" + port.to_s
+ @use_c_ext = c_ext
+ @php_client_bin = php_client_bin
end
def setup(config)
@config = config
@@ -40,26 +42,49 @@ class ProxyBenchmarkClientServiceImpl < Grpc::Testing::ProxyClientService::Servi
@histmax = config.histogram_params.max_possible
@histogram = Histogram.new(@histres, @histmax)
@start_time = Time.now
- # TODO(vjpai): Support multiple client channels by spawning off a PHP client per channel
- command = "php " + File.expand_path(File.dirname(__FILE__)) + "/../../php/tests/qps/client.php " + @mytarget
- puts "Starting command: " + command
- @php_pid = spawn(command)
+ @php_pid = Array.new(@config.client_channels)
+ (0..@config.client_channels-1).each do |chan|
+ Thread.new {
+ if @use_c_ext
+ puts "Use protobuf c extension"
+ command = "php -d extension=" + File.expand_path(File.dirname(__FILE__)) +
+ "/../../php/tests/qps/vendor/google/protobuf/php/ext/google/protobuf/modules/protobuf.so " +
+ "-d extension=" + File.expand_path(File.dirname(__FILE__)) + "/../../php/ext/grpc/modules/grpc.so " +
+ File.expand_path(File.dirname(__FILE__)) + "/" + @php_client_bin + " " + @mytarget + " #{chan%@config.server_targets.length}"
+ else
+ puts "Use protobuf php extension"
+ command = "php -d extension=" + File.expand_path(File.dirname(__FILE__)) + "/../../php/ext/grpc/modules/grpc.so " +
+ File.expand_path(File.dirname(__FILE__)) + "/" + @php_client_bin + " " + @mytarget + " #{chan%@config.server_targets.length}"
+ end
+ puts "[ruby proxy] Starting #{chan}th php-client command use c protobuf #{@use_c_ext}: " + command
+ @php_pid[chan] = spawn(command)
+ while true
+ sleep
+ end
+ }
+ end
end
def stop
- Process.kill("TERM", @php_pid)
- Process.wait(@php_pid)
+ (0..@config.client_channels-1).each do |chan|
+ Process.kill("TERM", @php_pid[chan])
+ Process.wait(@php_pid[chan])
+ end
end
def get_config(_args, _call)
- puts "Answering get_config"
@config
end
def report_time(call)
- puts "Starting a time reporting stream"
call.each_remote_read do |lat|
@histogram.add((lat.latency)*1e9)
end
Grpc::Testing::Void.new
end
+ def report_hist(call)
+ call.each_remote_read do |lat|
+ @histogram.merge(lat)
+ end
+ Grpc::Testing::Void.new
+ end
def mark(reset)
lat = Grpc::Testing::HistogramData.new(
bucket: @histogram.contents,
@@ -121,22 +146,31 @@ end
def proxymain
options = {
- 'driver_port' => 0
+ 'driver_port' => 0,
+ 'php_client_bin' => '../../php/tests/qps/client.php'
}
OptionParser.new do |opts|
opts.banner = 'Usage: [--driver_port <port>]'
opts.on('--driver_port PORT', '<port>') do |v|
options['driver_port'] = v
end
+ opts.on("-c", "--[no-]use_protobuf_c_extension", "Use protobuf C-extention") do |c|
+ options[:c_ext] = c
+ end
+ opts.on("-b", "--php_client_bin [FILE]",
+ "PHP client to execute; path relative to this script") do |c|
+ options['php_client_bin'] = c
+ end
end.parse!
# Configure any errors with client or server child threads to surface
Thread.abort_on_exception = true
- s = GRPC::RpcServer.new
+ # Make sure proxy_server can handle the large number of calls in benchmarks
+ s = GRPC::RpcServer.new(pool_size: 1024)
port = s.add_http2_port("0.0.0.0:" + options['driver_port'].to_s,
:this_port_is_insecure)
- bmc = ProxyBenchmarkClientServiceImpl.new(port)
+ bmc = ProxyBenchmarkClientServiceImpl.new(port, options[:c_ext], options['php_client_bin'])
s.handle(bmc)
s.handle(ProxyWorkerServiceImpl.new(s, bmc))
s.run
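With the new flags, an example invocation of the proxy worker (port and paths are illustrative) looks like:

# Drive the proxy on port 8080, load the protobuf C extension, and point it
# at a specific PHP client script (path relative to proxy-worker.rb).
ruby proxy-worker.rb --driver_port 8080 \
  --use_protobuf_c_extension \
  --php_client_bin ../../php/tests/qps/client.php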
diff --git a/src/ruby/qps/src/proto/grpc/testing/proxy-service_pb.rb b/src/ruby/qps/src/proto/grpc/testing/proxy-service_pb.rb
index d238198cca..583b2ea655 100644
--- a/src/ruby/qps/src/proto/grpc/testing/proxy-service_pb.rb
+++ b/src/ruby/qps/src/proto/grpc/testing/proxy-service_pb.rb
@@ -4,6 +4,7 @@
require 'google/protobuf'
require 'src/proto/grpc/testing/control_pb'
+require 'src/proto/grpc/testing/stats_pb'
Google::Protobuf::DescriptorPool.generated_pool.build do
add_message "grpc.testing.ProxyStat" do
optional :latency, :double, 1
diff --git a/src/ruby/qps/src/proto/grpc/testing/proxy-service_services_pb.rb b/src/ruby/qps/src/proto/grpc/testing/proxy-service_services_pb.rb
index 484cf05f92..e7bb59b8a0 100644
--- a/src/ruby/qps/src/proto/grpc/testing/proxy-service_services_pb.rb
+++ b/src/ruby/qps/src/proto/grpc/testing/proxy-service_services_pb.rb
@@ -32,6 +32,7 @@ module Grpc
rpc :GetConfig, Void, ClientConfig
rpc :ReportTime, stream(ProxyStat), Void
+ rpc :ReportHist, stream(HistogramData), Void
end
Stub = Service.rpc_stub_class
diff --git a/src/ruby/spec/channel_connection_spec.rb b/src/ruby/spec/channel_connection_spec.rb
index c76056606b..ce3e3b1c93 100644
--- a/src/ruby/spec/channel_connection_spec.rb
+++ b/src/ruby/spec/channel_connection_spec.rb
@@ -11,45 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-require 'grpc'
+require 'spec_helper'
require 'timeout'
include Timeout
include GRPC::Core
-# A test message
-class EchoMsg
- def self.marshal(_o)
- ''
- end
-
- def self.unmarshal(_o)
- EchoMsg.new
- end
-end
-
-# A test service with an echo implementation.
-class EchoService
- include GRPC::GenericService
- rpc :an_rpc, EchoMsg, EchoMsg
- attr_reader :received_md
-
- def initialize(**kw)
- @trailing_metadata = kw
- @received_md = []
- end
-
- def an_rpc(req, call)
- GRPC.logger.info('echo service received a request')
- call.output_metadata.update(@trailing_metadata)
- @received_md << call.metadata unless call.metadata.nil?
- req
- end
-end
-
-EchoStub = EchoService.rpc_stub_class
-
def start_server(port = 0)
@srv = GRPC::RpcServer.new(pool_size: 1)
server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure)
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index 1a9b47e2c3..adab8c9d14 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -29,14 +29,18 @@ shared_context 'setup: tags' do
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
ops = { CallOps::SEND_INITIAL_METADATA => metadata }
- svr_batch = server_call.run_batch(ops)
- expect(svr_batch.send_metadata).to be true
+ server_batch = server_call.run_batch(ops)
+ expect(server_batch.send_metadata).to be true
server_call
end
def new_client_call
@ch.create_call(nil, nil, '/method', nil, deadline)
end
+
+ def ok_status
+ Struct::Status.new(StatusCodes::OK, 'OK')
+ end
end
shared_examples 'basic GRPC message delivery is OK' do
@@ -70,19 +74,32 @@ shared_examples 'basic GRPC message delivery is OK' do
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
- CallOps::SEND_MESSAGE => sent_message
+ CallOps::SEND_MESSAGE => sent_message,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.send_message).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_message).to be true
+ expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
- CallOps::RECV_MESSAGE => nil
+ CallOps::RECV_MESSAGE => nil,
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.message).to eq(sent_message)
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.message).to eq(sent_message)
+ expect(server_batch.send_close).to be true
+ expect(server_batch.send_status).to be true
+
+ # finish the call
+ final_client_batch = call.run_batch(
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.status.code).to eq(0)
end
it 'responses written by servers are received by the client' do
@@ -95,21 +112,36 @@ shared_examples 'basic GRPC message delivery is OK' do
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
- CallOps::SEND_MESSAGE => sent_message
+ CallOps::SEND_MESSAGE => sent_message,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.send_message).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_message).to be true
+ expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil,
- CallOps::SEND_MESSAGE => reply_text
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_MESSAGE => reply_text,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.message).to eq(sent_message)
- expect(svr_batch.send_message).to be true
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.message).to eq(sent_message)
+ expect(server_batch.send_close).to be true
+ expect(server_batch.send_message).to be true
+ expect(server_batch.send_status).to be true
+
+ # finish the call
+ final_client_batch = call.run_batch(
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_MESSAGE => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.message).to eq(reply_text)
+ expect(final_client_batch.status.code).to eq(0)
end
it 'compressed messages can be sent and received' do
@@ -125,30 +157,37 @@ shared_examples 'basic GRPC message delivery is OK' do
client_ops = {
CallOps::SEND_INITIAL_METADATA => md,
- CallOps::SEND_MESSAGE => long_request_str
+ CallOps::SEND_MESSAGE => long_request_str,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.send_message).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_message).to be true
+ expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
server_thread.join
server_ops = {
CallOps::RECV_MESSAGE => nil,
- CallOps::SEND_MESSAGE => long_response_str
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_MESSAGE => long_response_str,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.message).to eq(long_request_str)
- expect(svr_batch.send_message).to be true
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.message).to eq(long_request_str)
+ expect(server_batch.send_close).to be true
+ expect(server_batch.send_message).to be true
+ expect(server_batch.send_status).to be true
client_ops = {
- CallOps::SEND_CLOSE_FROM_CLIENT => nil,
CallOps::RECV_INITIAL_METADATA => nil,
- CallOps::RECV_MESSAGE => nil
+ CallOps::RECV_MESSAGE => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_close).to be true
- expect(batch_result.message).to eq long_response_str
+ final_client_batch = call.run_batch(client_ops)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.message).to eq long_response_str
+ expect(final_client_batch.status.code).to eq(0)
end
it 'servers can ignore a client write and send a status' do
@@ -161,11 +200,13 @@ shared_examples 'basic GRPC message delivery is OK' do
client_ops = {
CallOps::SEND_INITIAL_METADATA => {},
- CallOps::SEND_MESSAGE => sent_message
+ CallOps::SEND_MESSAGE => sent_message,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.send_message).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_message).to be true
+ expect(client_batch.send_close).to be true
# confirm the server can read the inbound message
the_status = Struct::Status.new(StatusCodes::OK, 'OK')
@@ -173,9 +214,15 @@ shared_examples 'basic GRPC message delivery is OK' do
server_ops = {
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.message).to eq nil
- expect(svr_batch.send_status).to be true
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.message).to eq nil
+ expect(server_batch.send_status).to be true
+
+ final_client_batch = call.run_batch(
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.status.code).to eq(0)
end
it 'completes calls by sending status to client and server' do
@@ -190,9 +237,9 @@ shared_examples 'basic GRPC message delivery is OK' do
CallOps::SEND_INITIAL_METADATA => {},
CallOps::SEND_MESSAGE => sent_message
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.send_message).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_message).to be true
# confirm the server can read the inbound message and respond
the_status = Struct::Status.new(StatusCodes::OK, 'OK', {})
@@ -202,10 +249,10 @@ shared_examples 'basic GRPC message delivery is OK' do
CallOps::SEND_MESSAGE => reply_text,
CallOps::SEND_STATUS_FROM_SERVER => the_status
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.message).to eq sent_message
- expect(svr_batch.send_status).to be true
- expect(svr_batch.send_message).to be true
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.message).to eq sent_message
+ expect(server_batch.send_status).to be true
+ expect(server_batch.send_message).to be true
# confirm the client can receive the server response and status.
client_ops = {
@@ -214,17 +261,17 @@ shared_examples 'basic GRPC message delivery is OK' do
CallOps::RECV_MESSAGE => nil,
CallOps::RECV_STATUS_ON_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_close).to be true
- expect(batch_result.message).to eq reply_text
- expect(batch_result.status).to eq the_status
+ final_client_batch = call.run_batch(client_ops)
+ expect(final_client_batch.send_close).to be true
+ expect(final_client_batch.message).to eq reply_text
+ expect(final_client_batch.status).to eq the_status
# confirm the server can receive the client close.
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.send_close).to be true
+ final_server_batch = server_call.run_batch(server_ops)
+ expect(final_server_batch.send_close).to be true
end
def client_cancel_test(cancel_proc, expected_code,
@@ -240,9 +287,9 @@ shared_examples 'basic GRPC message delivery is OK' do
CallOps::SEND_INITIAL_METADATA => {},
CallOps::RECV_INITIAL_METADATA => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
- expect(batch_result.metadata).to eq({})
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.metadata).to eq({})
cancel_proc.call(call)
@@ -250,16 +297,16 @@ shared_examples 'basic GRPC message delivery is OK' do
server_ops = {
CallOps::RECV_CLOSE_ON_SERVER => nil
}
- svr_batch = server_call.run_batch(server_ops)
- expect(svr_batch.send_close).to be true
+ server_batch = server_call.run_batch(server_ops)
+ expect(server_batch.send_close).to be true
client_ops = {
CallOps::RECV_STATUS_ON_CLIENT => {}
}
- batch_result = call.run_batch(client_ops)
+ client_batch = call.run_batch(client_ops)
- expect(batch_result.status.code).to be expected_code
- expect(batch_result.status.details).to eq expected_details
+ expect(client_batch.status.code).to be expected_code
+ expect(client_batch.status.details).to eq expected_details
end
it 'clients can cancel a call on the server' do
@@ -325,10 +372,11 @@ shared_examples 'GRPC metadata delivery works OK' do
call = new_client_call
client_ops = {
- CallOps::SEND_INITIAL_METADATA => md
+ CallOps::SEND_INITIAL_METADATA => md,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
# confirm the server can receive the client metadata
rcv_thread.join
@@ -336,6 +384,21 @@ shared_examples 'GRPC metadata delivery works OK' do
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
+
+ # finish the call
+ final_server_batch = recvd_rpc.call.run_batch(
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_INITIAL_METADATA => nil,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status)
+ expect(final_server_batch.send_close).to be(true)
+ expect(final_server_batch.send_metadata).to be(true)
+ expect(final_server_batch.send_status).to be(true)
+
+ final_client_batch = call.run_batch(
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.status.code).to eq(0)
end
end
end
@@ -381,6 +444,9 @@ shared_examples 'GRPC metadata delivery works OK' do
recvd_rpc.call.run_batch(server_ops)
end
expect(&blk).to raise_error
+
+ # cancel the call so the server can shut down immediately
+ call.cancel
end
end
@@ -394,25 +460,37 @@ shared_examples 'GRPC metadata delivery works OK' do
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
- CallOps::SEND_INITIAL_METADATA => nil
+ CallOps::SEND_INITIAL_METADATA => nil,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- call.run_batch(client_ops)
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
- CallOps::SEND_INITIAL_METADATA => nil
+ # receive close and send status to finish the call
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_INITIAL_METADATA => nil,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
- server_call.run_batch(server_ops)
+ srv_batch = server_call.run_batch(server_ops)
+ expect(srv_batch.send_close).to be true
+ expect(srv_batch.send_metadata).to be true
+ expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
- CallOps::RECV_INITIAL_METADATA => nil
+ CallOps::RECV_INITIAL_METADATA => nil,
+ # receive status to finish the call
+ CallOps::RECV_STATUS_ON_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
- expect(batch_result.metadata).to eq({})
+ final_client_batch = call.run_batch(client_ops)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.status.code).to eq(0)
end
it 'sends all the pairs when keys and values are valid' do
@@ -426,26 +504,36 @@ shared_examples 'GRPC metadata delivery works OK' do
# client signals that it's done sending metadata to allow server to
# respond
client_ops = {
- CallOps::SEND_INITIAL_METADATA => nil
+ CallOps::SEND_INITIAL_METADATA => nil,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil
}
- call.run_batch(client_ops)
+ client_batch = call.run_batch(client_ops)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_close).to be true
# server gets the invocation but sends no metadata back
rcv_thread.join
expect(recvd_rpc).to_not eq nil
server_call = recvd_rpc.call
server_ops = {
- CallOps::SEND_INITIAL_METADATA => md
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_INITIAL_METADATA => md,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status
}
- server_call.run_batch(server_ops)
+ srv_batch = server_call.run_batch(server_ops)
+ expect(srv_batch.send_close).to be true
+ expect(srv_batch.send_metadata).to be true
+ expect(srv_batch.send_status).to be true
# client receives nothing as expected
client_ops = {
- CallOps::RECV_INITIAL_METADATA => nil
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil
}
- batch_result = call.run_batch(client_ops)
+ final_client_batch = call.run_batch(client_ops)
replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }]
- expect(batch_result.metadata).to eq(replace_symbols)
+ expect(final_client_batch.metadata).to eq(replace_symbols)
+ expect(final_client_batch.status.code).to eq(0)
end
end
end
@@ -510,8 +598,7 @@ describe 'the secure http client/server' do
initial_md_key = 'k2'
initial_md_val = 'v2'
- initial_md = {}
- initial_md[initial_md_key] = initial_md_val
+ initial_md = { initial_md_key => initial_md_val }
expected_md = creds_update_md.clone
fail 'bad test param' unless expected_md[initial_md_key].nil?
expected_md[initial_md_key] = initial_md_val
@@ -523,11 +610,12 @@ describe 'the secure http client/server' do
call = new_client_call
call.set_credentials! call_creds
- client_ops = {
- CallOps::SEND_INITIAL_METADATA => initial_md
- }
- batch_result = call.run_batch(client_ops)
- expect(batch_result.send_metadata).to be true
+
+ client_batch = call.run_batch(
+ CallOps::SEND_INITIAL_METADATA => initial_md,
+ CallOps::SEND_CLOSE_FROM_CLIENT => nil)
+ expect(client_batch.send_metadata).to be true
+ expect(client_batch.send_close).to be true
# confirm the server can receive the client metadata
rcv_thread.join
@@ -535,6 +623,24 @@ describe 'the secure http client/server' do
recvd_md = recvd_rpc.metadata
replace_symbols = Hash[expected_md.each_pair.collect { |x, y| [x.to_s, y] }]
expect(recvd_md).to eq(recvd_md.merge(replace_symbols))
+
+ credentials_update_test_finish_call(call, recvd_rpc.call)
+ end
+
+ def credentials_update_test_finish_call(client_call, server_call)
+ final_server_batch = server_call.run_batch(
+ CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_INITIAL_METADATA => nil,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status)
+ expect(final_server_batch.send_close).to be(true)
+ expect(final_server_batch.send_metadata).to be(true)
+ expect(final_server_batch.send_status).to be(true)
+
+ final_client_batch = client_call.run_batch(
+ CallOps::RECV_INITIAL_METADATA => nil,
+ CallOps::RECV_STATUS_ON_CLIENT => nil)
+ expect(final_client_batch.metadata).to eq({})
+ expect(final_client_batch.status.code).to eq(0)
end
it 'modifies metadata with CallCredentials' do
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index ec0c294174..120acc35af 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-require 'grpc'
+require 'spec_helper'
include GRPC::Core::StatusCodes
@@ -22,6 +22,21 @@ describe GRPC::ActiveCall do
CallOps = GRPC::Core::CallOps
WriteFlags = GRPC::Core::WriteFlags
+ def ok_status
+ Struct::Status.new(OK, 'OK')
+ end
+
+ def send_and_receive_close_and_status(client_call, server_call)
+ client_call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
+ server_call.run_batch(CallOps::RECV_CLOSE_ON_SERVER => nil,
+ CallOps::SEND_STATUS_FROM_SERVER => ok_status)
+ client_call.run_batch(CallOps::RECV_STATUS_ON_CLIENT => nil)
+ end
+
+ def inner_call_of_active_call(active_call)
+ active_call.instance_variable_get(:@call)
+ end
+
before(:each) do
@pass_through = proc { |x| x }
host = '0.0.0.0:0'
@@ -67,16 +82,26 @@ describe GRPC::ActiveCall do
end
end
end
+
+ describe '#interceptable' do
+ it 'exposes a fixed subset of the ActiveCall.methods' do
+ want = %w(deadline)
+ v = @client_call.interceptable
+ want.each do |w|
+ expect(v.methods.include?(w))
+ end
+ end
+ end
end
describe '#remote_send' do
- it 'allows a client to send a payload to the server' do
+ it 'allows a client to send a payload to the server', test: true do
call = make_test_call
ActiveCall.client_invoke(call)
- @client_call = ActiveCall.new(call, @pass_through,
- @pass_through, deadline)
+ client_call = ActiveCall.new(call, @pass_through,
+ @pass_through, deadline)
msg = 'message is a string'
- @client_call.remote_send(msg)
+ client_call.remote_send(msg)
# check that server rpc new was received
recvd_rpc = @server.request_call
@@ -86,8 +111,13 @@ describe GRPC::ActiveCall do
# Accept the call, and verify that the server reads the response ok.
server_call = ActiveCall.new(recvd_call, @pass_through,
@pass_through, deadline,
- metadata_received: true)
+ metadata_received: true,
+ started: false)
expect(server_call.remote_read).to eq(msg)
+ # finish the call
+ server_call.send_initial_metadata
+ call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
+ send_and_receive_close_and_status(call, recvd_call)
end
it 'marshals the payload using the marshal func' do
@@ -109,6 +139,9 @@ describe GRPC::ActiveCall do
@pass_through, deadline,
metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg)
+ # finish the call
+ call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
+ send_and_receive_close_and_status(call, recvd_call)
end
TEST_WRITE_FLAGS = [WriteFlags::BUFFER_HINT, WriteFlags::NO_COMPRESS]
@@ -136,6 +169,9 @@ describe GRPC::ActiveCall do
@pass_through, deadline,
metadata_received: true)
expect(server_call.remote_read).to eq('marshalled:' + msg)
+ # finish the call
+ server_call.send_status(OK, '', true)
+ client_call.receive_and_check_status
end
end
end
@@ -177,7 +213,6 @@ describe GRPC::ActiveCall do
@pass_through,
@pass_through,
deadline)
-
expect(@client_call.metadata_sent).to eql(true)
expect(call).to(
receive(:run_batch).with(hash_including(
@@ -291,6 +326,10 @@ describe GRPC::ActiveCall do
expect(recvd_rpc.metadata).to_not be_nil
expect(recvd_rpc.metadata['k1']).to eq('v1')
expect(recvd_rpc.metadata['k2']).to eq('v2')
+ # finish the call
+ recvd_call.run_batch(CallOps::SEND_INITIAL_METADATA => {})
+ call.run_batch(CallOps::RECV_INITIAL_METADATA => nil)
+ send_and_receive_close_and_status(call, recvd_call)
end
end
@@ -324,6 +363,8 @@ describe GRPC::ActiveCall do
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
+ send_and_receive_close_and_status(
+ call, inner_call_of_active_call(server_call))
end
it 'saves no metadata when the server adds no metadata' do
@@ -338,6 +379,8 @@ describe GRPC::ActiveCall do
expect(client_call.metadata).to be_nil
client_call.remote_read
expect(client_call.metadata).to eq({})
+ send_and_receive_close_and_status(
+ call, inner_call_of_active_call(server_call))
end
it 'saves metadata add by the server' do
@@ -353,6 +396,8 @@ describe GRPC::ActiveCall do
client_call.remote_read
expected = { 'k1' => 'v1', 'k2' => 'v2' }
expect(client_call.metadata).to eq(expected)
+ send_and_receive_close_and_status(
+ call, inner_call_of_active_call(server_call))
end
it 'get a status from server when nothing else sent from server' do
@@ -409,6 +454,8 @@ describe GRPC::ActiveCall do
server_call = expect_server_to_receive(msg)
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('unmarshalled:server_response')
+ send_and_receive_close_and_status(
+ call, inner_call_of_active_call(server_call))
end
end
@@ -418,9 +465,11 @@ describe GRPC::ActiveCall do
client_call = ActiveCall.new(call, @pass_through,
@pass_through, deadline)
expect(client_call.each_remote_read).to be_a(Enumerator)
+ # finish the call
+ client_call.cancel
end
- it 'the returns an enumerator that can read n responses' do
+ it 'the returned enumerator can read n responses' do
call = make_test_call
ActiveCall.client_invoke(call)
client_call = ActiveCall.new(call, @pass_through,
@@ -435,6 +484,8 @@ describe GRPC::ActiveCall do
server_call.remote_send(reply)
expect(e.next).to eq(reply)
end
+ send_and_receive_close_and_status(
+ call, inner_call_of_active_call(server_call))
end
it 'the returns an enumerator that stops after an OK Status' do
@@ -453,7 +504,7 @@ describe GRPC::ActiveCall do
server_call.remote_send(reply)
expect(e.next).to eq(reply)
end
- server_call.send_status(OK, 'OK')
+ server_call.send_status(OK, 'OK', true)
expect { e.next }.to raise_error(StopIteration)
end
end
@@ -568,9 +619,11 @@ describe GRPC::ActiveCall do
msgs
end
+ int_ctx = GRPC::InterceptionContext.new
+
@server_thread = Thread.new do
@server_call.run_server_bidi(
- fake_gen_each_reply_with_no_call_param)
+ fake_gen_each_reply_with_no_call_param, int_ctx)
@server_call.send_status(@server_status)
end
end
@@ -583,10 +636,11 @@ describe GRPC::ActiveCall do
call_param.send_initial_metadata
msgs
end
+ int_ctx = GRPC::InterceptionContext.new
@server_thread = Thread.new do
@server_call.run_server_bidi(
- fake_gen_each_reply_with_call_param)
+ fake_gen_each_reply_with_call_param, int_ctx)
@server_call.send_status(@server_status)
end
end
diff --git a/src/ruby/spec/generic/client_interceptors_spec.rb b/src/ruby/spec/generic/client_interceptors_spec.rb
new file mode 100644
index 0000000000..f292715e4d
--- /dev/null
+++ b/src/ruby/spec/generic/client_interceptors_spec.rb
@@ -0,0 +1,153 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+require 'spec_helper'
+
+describe 'Client Interceptors' do
+ let(:interceptor) { TestClientInterceptor.new }
+ let(:interceptors_opts) { { interceptors: [interceptor] } }
+ let(:request) { EchoMsg.new }
+ let(:service) { EchoService }
+
+ before(:each) do
+ build_rpc_server
+ end
+
+ context 'when a client interceptor is added' do
+ context 'with a request/response call' do
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:request_response)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response)
+ .once.and_call_original
+ expect(stub.an_rpc(request)).to be_a(EchoMsg)
+ end
+ end
+
+ it 'can modify outgoing metadata', server: true do
+ expect(interceptor).to receive(:request_response)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response)
+ .with(request, metadata: { 'foo' => 'bar_from_request_response' })
+ .once.and_call_original
+ expect(stub.an_rpc(request)).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ context 'with a client streaming call' do
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:client_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer)
+ .once.and_call_original
+ requests = [EchoMsg.new, EchoMsg.new]
+ expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
+ end
+ end
+
+ it 'can modify outgoing metadata', server: true do
+ expect(interceptor).to receive(:client_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ requests = [EchoMsg.new, EchoMsg.new]
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer)
+ .with(requests, metadata: { 'foo' => 'bar_from_client_streamer' })
+ .once.and_call_original
+ expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ context 'with a server streaming call' do
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:server_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ request = EchoMsg.new
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer)
+ .once.and_call_original
+ responses = stub.a_server_streaming_rpc(request)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ it 'can modify outgoing metadata', server: true do
+ expect(interceptor).to receive(:server_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ request = EchoMsg.new
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer)
+ .with(request, metadata: { 'foo' => 'bar_from_server_streamer' })
+ .once.and_call_original
+ responses = stub.a_server_streaming_rpc(request)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+ end
+
+ context 'with a bidi call' do
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:bidi_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer)
+ .once.and_call_original
+ requests = [EchoMsg.new, EchoMsg.new]
+ responses = stub.a_bidi_rpc(requests)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ it 'can modify outgoing metadata', server: true do
+ expect(interceptor).to receive(:bidi_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub, opts: interceptors_opts)
+ requests = [EchoMsg.new, EchoMsg.new]
+ expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer)
+ .with(requests, metadata: { 'foo' => 'bar_from_bidi_streamer' })
+ .once.and_call_original
+ responses = stub.a_bidi_rpc(requests)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/src/ruby/spec/generic/interceptor_registry_spec.rb b/src/ruby/spec/generic/interceptor_registry_spec.rb
new file mode 100644
index 0000000000..f93f5cec09
--- /dev/null
+++ b/src/ruby/spec/generic/interceptor_registry_spec.rb
@@ -0,0 +1,65 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+require 'spec_helper'
+
+describe GRPC::InterceptorRegistry do
+ let(:server) { RpcServer.new }
+ let(:interceptor) { TestServerInterceptor.new }
+ let(:interceptors) { [interceptor] }
+ let(:registry) { described_class.new(interceptors) }
+
+ describe 'initialization' do
+ subject { registry }
+
+ context 'with an interceptor extending GRPC::ServerInterceptor' do
+ it 'should add the interceptor to the registry' do
+ subject
+ is = registry.instance_variable_get('@interceptors')
+ expect(is.count).to eq 1
+ expect(is.first).to eq interceptor
+ end
+ end
+
+ context 'with multiple interceptors' do
+ let(:interceptor2) { TestServerInterceptor.new }
+ let(:interceptor3) { TestServerInterceptor.new }
+ let(:interceptors) { [interceptor, interceptor2, interceptor3] }
+
+ it 'should maintain order of insertion when iterated against' do
+ subject
+ is = registry.instance_variable_get('@interceptors')
+ expect(is.count).to eq 3
+ is.each_with_index do |i, idx|
+ case idx
+ when 0
+ expect(i).to eq interceptor
+ when 1
+ expect(i).to eq interceptor2
+ when 2
+ expect(i).to eq interceptor3
+ end
+ end
+ end
+ end
+
+ context 'with an interceptor not extending GRPC::ServerInterceptor' do
+ let(:interceptor) { Class }
+ let(:err) { GRPC::InterceptorRegistry::DescendantError }
+
+ it 'should raise an InvalidArgument exception' do
+ expect { subject }.to raise_error(err)
+ end
+ end
+ end
+end
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index b887eaaf4e..05059fbecf 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -11,8 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-require 'grpc'
+require 'spec_helper'
def load_test_certs
test_root = File.join(File.dirname(File.dirname(__FILE__)), 'testdata')
@@ -28,17 +27,6 @@ def check_md(wanted_md, received_md)
end
end
-# A test message
-class EchoMsg
- def self.marshal(_o)
- ''
- end
-
- def self.unmarshal(_o)
- EchoMsg.new
- end
-end
-
# A test service with no methods.
class EmptyService
include GRPC::GenericService
@@ -50,27 +38,6 @@ class NoRpcImplementation
rpc :an_rpc, EchoMsg, EchoMsg
end
-# A test service with an echo implementation.
-class EchoService
- include GRPC::GenericService
- rpc :an_rpc, EchoMsg, EchoMsg
- attr_reader :received_md
-
- def initialize(**kw)
- @trailing_metadata = kw
- @received_md = []
- end
-
- def an_rpc(req, call)
- GRPC.logger.info('echo service received a request')
- call.output_metadata.update(@trailing_metadata)
- @received_md << call.metadata unless call.metadata.nil?
- req
- end
-end
-
-EchoStub = EchoService.rpc_stub_class
-
# A test service with an implementation that fails with BadStatus
class FailingService
include GRPC::GenericService
diff --git a/src/ruby/spec/generic/server_interceptors_spec.rb b/src/ruby/spec/generic/server_interceptors_spec.rb
new file mode 100644
index 0000000000..eb86686084
--- /dev/null
+++ b/src/ruby/spec/generic/server_interceptors_spec.rb
@@ -0,0 +1,218 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+require 'spec_helper'
+
+describe 'Server Interceptors' do
+ let(:interceptor) { TestServerInterceptor.new }
+ let(:request) { EchoMsg.new }
+ let(:trailing_metadata) { {} }
+ let(:service) { EchoService.new(trailing_metadata) }
+ let(:interceptors) { [] }
+
+ before(:each) do
+ build_rpc_server(server_opts: { interceptors: interceptors })
+ end
+
+ context 'when a server interceptor is added' do
+ let(:interceptors) { [interceptor] }
+ let(:client_metadata) { { client_md: 'test' } }
+ let(:client_call_opts) { { metadata: client_metadata, return_op: true } }
+
+ context 'with a request/response call' do
+ let(:trailing_metadata) { { server_om: 'from_request_response' } }
+
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:request_response)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect(stub.an_rpc(request)).to be_a(EchoMsg)
+ end
+ end
+
+ it 'can modify trailing metadata', server: true do
+ expect(interceptor).to receive(:request_response)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect_any_instance_of(GRPC::ActiveCall).to(
+ receive(:request_response).with(request, metadata: client_metadata)
+ .once.and_call_original
+ )
+ op = stub.an_rpc(request, client_call_opts)
+ msg = op.execute
+ expect(op.trailing_metadata).to eq(
+ 'interc' => 'from_request_response',
+ 'server_om' => 'from_request_response'
+ )
+ expect(msg).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ context 'with a client streaming call' do
+ let(:trailing_metadata) { { server_om: 'from_client_streamer' } }
+ let(:requests) { [EchoMsg.new, EchoMsg.new] }
+
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:client_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg)
+ end
+ end
+
+ it 'can modify trailing metadata', server: true do
+ expect(interceptor).to receive(:client_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect_any_instance_of(GRPC::ActiveCall).to(
+ receive(:client_streamer).with(requests)
+ .once.and_call_original
+ )
+ op = stub.a_client_streaming_rpc(requests, client_call_opts)
+ msg = op.execute
+ expect(op.trailing_metadata).to eq(
+ 'interc' => 'from_client_streamer',
+ 'server_om' => 'from_client_streamer'
+ )
+ expect(msg).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ context 'with a server streaming call' do
+ let(:trailing_metadata) { { server_om: 'from_server_streamer' } }
+ let(:request) { EchoMsg.new }
+
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:server_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ responses = stub.a_server_streaming_rpc(request)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ it 'can modify trailing metadata', server: true do
+ expect(interceptor).to receive(:server_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect_any_instance_of(GRPC::ActiveCall).to(
+ receive(:server_streamer).with(request)
+ .once.and_call_original
+ )
+ op = stub.a_server_streaming_rpc(request, client_call_opts)
+ responses = op.execute
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ expect(op.trailing_metadata).to eq(
+ 'interc' => 'from_server_streamer',
+ 'server_om' => 'from_server_streamer'
+ )
+ end
+ end
+ end
+
+ context 'with a bidi call' do
+ let(:trailing_metadata) { { server_om: 'from_bidi_streamer' } }
+ let(:requests) { [EchoMsg.new, EchoMsg.new] }
+
+ it 'should be called', server: true do
+ expect(interceptor).to receive(:bidi_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ responses = stub.a_bidi_rpc(requests)
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ it 'can modify trailing metadata', server: true do
+ expect(interceptor).to receive(:bidi_streamer)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect_any_instance_of(GRPC::ActiveCall).to(
+ receive(:bidi_streamer).with(requests)
+ .once.and_call_original
+ )
+ op = stub.a_bidi_rpc(requests, client_call_opts)
+ responses = op.execute
+ responses.each do |r|
+ expect(r).to be_a(EchoMsg)
+ end
+ expect(op.trailing_metadata).to eq(
+ 'interc' => 'from_bidi_streamer',
+ 'server_om' => 'from_bidi_streamer'
+ )
+ end
+ end
+ end
+ end
+
+ context 'when multiple interceptors are added' do
+ let(:interceptor2) { TestServerInterceptor.new }
+ let(:interceptor3) { TestServerInterceptor.new }
+ let(:interceptors) do
+ [
+ interceptor,
+ interceptor2,
+ interceptor3
+ ]
+ end
+
+ it 'each should be called', server: true do
+ expect(interceptor).to receive(:request_response)
+ .once.and_call_original
+ expect(interceptor2).to receive(:request_response)
+ .once.and_call_original
+ expect(interceptor3).to receive(:request_response)
+ .once.and_call_original
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect(stub.an_rpc(request)).to be_a(EchoMsg)
+ end
+ end
+ end
+
+ context 'when an interceptor is not added' do
+ it 'should not be called', server: true do
+ expect(interceptor).to_not receive(:call)
+
+ run_services_on_server(@server, services: [service]) do
+ stub = build_insecure_stub(EchoStub)
+ expect(stub.an_rpc(request)).to be_a(EchoMsg)
+ end
+ end
+ end
+end
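The wiring these specs exercise is the same in application code: server interceptors are passed to GRPC::RpcServer at construction, and client interceptors are passed to the generated stub. A minimal sketch using the test doubles defined in spec/support/services.rb:

server = GRPC::RpcServer.new(interceptors: [TestServerInterceptor.new])
port = server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
server.handle(EchoService.new)
server_thread = Thread.new { server.run }
server.wait_till_running

stub = EchoStub.new("0.0.0.0:#{port}", :this_channel_is_insecure,
                    interceptors: [TestClientInterceptor.new])
stub.an_rpc(EchoMsg.new)  # passes through both interceptors

server.stop
server_thread.join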
diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb
new file mode 100644
index 0000000000..6f2a06b1d9
--- /dev/null
+++ b/src/ruby/spec/google_rpc_status_utils_spec.rb
@@ -0,0 +1,292 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'grpc'
+require_relative '../lib/grpc/google_rpc_status_utils'
+require_relative '../pb/src/proto/grpc/testing/messages_pb'
+require 'google/protobuf/well_known_types'
+
+include GRPC::Core
+
+describe 'conversion from a status struct to a google protobuf status' do
+ it 'fails if the input is not a status struct' do
+ begin
+ GRPC::GoogleRpcStatusUtils.extract_google_rpc_status('string')
+ rescue => e
+ exception = e
+ end
+ expect(exception.is_a?(ArgumentError)).to be true
+ expect(exception.message.include?('bad type')).to be true
+ end
+
+ it 'returns nil if the header key is missing' do
+ status = Struct::Status.new(1, 'details', key: 'val')
+ expect(status.metadata.nil?).to be false
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status)).to be(nil)
+ end
+
+ it 'fails with some error if the header key fails to deserialize' do
+ status = Struct::Status.new(1, 'details',
+ 'grpc-status-details-bin' => 'string_val')
+ expect do
+ GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ end.to raise_error(StandardError)
+ end
+
+ it 'silently ignores erroneous mismatch between messages in '\
+ 'status struct and protobuf status' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'proto message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'struct message',
+ 'grpc-status-details-bin' => encoded_proto)
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(rpc_status).to eq(proto)
+ end
+
+ it 'silently ignores erroneous mismatch between codes in status struct '\
+ 'and protobuf status' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'matching message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(2, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(rpc_status).to eq(proto)
+ end
+
+ it 'can successfully convert a status struct into a google protobuf status '\
+ 'when there are no rpcstatus details' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'matching message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(out.code).to eq(1)
+ expect(out.message).to eq('matching message')
+ expect(out.details).to eq([])
+ end
+
+ it 'can successfully convert a status struct into a google protobuf '\
+ 'status when there are multiple rpcstatus details' do
+ simple_request_any = Google::Protobuf::Any.new
+ simple_request = Grpc::Testing::SimpleRequest.new(
+ payload: Grpc::Testing::Payload.new(body: 'request'))
+ simple_request_any.pack(simple_request)
+ simple_response_any = Google::Protobuf::Any.new
+ simple_response = Grpc::Testing::SimpleResponse.new(
+ payload: Grpc::Testing::Payload.new(body: 'response'))
+ simple_response_any.pack(simple_response)
+ payload_any = Google::Protobuf::Any.new
+ payload = Grpc::Testing::Payload.new(body: 'payload')
+ payload_any.pack(payload)
+ proto = Google::Rpc::Status.new(code: 1,
+ message: 'matching message',
+ details: [
+ simple_request_any,
+ simple_response_any,
+ payload_any
+ ])
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(out.code).to eq(1)
+ expect(out.message).to eq('matching message')
+ expect(out.details[0].unpack(
+ Grpc::Testing::SimpleRequest)).to eq(simple_request)
+ expect(out.details[1].unpack(
+ Grpc::Testing::SimpleResponse)).to eq(simple_response)
+ expect(out.details[2].unpack(
+ Grpc::Testing::Payload)).to eq(payload)
+ end
+end
+
+# Test message
+class EchoMsg
+ def self.marshal(_o)
+ ''
+ end
+
+ def self.unmarshal(_o)
+ EchoMsg.new
+ end
+end
+
+# A test service that fills in the "reserved" grpc-status-details-bin trailer,
+# for client-side testing of GoogleRpcStatus protobuf extraction from trailers.
+class GoogleRpcStatusTestService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+
+ def initialize(encoded_rpc_status)
+ @encoded_rpc_status = encoded_rpc_status
+ end
+
+ def an_rpc(_, _)
+ # TODO: create a server-side utility API for sending a google rpc status.
+ # Applications are not expected to set the grpc-status-details-bin
+ # ("grpc"-fixed and reserved for library use) manually.
+ # Doing so here is only for testing of the client-side api for extracting
+ # a google rpc status, which is useful
+ # when interacting with a server that does fill in this trailer.
+ fail GRPC::Unknown.new('test message',
+ 'grpc-status-details-bin' => @encoded_rpc_status)
+ end
+end
+
+GoogleRpcStatusTestStub = GoogleRpcStatusTestService.rpc_stub_class
+
+describe 'receiving a google rpc status from a remote endpoint' do
+ def start_server(encoded_rpc_status)
+ @srv = GRPC::RpcServer.new(pool_size: 1)
+ @server_port = @srv.add_http2_port('localhost:0',
+ :this_port_is_insecure)
+ @srv.handle(GoogleRpcStatusTestService.new(encoded_rpc_status))
+ @server_thd = Thread.new { @srv.run }
+ @srv.wait_till_running
+ end
+
+ def stop_server
+ expect(@srv.stopped?).to be(false)
+ @srv.stop
+ @server_thd.join
+ expect(@srv.stopped?).to be(true)
+ end
+
+ before(:each) do
+ simple_request_any = Google::Protobuf::Any.new
+ simple_request = Grpc::Testing::SimpleRequest.new(
+ payload: Grpc::Testing::Payload.new(body: 'request'))
+ simple_request_any.pack(simple_request)
+ simple_response_any = Google::Protobuf::Any.new
+ simple_response = Grpc::Testing::SimpleResponse.new(
+ payload: Grpc::Testing::Payload.new(body: 'response'))
+ simple_response_any.pack(simple_response)
+ payload_any = Google::Protobuf::Any.new
+ payload = Grpc::Testing::Payload.new(body: 'payload')
+ payload_any.pack(payload)
+ @expected_proto = Google::Rpc::Status.new(
+ code: StatusCodes::UNKNOWN,
+ message: 'test message',
+ details: [simple_request_any, simple_response_any, payload_any])
+ start_server(Google::Rpc::Status.encode(@expected_proto))
+ end
+
+ after(:each) do
+ stop_server
+ end
+
+ it 'should be able to extract a google rpc status from the '\
+ 'status struct taken from a BadStatus exception' do
+ stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ begin
+ stub.an_rpc(EchoMsg.new)
+ rescue GRPC::BadStatus => e
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ e.to_status)
+ end
+ expect(rpc_status).to eq(@expected_proto)
+ end
+
+ it 'should be able to extract a google rpc status from the '\
+ 'status struct taken from the op view of a call' do
+ stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ op = stub.an_rpc(EchoMsg.new, return_op: true)
+ begin
+ op.execute
+ rescue GRPC::BadStatus => e
+ status_from_exception = e.to_status
+ end
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ op.status)
+ expect(rpc_status).to eq(@expected_proto)
+ # "to_status" on the bad status should give the same result
+ # as "status" on the "op view".
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status_from_exception)).to eq(rpc_status)
+ end
+end
+
+# A test service that fails without explicitly setting the
+# grpc-status-details-bin trailer. Tests assumptions about the value
+# of grpc-status-details-bin on the client side when the trailer wasn't
+# set explicitly.
+class NoStatusDetailsBinTestService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+
+ def an_rpc(_, _)
+ fail GRPC::Unknown
+ end
+end
+
+NoStatusDetailsBinTestServiceStub = NoStatusDetailsBinTestService.rpc_stub_class
+
+describe 'when the endpoint does not send grpc-status-details-bin' do
+ def start_server
+ @srv = GRPC::RpcServer.new(pool_size: 1)
+ @server_port = @srv.add_http2_port('localhost:0',
+ :this_port_is_insecure)
+ @srv.handle(NoStatusDetailsBinTestService)
+ @server_thd = Thread.new { @srv.run }
+ @srv.wait_till_running
+ end
+
+ def stop_server
+ expect(@srv.stopped?).to be(false)
+ @srv.stop
+ @server_thd.join
+ expect(@srv.stopped?).to be(true)
+ end
+
+ before(:each) do
+ start_server
+ end
+
+ after(:each) do
+ stop_server
+ end
+
+ it 'should receive nil when we try to extract a google '\
+ 'rpc status from a BadStatus exception that did not have it' do
+ stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ begin
+ stub.an_rpc(EchoMsg.new)
+ rescue GRPC::Unknown => e
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ e.to_status)
+ end
+ expect(rpc_status).to be(nil)
+ end
+
+ it 'should receive nil when we try to extract a google '\
+ 'rpc status from an op view status object that did not have it' do
+ stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ op = stub.an_rpc(EchoMsg.new, return_op: true)
+ begin
+ op.execute
+ rescue GRPC::Unknown => e
+ status_from_exception = e.to_status
+ end
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status_from_exception)).to be(nil)
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ op.status)).to be nil
+ end
+end
diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb
index 6e1eba1945..8fe9e6e808 100644
--- a/src/ruby/spec/spec_helper.rb
+++ b/src/ruby/spec/spec_helper.rb
@@ -32,6 +32,9 @@ require 'rspec'
require 'logging'
require 'rspec/logging_helper'
+require_relative 'support/services'
+require_relative 'support/helpers'
+
# GRPC is the general RPC module
#
# Configure its logging for fine-grained log control during test runs
@@ -49,6 +52,7 @@ Logging.logger['GRPC::BidiCall'].level = :info
RSpec.configure do |config|
include RSpec::LoggingHelper
config.capture_log_messages # comment this out to see logs during test runs
+ include GRPC::Spec::Helpers
end
RSpec::Expectations.configuration.warn_about_potential_false_positives = false
diff --git a/src/ruby/spec/support/helpers.rb b/src/ruby/spec/support/helpers.rb
new file mode 100644
index 0000000000..65fffff9e7
--- /dev/null
+++ b/src/ruby/spec/support/helpers.rb
@@ -0,0 +1,73 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# GRPC contains the General RPC module.
+module GRPC
+ ##
+ # GRPC RSpec base module
+ #
+ module Spec
+ ##
+ # A module that is used for providing generic helpers across the
+ # GRPC test suite
+ #
+ module Helpers
+ # Shortcut syntax for a GRPC RPC Server
+ RpcServer = GRPC::RpcServer
+
+ ##
+ # Build an RPC server used for testing
+ #
+ def build_rpc_server(server_opts: {},
+ client_opts: {})
+ @server = RpcServer.new({ poll_period: 1 }.merge(server_opts))
+ @port = @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
+ @host = "0.0.0.0:#{@port}"
+ @client_opts = client_opts
+ @server
+ end
+
+ ##
+ # Run services on an RPC server, yielding to allow testing within
+ #
+ # @param [RpcServer] server
+ # @param [Array<Class>] services
+ #
+ def run_services_on_server(server, services: [])
+ services.each do |s|
+ server.handle(s)
+ end
+ t = Thread.new { server.run }
+ server.wait_till_running
+
+ yield
+
+ server.stop
+ t.join
+ end
+
+ ##
+ # Build an insecure stub from a given stub class
+ #
+ # @param [Class] klass
+ # @param [String] host
+ # @param [Hash] opts
+ #
+ def build_insecure_stub(klass, host: nil, opts: nil)
+ host ||= @host
+ opts ||= @client_opts
+ klass.new(host, :this_channel_is_insecure, **opts)
+ end
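+
+ # A usage sketch (not part of the original helpers) showing how these
+ # helpers are typically combined, assuming a service/stub pair such as
+ # EchoService and EchoStub from spec/support/services.rb:
+ #
+ #   server = build_rpc_server
+ #   run_services_on_server(server, services: [EchoService]) do
+ #     stub = build_insecure_stub(EchoStub)
+ #     stub.an_rpc(EchoMsg.new)
+ #   end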
+ end
+ end
+end
diff --git a/src/ruby/spec/support/services.rb b/src/ruby/spec/support/services.rb
new file mode 100644
index 0000000000..27cc8e61ac
--- /dev/null
+++ b/src/ruby/spec/support/services.rb
@@ -0,0 +1,147 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Test stubs for various scenarios
+require 'grpc'
+
+# A test message
+class EchoMsg
+ def self.marshal(_o)
+ ''
+ end
+
+ def self.unmarshal(_o)
+ EchoMsg.new
+ end
+end
+
+# A test service with an echo implementation.
+class EchoService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+ rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg
+ rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg)
+ rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
+ attr_reader :received_md
+
+ def initialize(**kw)
+ @trailing_metadata = kw
+ @received_md = []
+ end
+
+ def an_rpc(req, call)
+ GRPC.logger.info('echo service received a request')
+ call.output_metadata.update(@trailing_metadata)
+ @received_md << call.metadata unless call.metadata.nil?
+ req
+ end
+
+ def a_client_streaming_rpc(call)
+ # iterate through requests so the call can complete
+ call.output_metadata.update(@trailing_metadata)
+ call.each_remote_read.each { |r| p r }
+ EchoMsg.new
+ end
+
+ def a_server_streaming_rpc(_req, call)
+ call.output_metadata.update(@trailing_metadata)
+ [EchoMsg.new, EchoMsg.new]
+ end
+
+ def a_bidi_rpc(requests, call)
+ call.output_metadata.update(@trailing_metadata)
+ requests.each { |r| p r }
+ [EchoMsg.new, EchoMsg.new]
+ end
+end
+
+EchoStub = EchoService.rpc_stub_class
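+
+# EchoStub is the client stub class generated from EchoService; in specs it
+# is typically constructed against a local test server, e.g. (a sketch, with
+# `port` standing in for the test server's port):
+#   EchoStub.new("localhost:#{port}", :this_channel_is_insecure)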
+
+# For testing server interceptors
+class TestServerInterceptor < GRPC::ServerInterceptor
+ def request_response(request:, call:, method:)
+ p "Received request/response call at method #{method}" \
+ " with request #{request} for call #{call}"
+ call.output_metadata[:interc] = 'from_request_response'
+ p "[GRPC::Ok] (#{method.owner.name}.#{method.name})"
+ yield
+ end
+
+ def client_streamer(call:, method:)
+ call.output_metadata[:interc] = 'from_client_streamer'
+ call.each_remote_read.each do |r|
+ p "In interceptor: #{r}"
+ end
+ p "Received client streamer call at method #{method} for call #{call}"
+ yield
+ end
+
+ def server_streamer(request:, call:, method:)
+ p "Received server streamer call at method #{method} with request" \
+ " #{request} for call #{call}"
+ call.output_metadata[:interc] = 'from_server_streamer'
+ yield
+ end
+
+ def bidi_streamer(requests:, call:, method:)
+ requests.each do |r|
+ p "Bidi request: #{r}"
+ end
+ p "Received bidi streamer call at method #{method} with requests" \
+ " #{requests} for call #{call}"
+ call.output_metadata[:interc] = 'from_bidi_streamer'
+ yield
+ end
+end
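+
+# Sketch of typical registration (an assumption, not defined in this file):
+# server interceptors are passed to the server at construction time, e.g.
+#   GRPC::RpcServer.new(interceptors: [TestServerInterceptor.new])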
+
+# For testing client interceptors
+class TestClientInterceptor < GRPC::ClientInterceptor
+ def request_response(request:, call:, method:, metadata: {})
+ p "Intercepted request/response call at method #{method}" \
+ " with request #{request} for call #{call}" \
+ " and metadata: #{metadata}"
+ metadata['foo'] = 'bar_from_request_response'
+ yield
+ end
+
+ def client_streamer(requests:, call:, method:, metadata: {})
+ p "Received client streamer call at method #{method}" \
+ " with requests #{requests} for call #{call}" \
+ " and metadata: #{metadata}"
+ requests.each do |r|
+ p "In client interceptor: #{r}"
+ end
+ metadata['foo'] = 'bar_from_client_streamer'
+ yield
+ end
+
+ def server_streamer(request:, call:, method:, metadata: {})
+ p "Received server streamer call at method #{method}" \
+ " with request #{request} for call #{call}" \
+ " and metadata: #{metadata}"
+ metadata['foo'] = 'bar_from_server_streamer'
+ yield
+ end
+
+ def bidi_streamer(requests:, call:, method:, metadata: {})
+ p "Received bidi streamer call at method #{method}" \
+ "with requests #{requests} for call #{call}" \
+ " and metadata: #{metadata}"
+ requests.each do |r|
+ p "In client interceptor: #{r}"
+ end
+ metadata['foo'] = 'bar_from_bidi_streamer'
+ yield
+ end
+end
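+
+# Sketch of typical usage (an assumption, not defined in this file): client
+# interceptors are passed when constructing a stub, e.g.
+#   EchoStub.new(host, :this_channel_is_insecure,
+#                interceptors: [TestClientInterceptor.new])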
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index ea0c4ae56c..c584a7cf59 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -14,6 +14,6 @@
module GRPC
module Tools
- VERSION = '1.7.0.dev'
+ VERSION = '1.8.0.dev'
end
end