author    Nicolas Noble <nicolasnoble@users.noreply.github.com>    2017-11-06 12:36:46 -0800
committer GitHub <noreply@github.com>    2017-11-06 12:36:46 -0800
commit    e759d2ad7abdb0702970eeccc5f033ff4b2a4c7f (patch)
tree      b958e8ecb8856bdd1cf7fbfe35c98a0873b130fd /src/core
parent    4f22c919e1207af206fc83e7378f04e047f7c78d (diff)
parent    34992a63e0def16262d7fc9cac10ab527ac8eaa6 (diff)
Merge pull request #13255 from ctiller/50
Update clang-format to 5.0
Diffstat (limited to 'src/core')
-rw-r--r--src/core/ext/census/grpc_context.cc6
-rw-r--r--src/core/ext/filters/client_channel/channel_connectivity.cc73
-rw-r--r--src/core/ext/filters/client_channel/client_channel.cc505
-rw-r--r--src/core/ext/filters/client_channel/client_channel.h14
-rw-r--r--src/core/ext/filters/client_channel/client_channel_factory.h42
-rw-r--r--src/core/ext/filters/client_channel/client_channel_plugin.cc22
-rw-r--r--src/core/ext/filters/client_channel/connector.h40
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.cc87
-rw-r--r--src/core/ext/filters/client_channel/lb_policy.h134
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc53
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc528
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc20
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h18
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc28
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h2
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc108
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h40
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc166
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc210
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc108
-rw-r--r--src/core/ext/filters/client_channel/lb_policy/subchannel_list.h52
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_factory.h80
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.cc14
-rw-r--r--src/core/ext/filters/client_channel/lb_policy_registry.h6
-rw-r--r--src/core/ext/filters/client_channel/parse_address.cc42
-rw-r--r--src/core/ext/filters/client_channel/parse_address.h12
-rw-r--r--src/core/ext/filters/client_channel/resolver.cc34
-rw-r--r--src/core/ext/filters/client_channel/resolver.h48
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc164
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h18
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc96
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc180
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h30
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc42
-rw-r--r--src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc118
-rw-r--r--src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h2
-rw-r--r--src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc100
-rw-r--r--src/core/ext/filters/client_channel/resolver_factory.h38
-rw-r--r--src/core/ext/filters/client_channel/resolver_registry.cc58
-rw-r--r--src/core/ext/filters/client_channel/resolver_registry.h20
-rw-r--r--src/core/ext/filters/client_channel/subchannel.cc317
-rw-r--r--src/core/ext/filters/client_channel/subchannel.h114
-rw-r--r--src/core/ext/filters/client_channel/subchannel_index.cc83
-rw-r--r--src/core/ext/filters/client_channel/subchannel_index.h28
-rw-r--r--src/core/ext/filters/client_channel/uri_parser.cc36
-rw-r--r--src/core/ext/filters/client_channel/uri_parser.h20
-rw-r--r--src/core/ext/filters/http/client/http_client_filter.cc150
-rw-r--r--src/core/ext/filters/http/http_filters_plugin.cc32
-rw-r--r--src/core/ext/filters/http/message_compress/message_compress_filter.cc145
-rw-r--r--src/core/ext/filters/http/message_compress/message_compress_filter.h2
-rw-r--r--src/core/ext/filters/http/server/http_server_filter.cc134
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_filter.cc54
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_filter.h2
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc14
-rw-r--r--src/core/ext/filters/load_reporting/server_load_reporting_plugin.h10
-rw-r--r--src/core/ext/filters/workarounds/workaround_utils.cc12
-rw-r--r--src/core/ext/filters/workarounds/workaround_utils.h2
-rw-r--r--src/core/ext/transport/chttp2/alpn/alpn.cc6
-rw-r--r--src/core/ext/transport/chttp2/alpn/alpn.h4
-rw-r--r--src/core/ext/transport/chttp2/client/chttp2_connector.cc64
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create.cc47
-rw-r--r--src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc25
-rw-r--r--src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc84
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.cc103
-rw-r--r--src/core/ext/transport/chttp2/server/chttp2_server.h6
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc6
-rw-r--r--src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc18
-rw-r--r--src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc14
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.cc14
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_decoder.h14
-rw-r--r--src/core/ext/transport/chttp2/transport/bin_encoder.cc22
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.cc841
-rw-r--r--src/core/ext/transport/chttp2/transport/chttp2_transport.h12
-rw-r--r--src/core/ext/transport/chttp2/transport/flow_control.cc6
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.cc48
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_data.h34
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.cc32
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_goaway.h20
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.cc24
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_ping.h8
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.cc32
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_rst_stream.h16
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.cc32
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_settings.h18
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.cc32
-rw-r--r--src/core/ext/transport/chttp2/transport/frame_window_update.h12
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.cc133
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_encoder.h26
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.cc640
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_parser.h44
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.cc42
-rw-r--r--src/core/ext/transport/chttp2/transport/hpack_table.h22
-rw-r--r--src/core/ext/transport/chttp2/transport/http2_settings.cc2
-rw-r--r--src/core/ext/transport/chttp2/transport/http2_settings.h4
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.cc25
-rw-r--r--src/core/ext/transport/chttp2/transport/incoming_metadata.h20
-rw-r--r--src/core/ext/transport/chttp2/transport/internal.h336
-rw-r--r--src/core/ext/transport/chttp2/transport/parsing.cc196
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_lists.cc84
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.cc50
-rw-r--r--src/core/ext/transport/chttp2/transport/stream_map.h28
-rw-r--r--src/core/ext/transport/chttp2/transport/writing.cc102
-rw-r--r--src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc12
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.cc317
-rw-r--r--src/core/ext/transport/cronet/transport/cronet_transport.h6
-rw-r--r--src/core/ext/transport/inproc/inproc_transport.cc287
-rw-r--r--src/core/ext/transport/inproc/inproc_transport.h6
-rw-r--r--src/core/lib/backoff/backoff.cc16
-rw-r--r--src/core/lib/backoff/backoff.h12
-rw-r--r--src/core/lib/channel/channel_args.cc152
-rw-r--r--src/core/lib/channel/channel_args.h74
-rw-r--r--src/core/lib/channel/channel_stack.cc131
-rw-r--r--src/core/lib/channel/channel_stack.h144
-rw-r--r--src/core/lib/channel/channel_stack_builder.cc156
-rw-r--r--src/core/lib/channel/channel_stack_builder.h88
-rw-r--r--src/core/lib/channel/connected_channel.cc130
-rw-r--r--src/core/lib/channel/connected_channel.h8
-rw-r--r--src/core/lib/channel/context.h4
-rw-r--r--src/core/lib/channel/handshaker_factory.cc6
-rw-r--r--src/core/lib/channel/handshaker_factory.h20
-rw-r--r--src/core/lib/compression/compression.cc18
-rw-r--r--src/core/lib/compression/stream_compression.cc20
-rw-r--r--src/core/lib/compression/stream_compression.h38
-rw-r--r--src/core/lib/compression/stream_compression_gzip.cc50
-rw-r--r--src/core/lib/compression/stream_compression_identity.cc26
-rw-r--r--src/core/lib/debug/stats.cc36
-rw-r--r--src/core/lib/debug/stats.h18
-rw-r--r--src/core/lib/debug/stats_data.cc47
-rw-r--r--src/core/lib/debug/stats_data.h38
-rw-r--r--src/core/lib/debug/trace.cc40
-rw-r--r--src/core/lib/debug/trace.h6
-rw-r--r--src/core/lib/http/format_request.cc20
-rw-r--r--src/core/lib/http/format_request.h8
-rw-r--r--src/core/lib/http/httpcli.cc145
-rw-r--r--src/core/lib/http/httpcli.h64
-rw-r--r--src/core/lib/http/httpcli_security_connector.cc92
-rw-r--r--src/core/lib/http/parser.cc76
-rw-r--r--src/core/lib/http/parser.h38
-rw-r--r--src/core/lib/iomgr/closure.cc76
-rw-r--r--src/core/lib/iomgr/closure.h94
-rw-r--r--src/core/lib/iomgr/combiner.cc90
-rw-r--r--src/core/lib/iomgr/combiner.h14
-rw-r--r--src/core/lib/iomgr/endpoint.h70
-rw-r--r--src/core/lib/iomgr/endpoint_pair.h8
-rw-r--r--src/core/lib/iomgr/endpoint_pair_posix.cc6
-rw-r--r--src/core/lib/iomgr/endpoint_pair_uv.cc4
-rw-r--r--src/core/lib/iomgr/endpoint_pair_windows.cc12
-rw-r--r--src/core/lib/iomgr/error.cc218
-rw-r--r--src/core/lib/iomgr/error.h44
-rw-r--r--src/core/lib/iomgr/error_internal.h4
-rw-r--r--src/core/lib/iomgr/ev_epoll1_linux.cc230
-rw-r--r--src/core/lib/iomgr/ev_epoll1_linux.h2
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.cc402
-rw-r--r--src/core/lib/iomgr/ev_epollex_linux.h2
-rw-r--r--src/core/lib/iomgr/ev_epollsig_linux.cc409
-rw-r--r--src/core/lib/iomgr/ev_epollsig_linux.h8
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.cc397
-rw-r--r--src/core/lib/iomgr/ev_poll_posix.h4
-rw-r--r--src/core/lib/iomgr/ev_posix.cc114
-rw-r--r--src/core/lib/iomgr/ev_posix.h130
-rw-r--r--src/core/lib/iomgr/exec_ctx.cc32
-rw-r--r--src/core/lib/iomgr/exec_ctx.h26
-rw-r--r--src/core/lib/iomgr/executor.cc42
-rw-r--r--src/core/lib/iomgr/executor.h8
-rw-r--r--src/core/lib/iomgr/gethostname.h2
-rw-r--r--src/core/lib/iomgr/gethostname_fallback.cc2
-rw-r--r--src/core/lib/iomgr/gethostname_host_name_max.cc4
-rw-r--r--src/core/lib/iomgr/gethostname_sysconf.cc4
-rw-r--r--src/core/lib/iomgr/iocp_windows.cc14
-rw-r--r--src/core/lib/iomgr/iocp_windows.h4
-rw-r--r--src/core/lib/iomgr/iomgr.cc34
-rw-r--r--src/core/lib/iomgr/iomgr.h6
-rw-r--r--src/core/lib/iomgr/iomgr_internal.h10
-rw-r--r--src/core/lib/iomgr/load_file.cc16
-rw-r--r--src/core/lib/iomgr/load_file.h4
-rw-r--r--src/core/lib/iomgr/lockfree_event.cc32
-rw-r--r--src/core/lib/iomgr/lockfree_event.h18
-rw-r--r--src/core/lib/iomgr/network_status_tracker.cc4
-rw-r--r--src/core/lib/iomgr/network_status_tracker.h4
-rw-r--r--src/core/lib/iomgr/polling_entity.cc24
-rw-r--r--src/core/lib/iomgr/polling_entity.h26
-rw-r--r--src/core/lib/iomgr/pollset.h16
-rw-r--r--src/core/lib/iomgr/pollset_set.h30
-rw-r--r--src/core/lib/iomgr/pollset_uv.cc34
-rw-r--r--src/core/lib/iomgr/pollset_windows.cc36
-rw-r--r--src/core/lib/iomgr/pollset_windows.h8
-rw-r--r--src/core/lib/iomgr/resolve_address.h20
-rw-r--r--src/core/lib/iomgr/resolve_address_posix.cc60
-rw-r--r--src/core/lib/iomgr/resolve_address_uv.cc108
-rw-r--r--src/core/lib/iomgr/resolve_address_windows.cc62
-rw-r--r--src/core/lib/iomgr/resource_quota.cc288
-rw-r--r--src/core/lib/iomgr/resource_quota.h72
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.cc99
-rw-r--r--src/core/lib/iomgr/sockaddr_utils.h30
-rw-r--r--src/core/lib/iomgr/socket_factory_posix.cc40
-rw-r--r--src/core/lib/iomgr/socket_factory_posix.h30
-rw-r--r--src/core/lib/iomgr/socket_mutator.cc36
-rw-r--r--src/core/lib/iomgr/socket_mutator.h22
-rw-r--r--src/core/lib/iomgr/socket_utils.h2
-rw-r--r--src/core/lib/iomgr/socket_utils_common_posix.cc48
-rw-r--r--src/core/lib/iomgr/socket_utils_linux.cc6
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.cc6
-rw-r--r--src/core/lib/iomgr/socket_utils_posix.h36
-rw-r--r--src/core/lib/iomgr/socket_utils_uv.cc2
-rw-r--r--src/core/lib/iomgr/socket_utils_windows.cc4
-rw-r--r--src/core/lib/iomgr/socket_windows.cc36
-rw-r--r--src/core/lib/iomgr/socket_windows.h26
-rw-r--r--src/core/lib/iomgr/tcp_client.h10
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.cc96
-rw-r--r--src/core/lib/iomgr/tcp_client_posix.h6
-rw-r--r--src/core/lib/iomgr/tcp_client_uv.cc75
-rw-r--r--src/core/lib/iomgr/tcp_client_windows.cc76
-rw-r--r--src/core/lib/iomgr/tcp_posix.cc224
-rw-r--r--src/core/lib/iomgr/tcp_posix.h12
-rw-r--r--src/core/lib/iomgr/tcp_server.h46
-rw-r--r--src/core/lib/iomgr/tcp_server_posix.cc125
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix.h38
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_common.cc46
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc36
-rw-r--r--src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc4
-rw-r--r--src/core/lib/iomgr/tcp_server_uv.cc142
-rw-r--r--src/core/lib/iomgr/tcp_server_windows.cc156
-rw-r--r--src/core/lib/iomgr/tcp_uv.cc165
-rw-r--r--src/core/lib/iomgr/tcp_uv.h6
-rw-r--r--src/core/lib/iomgr/tcp_windows.cc146
-rw-r--r--src/core/lib/iomgr/tcp_windows.h8
-rw-r--r--src/core/lib/iomgr/timer.h16
-rw-r--r--src/core/lib/iomgr/timer_generic.cc100
-rw-r--r--src/core/lib/iomgr/timer_generic.h8
-rw-r--r--src/core/lib/iomgr/timer_heap.cc41
-rw-r--r--src/core/lib/iomgr/timer_heap.h16
-rw-r--r--src/core/lib/iomgr/timer_manager.cc24
-rw-r--r--src/core/lib/iomgr/timer_uv.cc36
-rw-r--r--src/core/lib/iomgr/timer_uv.h4
-rw-r--r--src/core/lib/iomgr/udp_server.cc131
-rw-r--r--src/core/lib/iomgr/udp_server.h34
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.cc38
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix.h12
-rw-r--r--src/core/lib/iomgr/unix_sockets_posix_noop.cc10
-rw-r--r--src/core/lib/iomgr/wakeup_fd_posix.cc10
-rw-r--r--src/core/lib/json/json_reader.cc36
-rw-r--r--src/core/lib/json/json_reader.h36
-rw-r--r--src/core/lib/json/json_string.cc102
-rw-r--r--src/core/lib/json/json_writer.cc40
-rw-r--r--src/core/lib/json/json_writer.h30
-rw-r--r--src/core/lib/profiling/basic_timers.cc58
-rw-r--r--src/core/lib/profiling/stap_timers.cc10
-rw-r--r--src/core/lib/profiling/timers.h12
-rw-r--r--src/core/lib/security/context/security_context.cc129
-rw-r--r--src/core/lib/security/context/security_context.h52
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.cc124
-rw-r--r--src/core/lib/security/credentials/composite/composite_credentials.h18
-rw-r--r--src/core/lib/security/credentials/credentials.cc120
-rw-r--r--src/core/lib/security/credentials/credentials.h150
-rw-r--r--src/core/lib/security/credentials/credentials_metadata.cc12
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.cc63
-rw-r--r--src/core/lib/security/credentials/fake/fake_credentials.h10
-rw-r--r--src/core/lib/security/credentials/google_default/credentials_generic.cc6
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.cc56
-rw-r--r--src/core/lib/security/credentials/google_default/google_default_credentials.h2
-rw-r--r--src/core/lib/security/credentials/iam/iam_credentials.cc32
-rw-r--r--src/core/lib/security/credentials/jwt/json_token.cc86
-rw-r--r--src/core/lib/security/credentials/jwt/json_token.h30
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.cc66
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_credentials.h6
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.cc278
-rw-r--r--src/core/lib/security/credentials/jwt/jwt_verifier.h58
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.cc185
-rw-r--r--src/core/lib/security/credentials/oauth2/oauth2_credentials.h40
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.cc73
-rw-r--r--src/core/lib/security/credentials/plugin/plugin_credentials.h12
-rw-r--r--src/core/lib/security/credentials/ssl/ssl_credentials.cc143
-rw-r--r--src/core/lib/security/credentials/ssl/ssl_credentials.h12
-rw-r--r--src/core/lib/security/transport/auth_filters.h8
-rw-r--r--src/core/lib/security/transport/client_auth_filter.cc172
-rw-r--r--src/core/lib/security/transport/lb_targets_info.cc26
-rw-r--r--src/core/lib/security/transport/lb_targets_info.h6
-rw-r--r--src/core/lib/security/transport/secure_endpoint.cc141
-rw-r--r--src/core/lib/security/transport/secure_endpoint.h8
-rw-r--r--src/core/lib/security/transport/security_connector.cc497
-rw-r--r--src/core/lib/security/transport/security_connector.h162
-rw-r--r--src/core/lib/security/transport/security_handshaker.cc200
-rw-r--r--src/core/lib/security/transport/security_handshaker.h6
-rw-r--r--src/core/lib/security/transport/server_auth_filter.cc120
-rw-r--r--src/core/lib/security/transport/tsi_error.cc2
-rw-r--r--src/core/lib/security/transport/tsi_error.h2
-rw-r--r--src/core/lib/security/util/json_util.cc14
-rw-r--r--src/core/lib/security/util/json_util.h8
-rw-r--r--src/core/lib/slice/b64.cc30
-rw-r--r--src/core/lib/slice/b64.h8
-rw-r--r--src/core/lib/slice/percent_encoding.cc30
-rw-r--r--src/core/lib/slice/percent_encoding.h6
-rw-r--r--src/core/lib/slice/slice.cc104
-rw-r--r--src/core/lib/slice/slice_buffer.cc72
-rw-r--r--src/core/lib/slice/slice_hash_table.h22
-rw-r--r--src/core/lib/slice/slice_intern.cc64
-rw-r--r--src/core/lib/slice/slice_internal.h12
-rw-r--r--src/core/lib/slice/slice_string_helpers.cc18
-rw-r--r--src/core/lib/slice/slice_string_helpers.h6
-rw-r--r--src/core/lib/support/alloc.cc30
-rw-r--r--src/core/lib/support/arena.cc22
-rw-r--r--src/core/lib/support/arena.h6
-rw-r--r--src/core/lib/support/atm.cc2
-rw-r--r--src/core/lib/support/avl.cc107
-rw-r--r--src/core/lib/support/cmdline.cc110
-rw-r--r--src/core/lib/support/env.h6
-rw-r--r--src/core/lib/support/env_linux.cc18
-rw-r--r--src/core/lib/support/env_posix.cc8
-rw-r--r--src/core/lib/support/env_windows.cc8
-rw-r--r--src/core/lib/support/histogram.cc51
-rw-r--r--src/core/lib/support/host_port.cc14
-rw-r--r--src/core/lib/support/log.cc12
-rw-r--r--src/core/lib/support/log_android.cc14
-rw-r--r--src/core/lib/support/log_linux.cc14
-rw-r--r--src/core/lib/support/log_posix.cc19
-rw-r--r--src/core/lib/support/log_windows.cc14
-rw-r--r--src/core/lib/support/mpscq.cc24
-rw-r--r--src/core/lib/support/mpscq.h16
-rw-r--r--src/core/lib/support/murmur_hash.cc8
-rw-r--r--src/core/lib/support/murmur_hash.h2
-rw-r--r--src/core/lib/support/spinlock.h4
-rw-r--r--src/core/lib/support/stack_lockfree.cc16
-rw-r--r--src/core/lib/support/stack_lockfree.h8
-rw-r--r--src/core/lib/support/string.cc96
-rw-r--r--src/core/lib/support/string.h42
-rw-r--r--src/core/lib/support/string_posix.cc4
-rw-r--r--src/core/lib/support/string_util_windows.cc4
-rw-r--r--src/core/lib/support/string_windows.cc4
-rw-r--r--src/core/lib/support/subprocess_posix.cc20
-rw-r--r--src/core/lib/support/subprocess_windows.cc18
-rw-r--r--src/core/lib/support/sync.cc38
-rw-r--r--src/core/lib/support/sync_windows.cc28
-rw-r--r--src/core/lib/support/thd_posix.cc14
-rw-r--r--src/core/lib/support/thd_windows.cc20
-rw-r--r--src/core/lib/support/time_posix.cc2
-rw-r--r--src/core/lib/support/time_precise.cc8
-rw-r--r--src/core/lib/support/time_precise.h2
-rw-r--r--src/core/lib/support/tls_pthread.cc4
-rw-r--r--src/core/lib/support/tmpfile.h2
-rw-r--r--src/core/lib/support/tmpfile_msys.cc4
-rw-r--r--src/core/lib/support/tmpfile_posix.cc6
-rw-r--r--src/core/lib/support/tmpfile_windows.cc4
-rw-r--r--src/core/lib/support/wrap_memcpy.cc4
-rw-r--r--src/core/lib/surface/alarm.cc40
-rw-r--r--src/core/lib/surface/byte_buffer.cc24
-rw-r--r--src/core/lib/surface/byte_buffer_reader.cc18
-rw-r--r--src/core/lib/surface/call.cc538
-rw-r--r--src/core/lib/surface/call.h66
-rw-r--r--src/core/lib/surface/call_log_batch.cc16
-rw-r--r--src/core/lib/surface/call_test_only.h10
-rw-r--r--src/core/lib/surface/channel.cc133
-rw-r--r--src/core/lib/surface/channel.h42
-rw-r--r--src/core/lib/surface/channel_init.cc24
-rw-r--r--src/core/lib/surface/channel_init.h12
-rw-r--r--src/core/lib/surface/channel_ping.cc22
-rw-r--r--src/core/lib/surface/channel_stack_type.cc2
-rw-r--r--src/core/lib/surface/channel_stack_type.h2
-rw-r--r--src/core/lib/surface/completion_queue.cc488
-rw-r--r--src/core/lib/surface/completion_queue.h42
-rw-r--r--src/core/lib/surface/event_string.cc14
-rw-r--r--src/core/lib/surface/event_string.h2
-rw-r--r--src/core/lib/surface/init.cc18
-rw-r--r--src/core/lib/surface/init_secure.cc8
-rw-r--r--src/core/lib/surface/lame_client.cc66
-rw-r--r--src/core/lib/surface/server.cc524
-rw-r--r--src/core/lib/surface/server.h26
-rw-r--r--src/core/lib/surface/validate_metadata.cc18
-rw-r--r--src/core/lib/surface/validate_metadata.h4
-rw-r--r--src/core/lib/surface/version.cc4
-rw-r--r--src/core/lib/transport/bdp_estimator.cc9
-rw-r--r--src/core/lib/transport/bdp_estimator.h6
-rw-r--r--src/core/lib/transport/byte_stream.cc102
-rw-r--r--src/core/lib/transport/byte_stream.h66
-rw-r--r--src/core/lib/transport/connectivity_state.cc42
-rw-r--r--src/core/lib/transport/connectivity_state.h40
-rw-r--r--src/core/lib/transport/error_utils.cc20
-rw-r--r--src/core/lib/transport/error_utils.h10
-rw-r--r--src/core/lib/transport/metadata.cc136
-rw-r--r--src/core/lib/transport/metadata.h30
-rw-r--r--src/core/lib/transport/metadata_batch.cc118
-rw-r--r--src/core/lib/transport/metadata_batch.h80
-rw-r--r--src/core/lib/transport/pid_controller.cc2
-rw-r--r--src/core/lib/transport/static_metadata.cc4
-rw-r--r--src/core/lib/transport/static_metadata.h46
-rw-r--r--src/core/lib/transport/status_conversion.cc2
-rw-r--r--src/core/lib/transport/status_conversion.h2
-rw-r--r--src/core/lib/transport/timeout_encoding.cc20
-rw-r--r--src/core/lib/transport/timeout_encoding.h4
-rw-r--r--src/core/lib/transport/transport.cc132
-rw-r--r--src/core/lib/transport/transport.h156
-rw-r--r--src/core/lib/transport/transport_impl.h38
-rw-r--r--src/core/lib/transport/transport_op_string.cc36
-rw-r--r--src/core/tsi/fake_transport_security.cc216
-rw-r--r--src/core/tsi/fake_transport_security.h10
-rw-r--r--src/core/tsi/gts_transport_security.cc2
-rw-r--r--src/core/tsi/gts_transport_security.h6
-rw-r--r--src/core/tsi/ssl_transport_security.cc407
-rw-r--r--src/core/tsi/ssl_transport_security.h50
-rw-r--r--src/core/tsi/transport_security.cc90
-rw-r--r--src/core/tsi/transport_security.h108
-rw-r--r--src/core/tsi/transport_security_adapter.cc90
-rw-r--r--src/core/tsi/transport_security_adapter.h4
-rw-r--r--src/core/tsi/transport_security_grpc.cc22
-rw-r--r--src/core/tsi/transport_security_grpc.h38
-rw-r--r--src/core/tsi/transport_security_interface.h80
407 files changed, 12521 insertions, 12476 deletions
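The changes below are almost entirely mechanical: clang-format 5.0, with the pointer alignment used for this tree, attaches '*' and '&' to the type rather than to the variable name, and re-wraps a few argument lists that no longer fit the line limit. A minimal stand-alone sketch of the effect, based on the declarations in the first hunk (the typedefs are added here only so the snippet compiles on its own; in the tree these types come from the gRPC headers):

/* Illustrative sketch of the restyling applied throughout this diff. */
typedef struct grpc_call grpc_call;            /* opaque, for self-containment only */
typedef struct census_context census_context;  /* opaque, for self-containment only */

/* Old style (pre-5.0 configuration): the '*' binds to the name.
 *   void grpc_census_call_set_context(grpc_call *call,
 *                                     census_context *context);
 */

/* New style (clang-format 5.0): the '*' binds to the type, in declarations
 * and in casts such as (census_context*) alike. */
void grpc_census_call_set_context(grpc_call* call, census_context* context);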
diff --git a/src/core/ext/census/grpc_context.cc b/src/core/ext/census/grpc_context.cc
index 34eafcab8e..fb4fcb34a8 100644
--- a/src/core/ext/census/grpc_context.cc
+++ b/src/core/ext/census/grpc_context.cc
@@ -21,7 +21,7 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
-void grpc_census_call_set_context(grpc_call *call, census_context *context) {
+void grpc_census_call_set_context(grpc_call* call, census_context* context) {
GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2,
(call, context));
if (context != NULL) {
@@ -29,7 +29,7 @@ void grpc_census_call_set_context(grpc_call *call, census_context *context) {
}
}
-census_context *grpc_census_call_get_context(grpc_call *call) {
+census_context* grpc_census_call_get_context(grpc_call* call) {
GRPC_API_TRACE("grpc_census_call_get_context(call=%p)", 1, (call));
- return (census_context *)grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
+ return (census_context*)grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
}
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 31a8fc39ce..82a5edca93 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -29,9 +29,9 @@
#include "src/core/lib/surface/completion_queue.h"
grpc_connectivity_state grpc_channel_check_connectivity_state(
- grpc_channel *channel, int try_to_connect) {
+ grpc_channel* channel, int try_to_connect) {
/* forward through to the underlying client channel */
- grpc_channel_element *client_channel_elem =
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_connectivity_state state;
@@ -66,15 +66,15 @@ typedef struct {
grpc_closure watcher_timer_init;
grpc_timer alarm;
grpc_connectivity_state state;
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
grpc_cq_completion completion_storage;
- grpc_channel *channel;
- grpc_error *error;
- void *tag;
+ grpc_channel* channel;
+ grpc_error* error;
+ void* tag;
} state_watcher;
-static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+static void delete_state_watcher(grpc_exec_ctx* exec_ctx, state_watcher* w) {
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
if (client_channel_elem->filter == &grpc_client_channel_filter) {
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
@@ -86,10 +86,10 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
gpr_free(w);
}
-static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_cq_completion *ignored) {
+static void finished_completion(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_cq_completion* ignored) {
bool should_delete = false;
- state_watcher *w = (state_watcher *)pw;
+ state_watcher* w = (state_watcher*)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -106,12 +106,12 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
}
}
-static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
- bool due_to_completion, grpc_error *error) {
+static void partly_done(grpc_exec_ctx* exec_ctx, state_watcher* w,
+ bool due_to_completion, grpc_error* error) {
if (due_to_completion) {
grpc_timer_cancel(exec_ctx, &w->alarm);
} else {
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
grpc_client_channel_watch_connectivity_state(
exec_ctx, client_channel_elem,
@@ -161,31 +161,31 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
GRPC_ERROR_UNREF(error);
}
-static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_error *error) {
- partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error));
+static void watch_complete(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_error* error) {
+ partly_done(exec_ctx, (state_watcher*)pw, true, GRPC_ERROR_REF(error));
}
-static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw,
- grpc_error *error) {
- partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error));
+static void timeout_complete(grpc_exec_ctx* exec_ctx, void* pw,
+ grpc_error* error) {
+ partly_done(exec_ctx, (state_watcher*)pw, false, GRPC_ERROR_REF(error));
}
-int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) {
- grpc_channel_element *client_channel_elem =
+int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return grpc_client_channel_num_external_connectivity_watchers(
client_channel_elem);
}
typedef struct watcher_timer_init_arg {
- state_watcher *w;
+ state_watcher* w;
gpr_timespec deadline;
} watcher_timer_init_arg;
-static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;
+static void watcher_timer_init(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
grpc_timer_init(exec_ctx, &wa->w->alarm,
grpc_timespec_to_millis_round_up(wa->deadline),
@@ -193,19 +193,19 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(wa);
}
-int grpc_channel_support_connectivity_watcher(grpc_channel *channel) {
- grpc_channel_element *client_channel_elem =
+int grpc_channel_support_connectivity_watcher(grpc_channel* channel) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
return client_channel_elem->filter != &grpc_client_channel_filter ? 0 : 1;
}
void grpc_channel_watch_connectivity_state(
- grpc_channel *channel, grpc_connectivity_state last_observed_state,
- gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
- grpc_channel_element *client_channel_elem =
+ grpc_channel* channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
+ state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -213,8 +213,9 @@ void grpc_channel_watch_connectivity_state(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
- 7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
- (int)deadline.clock_type, cq, tag));
+ 7,
+ (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
+ (int)deadline.clock_type, cq, tag));
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
@@ -230,8 +231,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
- watcher_timer_init_arg *wa =
- (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
+ watcher_timer_init_arg* wa =
+ (watcher_timer_init_arg*)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 9eae6e02c9..584872bfc1 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -76,29 +76,29 @@ typedef struct {
wait_for_ready_value wait_for_ready;
} method_parameters;
-static method_parameters *method_parameters_ref(
- method_parameters *method_params) {
+static method_parameters* method_parameters_ref(
+ method_parameters* method_params) {
gpr_ref(&method_params->refs);
return method_params;
}
-static void method_parameters_unref(method_parameters *method_params) {
+static void method_parameters_unref(method_parameters* method_params) {
if (gpr_unref(&method_params->refs)) {
gpr_free(method_params);
}
}
// Wrappers to pass to grpc_service_config_create_method_config_table().
-static void *method_parameters_ref_wrapper(void *value) {
- return method_parameters_ref((method_parameters *)value);
+static void* method_parameters_ref_wrapper(void* value) {
+ return method_parameters_ref((method_parameters*)value);
}
-static void method_parameters_unref_wrapper(grpc_exec_ctx *exec_ctx,
- void *value) {
- method_parameters_unref((method_parameters *)value);
+static void method_parameters_unref_wrapper(grpc_exec_ctx* exec_ctx,
+ void* value) {
+ method_parameters_unref((method_parameters*)value);
}
-static bool parse_wait_for_ready(grpc_json *field,
- wait_for_ready_value *wait_for_ready) {
+static bool parse_wait_for_ready(grpc_json* field,
+ wait_for_ready_value* wait_for_ready) {
if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
return false;
}
@@ -107,13 +107,13 @@ static bool parse_wait_for_ready(grpc_json *field,
return true;
}
-static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
+static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
if (field->type != GRPC_JSON_STRING) return false;
size_t len = strlen(field->value);
if (field->value[len - 1] != 's') return false;
- char *buf = gpr_strdup(field->value);
+ char* buf = gpr_strdup(field->value);
buf[len - 1] = '\0'; // Remove trailing 's'.
- char *decimal_point = strchr(buf, '.');
+ char* decimal_point = strchr(buf, '.');
int nanos = 0;
if (decimal_point != NULL) {
*decimal_point = '\0';
@@ -138,10 +138,10 @@ static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
return true;
}
-static void *method_parameters_create_from_json(const grpc_json *json) {
+static void* method_parameters_create_from_json(const grpc_json* json) {
wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
grpc_millis timeout = 0;
- for (grpc_json *field = json->child; field != NULL; field = field->next) {
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
if (field->key == NULL) continue;
if (strcmp(field->key, "waitForReady") == 0) {
if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate.
@@ -151,8 +151,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
- method_parameters *value =
- (method_parameters *)gpr_malloc(sizeof(method_parameters));
+ method_parameters* value =
+ (method_parameters*)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -167,24 +167,24 @@ struct external_connectivity_watcher;
typedef struct client_channel_channel_data {
/** resolver for this channel */
- grpc_resolver *resolver;
+ grpc_resolver* resolver;
/** have we started resolving this channel */
bool started_resolving;
/** is deadline checking enabled? */
bool deadline_checking_enabled;
/** client channel factory */
- grpc_client_channel_factory *client_channel_factory;
+ grpc_client_channel_factory* client_channel_factory;
/** combiner protecting all variables below in this data structure */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/** currently active load balancer */
- grpc_lb_policy *lb_policy;
+ grpc_lb_policy* lb_policy;
/** retry throttle data */
- grpc_server_retry_throttle_data *retry_throttle_data;
+ grpc_server_retry_throttle_data* retry_throttle_data;
/** maps method names to method_parameters structs */
- grpc_slice_hash_table *method_params_table;
+ grpc_slice_hash_table* method_params_table;
/** incoming resolver result - set by resolver.next() */
- grpc_channel_args *resolver_result;
+ grpc_channel_args* resolver_result;
/** a list of closures that are all waiting for resolver result to come in */
grpc_closure_list waiting_for_resolver_result_closures;
/** resolver callback */
@@ -194,42 +194,42 @@ typedef struct client_channel_channel_data {
/** when an lb_policy arrives, should we try to exit idle */
bool exit_idle_when_lb_policy_arrives;
/** owning stack */
- grpc_channel_stack *owning_stack;
+ grpc_channel_stack* owning_stack;
/** interested parties (owned) */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/* external_connectivity_watcher_list head is guarded by its own mutex, since
* counts need to be grabbed immediately without polling on a cq */
gpr_mu external_connectivity_watcher_list_mu;
- struct external_connectivity_watcher *external_connectivity_watcher_list_head;
+ struct external_connectivity_watcher* external_connectivity_watcher_list_head;
/* the following properties are guarded by a mutex since API's require them
to be instantaneously available */
gpr_mu info_mu;
- char *info_lb_policy_name;
+ char* info_lb_policy_name;
/** service config in JSON form */
- char *info_service_config_json;
+ char* info_service_config_json;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
resolver, to watch for state changes from the lb_policy. When a state
change is seen, we update the channel, and create a new watcher. */
typedef struct {
- channel_data *chand;
+ channel_data* chand;
grpc_closure on_changed;
grpc_connectivity_state state;
- grpc_lb_policy *lb_policy;
+ grpc_lb_policy* lb_policy;
} lb_policy_connectivity_watcher;
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
+static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+ grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state);
-static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
- channel_data *chand,
+static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
+ channel_data* chand,
grpc_connectivity_state state,
- grpc_error *error,
- const char *reason) {
+ grpc_error* error,
+ const char* reason) {
/* TODO: Improve failure handling:
* - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
* - Hand over pending picks from old policies during the switch that happens
@@ -256,9 +256,9 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
reason);
}
-static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
+static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -282,11 +282,11 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
gpr_free(w);
}
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_lb_policy *lb_policy,
+static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+ grpc_lb_policy* lb_policy,
grpc_connectivity_state current_state) {
- lb_policy_connectivity_watcher *w =
- (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
+ lb_policy_connectivity_watcher* w =
+ (lb_policy_connectivity_watcher*)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -297,8 +297,8 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
-static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
- channel_data *chand) {
+static void start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ channel_data* chand) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
}
@@ -310,19 +310,19 @@ static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
- char *server_name;
- grpc_server_retry_throttle_data *retry_throttle_data;
+ char* server_name;
+ grpc_server_retry_throttle_data* retry_throttle_data;
} service_config_parsing_state;
-static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
- service_config_parsing_state *parsing_state =
- (service_config_parsing_state *)arg;
+static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
+ service_config_parsing_state* parsing_state =
+ (service_config_parsing_state*)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
int max_milli_tokens = 0;
int milli_token_ratio = 0;
- for (grpc_json *sub_field = field->child; sub_field != NULL;
+ for (grpc_json* sub_field = field->child; sub_field != NULL;
sub_field = sub_field->next) {
if (sub_field->key == NULL) return;
if (strcmp(sub_field->key, "maxTokens") == 0) {
@@ -338,7 +338,7 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
size_t whole_len = strlen(sub_field->value);
uint32_t multiplier = 1;
uint32_t decimal_value = 0;
- const char *decimal_point = strchr(sub_field->value, '.');
+ const char* decimal_point = strchr(sub_field->value, '.');
if (decimal_point != NULL) {
whole_len = (size_t)(decimal_point - sub_field->value);
multiplier = 1000;
@@ -369,25 +369,25 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
}
}
-static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- channel_data *chand = (channel_data *)arg;
+static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ channel_data* chand = (channel_data*)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
}
// Extract the following fields from the resolver result, if non-NULL.
bool lb_policy_updated = false;
- char *lb_policy_name_dup = NULL;
+ char* lb_policy_name_dup = NULL;
bool lb_policy_name_changed = false;
- grpc_lb_policy *new_lb_policy = NULL;
- char *service_config_json = NULL;
- grpc_server_retry_throttle_data *retry_throttle_data = NULL;
- grpc_slice_hash_table *method_params_table = NULL;
+ grpc_lb_policy* new_lb_policy = NULL;
+ char* service_config_json = NULL;
+ grpc_server_retry_throttle_data* retry_throttle_data = NULL;
+ grpc_slice_hash_table* method_params_table = NULL;
if (chand->resolver_result != NULL) {
// Find LB policy name.
- const char *lb_policy_name = NULL;
- const grpc_arg *channel_arg =
+ const char* lb_policy_name = NULL;
+ const grpc_arg* channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
@@ -398,8 +398,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
- grpc_lb_addresses *addresses =
- (grpc_lb_addresses *)channel_arg->value.pointer.p;
+ grpc_lb_addresses* addresses =
+ (grpc_lb_addresses*)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -450,14 +450,14 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (channel_arg != NULL) {
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
service_config_json = gpr_strdup(channel_arg->value.string);
- grpc_service_config *service_config =
+ grpc_service_config* service_config =
grpc_service_config_create(service_config_json);
if (service_config != NULL) {
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
GPR_ASSERT(channel_arg != NULL);
GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
- grpc_uri *uri =
+ grpc_uri* uri =
grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
@@ -560,7 +560,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
&chand->waiting_for_resolver_result_closures);
} else { // Not shutting down.
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
- grpc_error *state_error =
+ grpc_error* state_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (new_lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -592,12 +592,12 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- grpc_transport_op *op = (grpc_transport_op *)arg;
- grpc_channel_element *elem =
- (grpc_channel_element *)op->handler_private.extra_arg;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ grpc_transport_op* op = (grpc_transport_op*)arg;
+ grpc_channel_element* elem =
+ (grpc_channel_element*)op->handler_private.extra_arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -648,10 +648,10 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
}
-static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_start_transport_op(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -668,10 +668,10 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
-static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *info) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_get_channel_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* info) {
+ channel_data* chand = (channel_data*)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -688,10 +688,10 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
}
/* Constructor for channel_data */
-static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -712,7 +712,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client_channel");
grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
// Record client channel factory.
- const grpc_arg *arg = grpc_channel_args_find(args->channel_args,
+ const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -723,9 +723,9 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(
- (grpc_client_channel_factory *)arg->value.pointer.p);
+ (grpc_client_channel_factory*)arg->value.pointer.p);
chand->client_channel_factory =
- (grpc_client_channel_factory *)arg->value.pointer.p;
+ (grpc_client_channel_factory*)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -736,8 +736,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"server uri arg must be a string");
}
- char *proxy_name = NULL;
- grpc_channel_args *new_args = NULL;
+ char* proxy_name = NULL;
+ grpc_channel_args* new_args = NULL;
grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
&proxy_name, &new_args);
// Instantiate resolver.
@@ -755,21 +755,22 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_resolver *resolver = (grpc_resolver *)arg;
+static void shutdown_resolver_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_resolver* resolver = (grpc_resolver*)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
/* Destructor for channel_data */
-static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
- grpc_combiner_scheduler(chand->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
if (chand->client_channel_factory != NULL) {
@@ -829,45 +830,45 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
grpc_millis deadline;
- gpr_arena *arena;
- grpc_call_stack *owning_call;
- grpc_call_combiner *call_combiner;
+ gpr_arena* arena;
+ grpc_call_stack* owning_call;
+ grpc_call_combiner* call_combiner;
- grpc_server_retry_throttle_data *retry_throttle_data;
- method_parameters *method_params;
+ grpc_server_retry_throttle_data* retry_throttle_data;
+ method_parameters* method_params;
- grpc_subchannel_call *subchannel_call;
- grpc_error *error;
+ grpc_subchannel_call* subchannel_call;
+ grpc_error* error;
- grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending.
+ grpc_lb_policy* lb_policy; // Holds ref while LB pick is pending.
grpc_closure lb_pick_closure;
grpc_closure lb_pick_cancel_closure;
- grpc_connected_subchannel *connected_subchannel;
+ grpc_connected_subchannel* connected_subchannel;
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
- grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
+ grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
size_t waiting_for_pick_batches_count;
grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
- grpc_transport_stream_op_batch *initial_metadata_batch;
+ grpc_transport_stream_op_batch* initial_metadata_batch;
grpc_linked_mdelem lb_token_mdelem;
grpc_closure on_complete;
- grpc_closure *original_on_complete;
+ grpc_closure* original_on_complete;
} call_data;
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
return calld->subchannel_call;
}
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_add(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
+ call_data* calld, grpc_transport_stream_op_batch* batch) {
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->initial_metadata_batch == NULL);
calld->initial_metadata_batch = batch;
@@ -879,9 +880,9 @@ static void waiting_for_pick_batches_add(
}
// This is called via the call combiner, so access to calld is synchronized.
-static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -892,10 +893,10 @@ static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = (call_data *)elem->call_data;
+static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_error* error) {
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
@@ -923,9 +924,9 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *ignored) {
- call_data *calld = (call_data *)arg;
+static void run_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* ignored) {
+ call_data* calld = (call_data*)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -935,13 +936,14 @@ static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
- " pending batches to subchannel_call=%p",
+ gpr_log(GPR_DEBUG,
+ "chand=%p calld=%p: sending %" PRIuPTR
+ " pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
}
@@ -961,10 +963,10 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
// Applies service config to the call. Must be invoked once we know
// that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void apply_service_config_to_call_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -974,7 +976,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
- calld->method_params = (method_parameters *)grpc_method_config_table_get(
+ calld->method_params = (method_parameters*)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@@ -994,11 +996,11 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_error *error) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void create_subchannel_call_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_error* error) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
calld->pollent, // pollent
calld->path, // path
@@ -1008,7 +1010,7 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
calld->subchannel_call_context, // context
calld->call_combiner // call_combiner
};
- grpc_error *new_error = grpc_connected_subchannel_create_call(
+ grpc_error* new_error = grpc_connected_subchannel_create_call(
exec_ctx, calld->connected_subchannel, &call_args,
&calld->subchannel_call);
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1025,10 +1027,10 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
}
// Invoked when a pick is completed, on both success or failure.
-static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_error *error) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_error* error) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (calld->connected_subchannel == NULL) {
// Failed to create subchannel.
GRPC_ERROR_UNREF(calld->error);
@@ -1054,10 +1056,10 @@ static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
// either (a) the pick was deferred pending a resolver result or (b) the
// pick was done asynchronously. Removes the call's polling entity from
// chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem, grpc_error *error) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void async_pick_done_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem, grpc_error* error) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
pick_done_locked(exec_ctx, elem, error);
@@ -1065,11 +1067,11 @@ static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_callback_cancel_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
@@ -1084,11 +1086,11 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Callback invoked by grpc_lb_policy_pick_locked() for async picks.
// Unrefs the LB policy and invokes async_pick_done_locked().
-static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_callback_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1102,10 +1104,10 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
// If the pick was completed synchronously, unrefs the LB policy and
// returns true.
-static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1162,7 +1164,7 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
}
typedef struct {
- grpc_call_element *elem;
+ grpc_call_element* elem;
bool finished;
grpc_closure closure;
grpc_closure cancel_closure;
@@ -1170,11 +1172,10 @@ typedef struct {
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)arg;
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
gpr_free(args);
return;
@@ -1187,9 +1188,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
// is called, it will be a no-op. We also immediately invoke
// async_pick_done_locked() to propagate the error back to the caller.
args->finished = true;
- grpc_call_element *elem = args->elem;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = args->elem;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: cancelling pick waiting for resolver result",
@@ -1205,14 +1206,13 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
"Pick cancelled", &error, 1));
}
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem);
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem);
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)arg;
+static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1222,9 +1222,9 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
return;
}
args->finished = true;
- grpc_call_element *elem = args->elem;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = args->elem;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1271,17 +1271,17 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
calld);
}
- pick_after_resolver_result_args *args =
- (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args));
+ pick_after_resolver_result_args* args =
+ (pick_after_resolver_result_args*)gpr_zalloc(sizeof(*args));
args->elem = elem;
GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked,
args, grpc_combiner_scheduler(chand->combiner));
@@ -1294,11 +1294,11 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(chand->combiner)));
}
-static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *ignored) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(calld->connected_subchannel == NULL);
if (chand->lb_policy != NULL) {
// We already have an LB policy, so ask it for a pick.
@@ -1328,9 +1328,9 @@ static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
chand->interested_parties);
}
-static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1349,10 +1349,10 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void cc_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
@@ -1443,11 +1443,11 @@ done:
}
/* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* cc_init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1463,12 +1463,12 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cc_destroy_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1499,10 +1499,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
-static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {
- call_data *calld = (call_data *)elem->call_data;
+static void cc_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ call_data* calld = (call_data*)elem->call_data;
calld->pollent = pollent;
}
@@ -1524,9 +1524,9 @@ const grpc_channel_filter grpc_client_channel_filter = {
"client-channel",
};
-static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- channel_data *chand = (channel_data *)arg;
+static void try_to_connect_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ channel_data* chand = (channel_data*)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1539,34 +1539,35 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect) {
+ channel_data* chand = (channel_data*)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
- grpc_combiner_scheduler(chand->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
+ grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
return out;
}
typedef struct external_connectivity_watcher {
- channel_data *chand;
+ channel_data* chand;
grpc_polling_entity pollent;
- grpc_closure *on_complete;
- grpc_closure *watcher_timer_init;
- grpc_connectivity_state *state;
+ grpc_closure* on_complete;
+ grpc_closure* watcher_timer_init;
+ grpc_connectivity_state* state;
grpc_closure my_closure;
- struct external_connectivity_watcher *next;
+ struct external_connectivity_watcher* next;
} external_connectivity_watcher;
-static external_connectivity_watcher *lookup_external_connectivity_watcher(
- channel_data *chand, grpc_closure *on_complete) {
+static external_connectivity_watcher* lookup_external_connectivity_watcher(
+ channel_data* chand, grpc_closure* on_complete) {
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL && w->on_complete != on_complete) {
w = w->next;
@@ -1576,7 +1577,7 @@ static external_connectivity_watcher *lookup_external_connectivity_watcher(
}
static void external_connectivity_watcher_list_append(
- channel_data *chand, external_connectivity_watcher *w) {
+ channel_data* chand, external_connectivity_watcher* w) {
GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete));
gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu);
@@ -1587,7 +1588,7 @@ static void external_connectivity_watcher_list_append(
}
static void external_connectivity_watcher_list_remove(
- channel_data *chand, external_connectivity_watcher *too_remove) {
+ channel_data* chand, external_connectivity_watcher* too_remove) {
GPR_ASSERT(
lookup_external_connectivity_watcher(chand, too_remove->on_complete));
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1596,7 +1597,7 @@ static void external_connectivity_watcher_list_remove(
gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu);
return;
}
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
if (w->next == too_remove) {
@@ -1610,12 +1611,12 @@ static void external_connectivity_watcher_list_remove(
}
int grpc_client_channel_num_external_connectivity_watchers(
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
- external_connectivity_watcher *w =
+ external_connectivity_watcher* w =
chand->external_connectivity_watcher_list_head;
while (w != NULL) {
count++;
@@ -1626,10 +1627,10 @@ int grpc_client_channel_num_external_connectivity_watchers(
return count;
}
-static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
- grpc_closure *follow_up = w->on_complete;
+static void on_external_watch_complete_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ grpc_closure* follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
@@ -1639,10 +1640,10 @@ static void on_external_watch_complete_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
-static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error_ignored) {
- external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
- external_connectivity_watcher *found = NULL;
+static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error_ignored) {
+ external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
+ external_connectivity_watcher* found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
@@ -1667,12 +1668,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_polling_entity pollent, grpc_connectivity_state *state,
- grpc_closure *closure, grpc_closure *watcher_timer_init) {
- channel_data *chand = (channel_data *)elem->channel_data;
- external_connectivity_watcher *w =
- (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_polling_entity pollent, grpc_connectivity_state* state,
+ grpc_closure* closure, grpc_closure* watcher_timer_init) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ external_connectivity_watcher* w =
+ (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;
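
The hunks above show the external connectivity watcher bookkeeping: a singly linked list guarded by external_connectivity_watcher_list_mu and keyed by the caller's on_complete closure pointer. The following standalone sketch (not part of this patch) illustrates that lookup/append/remove pattern with pthread_mutex standing in for gpr_mu so it compiles outside of gRPC core; all names here are hypothetical.

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

typedef struct watcher {
  void* on_complete;      /* key: the caller-supplied closure pointer */
  struct watcher* next;
} watcher;

static pthread_mutex_t list_mu = PTHREAD_MUTEX_INITIALIZER;
static watcher* list_head = NULL;

static watcher* lookup_watcher(void* on_complete) {
  pthread_mutex_lock(&list_mu);
  watcher* w = list_head;
  while (w != NULL && w->on_complete != on_complete) w = w->next;
  pthread_mutex_unlock(&list_mu);
  return w;
}

static void append_watcher(watcher* w) {
  assert(lookup_watcher(w->on_complete) == NULL); /* no duplicate keys */
  pthread_mutex_lock(&list_mu);
  w->next = list_head;  /* push onto the head of the list */
  list_head = w;
  pthread_mutex_unlock(&list_mu);
}

static void remove_watcher(watcher* w) {
  pthread_mutex_lock(&list_mu);
  if (w == list_head) {
    list_head = w->next;
  } else {
    for (watcher* prev = list_head; prev != NULL; prev = prev->next) {
      if (prev->next == w) {
        prev->next = w->next;
        break;
      }
    }
  }
  pthread_mutex_unlock(&list_mu);
}

int main(void) {
  watcher a = {.on_complete = (void*)0x1};
  watcher b = {.on_complete = (void*)0x2};
  append_watcher(&a);
  append_watcher(&b);
  assert(lookup_watcher((void*)0x1) == &a);
  remove_watcher(&a);
  assert(lookup_watcher((void*)0x1) == NULL);
  return 0;
}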
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index 152fe2365a..27862cf239 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -42,19 +42,19 @@ extern "C" {
extern const grpc_channel_filter grpc_client_channel_filter;
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect);
int grpc_client_channel_num_external_connectivity_watchers(
- grpc_channel_element *elem);
+ grpc_channel_element* elem);
void grpc_client_channel_watch_connectivity_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_polling_entity pollent, grpc_connectivity_state *state,
- grpc_closure *on_complete, grpc_closure *watcher_timer_init);
+ grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_polling_entity pollent, grpc_connectivity_state* state,
+ grpc_closure* on_complete, grpc_closure* watcher_timer_init);
/* Debug helper: pull the subchannel call from a call stack element */
-grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
- grpc_call_element *elem);
+grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
+ grpc_call_element* elem);
#ifdef __cplusplus
}
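
As a usage note for the header above: grpc_client_channel_check_connectivity_state() takes the client channel's terminal channel element. The sketch below shows the call shape, assuming the internal helpers grpc_channel_get_channel_stack() and grpc_channel_stack_last_element() used by channel_connectivity.cc; the wrapper function and its includes are assumptions for illustration, not code from this patch.

#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/surface/channel.h"

/* 'channel' is any client channel created by the caller (hypothetical). */
static grpc_connectivity_state check_state(grpc_channel* channel) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_channel_element* client_channel_elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  grpc_connectivity_state state = grpc_client_channel_check_connectivity_state(
      &exec_ctx, client_channel_elem, 1 /* try_to_connect */);
  grpc_exec_ctx_finish(&exec_ctx);
  return state;
}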
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index 4273c90058..db8645cd00 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -44,39 +44,39 @@ typedef enum {
/** Constructor for new configured channels.
Creating decorators around this type is encouraged to adapt behavior. */
struct grpc_client_channel_factory {
- const grpc_client_channel_factory_vtable *vtable;
+ const grpc_client_channel_factory_vtable* vtable;
};
struct grpc_client_channel_factory_vtable {
- void (*ref)(grpc_client_channel_factory *factory);
- void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory);
- grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const grpc_subchannel_args *args);
- grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory,
- const char *target,
+ void (*ref)(grpc_client_channel_factory* factory);
+ void (*unref)(grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory);
+ grpc_subchannel* (*create_subchannel)(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const grpc_subchannel_args* args);
+ grpc_channel* (*create_client_channel)(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory,
+ const char* target,
grpc_client_channel_type type,
- const grpc_channel_args *args);
+ const grpc_channel_args* args);
};
-void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory);
-void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx,
- grpc_client_channel_factory *factory);
+void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
+void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
+ grpc_client_channel_factory* factory);
/** Create a new grpc_subchannel */
-grpc_subchannel *grpc_client_channel_factory_create_subchannel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const grpc_subchannel_args *args);
+grpc_subchannel* grpc_client_channel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
+ const grpc_subchannel_args* args);
/** Create a new grpc_channel */
-grpc_channel *grpc_client_channel_factory_create_channel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory,
- const char *target, grpc_client_channel_type type,
- const grpc_channel_args *args);
+grpc_channel* grpc_client_channel_factory_create_channel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
+ const char* target, grpc_client_channel_type type,
+ const grpc_channel_args* args);
grpc_arg grpc_client_channel_factory_create_channel_arg(
- grpc_client_channel_factory *factory);
+ grpc_client_channel_factory* factory);
#ifdef __cplusplus
}
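
The header above explicitly encourages decorators around grpc_client_channel_factory. A minimal sketch of that idea, forwarding every vtable entry to a wrapped factory and logging subchannel/channel creation; the logging_factory type and names are hypothetical, and allocation/ownership of the decorator itself is omitted for brevity.

typedef struct {
  grpc_client_channel_factory base;  /* must be first: callers see this type */
  grpc_client_channel_factory* wrapped;
} logging_factory;

static void logging_factory_ref(grpc_client_channel_factory* f) {
  grpc_client_channel_factory_ref(((logging_factory*)f)->wrapped);
}

static void logging_factory_unref(grpc_exec_ctx* exec_ctx,
                                  grpc_client_channel_factory* f) {
  grpc_client_channel_factory_unref(exec_ctx, ((logging_factory*)f)->wrapped);
}

static grpc_subchannel* logging_factory_create_subchannel(
    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* f,
    const grpc_subchannel_args* args) {
  gpr_log(GPR_DEBUG, "creating subchannel");
  return grpc_client_channel_factory_create_subchannel(
      exec_ctx, ((logging_factory*)f)->wrapped, args);
}

static grpc_channel* logging_factory_create_client_channel(
    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* f,
    const char* target, grpc_client_channel_type type,
    const grpc_channel_args* args) {
  gpr_log(GPR_DEBUG, "creating channel to %s", target);
  return grpc_client_channel_factory_create_channel(
      exec_ctx, ((logging_factory*)f)->wrapped, target, type, args);
}

static const grpc_client_channel_factory_vtable logging_factory_vtable = {
    logging_factory_ref, logging_factory_unref,
    logging_factory_create_subchannel, logging_factory_create_client_channel};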
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index 4431d11519..0db894913c 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -34,16 +34,16 @@
#include "src/core/ext/filters/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
-static bool append_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder, void *arg) {
+static bool append_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
-static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *unused) {
- const grpc_channel_args *args =
+static bool set_default_host_if_unset(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* unused) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY) ||
@@ -51,12 +51,12 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
return true;
}
}
- char *default_authority = grpc_get_default_authority(
+ char* default_authority = grpc_get_default_authority(
exec_ctx, grpc_channel_stack_builder_get_target(builder));
if (default_authority != NULL) {
grpc_arg arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ (char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
new_args);
gpr_free(default_authority);
@@ -76,7 +76,7 @@ extern "C" void grpc_client_channel_init(void) {
set_default_host_if_unset, NULL);
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
- (void *)&grpc_client_channel_filter);
+ (void*)&grpc_client_channel_filter);
grpc_http_connect_register_handshaker_factory();
grpc_register_tracer(&grpc_client_channel_trace);
#ifndef NDEBUG
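
The plugin code above registers the client channel filter through a stage callback. The same registration shape, sketched for a hypothetical filter 'my_filter' and plugin init function (assumptions for illustration; the callback signature and grpc_channel_init_register_stage() usage mirror the code in this file):

static bool append_my_filter(grpc_exec_ctx* exec_ctx,
                             grpc_channel_stack_builder* builder, void* arg) {
  /* Append the filter passed via 'arg' to the channel stack being built. */
  return grpc_channel_stack_builder_append_filter(
      builder, (const grpc_channel_filter*)arg, NULL, NULL);
}

void my_plugin_init(void) {
  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                   append_my_filter, (void*)&my_filter);
}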
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index b71e0aab00..12dc59bcdf 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -31,48 +31,48 @@ typedef struct grpc_connector grpc_connector;
typedef struct grpc_connector_vtable grpc_connector_vtable;
struct grpc_connector {
- const grpc_connector_vtable *vtable;
+ const grpc_connector_vtable* vtable;
};
typedef struct {
/** set of pollsets interested in this connection */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** deadline for connection */
grpc_millis deadline;
/** channel arguments (to be passed to transport) */
- const grpc_channel_args *channel_args;
+ const grpc_channel_args* channel_args;
} grpc_connect_in_args;
typedef struct {
/** the connected transport */
- grpc_transport *transport;
+ grpc_transport* transport;
/** channel arguments (to be passed to the filters) */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
} grpc_connect_out_args;
struct grpc_connector_vtable {
- void (*ref)(grpc_connector *connector);
- void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+ void (*ref)(grpc_connector* connector);
+ void (*unref)(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Implementation of grpc_connector_shutdown */
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- grpc_error *why);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ grpc_error* why);
/** Implementation of grpc_connector_connect */
- void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args, grpc_closure *notify);
+ void (*connect)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args, grpc_closure* notify);
};
-grpc_connector *grpc_connector_ref(grpc_connector *connector);
-void grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector);
+grpc_connector* grpc_connector_ref(grpc_connector* connector);
+void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
/** Connect using the connector: max one outstanding call at a time */
-void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- const grpc_connect_in_args *in_args,
- grpc_connect_out_args *out_args,
- grpc_closure *notify);
+void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ const grpc_connect_in_args* in_args,
+ grpc_connect_out_args* out_args,
+ grpc_closure* notify);
/** Cancel any pending connection */
-void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector,
- grpc_error *why);
+void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+ grpc_error* why);
#ifdef __cplusplus
}
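
For orientation, here is the minimal shape of a grpc_connector implementation, filling in the vtable above with stubs. This is a sketch only; the stub_connector type and names are hypothetical, and a real connector (for example the chttp2 connector) would establish a transport and then schedule 'notify' with the result.

typedef struct {
  grpc_connector base;
  gpr_refcount refs;
} stub_connector;

static void stub_connector_ref(grpc_connector* con) {
  gpr_ref(&((stub_connector*)con)->refs);
}

static void stub_connector_unref(grpc_exec_ctx* exec_ctx,
                                 grpc_connector* con) {
  if (gpr_unref(&((stub_connector*)con)->refs)) gpr_free(con);
}

static void stub_connector_shutdown(grpc_exec_ctx* exec_ctx,
                                    grpc_connector* con, grpc_error* why) {
  GRPC_ERROR_UNREF(why);  /* nothing in flight to cancel in this stub */
}

static void stub_connector_connect(grpc_exec_ctx* exec_ctx,
                                   grpc_connector* con,
                                   const grpc_connect_in_args* in_args,
                                   grpc_connect_out_args* out_args,
                                   grpc_closure* notify) {
  /* A real implementation would start a TCP connect / handshake here and
     invoke 'notify' once out_args->transport is ready; the stub fails fast. */
  out_args->transport = NULL;
  GRPC_CLOSURE_SCHED(exec_ctx, notify,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("stub connector"));
}

static const grpc_connector_vtable stub_connector_vtable = {
    stub_connector_ref, stub_connector_unref, stub_connector_shutdown,
    stub_connector_connect};

static grpc_connector* stub_connector_create(void) {
  stub_connector* c = (stub_connector*)gpr_zalloc(sizeof(*c));
  c->base.vtable = &stub_connector_vtable;
  gpr_ref_init(&c->refs, 1);
  return &c->base;
}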
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 8e6673d737..387c26ed5c 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -26,9 +26,9 @@ grpc_tracer_flag grpc_trace_lb_policy_refcount =
GRPC_TRACER_INITIALIZER(false, "lb_policy_refcount");
#endif
-void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable,
- grpc_combiner *combiner) {
+void grpc_lb_policy_init(grpc_lb_policy* policy,
+ const grpc_lb_policy_vtable* vtable,
+ grpc_combiner* combiner) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
@@ -37,7 +37,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#ifndef NDEBUG
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
-#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
+#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char* purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
@@ -47,7 +47,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
#define REF_MUTATE_PASS_ARGS(x)
#endif
-static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
+static gpr_atm ref_mutate(grpc_lb_policy* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
@@ -61,104 +61,105 @@ static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
return old_val;
}
-void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_policy *policy = (grpc_lb_policy *)arg;
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_policy* policy = (grpc_lb_policy*)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
- shutdown_locked, policy,
- grpc_combiner_scheduler(policy->combiner)),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(shutdown_locked, policy,
+ grpc_combiner_scheduler(policy->combiner)),
+ GRPC_ERROR_NONE);
} else {
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
}
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
- grpc_combiner *combiner = policy->combiner;
+ grpc_combiner* combiner = policy->combiner;
policy->vtable->destroy(exec_ctx, policy);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
}
}
-int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- void **user_data, grpc_closure *on_complete) {
+int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ void** user_data, grpc_closure* on_complete) {
return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
context, user_data, on_complete);
}
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error) {
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
}
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
+ grpc_error* error) {
policy->vtable->cancel_picks_locked(exec_ctx, policy,
initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
}
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy) {
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy) {
policy->vtable->exit_idle_locked(exec_ctx, policy);
}
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_closure *closure) {
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_closure* closure) {
policy->vtable->ping_one_locked(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connectivity_state *state, grpc_closure *closure) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connectivity_state* state, grpc_closure* closure) {
policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
closure);
}
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error) {
return policy->vtable->check_connectivity_locked(exec_ctx, policy,
connectivity_error);
}
-void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- const grpc_lb_policy_args *lb_policy_args) {
+void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ const grpc_lb_policy_args* lb_policy_args) {
policy->vtable->update_locked(exec_ctx, policy, lb_policy_args);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 010299c2f4..590094e67e 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -38,70 +38,70 @@ extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
#endif
struct grpc_lb_policy {
- const grpc_lb_policy_vtable *vtable;
+ const grpc_lb_policy_vtable* vtable;
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/* combiner under which lb_policy actions take place */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
};
/** Extra arguments for an LB pick */
typedef struct grpc_lb_policy_pick_args {
/** Initial metadata associated with the picking call. */
- grpc_metadata_batch *initial_metadata;
+ grpc_metadata_batch* initial_metadata;
/** Bitmask used for selective cancelling. See \a
* grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
* grpc_types.h */
uint32_t initial_metadata_flags;
/** Storage for LB token in \a initial_metadata, or NULL if not used */
- grpc_linked_mdelem *lb_token_mdelem_storage;
+ grpc_linked_mdelem* lb_token_mdelem_storage;
} grpc_lb_policy_pick_args;
struct grpc_lb_policy_vtable {
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
- void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+ void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** \see grpc_lb_policy_pick */
- int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete);
+ int (*pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete);
/** \see grpc_lb_policy_cancel_pick */
- void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+ void (*cancel_pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error);
/** \see grpc_lb_policy_cancel_picks */
- void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+ void (*cancel_picks_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+ grpc_error* error);
/** \see grpc_lb_policy_ping_one */
- void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_closure *closure);
+ void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_closure* closure);
/** Try to enter a READY connectivity state */
- void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+ void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity_locked)(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error);
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the policy. Calling with a NULL \a
state cancels the subscription. */
- void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connectivity_state *state,
- grpc_closure *closure);
+ void (*notify_on_state_change_locked)(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connectivity_state* state,
+ grpc_closure* closure);
- void (*update_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args);
+ void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args);
};
#ifndef NDEBUG
@@ -119,29 +119,29 @@ struct grpc_lb_policy_vtable {
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const char *file, int line, const char *reason);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
- const char *reason);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const char *file, int line, const char *reason);
+void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
+ const char* reason);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const char* file, int line, const char* reason);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
+ const char* reason);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const char* file, int line, const char* reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
-void grpc_lb_policy_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
#endif
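
To make the two macro variants above concrete: a call site such as the sketch below ('policy' is a hypothetical grpc_lb_policy*) expands to grpc_lb_policy_ref(policy, __FILE__, __LINE__, "pick") in debug builds, so the refcount tracer can attribute each ref to a file, line, and reason, and collapses to grpc_lb_policy_ref(policy) when NDEBUG is defined, so the tracing costs nothing in release builds.

GRPC_LB_POLICY_REF(policy, "pick");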
/** called by concrete implementations to initialize the base struct */
-void grpc_lb_policy_init(grpc_lb_policy *policy,
- const grpc_lb_policy_vtable *vtable,
- grpc_combiner *combiner);
+void grpc_lb_policy_init(grpc_lb_policy* policy,
+ const grpc_lb_policy_vtable* vtable,
+ grpc_combiner* combiner);
/** Finds an appropriate subchannel for a call, based on \a pick_args.
@@ -160,53 +160,53 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
Any IO should be done under the \a interested_parties \a grpc_pollset_set
in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- void **user_data, grpc_closure *on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ void** user_data, grpc_closure* on_complete);
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_closure *closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_closure* closure);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a
*target set to NULL. */
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- grpc_connected_subchannel **target,
- grpc_error *error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ grpc_connected_subchannel** target,
+ grpc_error* error);
/** Cancel all pending picks for which their \a initial_metadata_flags (as given
in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error);
+ grpc_error* error);
/** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy);
/* Call notify when the connectivity state of a channel changes from \a *state.
* Updates \a *state with the new state of the policy */
void grpc_lb_policy_notify_on_state_change_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_connectivity_state *state, grpc_closure *closure);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_connectivity_state* state, grpc_closure* closure);
grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- grpc_error **connectivity_error);
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ grpc_error** connectivity_error);
/** Update \a policy with \a lb_policy_args. */
-void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *policy,
- const grpc_lb_policy_args *lb_policy_args);
+void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* policy,
+ const grpc_lb_policy_args* lb_policy_args);
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 7ad322902b..d93a9c3710 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -25,31 +25,31 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
return GRPC_ERROR_NONE;
}
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
typedef struct {
// Stats object to update.
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
// State for intercepting send_initial_metadata.
grpc_closure on_complete_for_send;
- grpc_closure *original_on_complete_for_send;
+ grpc_closure* original_on_complete_for_send;
bool send_initial_metadata_succeeded;
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
- grpc_closure *original_recv_initial_metadata_ready;
+ grpc_closure* original_recv_initial_metadata_ready;
bool recv_initial_metadata_succeeded;
} call_data;
-static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -57,9 +57,9 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -67,25 +67,24 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_REF(error));
}
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
- (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
- .value);
+ (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;
}
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -97,9 +96,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
static void start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {
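
The hunk above ends just as the filter begins intercepting send_initial_metadata. The sketch below shows the swap that the on_complete_for_send() wrapper earlier in this file relies on: stash the batch's original on_complete, substitute the wrapper, and let the wrapper re-run the original after recording the result. The helper name intercept_send_on_complete is hypothetical and the fragment is simplified; the real filter intercepts recv_initial_metadata_ready the same way.

static void intercept_send_on_complete(call_data* calld,
                                       grpc_transport_stream_op_batch* batch) {
  calld->original_on_complete_for_send = batch->on_complete;
  GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld,
                    grpc_schedule_on_exec_ctx);
  batch->on_complete = &calld->on_complete_for_send;
}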
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index c6a0d69c3f..abf613a23b 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -32,4 +32,4 @@ extern const grpc_channel_filter grpc_client_load_reporting_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 1b19650b61..01b243bc3e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -130,17 +130,17 @@ grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
/* add lb_token of selected subchannel (address) to the call's initial
* metadata */
-static grpc_error *initial_metadata_add_lb_token(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
- grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
+static grpc_error* initial_metadata_add_lb_token(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
+ grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
GPR_ASSERT(lb_token_mdelem_storage != NULL);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
lb_token_mdelem_storage, lb_token);
}
-static void destroy_client_stats(void *arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
+static void destroy_client_stats(void* arg) {
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
}
typedef struct wrapped_rr_closure_arg {
@@ -149,42 +149,42 @@ typedef struct wrapped_rr_closure_arg {
/* the original closure. Usually a on_complete/notify cb for pick() and ping()
* calls against the internal RR instance, respectively. */
- grpc_closure *wrapped_closure;
+ grpc_closure* wrapped_closure;
/* the pick's initial metadata, kept in order to append the LB token for the
* pick */
- grpc_metadata_batch *initial_metadata;
+ grpc_metadata_batch* initial_metadata;
/* the picked target, used to determine which LB token to add to the pick's
* initial metadata */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* the context to be populated for the subchannel call */
- grpc_call_context_element *context;
+ grpc_call_context_element* context;
/* Stats for client-side load reporting. Note that this holds a
* reference, which must be either passed on via context or unreffed. */
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
/* the LB token associated with the pick */
grpc_mdelem lb_token;
/* storage for the lb token initial metadata mdelem */
- grpc_linked_mdelem *lb_token_mdelem_storage;
+ grpc_linked_mdelem* lb_token_mdelem_storage;
/* The RR instance related to the closure */
- grpc_lb_policy *rr_policy;
+ grpc_lb_policy* rr_policy;
/* heap memory to be freed upon closure execution. */
- void *free_when_done;
+ void* free_when_done;
} wrapped_rr_closure_arg;
/* The \a on_complete closure passed as part of the pick requires keeping a
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
+static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -202,7 +202,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_ERROR,
"No LB token for connected subchannel pick %p (from RR "
"instance %p).",
- (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
+ (void*)*wc_arg->target, (void*)wc_arg->rr_policy);
abort();
}
// Pass on client stats via context. Passes ownership of the reference.
@@ -213,7 +213,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
}
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
+ gpr_log(GPR_INFO, "Unreffing RR %p", (void*)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
@@ -230,25 +230,25 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
* order to correctly unref the RR policy instance upon completion of the pick.
* See \a wrapped_rr_closure for details. */
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
/* original pick()'s arguments */
grpc_lb_policy_pick_args pick_args;
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* args for wrapped_on_complete */
wrapped_rr_closure_arg wrapped_on_complete_arg;
} pending_pick;
-static void add_pending_pick(pending_pick **root,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context,
- grpc_closure *on_complete) {
- pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
+static void add_pending_pick(pending_pick** root,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context,
+ grpc_closure* on_complete) {
+ pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -267,14 +267,14 @@ static void add_pending_pick(pending_pick **root,
/* Same as the \a pending_pick struct but for ping operations */
typedef struct pending_ping {
- struct pending_ping *next;
+ struct pending_ping* next;
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
} pending_ping;
-static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
- pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
+static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
+ pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -294,9 +294,9 @@ typedef struct glb_lb_policy {
grpc_lb_policy base;
/** who the client is trying to communicate with */
- const char *server_name;
- grpc_client_channel_factory *cc_factory;
- grpc_channel_args *args;
+ const char* server_name;
+ grpc_client_channel_factory* cc_factory;
+ grpc_channel_args* args;
/** timeout in milliseconds for the LB call. 0 means no deadline. */
int lb_call_timeout_ms;
@@ -306,13 +306,13 @@ typedef struct glb_lb_policy {
int lb_fallback_timeout_ms;
/** for communicating with the LB server */
- grpc_channel *lb_channel;
+ grpc_channel* lb_channel;
/** response generator to inject address updates into \a lb_channel */
- grpc_fake_resolver_response_generator *response_generator;
+ grpc_fake_resolver_response_generator* response_generator;
  /** the RR policy to use for the backend servers returned by the LB server */
- grpc_lb_policy *rr_policy;
+ grpc_lb_policy* rr_policy;
bool started_picking;
@@ -324,7 +324,7 @@ typedef struct glb_lb_policy {
/** stores the deserialized response from the LB. May be NULL until one such
* response has arrived. */
- grpc_grpclb_serverlist *serverlist;
+ grpc_grpclb_serverlist* serverlist;
/** Index into serverlist for next pick.
* If the server at this index is a drop, we return a drop.
@@ -332,13 +332,13 @@ typedef struct glb_lb_policy {
size_t serverlist_index;
/** stores the backend addresses from the resolver */
- grpc_lb_addresses *fallback_backend_addresses;
+ grpc_lb_addresses* fallback_backend_addresses;
/** list of picks that are waiting on RR's policy connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
- pending_ping *pending_pings;
+ pending_ping* pending_pings;
bool shutting_down;
@@ -373,7 +373,7 @@ typedef struct glb_lb_policy {
/* LB fallback timer callback. */
grpc_closure lb_on_fallback;
- grpc_call *lb_call; /* streaming call to the LB server, */
+ grpc_call* lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
grpc_metadata_array
@@ -381,10 +381,10 @@ typedef struct glb_lb_policy {
/* what's being sent to the LB server. Note that its value may vary if the LB
* server indicates a redirect. */
- grpc_byte_buffer *lb_request_payload;
+ grpc_byte_buffer* lb_request_payload;
  /* response from the LB server, if any. Processed in lb_on_response_received() */
- grpc_byte_buffer *lb_response_payload;
+ grpc_byte_buffer* lb_response_payload;
/* call status code and details, set in lb_on_server_status_received() */
grpc_status_code lb_call_status;
@@ -403,7 +403,7 @@ typedef struct glb_lb_policy {
/* Stats for client-side load reporting. Should be unreffed and
* recreated whenever lb_call is replaced. */
- grpc_grpclb_client_stats *client_stats;
+ grpc_grpclb_client_stats* client_stats;
/* Interval and timer for next client load report. */
grpc_millis client_stats_report_interval;
grpc_timer client_load_report_timer;
@@ -413,20 +413,20 @@ typedef struct glb_lb_policy {
* completion of sending the load report. */
grpc_closure client_load_report_closure;
/* Client load report message payload. */
- grpc_byte_buffer *client_load_report_payload;
+ grpc_byte_buffer* client_load_report_payload;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
struct rr_connectivity_data {
grpc_closure on_change;
grpc_connectivity_state state;
- glb_lb_policy *glb_policy;
+ glb_lb_policy* glb_policy;
};
-static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
+static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
bool log) {
if (server->drop) return false;
- const grpc_grpclb_ip_address *ip = &server->ip_address;
+ const grpc_grpclb_ip_address* ip = &server->ip_address;
if (server->port >> 16 != 0) {
if (log) {
gpr_log(GPR_ERROR,
@@ -448,17 +448,17 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
}
/* vtable for LB tokens in grpc_lb_addresses. */
-static void *lb_token_copy(void *token) {
+static void* lb_token_copy(void* token) {
return token == NULL
? NULL
- : (void *)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
+ : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
}
-static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
+static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
if (token != NULL) {
GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
}
}
-static int lb_token_cmp(void *token1, void *token2) {
+static int lb_token_cmp(void* token1, void* token2) {
if (token1 > token2) return 1;
if (token1 < token2) return -1;
return 0;
@@ -466,23 +466,23 @@ static int lb_token_cmp(void *token1, void *token2) {
static const grpc_lb_user_data_vtable lb_token_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
-static void parse_server(const grpc_grpclb_server *server,
- grpc_resolved_address *addr) {
+static void parse_server(const grpc_grpclb_server* server,
+ grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server->drop) return;
const uint16_t netorder_port = htons((uint16_t)server->port);
  /* the addresses are given in binary format (an in(6)_addr struct) in
* server->ip_address.bytes. */
- const grpc_grpclb_ip_address *ip = &server->ip_address;
+ const grpc_grpclb_ip_address* ip = &server->ip_address;
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+ struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+ struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
@@ -490,15 +490,15 @@ static void parse_server(const grpc_grpclb_server *server,
}
/* Returns addresses extracted from \a serverlist. */
-static grpc_lb_addresses *process_serverlist_locked(
- grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
+static grpc_lb_addresses* process_serverlist_locked(
+ grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
size_t num_valid = 0;
/* first pass: count how many are valid in order to allocate the necessary
* memory in a single block */
for (size_t i = 0; i < serverlist->num_servers; ++i) {
if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
}
- grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_valid, &lb_token_vtable);
/* second pass: actually populate the addresses and LB tokens (aka user data
* to the outside world) to be read by the RR policy during its creation.
@@ -507,14 +507,14 @@ static grpc_lb_addresses *process_serverlist_locked(
   * incur an allocation due to the arbitrary number of servers */
size_t addr_idx = 0;
for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
- const grpc_grpclb_server *server = serverlist->servers[sl_idx];
+ const grpc_grpclb_server* server = serverlist->servers[sl_idx];
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
GPR_ASSERT(addr_idx < num_valid);
/* address processing */
grpc_resolved_address addr;
parse_server(server, &addr);
/* lb token processing */
- void *user_data;
+ void* user_data;
if (server->has_load_balance_token) {
const size_t lb_token_max_length =
GPR_ARRAY_SIZE(server->load_balance_token);
@@ -522,17 +522,17 @@ static grpc_lb_addresses *process_serverlist_locked(
strnlen(server->load_balance_token, lb_token_max_length);
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
server->load_balance_token, lb_token_length);
- user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
- lb_token_mdstr)
+ user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
+ lb_token_mdstr)
.payload;
} else {
- char *uri = grpc_sockaddr_to_uri(&addr);
+ char* uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
"Missing LB token for backend address '%s'. The empty token will "
"be used instead",
uri);
gpr_free(uri);
- user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
+ user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
}
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
@@ -545,8 +545,8 @@ static grpc_lb_addresses *process_serverlist_locked(
}
/* Returns the backend addresses extracted from the given addresses */
-static grpc_lb_addresses *extract_backend_addresses_locked(
- grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+static grpc_lb_addresses* extract_backend_addresses_locked(
+ grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
/* first pass: count the number of backend addresses */
size_t num_backends = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -555,24 +555,24 @@ static grpc_lb_addresses *extract_backend_addresses_locked(
}
}
/* second pass: actually populate the addresses and (empty) LB tokens */
- grpc_lb_addresses *backend_addresses =
+ grpc_lb_addresses* backend_addresses =
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
size_t num_copied = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) continue;
- const grpc_resolved_address *addr = &addresses->addresses[i].address;
+ const grpc_resolved_address* addr = &addresses->addresses[i].address;
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
addr->len, false /* is_balancer */,
NULL /* balancer_name */,
- (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+ (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
++num_copied;
}
return backend_addresses;
}
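
Both process_serverlist_locked above and extract_backend_addresses_locked use the same two-pass shape: count the qualifying entries first so a single allocation suffices, then populate. Stripped of the gRPC types, the pattern is just (hypothetical names):

/* Sketch only -- two-pass count-then-populate. */
#include <stdbool.h>
#include <stdlib.h>

typedef struct item { int value; } item;
static bool keep(const item* it) { return it->value >= 0; }

static item* filter(const item* in, size_t n_in, size_t* n_out) {
  size_t n = 0;
  for (size_t i = 0; i < n_in; ++i) {
    if (keep(&in[i])) ++n;          /* pass 1: count */
  }
  item* out = (item*)calloc(n, sizeof(*out));
  size_t j = 0;
  for (size_t i = 0; i < n_in; ++i) {
    if (!keep(&in[i])) continue;    /* pass 2: populate */
    out[j++] = in[i];
  }
  *n_out = n;
  return out;
}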
static void update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
+ grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&glb_policy->state_tracker);
@@ -620,7 +620,7 @@ static void update_lb_connectivity_status_locked(
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(
GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
- grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
+ grpc_connectivity_state_name(rr_state), (void*)glb_policy->rr_policy);
}
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
rr_state_error,
@@ -633,13 +633,13 @@ static void update_lb_connectivity_status_locked(
* If \a force_async is true, then we will manually schedule the
* completion callback even if the pick is available immediately. */
static bool pick_from_internal_rr_locked(
- grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- const grpc_lb_policy_pick_args *pick_args, bool force_async,
- grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+ grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ const grpc_lb_policy_pick_args* pick_args, bool force_async,
+ grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
// Check for drops if we are not using fallback backend addresses.
if (glb_policy->serverlist != NULL) {
// Look at the index into the serverlist to see if we should drop this call.
- grpc_grpclb_server *server =
+ grpc_grpclb_server* server =
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
glb_policy->serverlist_index = 0; // Wrap-around.
@@ -672,7 +672,7 @@ static bool pick_from_internal_rr_locked(
// Pick via the RR policy.
const bool pick_done = grpc_lb_policy_pick_locked(
exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
- (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
if (pick_done) {
/* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
@@ -703,9 +703,9 @@ static bool pick_from_internal_rr_locked(
return pick_done;
}
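
Two details of pick_from_internal_rr_locked are easy to miss in the hunks above: the serverlist index advances with a wrap-around on every pick, so drop decisions are spread round-robin over the balancer's list, and force_async schedules the completion callback even when the pick is available immediately, which is what the pending-pick replay in create_rr_locked relies on. The wrap-around is equivalent to this fragment (glb stands in for the glb_lb_policy* seen above):

/* Sketch only -- equivalent formulation of the wrap-around index. */
const size_t idx = glb->serverlist_index;
glb->serverlist_index = (idx + 1) % glb->serverlist->num_servers;
const bool is_drop = glb->serverlist->servers[idx]->drop;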
-static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
- grpc_lb_addresses *addresses;
+static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
+ grpc_lb_addresses* addresses;
if (glb_policy->serverlist != NULL) {
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
@@ -718,12 +718,12 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
}
GPR_ASSERT(addresses != NULL);
- grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
+ grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
- static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+ static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
args->args = grpc_channel_args_copy_and_add_and_remove(
glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
@@ -732,19 +732,19 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
return args;
}
-static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_args *args) {
+static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_args* args) {
grpc_channel_args_destroy(exec_ctx, args->args);
gpr_free(args);
}
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
-static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
- grpc_lb_policy_args *args) {
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error);
+static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(glb_policy->rr_policy == NULL);
- grpc_lb_policy *new_rr_policy =
+ grpc_lb_policy* new_rr_policy =
grpc_lb_policy_create(exec_ctx, "round_robin", args);
if (new_rr_policy == NULL) {
gpr_log(GPR_ERROR,
@@ -753,11 +753,11 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
"to be used. Future updates from the LB will attempt to create new "
"instances.",
(unsigned long)glb_policy->serverlist->num_servers,
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
return;
}
glb_policy->rr_policy = new_rr_policy;
- grpc_error *rr_state_error = NULL;
+ grpc_error* rr_state_error = NULL;
const grpc_connectivity_state rr_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
&rr_state_error);
@@ -773,8 +773,8 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
/* Allocate the data for the tracking of the new RR policy's connectivity.
* It'll be deallocated in glb_rr_connectivity_changed() */
- rr_connectivity_data *rr_connectivity =
- (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
+ rr_connectivity_data* rr_connectivity =
+ (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
glb_rr_connectivity_changed_locked, rr_connectivity,
grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -789,7 +789,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
/* Update picks and pings in wait */
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = glb_policy->pending_picks)) {
glb_policy->pending_picks = pp->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
@@ -798,14 +798,14 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
true /* force_async */, pp->target,
&pp->wrapped_on_complete_arg);
}
- pending_ping *pping;
+ pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
@@ -820,31 +820,31 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
}
/* glb_policy->rr_policy may be NULL (initial handover) */
-static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
if (glb_policy->shutting_down) return;
- grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
+ grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
GPR_ASSERT(args != NULL);
if (glb_policy->rr_policy != NULL) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
} else {
create_rr_locked(exec_ctx, glb_policy, args);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
- (void *)glb_policy->rr_policy);
+ (void*)glb_policy->rr_policy);
}
}
lb_policy_args_destroy(exec_ctx, args);
}
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
- glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
+ glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"glb_rr_connectivity_cb");
@@ -872,22 +872,22 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
&rr_connectivity->on_change);
}
-static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
- void *balancer_name) {
+static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
+ void* balancer_name) {
gpr_free(balancer_name);
}
static grpc_slice_hash_table_entry targets_info_entry_create(
- const char *address, const char *balancer_name) {
+ const char* address, const char* balancer_name) {
grpc_slice_hash_table_entry entry;
entry.key = grpc_slice_from_copied_string(address);
entry.value = gpr_strdup(balancer_name);
return entry;
}
-static int balancer_name_cmp_fn(void *a, void *b) {
- const char *a_str = (const char *)a;
- const char *b_str = (const char *)b;
+static int balancer_name_cmp_fn(void* a, void* b) {
+ const char* a_str = (const char*)a;
+ const char* b_str = (const char*)b;
return strcmp(a_str, b_str);
}
@@ -899,10 +899,10 @@ static int balancer_name_cmp_fn(void *a, void *b) {
* - \a response_generator: in order to propagate updates from the resolver
* above the grpclb policy.
* - \a args: other args inherited from the grpclb policy. */
-static grpc_channel_args *build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+static grpc_channel_args* build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
@@ -911,11 +911,11 @@ static grpc_channel_args *build_lb_channel_args(
* It's the resolver's responsibility to make sure this policy is only
* instantiated and used in that case. Otherwise, something has gone wrong. */
GPR_ASSERT(num_grpclb_addrs > 0);
- grpc_lb_addresses *lb_addresses =
+ grpc_lb_addresses* lb_addresses =
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
- grpc_slice_hash_table_entry *targets_info_entries =
- (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
- num_grpclb_addrs);
+ grpc_slice_hash_table_entry* targets_info_entries =
+ (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
+ num_grpclb_addrs);
size_t lb_addresses_idx = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -924,7 +924,7 @@ static grpc_channel_args *build_lb_channel_args(
gpr_log(GPR_ERROR,
"This LB policy doesn't support user data. It will be ignored");
}
- char *addr_str;
+ char* addr_str;
GPR_ASSERT(grpc_sockaddr_to_string(
&addr_str, &addresses->addresses[i].address, true) > 0);
targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
@@ -937,19 +937,19 @@ static grpc_channel_args *build_lb_channel_args(
addresses->addresses[i].balancer_name, NULL /* user data */);
}
GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
- grpc_slice_hash_table *targets_info =
+ grpc_slice_hash_table* targets_info =
grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
destroy_balancer_name, balancer_name_cmp_fn);
gpr_free(targets_info_entries);
- grpc_channel_args *lb_channel_args =
+ grpc_channel_args* lb_channel_args =
grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
response_generator, args);
grpc_arg lb_channel_addresses_arg =
grpc_lb_addresses_create_channel_arg(lb_addresses);
- grpc_channel_args *result = grpc_channel_args_copy_and_add(
+ grpc_channel_args* result = grpc_channel_args_copy_and_add(
lb_channel_args, &lb_channel_addresses_arg, 1);
grpc_slice_hash_table_unref(exec_ctx, targets_info);
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@@ -957,11 +957,11 @@ static grpc_channel_args *build_lb_channel_args(
return result;
}
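
build_lb_channel_args produces two artifacts for the LB channel: a balancer-only grpc_lb_addresses copy and a targets_info hash table mapping each balancer address string to its balancer name; in the secure build that table is attached to the LB channel as a channel arg (see grpc_lb_targets_info_create_channel_arg in grpclb_channel_secure.cc below). Conceptually, with made-up addresses and names:

/* Sketch only -- example values are invented. */
grpc_slice_hash_table_entry entries[2];
entries[0] = targets_info_entry_create("10.0.0.1:3000", "balancer-a.example.com");
entries[1] = targets_info_entry_create("10.0.0.2:3000", "balancer-b.example.com");
/* grpc_slice_hash_table_create(2, entries, destroy_balancer_name,
 * balancer_name_cmp_fn) takes ownership of the entries; the temporary
 * array itself is freed, as in the code above. */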
-static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
GPR_ASSERT(glb_policy->pending_picks == NULL);
GPR_ASSERT(glb_policy->pending_pings == NULL);
- gpr_free((void *)glb_policy->server_name);
+ gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
if (glb_policy->client_stats != NULL) {
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -978,14 +978,14 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(glb_policy);
}
-static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
glb_policy->shutting_down = true;
  /* We need a copy of the lb_call pointer because we can't cancel the call
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
- grpc_call *lb_call = glb_policy->lb_call;
+ grpc_call* lb_call = glb_policy->lb_call;
/* glb_policy->lb_call and this local lb_call must be consistent at this point
* because glb_policy->lb_call is only assigned in lb_call_init_locked as part
@@ -1004,9 +1004,9 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_policy->fallback_timer_active = false;
}
- pending_pick *pp = glb_policy->pending_picks;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
- pending_ping *pping = glb_policy->pending_pings;
+ pending_ping* pping = glb_policy->pending_pings;
glb_policy->pending_pings = NULL;
if (glb_policy->rr_policy != NULL) {
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
@@ -1024,7 +1024,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
*pp->target = NULL;
GRPC_CLOSURE_SCHED(
exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1034,7 +1034,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
while (pping != NULL) {
- pending_ping *next = pping->next;
+ pending_ping* next = pping->next;
GRPC_CLOSURE_SCHED(
exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
@@ -1053,14 +1053,14 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
// - Otherwise, without an RR instance, picks stay pending at this policy's
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- pending_pick *pp = glb_policy->pending_picks;
+static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1089,16 +1089,16 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
// - Otherwise, without an RR instance, picks stay pending at this policy's
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
// we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
+static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
- pending_pick *pp = glb_policy->pending_picks;
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+ pending_pick* pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
@@ -1118,12 +1118,12 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy);
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy);
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
/* start a timer to fall back */
if (glb_policy->lb_fallback_timeout_ms > 0 &&
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
@@ -1143,18 +1143,18 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
query_for_backends_locked(exec_ctx, glb_policy);
}
-static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
}
-static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
+static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
@@ -1163,7 +1163,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
"won't work without it. Failing"));
return 0;
}
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
bool pick_done = false;
if (glb_policy->rr_policy != NULL) {
const grpc_connectivity_state rr_connectivity_state =
@@ -1178,7 +1178,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"grpclb %p NOT picking from from RR %p: RR conn state=%s",
- (void *)glb_policy, (void *)glb_policy->rr_policy,
+ (void*)glb_policy, (void*)glb_policy->rr_policy,
grpc_connectivity_state_name(rr_connectivity_state));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
@@ -1187,11 +1187,11 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
} else { // RR not in shutdown
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
- (void *)glb_policy, (void *)glb_policy->rr_policy);
+ (void*)glb_policy, (void*)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
- wrapped_rr_closure_arg *wc_arg =
- (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+ wrapped_rr_closure_arg* wc_arg =
+ (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
@@ -1213,7 +1213,7 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
"picks",
- (void *)(glb_policy));
+ (void*)(glb_policy));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
@@ -1226,16 +1226,16 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
static grpc_connectivity_state glb_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_error **connectivity_error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_error** connectivity_error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
return grpc_connectivity_state_get(&glb_policy->state_tracker,
connectivity_error);
}
-static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
} else {
@@ -1246,23 +1246,23 @@ static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
-static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &glb_policy->state_tracker, current, notify);
}
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
- (void *)glb_policy);
+ (void*)glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
@@ -1270,8 +1270,8 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
}
-static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
if (glb_policy->retry_timer_active) {
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
@@ -1285,7 +1285,7 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
.next_attempt_start_time;
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
- (void *)glb_policy);
+ (void*)glb_policy);
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
if (timeout > 0) {
gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
@@ -1306,11 +1306,11 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
"lb_on_server_status_received_locked");
}
-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
const grpc_millis next_client_load_report_time =
grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
@@ -1321,9 +1321,9 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
&glb_policy->client_load_report_closure);
}
-static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1335,9 +1335,9 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
schedule_next_client_load_report(exec_ctx, glb_policy);
}
-static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)
+static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)
request->client_stats.calls_finished_with_drop.arg;
return request->client_stats.num_calls_started == 0 &&
request->client_stats.num_calls_finished == 0 &&
@@ -1347,9 +1347,9 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
(drop_entries == NULL || drop_entries->num_entries == 0);
}
-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -1361,7 +1361,7 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
// Construct message payload.
GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
- grpc_grpclb_request *request =
+ grpc_grpclb_request* request =
grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
// Skip client load report if the counters were all zero in the last
// report and they are still zero in this one.
@@ -1397,12 +1397,12 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
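
Client load reporting is driven by a repeating timer: each tick builds a grpc_grpclb_request from the accumulated client stats, skips the send when the counters (including the dropped-call list) were zero both last time and this time, and otherwise serializes the report onto the open LB call. A hedged sketch of that decision (send_report is a hypothetical stand-in for the op batch the real code issues):

/* Sketch only -- fragment; send_report() is hypothetical. */
bool all_zero = load_report_counters_are_zero(request);
if (all_zero && glb_policy->last_client_load_report_counters_were_zero) {
  /* nothing to report: free the request, skip this round and re-arm the timer */
  schedule_next_client_load_report(exec_ctx, glb_policy);
} else {
  send_report(glb_policy, request); /* SEND_MESSAGE on the streaming LB call */
}
glb_policy->last_client_load_report_counters_were_zero = all_zero;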
-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error);
-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error);
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
GPR_ASSERT(glb_policy->lb_call == NULL);
@@ -1431,7 +1431,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
- grpc_grpclb_request *request =
+ grpc_grpclb_request* request =
grpc_grpclb_request_create(glb_policy->server_name);
grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
glb_policy->lb_request_payload =
@@ -1457,8 +1457,8 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
glb_policy->last_client_load_report_counters_were_zero = false;
}
-static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_unref(glb_policy->lb_call);
glb_policy->lb_call = NULL;
@@ -1477,8 +1477,8 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
/*
* Auxiliary functions and LB client callbacks.
*/
-static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy) {
+static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
if (glb_policy->shutting_down) return;
@@ -1487,8 +1487,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
- (void *)glb_policy, (void *)glb_policy->lb_channel,
- (void *)glb_policy->lb_call);
+ (void*)glb_policy, (void*)glb_policy->lb_channel,
+ (void*)glb_policy->lb_call);
}
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1496,7 +1496,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
@@ -1552,12 +1552,12 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
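
The call to the balancer is driven with the standard grpc_op batch pattern: zero an array of ops, fill them in sequence through a cursor pointer, and start the whole batch on the call. Only the first op survives in the hunk above; the shape of the rest is roughly as follows (a fragment, abbreviated):

/* Sketch only -- the real function also queues the receive ops. */
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA; /* open the stream to the balancer */
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE; /* the serialized grpc_grpclb_request */
op->data.send_message.send_message = glb_policy->lb_request_payload;
op++;
/* ...then the batch of (op - ops) operations is started on
 * glb_policy->lb_call, as in the code above. */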
-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
- grpc_op *op = ops;
+ grpc_op* op = ops;
if (glb_policy->lb_response_payload != NULL) {
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@@ -1568,7 +1568,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_byte_buffer_reader_destroy(&bbr);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
- grpc_grpclb_initial_response *response = NULL;
+ grpc_grpclb_initial_response* response = NULL;
if (!glb_policy->seen_initial_response &&
(response = grpc_grpclb_initial_response_parse(response_slice)) !=
NULL) {
@@ -1596,7 +1596,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_grpclb_initial_response_destroy(response);
glb_policy->seen_initial_response = true;
} else {
- grpc_grpclb_serverlist *serverlist =
+ grpc_grpclb_serverlist* serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
@@ -1606,7 +1606,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
parse_server(serverlist->servers[i], &addr);
- char *ipport;
+ char* ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
gpr_free(ipport);
@@ -1679,9 +1679,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
glb_policy->fallback_timer_active = false;
/* If we receive a serverlist after the timer fires but before this callback
* actually runs, don't fall back. */
@@ -1690,7 +1690,7 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"Falling back to use backends from resolver (grpclb %p)",
- (void *)glb_policy);
+ (void*)glb_policy);
}
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
rr_handover_locked(exec_ctx, glb_policy);
@@ -1700,18 +1700,18 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
"grpclb_fallback_timer");
}
-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
- char *status_details =
+ char* status_details =
grpc_slice_to_c_string(glb_policy->lb_call_status_details);
gpr_log(GPR_INFO,
"Status from LB server received. Status = %d, Details = '%s', "
"(call: %p), error %p",
glb_policy->lb_call_status, status_details,
- (void *)glb_policy->lb_call, (void *)error);
+ (void*)glb_policy->lb_call, (void*)error);
gpr_free(status_details);
}
/* We need to perform cleanups no matter what. */
@@ -1724,9 +1724,9 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
- glb_lb_policy *glb_policy,
- const grpc_lb_addresses *addresses) {
+static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
+ glb_lb_policy* glb_policy,
+ const grpc_lb_addresses* addresses) {
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
glb_policy->fallback_backend_addresses =
@@ -1737,10 +1737,10 @@ static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
- const grpc_arg *arg =
+static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (glb_policy->lb_channel == NULL) {
@@ -1755,12 +1755,12 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for grpclb %p update, "
"ignoring.",
- (void *)glb_policy);
+ (void*)glb_policy);
}
return;
}
- const grpc_lb_addresses *addresses =
- (const grpc_lb_addresses *)arg->value.pointer.p;
+ const grpc_lb_addresses* addresses =
+ (const grpc_lb_addresses*)arg->value.pointer.p;
// If a non-empty serverlist hasn't been received from the balancer,
// propagate the update to fallback_backend_addresses.
if (glb_policy->serverlist == NULL) {
@@ -1769,7 +1769,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
GPR_ASSERT(glb_policy->lb_channel != NULL);
// Propagate updates to the LB channel (pick_first) through the fake
// resolver.
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ grpc_channel_args* lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
grpc_fake_resolver_response_generator_set_response(
exec_ctx, glb_policy->response_generator, lb_channel_args);
@@ -1779,7 +1779,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
if (!glb_policy->watching_lb_channel) {
glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
glb_policy->lb_channel, true /* try to connect */);
- grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+ grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
glb_policy->watching_lb_channel = true;
@@ -1796,10 +1796,10 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1808,7 +1808,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
/* resub. */
- grpc_channel_element *client_channel_elem =
+ grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(glb_policy->lb_channel));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
@@ -1860,29 +1860,29 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
glb_notify_on_state_change_locked,
glb_update_locked};
-static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
/* Count the number of gRPC-LB addresses. There must be at least one. */
- const grpc_arg *arg =
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
return NULL;
}
- grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
- glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+ glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
GPR_ASSERT(arg != NULL);
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
- grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+ grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
GPR_ASSERT(uri->path[0] != '\0');
glb_policy->server_name =
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1906,8 +1906,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
// since we use this to trigger the client_load_reporting filter.
grpc_arg new_arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
- static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+ (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
+ static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
@@ -1919,9 +1919,9 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
/* Create a client channel over them to communicate with a LB service */
glb_policy->response_generator =
grpc_fake_resolver_response_generator_create();
- grpc_channel_args *lb_channel_args = build_lb_channel_args(
+ grpc_channel_args* lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
- char *uri_str;
+ char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
@@ -1932,7 +1932,7 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
gpr_free(uri_str);
if (glb_policy->lb_channel == NULL) {
- gpr_free((void *)glb_policy->server_name);
+ gpr_free((void*)glb_policy->server_name);
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
gpr_free(glb_policy);
return NULL;
@@ -1947,16 +1947,16 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return &glb_policy->base;
}
-static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_ref(grpc_lb_policy_factory* factory) {}
-static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_unref(grpc_lb_policy_factory* factory) {}
static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};
static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};
-grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
+grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
return &glb_lb_policy_factory;
}
@@ -1964,15 +1964,15 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
// Only add client_load_reporting filter if the grpclb LB policy is used.
static bool maybe_add_client_load_reporting_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_arg *channel_arg =
+ const grpc_arg* channel_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
strcmp(channel_arg->value.string, "grpclb") == 0) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
return true;
}
@@ -1986,7 +1986,7 @@ extern "C" void grpc_lb_policy_grpclb_init() {
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
- (void *)&grpc_client_load_reporting_filter);
+ (void*)&grpc_client_load_reporting_filter);
}
extern "C" void grpc_lb_policy_grpclb_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
index 15c8a680b7..b6135a4768 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@@ -28,7 +28,7 @@ extern "C" {
/** Returns a load balancing factory for the glb policy, which tries to connect
* to a load balancing server to decide the next successfully connected
* subchannel to pick. */
-grpc_lb_policy_factory *grpc_glb_lb_factory_create();
+grpc_lb_policy_factory* grpc_glb_lb_factory_create();
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index f2967182e2..aacaec197d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
@@ -25,20 +25,20 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/support/string.h"
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args) {
- grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args) {
+ grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, args);
return lb_channel;
}
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_fake_resolver_response_generator_arg(response_generator)};
/* We remove:
@@ -62,7 +62,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
- static const char *keys_to_remove[] = {
+ static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
return grpc_channel_args_copy_and_add_and_remove(
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index e8599d1f51..39cbf53428 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -34,19 +34,19 @@ extern "C" {
* from resolving the LB service's name (eg, ipv4:10.0.0.1:1234,10.2.3.4:9876).
* \a client_channel_factory will be used for the creation of the LB channel,
* alongside the channel args passed in \a args. */
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args);
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args);
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args);
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 2681b2a079..2dcf29fe0e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -28,19 +28,19 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
-grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
- grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses,
- grpc_client_channel_factory *client_channel_factory,
- grpc_channel_args *args) {
- grpc_channel_args *new_args = args;
- grpc_channel_credentials *channel_credentials =
+grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
+ grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+ grpc_client_channel_factory* client_channel_factory,
+ grpc_channel_args* args) {
+ grpc_channel_args* new_args = args;
+ grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
if (channel_credentials != NULL) {
/* Substitute the channel credentials with a version without call
* credentials: the load balancer is not necessarily trusted to handle
* bearer token credentials */
- static const char *keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
- grpc_channel_credentials *creds_sans_call_creds =
+ static const char* keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS};
+ grpc_channel_credentials* creds_sans_call_creds =
grpc_channel_credentials_duplicate_without_call_credentials(
channel_credentials);
GPR_ASSERT(creds_sans_call_creds != NULL);
@@ -52,7 +52,7 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
GPR_ARRAY_SIZE(args_to_add));
grpc_channel_credentials_unref(exec_ctx, creds_sans_call_creds);
}
- grpc_channel *lb_channel = grpc_client_channel_factory_create_channel(
+ grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
exec_ctx, client_channel_factory, lb_service_target_addresses,
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
if (channel_credentials != NULL) {
@@ -61,10 +61,10 @@ grpc_channel *grpc_lb_policy_grpclb_create_lb_channel(
return lb_channel;
}
-grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
- grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info,
- grpc_fake_resolver_response_generator *response_generator,
- const grpc_channel_args *args) {
+grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
+ grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+ grpc_fake_resolver_response_generator* response_generator,
+ const grpc_channel_args* args) {
const grpc_arg to_add[] = {
grpc_lb_targets_info_create_channel_arg(targets_info),
grpc_fake_resolver_response_generator_arg(response_generator)};
@@ -89,7 +89,7 @@ grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args(
*
* - The fake resolver generator, because we are replacing it with the one
* from the grpclb policy, used to propagate updates to the LB channel. */
- static const char *keys_to_remove[] = {
+ static const char* keys_to_remove[] = {
GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI,
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR};
/* Add the targets info table to be used for secure naming */
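The secure variant above swaps the channel credentials for a copy without call credentials before creating the LB channel, so per-call bearer tokens never reach the balancer. Below is a minimal standalone sketch of that idea; fake_creds and dup_without_call_creds are hypothetical stand-ins, not grpc_channel_credentials.

/* Minimal sketch, assuming a made-up credentials struct. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char* transport_creds;   /* e.g. TLS config; safe to reuse for the LB channel */
  char* call_bearer_token; /* per-call token; must NOT go to the balancer */
} fake_creds;

static fake_creds* dup_without_call_creds(const fake_creds* src) {
  fake_creds* copy = calloc(1, sizeof(*copy));
  copy->transport_creds = malloc(strlen(src->transport_creds) + 1);
  strcpy(copy->transport_creds, src->transport_creds);
  copy->call_bearer_token = NULL; /* deliberately dropped */
  return copy;
}

int main(void) {
  fake_creds orig = {"tls:roots.pem", "Bearer secret-token"};
  fake_creds* lb = dup_without_call_creds(&orig);
  printf("transport=%s call=%s\n", lb->transport_creds,
         lb->call_bearer_token ? lb->call_bearer_token : "(none)");
  free(lb->transport_creds);
  free(lb);
  return 0;
}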
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index b38c076f38..ce88cf9ee4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -70,4 +70,4 @@ void grpc_grpclb_dropped_call_counts_destroy(
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 4d5fb2081c..87d7336b0c 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -23,9 +23,9 @@
#include <grpc/support/alloc.h>
/* invoked once for every Server in ServerList */
-static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
+static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field,
+ void** arg) {
+ grpc_grpclb_serverlist* sl = (grpc_grpclb_serverlist*)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -40,16 +40,16 @@ typedef struct decode_serverlist_arg {
* which index of the serverlist are we currently decoding */
size_t decoding_idx;
/* The decoded serverlist */
- grpc_grpclb_serverlist *serverlist;
+ grpc_grpclb_serverlist* serverlist;
} decode_serverlist_arg;
/* invoked once for every Server in ServerList */
-static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
- void **arg) {
- decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
+static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field,
+ void** arg) {
+ decode_serverlist_arg* dec_arg = (decode_serverlist_arg*)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
- grpc_grpclb_server *server =
- (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
+ grpc_grpclb_server* server =
+ (grpc_grpclb_server*)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,9 +59,9 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
return true;
}
-grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
- grpc_grpclb_request *req =
- (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
+grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) {
+ grpc_grpclb_request* req =
+ (grpc_grpclb_request*)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -71,24 +71,24 @@ grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
}
static void populate_timestamp(gpr_timespec timestamp,
- struct _grpc_lb_v1_Timestamp *timestamp_pb) {
+ struct _grpc_lb_v1_Timestamp* timestamp_pb) {
timestamp_pb->has_seconds = true;
timestamp_pb->seconds = timestamp.tv_sec;
timestamp_pb->has_nanos = true;
timestamp_pb->nanos = timestamp.tv_nsec;
}
-static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
- void *const *arg) {
- char *str = (char *)*arg;
+static bool encode_string(pb_ostream_t* stream, const pb_field_t* field,
+ void* const* arg) {
+ char* str = (char*)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
- return pb_encode_string(stream, (uint8_t *)str, strlen(str));
+ return pb_encode_string(stream, (uint8_t*)str, strlen(str));
}
-static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
- void *const *arg) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)*arg;
+static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field,
+ void* const* arg) {
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -105,10 +105,10 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
return true;
}
-grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
- grpc_grpclb_client_stats *client_stats) {
- grpc_grpclb_request *req =
- (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
+grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
+ grpc_grpclb_client_stats* client_stats) {
+ grpc_grpclb_request* req =
+ (grpc_grpclb_request*)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -123,12 +123,12 @@ grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
&req->client_stats.num_calls_finished,
&req->client_stats.num_calls_finished_with_client_failed_to_send,
&req->client_stats.num_calls_finished_known_received,
- (grpc_grpclb_dropped_call_counts **)&req->client_stats
+ (grpc_grpclb_dropped_call_counts**)&req->client_stats
.calls_finished_with_drop.arg);
return req;
}
-grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) {
size_t encoded_length;
pb_ostream_t sizestream;
pb_ostream_t outputstream;
@@ -145,10 +145,10 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) {
return slice;
}
-void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
+void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
if (request->has_client_stats) {
- grpc_grpclb_dropped_call_counts *drop_entries =
- (grpc_grpclb_dropped_call_counts *)
+ grpc_grpclb_dropped_call_counts* drop_entries =
+ (grpc_grpclb_dropped_call_counts*)
request->client_stats.calls_finished_with_drop.arg;
grpc_grpclb_dropped_call_counts_destroy(drop_entries);
}
@@ -156,7 +156,7 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request *request) {
}
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
-grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
+grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
@@ -170,8 +170,8 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
if (!res.has_initial_response) return NULL;
- grpc_grpclb_initial_response *initial_res =
- (grpc_grpclb_initial_response *)gpr_malloc(
+ grpc_grpclb_initial_response* initial_res =
+ (grpc_grpclb_initial_response*)gpr_malloc(
sizeof(grpc_grpclb_initial_response));
memcpy(initial_res, &res.initial_response,
sizeof(grpc_grpclb_initial_response));
@@ -179,14 +179,14 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
return initial_res;
}
-grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
- grpc_grpclb_serverlist *sl =
- (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+ grpc_grpclb_serverlist* sl =
+ (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -200,8 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
- sl->servers = (grpc_grpclb_server **)gpr_zalloc(
- sizeof(grpc_grpclb_server *) * sl->num_servers);
+ sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
+ sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -221,7 +221,7 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
return sl;
}
-void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) {
if (serverlist == NULL) {
return;
}
@@ -232,25 +232,25 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
gpr_free(serverlist);
}
-grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
- const grpc_grpclb_serverlist *sl) {
- grpc_grpclb_serverlist *copy =
- (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
+grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
+ const grpc_grpclb_serverlist* sl) {
+ grpc_grpclb_serverlist* copy =
+ (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
- copy->servers = (grpc_grpclb_server **)gpr_malloc(
- sizeof(grpc_grpclb_server *) * sl->num_servers);
+ copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
+ sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] =
- (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
+ (grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;
}
-bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
- const grpc_grpclb_serverlist *rhs) {
+bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
+ const grpc_grpclb_serverlist* rhs) {
if (lhs == NULL || rhs == NULL) {
return false;
}
@@ -269,13 +269,13 @@ bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
return true;
}
-bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
- const grpc_grpclb_server *rhs) {
+bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
+ const grpc_grpclb_server* rhs) {
return memcmp(lhs, rhs, sizeof(grpc_grpclb_server)) == 0;
}
-int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
- const grpc_grpclb_duration *rhs) {
+int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
+ const grpc_grpclb_duration* rhs) {
GPR_ASSERT(lhs && rhs);
if (lhs->has_seconds && rhs->has_seconds) {
if (lhs->seconds < rhs->seconds) return -1;
@@ -299,13 +299,13 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
return 0;
}
-grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb) {
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) {
return (grpc_millis)(
(duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC +
(duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS);
}
void grpc_grpclb_initial_response_destroy(
- grpc_grpclb_initial_response *response) {
+ grpc_grpclb_initial_response* response) {
gpr_free(response);
}
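grpc_grpclb_response_parse_serverlist above walks the nanopb stream twice: count_serverlist only tallies servers so the array can be sized exactly, then decode_serverlist fills it. The sketch below shows the same two-pass pattern on a made-up comma-separated record format; it does not use nanopb.

/* Minimal sketch of the count-then-populate decode pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t count_records(const char* buf) {
  size_t n = (*buf != '\0') ? 1 : 0;
  for (const char* p = buf; *p; ++p)
    if (*p == ',') ++n;
  return n;
}

int main(void) {
  const char* encoded = "10.0.0.1:1234,10.2.3.4:9876,10.9.9.9:443";
  /* First pass: count number of servers. */
  size_t num = count_records(encoded);
  /* Second pass: allocate exactly num slots, then populate them. */
  char** servers = calloc(num, sizeof(*servers));
  char* work = malloc(strlen(encoded) + 1);
  strcpy(work, encoded);
  size_t idx = 0;
  for (char* tok = strtok(work, ","); tok != NULL; tok = strtok(NULL, ","))
    servers[idx++] = tok;
  for (size_t i = 0; i < num; ++i) printf("server[%zu] = %s\n", i, servers[i]);
  free(servers);
  free(work);
  return 0;
}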
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index 56b9c096d0..138012c63a 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -37,59 +37,59 @@ typedef grpc_lb_v1_InitialLoadBalanceResponse grpc_grpclb_initial_response;
typedef grpc_lb_v1_Server grpc_grpclb_server;
typedef grpc_lb_v1_Duration grpc_grpclb_duration;
typedef struct {
- grpc_grpclb_server **servers;
+ grpc_grpclb_server** servers;
size_t num_servers;
grpc_grpclb_duration expiration_interval;
} grpc_grpclb_serverlist;
/** Create a request for a gRPC LB service under \a lb_service_name */
-grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name);
-grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
- grpc_grpclb_client_stats *client_stats);
+grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name);
+grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked(
+ grpc_grpclb_client_stats* client_stats);
/** Protocol Buffers v3-encode \a request */
-grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request);
+grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request);
/** Destroy \a request */
-void grpc_grpclb_request_destroy(grpc_grpclb_request *request);
+void grpc_grpclb_request_destroy(grpc_grpclb_request* request);
/** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
-grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse(
+grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
grpc_slice encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
-grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
+grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
grpc_slice encoded_grpc_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */
-grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
- const grpc_grpclb_serverlist *sl);
+grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy(
+ const grpc_grpclb_serverlist* sl);
-bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs,
- const grpc_grpclb_serverlist *rhs);
+bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs,
+ const grpc_grpclb_serverlist* rhs);
-bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs,
- const grpc_grpclb_server *rhs);
+bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs,
+ const grpc_grpclb_server* rhs);
/** Destroy \a serverlist */
-void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist);
+void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist);
/** Compare \a lhs against \a rhs and return 0 if \a lhs and \a rhs are equal,
* < 0 if \a lhs represents a duration shorter than \a rhs and > 0 otherwise */
-int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs,
- const grpc_grpclb_duration *rhs);
+int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs,
+ const grpc_grpclb_duration* rhs);
-grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration *duration_pb);
+grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb);
/** Destroy \a initial_response */
void grpc_grpclb_initial_response_destroy(
- grpc_grpclb_initial_response *response);
+ grpc_grpclb_initial_response* response);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index f0c66c68e1..125a4186aa 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -33,33 +33,33 @@ grpc_tracer_flag grpc_lb_pick_first_trace =
GRPC_TRACER_INITIALIZER(false, "pick_first");
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
uint32_t initial_metadata_flags;
- grpc_connected_subchannel **target;
- grpc_closure *on_complete;
+ grpc_connected_subchannel** target;
+ grpc_closure* on_complete;
} pending_pick;
typedef struct {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** latest pending subchannel list */
- grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list;
/** selected subchannel in \a subchannel_list */
- grpc_lb_subchannel_data *selected;
+ grpc_lb_subchannel_data* selected;
/** have we started picking? */
bool started_picking;
/** are we shut down? */
bool shutdown;
/** list of picks that are waiting on connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
-static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
GPR_ASSERT(p->pending_picks == NULL);
@@ -67,17 +67,17 @@ static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
grpc_subchannel_index_unref();
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p);
+ gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void*)p);
}
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
- grpc_error *error) {
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
+ grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
}
p->shutdown = true;
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -100,19 +100,19 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
-static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- shutdown_locked(exec_ctx, (pick_first_lb_policy *)pol,
+static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ shutdown_locked(exec_ctx, (pick_first_lb_policy*)pol,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
}
-static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -128,15 +128,15 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+ grpc_error* error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -152,8 +152,8 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ pick_first_lb_policy* p) {
p->started_picking = true;
if (p->subchannel_list != NULL && p->subchannel_list->num_subchannels > 0) {
p->subchannel_list->checking_subchannel = 0;
@@ -164,19 +164,19 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
-static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
// If we have a selected subchannel already, return synchronously.
if (p->selected != NULL) {
*target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected->connected_subchannel,
@@ -187,7 +187,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -196,10 +196,10 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
- pick_first_lb_policy *p) {
+static void destroy_unselected_subchannels_locked(grpc_exec_ctx* exec_ctx,
+ pick_first_lb_policy* p) {
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
- grpc_lb_subchannel_data *sd = &p->subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &p->subchannel_list->subchannels[i];
if (p->selected != sd) {
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"selected_different_subchannel");
@@ -208,23 +208,23 @@ static void destroy_unselected_subchannels_locked(grpc_exec_ctx *exec_ctx,
}
static grpc_connectivity_state pf_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
-static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
+static void pf_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
closure);
@@ -234,13 +234,13 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
-static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- pick_first_lb_policy *p = (pick_first_lb_policy *)policy;
- const grpc_arg *arg =
+static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
if (p->subchannel_list == NULL) {
@@ -254,17 +254,17 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_ERROR,
"No valid LB addresses channel arg for Pick First %p update, "
"ignoring.",
- (void *)p);
+ (void*)p);
}
return;
}
- const grpc_lb_addresses *addresses =
- (const grpc_lb_addresses *)arg->value.pointer.p;
+ const grpc_lb_addresses* addresses =
+ (const grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses",
- (void *)p, (unsigned long)addresses->num_addresses);
+ (void*)p, (unsigned long)addresses->num_addresses);
}
- grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
pf_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -294,7 +294,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
// We do have a selected subchannel.
// Check if it's present in the new list. If so, we're done.
for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
if (sd->subchannel == p->selected->subchannel) {
// The currently selected subchannel is in the update: we are done.
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
@@ -339,8 +339,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
+ (void*)p, (void*)p->latest_pending_subchannel_list,
+ (void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list,
@@ -358,19 +358,19 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
}
-static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
- pick_first_lb_policy *p = (pick_first_lb_policy *)sd->subchannel_list->policy;
+static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void *)p, (void *)sd->subchannel,
+ (void*)p, (void*)sd->subchannel,
sd->subchannel_list->checking_subchannel,
- sd->subchannel_list->num_subchannels, (void *)sd->subchannel_list,
+ sd->subchannel_list->num_subchannels, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
grpc_error_string(error));
@@ -465,13 +465,13 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
"connected");
p->selected = sd;
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void *)p,
- (void *)sd->subchannel);
+ gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", (void*)p,
+ (void*)sd->subchannel);
}
// Drop all other subchannels, since we are now connected.
destroy_unselected_subchannels_locked(exec_ctx, p);
// Update any calls that were waiting for a pick.
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -479,7 +479,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Servicing pending pick with selected subchannel %p",
- (void *)p->selected);
+ (void*)p->selected);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -530,7 +530,7 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"pf_candidate_shutdown");
// Advance to next subchannel and check its state.
- grpc_lb_subchannel_data *original_sd = sd;
+ grpc_lb_subchannel_data* original_sd = sd;
do {
sd->subchannel_list->checking_subchannel =
(sd->subchannel_list->checking_subchannel + 1) %
@@ -578,17 +578,17 @@ static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_notify_on_state_change_locked,
pf_update_locked};
-static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
+static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
-static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {}
+static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
-static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
+ pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
- gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
+ gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
}
pf_update_locked(exec_ctx, &p->base, args);
grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
@@ -603,7 +603,7 @@ static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = {
static grpc_lb_policy_factory pick_first_lb_policy_factory = {
&pick_first_factory_vtable};
-static grpc_lb_policy_factory *pick_first_lb_factory_create() {
+static grpc_lb_policy_factory* pick_first_lb_factory_create() {
return &pick_first_lb_policy_factory;
}
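pick_first's pf_pick_locked either returns the already-selected subchannel synchronously or queues the pick on pending_picks, which pf_connectivity_changed_locked drains once a subchannel reaches READY. The standalone sketch below captures that queue/flush flow with hypothetical types (policy, and a pending_pick that carries only a callback); it is not the grpc_lb_policy interface.

/* Minimal sketch of the pending-pick queue, assuming made-up types. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*on_complete_fn)(int pick_result, void* user_data);

typedef struct pending_pick {
  struct pending_pick* next;
  on_complete_fn on_complete;
  void* user_data;
} pending_pick;

typedef struct {
  int have_selected;          /* becomes nonzero once a subchannel is READY */
  pending_pick* pending_picks;
} policy;

/* Returns 1 if the pick completed synchronously, 0 if it was queued. */
static int pick(policy* p, on_complete_fn cb, void* user_data) {
  if (p->have_selected) {
    cb(1, user_data);
    return 1;
  }
  pending_pick* pp = malloc(sizeof(*pp));
  pp->next = p->pending_picks;
  pp->on_complete = cb;
  pp->user_data = user_data;
  p->pending_picks = pp;
  return 0;
}

/* Called when connectivity reaches READY: drain the queue. */
static void on_selected(policy* p) {
  p->have_selected = 1;
  pending_pick* pp;
  while ((pp = p->pending_picks) != NULL) {
    p->pending_picks = pp->next;
    pp->on_complete(1, pp->user_data);
    free(pp);
  }
}

static void print_done(int result, void* user_data) {
  printf("pick %s done, result=%d\n", (const char*)user_data, result);
}

int main(void) {
  policy p = {0, NULL};
  pick(&p, print_done, (void*)"A"); /* queued */
  pick(&p, print_done, (void*)"B"); /* queued */
  on_selected(&p);                  /* both queued picks complete here */
  pick(&p, print_done, (void*)"C"); /* completes synchronously */
  return 0;
}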
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index fae40e378b..df235922c8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -46,12 +46,12 @@ grpc_tracer_flag grpc_lb_round_robin_trace =
*
* Once a pick is available, \a target is updated and \a on_complete called. */
typedef struct pending_pick {
- struct pending_pick *next;
+ struct pending_pick* next;
/* output argument where to store the pick()ed user_data. It'll be NULL if no
* such data is present or there's an error (the definite test for errors is
* \a target being NULL). */
- void **user_data;
+ void** user_data;
/* bitmask passed to pick() and used for selective cancelling. See
* grpc_lb_policy_cancel_picks() */
@@ -59,24 +59,24 @@ typedef struct pending_pick {
/* output argument where to store the pick()ed connected subchannel, or NULL
* upon error. */
- grpc_connected_subchannel **target;
+ grpc_connected_subchannel** target;
/* to be invoked once the pick() has completed (regardless of success) */
- grpc_closure *on_complete;
+ grpc_closure* on_complete;
} pending_pick;
typedef struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** have we started picking? */
bool started_picking;
/** are we shutting down? */
bool shutdown;
/** List of picks that are waiting on connectivity */
- pending_pick *pending_picks;
+ pending_pick* pending_picks;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker;
@@ -89,7 +89,7 @@ typedef struct round_robin_lb_policy {
* lists if they equal \a latest_pending_subchannel_list. In other words,
* racing callbacks that reference outdated subchannel lists won't perform any
* update. */
- grpc_lb_subchannel_list *latest_pending_subchannel_list;
+ grpc_lb_subchannel_list* latest_pending_subchannel_list;
} round_robin_lb_policy;
/** Returns the index into p->subchannel_list->subchannels of the next
@@ -99,13 +99,13 @@ typedef struct round_robin_lb_policy {
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
static size_t get_next_ready_subchannel_index_locked(
- const round_robin_lb_policy *p) {
+ const round_robin_lb_policy* p) {
GPR_ASSERT(p->subchannel_list != NULL);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %lu), "
"last_ready_subchannel_index=%lu",
- (void *)p, (unsigned long)p->subchannel_list->num_subchannels,
+ (void*)p, (unsigned long)p->subchannel_list->num_subchannels,
(unsigned long)p->last_ready_subchannel_index);
}
for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
@@ -116,8 +116,8 @@ static size_t get_next_ready_subchannel_index_locked(
GPR_DEBUG,
"[RR %p] checking subchannel %p, subchannel_list %p, index %lu: "
"state=%s",
- (void *)p, (void *)p->subchannel_list->subchannels[index].subchannel,
- (void *)p->subchannel_list, (unsigned long)index,
+ (void*)p, (void*)p->subchannel_list->subchannels[index].subchannel,
+ (void*)p->subchannel_list, (unsigned long)index,
grpc_connectivity_state_name(
p->subchannel_list->subchannels[index].curr_connectivity_state));
}
@@ -127,40 +127,39 @@ static size_t get_next_ready_subchannel_index_locked(
gpr_log(GPR_DEBUG,
"[RR %p] found next ready subchannel (%p) at index %lu of "
"subchannel_list %p",
- (void *)p,
- (void *)p->subchannel_list->subchannels[index].subchannel,
- (unsigned long)index, (void *)p->subchannel_list);
+ (void*)p,
+ (void*)p->subchannel_list->subchannels[index].subchannel,
+ (unsigned long)index, (void*)p->subchannel_list);
}
return index;
}
}
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void *)p);
+ gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void*)p);
}
return p->subchannel_list->num_subchannels;
}
// Sets p->last_ready_subchannel_index to last_ready_index.
-static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
+static void update_last_ready_subchannel_index_locked(round_robin_lb_policy* p,
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(
- GPR_DEBUG,
- "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
- (void *)p, (unsigned long)last_ready_index,
- (void *)p->subchannel_list->subchannels[last_ready_index].subchannel,
- (void *)p->subchannel_list->subchannels[last_ready_index]
- .connected_subchannel);
+ gpr_log(GPR_DEBUG,
+ "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
+ (void*)p, (unsigned long)last_ready_index,
+ (void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
+ (void*)p->subchannel_list->subchannels[last_ready_index]
+ .connected_subchannel);
}
}
-static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
- (void *)pol, (void *)pol);
+ (void*)pol, (void*)pol);
}
GPR_ASSERT(p->subchannel_list == NULL);
GPR_ASSERT(p->latest_pending_subchannel_list == NULL);
@@ -169,13 +168,13 @@ static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(p);
}
-static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
- grpc_error *error) {
+static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
+ grpc_error* error) {
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
}
p->shutdown = true;
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks) != NULL) {
p->pending_picks = pp->next;
*pp->target = NULL;
@@ -199,20 +198,20 @@ static void shutdown_locked(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p,
GRPC_ERROR_UNREF(error);
}
-static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
shutdown_locked(exec_ctx, p,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
}
-static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_connected_subchannel **target,
- grpc_error *error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_connected_subchannel** target,
+ grpc_error* error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if (pp->target == target) {
*target = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
@@ -228,15 +227,15 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
+static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
- grpc_error *error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
- pending_pick *pp = p->pending_picks;
+ grpc_error* error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+ pending_pick* pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
- pending_pick *next = pp->next;
+ pending_pick* next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
@@ -253,8 +252,8 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
-static void start_picking_locked(grpc_exec_ctx *exec_ctx,
- round_robin_lb_policy *p) {
+static void start_picking_locked(grpc_exec_ctx* exec_ctx,
+ round_robin_lb_policy* p) {
p->started_picking = true;
for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
@@ -264,21 +263,21 @@ static void start_picking_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
}
-static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- const grpc_lb_policy_pick_args *pick_args,
- grpc_connected_subchannel **target,
- grpc_call_context_element *context, void **user_data,
- grpc_closure *on_complete) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ const grpc_lb_policy_pick_args* pick_args,
+ grpc_connected_subchannel** target,
+ grpc_call_context_element* context, void** user_data,
+ grpc_closure* on_complete) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void *)pol,
+ gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
@@ -286,7 +285,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
/* readily available, report right away */
- grpc_lb_subchannel_data *sd =
+ grpc_lb_subchannel_data* sd =
&p->subchannel_list->subchannels[next_ready_index];
*target =
GRPC_CONNECTED_SUBCHANNEL_REF(sd->connected_subchannel, "rr_picked");
@@ -298,8 +297,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GPR_DEBUG,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %lu)",
- (void *)p, (void *)sd->subchannel, (void *)*target,
- (void *)sd->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p, (void*)sd->subchannel, (void*)*target,
+ (void*)sd->subchannel_list, (unsigned long)next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
update_last_ready_subchannel_index_locked(p, next_ready_index);
@@ -310,7 +309,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
- pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
+ pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -320,8 +319,8 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
return 0;
}
-static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
- grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
+static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
+ grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
GPR_ASSERT(subchannel_list->num_ready > 0);
--subchannel_list->num_ready;
@@ -353,7 +352,7 @@ static void update_state_counters_locked(grpc_lb_subchannel_data *sd) {
* used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
* connectivity status set. */
static grpc_connectivity_state update_lb_connectivity_status_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, grpc_error* error) {
/* In priority order. The first rule to match terminates the search (ie, if we
* are on rule n, all previous rules were unfulfilled).
*
@@ -375,8 +374,8 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
* CHECK: p->num_idle == p->subchannel_list->num_subchannels.
*/
grpc_connectivity_state new_state = sd->curr_connectivity_state;
- grpc_lb_subchannel_list *subchannel_list = sd->subchannel_list;
- round_robin_lb_policy *p = (round_robin_lb_policy *)subchannel_list->policy;
+ grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
+ round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
if (subchannel_list->num_ready > 0) { /* 1) READY */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "rr_ready");
@@ -397,7 +396,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] Shutting down: all subchannels have gone into shutdown",
- (void *)p);
+ (void*)p);
}
} else if (subchannel_list->num_transient_failures ==
p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
@@ -415,18 +414,18 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
return new_state;
}
-static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_lb_subchannel_data *sd = (grpc_lb_subchannel_data *)arg;
- round_robin_lb_policy *p =
- (round_robin_lb_policy *)sd->subchannel_list->policy;
+static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
+ round_robin_lb_policy* p =
+ (round_robin_lb_policy*)sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p: "
"prev_state=%s new_state=%s p->shutdown=%d "
"sd->subchannel_list->shutting_down=%d error=%s",
- (void *)p, (void *)sd->subchannel, (void *)sd->subchannel_list,
+ (void*)p, (void*)sd->subchannel, (void*)sd->subchannel_list,
grpc_connectivity_state_name(sd->prev_connectivity_state),
grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe),
p->shutdown, sd->subchannel_list->shutting_down,
@@ -493,8 +492,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] phasing out subchannel list %p (size %lu) in favor "
"of %p (size %lu)",
- (void *)p, (void *)p->subchannel_list, num_subchannels,
- (void *)sd->subchannel_list, num_subchannels);
+ (void*)p, (void*)p->subchannel_list, num_subchannels,
+ (void*)sd->subchannel_list, num_subchannels);
}
if (p->subchannel_list != NULL) {
// dispose of the current subchannel_list
@@ -509,14 +508,14 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
* p->pending_picks. This preemtively replicates rr_pick()'s actions. */
 * (Typo in the original comment: "preemtively" should read "preemptively".)
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
- grpc_lb_subchannel_data *selected =
+ grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
if (p->pending_picks != NULL) {
// if the selected subchannel is going to be used for the pending
// picks, update the last picked pointer
update_last_ready_subchannel_index_locked(p, next_ready_index);
}
- pending_pick *pp;
+ pending_pick* pp;
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -528,8 +527,8 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_log(GPR_DEBUG,
"[RR %p] Fulfilling pending pick. Target <-- subchannel %p "
"(subchannel_list %p, index %lu)",
- (void *)p, (void *)selected->subchannel,
- (void *)p->subchannel_list, (unsigned long)next_ready_index);
+ (void*)p, (void*)selected->subchannel,
+ (void*)p->subchannel_list, (unsigned long)next_ready_index);
}
GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
gpr_free(pp);
@@ -541,41 +540,42 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
static grpc_connectivity_state rr_check_connectivity_locked(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
return grpc_connectivity_state_get(&p->state_tracker, error);
}
-static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy *pol,
- grpc_connectivity_state *current,
- grpc_closure *notify) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy* pol,
+ grpc_connectivity_state* current,
+ grpc_closure* notify) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
current, notify);
}
-static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
- grpc_closure *closure) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
+static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+ grpc_closure* closure) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
- grpc_lb_subchannel_data *selected =
+ grpc_lb_subchannel_data* selected =
&p->subchannel_list->subchannels[next_ready_index];
- grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
+ grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Round Robin not connected"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}
-static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
- const grpc_lb_policy_args *args) {
- round_robin_lb_policy *p = (round_robin_lb_policy *)policy;
- const grpc_arg *arg =
+static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+ const grpc_lb_policy_args* args) {
+ round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+ const grpc_arg* arg =
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", p);
@@ -589,12 +589,12 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
- grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+ grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", p,
addresses->num_addresses);
}
- grpc_lb_subchannel_list *subchannel_list = grpc_lb_subchannel_list_create(
+ grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
rr_connectivity_changed_locked);
if (subchannel_list->num_subchannels == 0) {
@@ -615,8 +615,8 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
gpr_log(GPR_DEBUG,
"[RR %p] Shutting down latest pending subchannel list %p, "
"about to be replaced by newer latest %p",
- (void *)p, (void *)p->latest_pending_subchannel_list,
- (void *)subchannel_list);
+ (void*)p, (void*)p->latest_pending_subchannel_list,
+ (void*)subchannel_list);
}
grpc_lb_subchannel_list_shutdown_and_unref(
exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
@@ -655,22 +655,22 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_notify_on_state_change_locked,
rr_update_locked};
-static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
+static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
-static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {}
+static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
-static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args) {
+static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args) {
GPR_ASSERT(args->client_channel_factory != NULL);
- round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
+ round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_subchannel_index_ref();
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
rr_update_locked(exec_ctx, &p->base, args);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
- gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void *)p,
+ gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
(unsigned long)p->subchannel_list->num_subchannels);
}
return &p->base;
@@ -683,7 +683,7 @@ static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = {
static grpc_lb_policy_factory round_robin_lb_policy_factory = {
&round_robin_factory_vtable};
-static grpc_lb_policy_factory *round_robin_lb_factory_create() {
+static grpc_lb_policy_factory* round_robin_lb_factory_create() {
return &round_robin_lb_policy_factory;
}
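Round robin's get_next_ready_subchannel_index_locked scans the subchannel ring starting just after last_ready_subchannel_index and returns the first READY entry, or num_subchannels as a "nothing ready" sentinel. The sketch below is one plausible standalone rendering of that scan, using a faked state array instead of real subchannels.

/* Minimal sketch of the round-robin ready scan, under assumed types. */
#include <stdio.h>

enum state { IDLE, CONNECTING, READY, TRANSIENT_FAILURE };

static size_t next_ready_index(const enum state* subchannels, size_t num,
                               size_t last_ready_index) {
  for (size_t i = 0; i < num; ++i) {
    const size_t index = (last_ready_index + 1 + i) % num;
    if (subchannels[index] == READY) return index;
  }
  return num; /* sentinel: no subchannel is READY */
}

int main(void) {
  enum state subchannels[] = {TRANSIENT_FAILURE, READY, CONNECTING, READY};
  size_t last = 1; /* subchannel 1 served the previous pick */
  size_t next = next_ready_index(subchannels, 4, last);
  printf("next ready index: %zu\n", next); /* prints 3; a later scan wraps to 1 */
  return 0;
}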
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index 08ea4f480b..db38ef5305 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -28,17 +28,18 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_data *sd,
- const char *reason) {
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_data* sd,
+ const char* reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
- " of %" PRIuPTR " (subchannel %p): unreffing subchannel",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): unreffing subchannel",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
sd->subchannel = NULL;
@@ -56,7 +57,7 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_data_start_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -74,25 +75,26 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
}
void grpc_lb_subchannel_data_stop_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
- " (subchannel %p): stopping connectivity watch",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): stopping connectivity watch",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
sd->connectivity_notification_pending = false;
}
-grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
- const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb) {
- grpc_lb_subchannel_list *subchannel_list =
- (grpc_lb_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
+ grpc_lb_subchannel_list* subchannel_list =
+ (grpc_lb_subchannel_list*)gpr_zalloc(sizeof(*subchannel_list));
if (GRPC_TRACER_ON(*tracer)) {
gpr_log(GPR_DEBUG,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
@@ -101,11 +103,11 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
subchannel_list->policy = p;
subchannel_list->tracer = tracer;
gpr_ref_init(&subchannel_list->refcount, 1);
- subchannel_list->subchannels = (grpc_lb_subchannel_data *)gpr_zalloc(
+ subchannel_list->subchannels = (grpc_lb_subchannel_data*)gpr_zalloc(
sizeof(grpc_lb_subchannel_data) * addresses->num_addresses);
// We need to remove the LB addresses in order to be able to compare the
// subchannel keys of subchannels from a different batch of addresses.
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
+ static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
GRPC_ARG_LB_ADDRESSES};
// Create a subchannel for each address.
grpc_subchannel_args sc_args;
@@ -116,18 +118,18 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
- grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
+ grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
exec_ctx, args->client_channel_factory, &sc_args);
grpc_channel_args_destroy(exec_ctx, new_args);
if (subchannel == NULL) {
// Subchannel could not be created.
if (GRPC_TRACER_ON(*tracer)) {
- char *address_uri =
+ char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG,
"[%s %p] could not create subchannel for address uri %s, "
@@ -138,15 +140,16 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
continue;
}
if (GRPC_TRACER_ON(*tracer)) {
- char *address_uri =
+ char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
- gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
- ": Created subchannel %p for address uri %s",
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR
+ ": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
gpr_free(address_uri);
}
- grpc_lb_subchannel_data *sd =
+ grpc_lb_subchannel_data* sd =
&subchannel_list->subchannels[subchannel_index++];
sd->subchannel_list = subchannel_list;
sd->subchannel = subchannel;
@@ -169,15 +172,15 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
return subchannel_list;
}
-static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list) {
+static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
subchannel_list->tracer->name, subchannel_list->policy,
subchannel_list);
}
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
"subchannel_list_destroy");
}
@@ -185,8 +188,8 @@ static void subchannel_list_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(subchannel_list);
}
-void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
gpr_ref_non_zero(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -197,9 +200,9 @@ void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
}
}
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
const bool done = gpr_unref(&subchannel_list->refcount);
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count);
@@ -214,35 +217,36 @@ void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
}
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list *subchannel_list, const char *reason) {
+ grpc_lb_subchannel_list* subchannel_list, const char* reason) {
GRPC_LB_POLICY_WEAK_REF(subchannel_list->policy, reason);
grpc_lb_subchannel_list_ref(subchannel_list, reason);
}
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
}
static void subchannel_data_cancel_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd, const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
- gpr_log(
- GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
- " (subchannel %p): canceling connectivity watch (%s)",
- sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
- sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
- sd->subchannel_list->num_subchannels, sd->subchannel, reason);
+ gpr_log(GPR_DEBUG,
+ "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
+ " (subchannel %p): canceling connectivity watch (%s)",
+ sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
+ sd->subchannel_list,
+ (size_t)(sd - sd->subchannel_list->subchannels),
+ sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
void grpc_lb_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason) {
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason) {
if (GRPC_TRACER_ON(*subchannel_list->tracer)) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
subchannel_list->tracer->name, subchannel_list->policy,
@@ -251,7 +255,7 @@ void grpc_lb_subchannel_list_shutdown_and_unref(
GPR_ASSERT(!subchannel_list->shutting_down);
subchannel_list->shutting_down = true;
for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
- grpc_lb_subchannel_data *sd = &subchannel_list->subchannels[i];
+ grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
// If there's a pending notification for this subchannel, cancel it;
// the callback is responsible for unreffing the subchannel.
// Otherwise, unref the subchannel directly.
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 9d5984260f..e18ad490e8 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -44,10 +44,10 @@ typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
typedef struct {
/** backpointer to owning subchannel list */
- grpc_lb_subchannel_list *subchannel_list;
+ grpc_lb_subchannel_list* subchannel_list;
/** subchannel itself */
- grpc_subchannel *subchannel;
- grpc_connected_subchannel *connected_subchannel;
+ grpc_subchannel* subchannel;
+ grpc_connected_subchannel* connected_subchannel;
/** Is a connectivity notification pending? */
bool connectivity_notification_pending;
/** notification that connectivity has changed on subchannel */
@@ -63,36 +63,36 @@ typedef struct {
* \a connectivity_changed_closure. */
grpc_connectivity_state pending_connectivity_state_unsafe;
/** the subchannel's target user data */
- void *user_data;
+ void* user_data;
/** vtable to operate over \a user_data */
- const grpc_lb_user_data_vtable *user_data_vtable;
+ const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_subchannel_data;
/// Unrefs the subchannel contained in sd.
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_data *sd,
- const char *reason);
+void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_data* sd,
+ const char* reason);
/// Starts watching the connectivity state of the subchannel.
/// The connectivity_changed_cb callback must invoke either
/// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
/// grpc_lb_subchannel_data_start_connectivity_watch().
void grpc_lb_subchannel_data_start_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
/// Stops watching the connectivity state of the subchannel.
void grpc_lb_subchannel_data_stop_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_data *sd);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
struct grpc_lb_subchannel_list {
/** backpointer to owning policy */
- grpc_lb_policy *policy;
+ grpc_lb_policy* policy;
- grpc_tracer_flag *tracer;
+ grpc_tracer_flag* tracer;
/** all our subchannels */
size_t num_subchannels;
- grpc_lb_subchannel_data *subchannels;
+ grpc_lb_subchannel_data* subchannels;
/** Index into subchannels of the one we're currently checking.
* Used when connecting to subchannels serially instead of in parallel. */
@@ -120,31 +120,31 @@ struct grpc_lb_subchannel_list {
bool shutting_down;
};
-grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy *p, grpc_tracer_flag *tracer,
- const grpc_lb_addresses *addresses, const grpc_lb_policy_args *args,
+grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_tracer_flag* tracer,
+ const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
grpc_iomgr_cb_func connectivity_changed_cb);
-void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx *exec_ctx,
- grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
+ grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
/// Takes and releases refs needed for a connectivity notification.
/// This includes a ref to subchannel_list and a weak ref to the LB policy.
void grpc_lb_subchannel_list_ref_for_connectivity_watch(
- grpc_lb_subchannel_list *subchannel_list, const char *reason);
+ grpc_lb_subchannel_list* subchannel_list, const char* reason);
void grpc_lb_subchannel_list_unref_for_connectivity_watch(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
/// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
/// connectivity state notification callback will ultimately unref it.
void grpc_lb_subchannel_list_shutdown_and_unref(
- grpc_exec_ctx *exec_ctx, grpc_lb_subchannel_list *subchannel_list,
- const char *reason);
+ grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
+ const char* reason);
#ifdef __cplusplus
}
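
To make the ref-pairing contract above concrete, here is a hedged sketch of how a policy might arm a connectivity watch; the helper name, index variable, and reason string are hypothetical, not code from this patch:

#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h"

static void example_start_watch(grpc_exec_ctx* exec_ctx,
                                grpc_lb_subchannel_list* subchannel_list,
                                size_t i) {
  grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
  /* Take the list ref + weak policy ref needed by the pending notification. */
  grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
                                                     "connectivity_watch");
  grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
  /* The connectivity_changed_cb passed to grpc_lb_subchannel_list_create()
     must later either re-arm the watch or call
     grpc_lb_subchannel_data_stop_connectivity_watch() followed by
     grpc_lb_subchannel_list_unref_for_connectivity_watch(). */
}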
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 8790ffdda3..360a42b177 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -37,7 +37,7 @@ typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
struct grpc_lb_policy_factory {
- const grpc_lb_policy_factory_vtable *vtable;
+ const grpc_lb_policy_factory_vtable* vtable;
};
/** A resolved address alongside any LB related information associated with it.
@@ -48,91 +48,91 @@ struct grpc_lb_policy_factory {
typedef struct grpc_lb_address {
grpc_resolved_address address;
bool is_balancer;
- char *balancer_name; /* For secure naming. */
- void *user_data;
+ char* balancer_name; /* For secure naming. */
+ void* user_data;
} grpc_lb_address;
typedef struct grpc_lb_user_data_vtable {
- void *(*copy)(void *);
- void (*destroy)(grpc_exec_ctx *exec_ctx, void *);
- int (*cmp)(void *, void *);
+ void* (*copy)(void*);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, void*);
+ int (*cmp)(void*, void*);
} grpc_lb_user_data_vtable;
typedef struct grpc_lb_addresses {
size_t num_addresses;
- grpc_lb_address *addresses;
- const grpc_lb_user_data_vtable *user_data_vtable;
+ grpc_lb_address* addresses;
+ const grpc_lb_user_data_vtable* user_data_vtable;
} grpc_lb_addresses;
/** Returns a grpc_lb_addresses struct with enough space for
/** Returns a grpc_lb_addresses struct with enough space for
\a num_addresses addresses. The \a user_data_vtable argument may be
NULL if no user data will be added. */
-grpc_lb_addresses *grpc_lb_addresses_create(
- size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable);
+grpc_lb_addresses* grpc_lb_addresses_create(
+ size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable);
/** Creates a copy of \a addresses. */
-grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses);
+grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses);
/** Sets the value of the address at index \a index of \a addresses.
* \a address is a socket address of length \a address_len.
* Takes ownership of \a balancer_name. */
-void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index,
- const void *address, size_t address_len,
- bool is_balancer, const char *balancer_name,
- void *user_data);
+void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
+ const void* address, size_t address_len,
+ bool is_balancer, const char* balancer_name,
+ void* user_data);
/** Sets the value of the address at index \a index of \a addresses from \a uri.
* Returns true upon success, false otherwise. Takes ownership of \a
* balancer_name. */
-bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses *addresses,
- size_t index, const grpc_uri *uri,
+bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses* addresses,
+ size_t index, const grpc_uri* uri,
bool is_balancer,
- const char *balancer_name,
- void *user_data);
+ const char* balancer_name,
+ void* user_data);
/** Compares \a addresses1 and \a addresses2. */
-int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1,
- const grpc_lb_addresses *addresses2);
+int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
+ const grpc_lb_addresses* addresses2);
/** Destroys \a addresses. */
-void grpc_lb_addresses_destroy(grpc_exec_ctx *exec_ctx,
- grpc_lb_addresses *addresses);
+void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_lb_addresses* addresses);
/** Returns a channel arg containing \a addresses. */
grpc_arg grpc_lb_addresses_create_channel_arg(
- const grpc_lb_addresses *addresses);
+ const grpc_lb_addresses* addresses);
/** Returns the \a grpc_lb_addresses instance in \a channel_args or NULL */
-grpc_lb_addresses *grpc_lb_addresses_find_channel_arg(
- const grpc_channel_args *channel_args);
+grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
+ const grpc_channel_args* channel_args);
/** Arguments passed to LB policies. */
struct grpc_lb_policy_args {
- grpc_client_channel_factory *client_channel_factory;
- grpc_channel_args *args;
- grpc_combiner *combiner;
+ grpc_client_channel_factory* client_channel_factory;
+ grpc_channel_args* args;
+ grpc_combiner* combiner;
};
struct grpc_lb_policy_factory_vtable {
- void (*ref)(grpc_lb_policy_factory *factory);
- void (*unref)(grpc_lb_policy_factory *factory);
+ void (*ref)(grpc_lb_policy_factory* factory);
+ void (*unref)(grpc_lb_policy_factory* factory);
/** Implementation of grpc_lb_policy_factory_create_lb_policy */
- grpc_lb_policy *(*create_lb_policy)(grpc_exec_ctx *exec_ctx,
- grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args);
+ grpc_lb_policy* (*create_lb_policy)(grpc_exec_ctx* exec_ctx,
+ grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args);
/** Name for the LB policy this factory implements */
- const char *name;
+ const char* name;
};
-void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory);
-void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory);
+void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory);
+void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory);
/** Create a lb_policy instance. */
-grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy(
- grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory,
- grpc_lb_policy_args *args);
+grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
+ grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* factory,
+ grpc_lb_policy_args* args);
#ifdef __cplusplus
}
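
As a hedged illustration of the vtable above, a concrete factory looks roughly like this; the "example" names are placeholders (the round_robin factory earlier in this patch has the same shape):

#include "src/core/ext/filters/client_channel/lb_policy_factory.h"

static void example_factory_ref(grpc_lb_policy_factory* factory) {}
static void example_factory_unref(grpc_lb_policy_factory* factory) {}
static grpc_lb_policy* example_create_lb_policy(grpc_exec_ctx* exec_ctx,
                                                grpc_lb_policy_factory* factory,
                                                grpc_lb_policy_args* args) {
  return NULL; /* A real factory allocates and initializes its policy here. */
}
static const grpc_lb_policy_factory_vtable example_factory_vtable = {
    example_factory_ref, example_factory_unref, example_create_lb_policy,
    "example"};
static grpc_lb_policy_factory example_lb_policy_factory = {
    &example_factory_vtable};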
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index f2460f8304..7b0a926a1b 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -24,7 +24,7 @@
#define MAX_POLICIES 10
-static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES];
+static grpc_lb_policy_factory* g_all_of_the_lb_policies[MAX_POLICIES];
static int g_number_of_lb_policies = 0;
void grpc_lb_policy_registry_init(void) { g_number_of_lb_policies = 0; }
@@ -36,7 +36,7 @@ void grpc_lb_policy_registry_shutdown(void) {
}
}
-void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
+void grpc_register_lb_policy(grpc_lb_policy_factory* factory) {
int i;
for (i = 0; i < g_number_of_lb_policies; i++) {
GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name,
@@ -47,7 +47,7 @@ void grpc_register_lb_policy(grpc_lb_policy_factory *factory) {
g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory;
}
-static grpc_lb_policy_factory *lookup_factory(const char *name) {
+static grpc_lb_policy_factory* lookup_factory(const char* name) {
int i;
if (name == NULL) return NULL;
@@ -61,10 +61,10 @@ static grpc_lb_policy_factory *lookup_factory(const char *name) {
return NULL;
}
-grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
- grpc_lb_policy_args *args) {
- grpc_lb_policy_factory *factory = lookup_factory(name);
- grpc_lb_policy *lb_policy =
+grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+ grpc_lb_policy_args* args) {
+ grpc_lb_policy_factory* factory = lookup_factory(name);
+ grpc_lb_policy* lb_policy =
grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, args);
return lb_policy;
}
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 55154cb02a..055f751b57 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -32,14 +32,14 @@ void grpc_lb_policy_registry_init(void);
void grpc_lb_policy_registry_shutdown(void);
/** Register a LB policy factory. */
-void grpc_register_lb_policy(grpc_lb_policy_factory *factory);
+void grpc_register_lb_policy(grpc_lb_policy_factory* factory);
/** Create a \a grpc_lb_policy instance.
*
* If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
* will be returned. */
-grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name,
- grpc_lb_policy_args *args);
+grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+ grpc_lb_policy_args* args);
#ifdef __cplusplus
}
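
A hedged sketch of the registration/creation flow implied by the declarations above, reusing the example factory sketched after lb_policy_factory.h; the init hook and policy name are placeholders:

/* Typically run from a plugin init hook (hypothetical name). */
void example_lb_policy_plugin_init(void) {
  grpc_register_lb_policy(&example_lb_policy_factory);
}

/* Later, the client channel asks the registry for a policy by name. */
static grpc_lb_policy* example_make_policy(grpc_exec_ctx* exec_ctx,
                                           grpc_lb_policy_args* args) {
  return grpc_lb_policy_create(exec_ctx, "example", args);
}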
diff --git a/src/core/ext/filters/client_channel/parse_address.cc b/src/core/ext/filters/client_channel/parse_address.cc
index 2152b5a1e9..6cf77f13bc 100644
--- a/src/core/ext/filters/client_channel/parse_address.cc
+++ b/src/core/ext/filters/client_channel/parse_address.cc
@@ -33,13 +33,13 @@
#ifdef GRPC_HAVE_UNIX_SOCKET
-bool grpc_parse_unix(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_unix(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme);
return false;
}
- struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
+ struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
const size_t maxlen = sizeof(un->sun_path);
const size_t path_len = strnlen(uri->path, maxlen);
if (path_len == maxlen) return false;
@@ -51,24 +51,24 @@ bool grpc_parse_unix(const grpc_uri *uri,
#else /* GRPC_HAVE_UNIX_SOCKET */
-bool grpc_parse_unix(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_unix(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
abort();
}
#endif /* GRPC_HAVE_UNIX_SOCKET */
-bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
- char *host;
- char *port;
+ char* host;
+ char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in);
- struct sockaddr_in *in = (struct sockaddr_in *)addr->addr;
+ struct sockaddr_in* in = (struct sockaddr_in*)addr->addr;
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
@@ -92,32 +92,32 @@ done:
return success;
}
-bool grpc_parse_ipv4(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_ipv4(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("ipv4", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme);
return false;
}
- const char *host_port = uri->path;
+ const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv4_hostport(host_port, resolved_addr,
true /* log_errors */);
}
-bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors) {
bool success = false;
// Split host and port.
- char *host;
- char *port;
+ char* host;
+ char* port;
if (!gpr_split_host_port(hostport, &host, &port)) return false;
// Parse IP address.
memset(addr, 0, sizeof(*addr));
addr->len = sizeof(struct sockaddr_in6);
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr->addr;
+ struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr->addr;
in6->sin6_family = AF_INET6;
// Handle the RFC6874 syntax for IPv6 zone identifiers.
- char *host_end = (char *)gpr_memrchr(host, '%', strlen(host));
+ char* host_end = (char*)gpr_memrchr(host, '%', strlen(host));
if (host_end != NULL) {
GPR_ASSERT(host_end >= host);
char host_without_scope[INET6_ADDRSTRLEN];
@@ -161,19 +161,19 @@ done:
return success;
}
-bool grpc_parse_ipv6(const grpc_uri *uri,
- grpc_resolved_address *resolved_addr) {
+bool grpc_parse_ipv6(const grpc_uri* uri,
+ grpc_resolved_address* resolved_addr) {
if (strcmp("ipv6", uri->scheme) != 0) {
gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme);
return false;
}
- const char *host_port = uri->path;
+ const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
return grpc_parse_ipv6_hostport(host_port, resolved_addr,
true /* log_errors */);
}
-bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr) {
+bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr) {
if (strcmp("unix", uri->scheme) == 0) {
return grpc_parse_unix(uri, resolved_addr);
} else if (strcmp("ipv4", uri->scheme) == 0) {
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index 27d06a1cb3..b45859f9a2 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -30,23 +30,23 @@ extern "C" {
/** Populate \a resolved_addr from \a uri, whose path is expected to contain a
* unix socket path. Returns true upon success. */
-bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_unix(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv4 host:port pair. Returns true upon success. */
-bool grpc_parse_ipv4(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_ipv4(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri, whose path is expected to contain an
* IPv6 host:port pair. Returns true upon success. */
-bool grpc_parse_ipv6(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_ipv6(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Populate \a resolved_addr from \a uri. Returns true upon success. */
-bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr);
+bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
/** Parse bare IPv4 or IPv6 "IP:port" strings. */
-bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
-bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr,
+bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
bool log_errors);
#ifdef __cplusplus
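
A hedged usage sketch of the bare "IP:port" parsers declared above; the helper name and address literal are arbitrary:

#include "src/core/ext/filters/client_channel/parse_address.h"

static bool example_parse_listen_addr(grpc_resolved_address* addr) {
  /* Returns true and fills *addr on success; logs the failure otherwise. */
  return grpc_parse_ipv4_hostport("127.0.0.1:50051", addr,
                                  true /* log_errors */);
}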
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index 8401504fcf..7e84b98cb9 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -24,17 +24,17 @@ grpc_tracer_flag grpc_trace_resolver_refcount =
GRPC_TRACER_INITIALIZER(false, "resolver_refcount");
#endif
-void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable,
- grpc_combiner *combiner) {
+void grpc_resolver_init(grpc_resolver* resolver,
+ const grpc_resolver_vtable* vtable,
+ grpc_combiner* combiner) {
resolver->vtable = vtable;
resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver");
gpr_ref_init(&resolver->refs, 1);
}
#ifndef NDEBUG
-void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
- const char *reason) {
+void grpc_resolver_ref(grpc_resolver* resolver, const char* file, int line,
+ const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -42,14 +42,14 @@ void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line,
old_refs, old_refs + 1, reason);
}
#else
-void grpc_resolver_ref(grpc_resolver *resolver) {
+void grpc_resolver_ref(grpc_resolver* resolver) {
#endif
gpr_ref(&resolver->refs);
}
#ifndef NDEBUG
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- const char *file, int line, const char *reason) {
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ const char* file, int line, const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -57,27 +57,27 @@ void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
old_refs, old_refs - 1, reason);
}
#else
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver) {
#endif
if (gpr_unref(&resolver->refs)) {
- grpc_combiner *combiner = resolver->combiner;
+ grpc_combiner* combiner = resolver->combiner;
resolver->vtable->destroy(exec_ctx, resolver);
GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
}
}
-void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
resolver->vtable->shutdown_locked(exec_ctx, resolver);
}
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
}
-void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result,
- grpc_closure *on_complete) {
+void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result,
+ grpc_closure* on_complete) {
resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
}
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index 73fbbbbc3b..a0eb0bcfdf 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -35,49 +35,49 @@ extern grpc_tracer_flag grpc_trace_resolver_refcount;
/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */
struct grpc_resolver {
- const grpc_resolver_vtable *vtable;
+ const grpc_resolver_vtable* vtable;
gpr_refcount refs;
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
};
struct grpc_resolver_vtable {
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
- void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
- void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result, grpc_closure *on_complete);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
+ void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
+ void (*channel_saw_error_locked)(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
+ void (*next_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result, grpc_closure* on_complete);
};
#ifndef NDEBUG
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
#define GRPC_RESOLVER_UNREF(e, p, r) \
grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
-void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line,
- const char *reason);
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy,
- const char *file, int line, const char *reason);
+void grpc_resolver_ref(grpc_resolver* policy, const char* file, int line,
+ const char* reason);
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy,
+ const char* file, int line, const char* reason);
#else
#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
-void grpc_resolver_ref(grpc_resolver *policy);
-void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy);
+void grpc_resolver_ref(grpc_resolver* policy);
+void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy);
#endif
-void grpc_resolver_init(grpc_resolver *resolver,
- const grpc_resolver_vtable *vtable,
- grpc_combiner *combiner);
+void grpc_resolver_init(grpc_resolver* resolver,
+ const grpc_resolver_vtable* vtable,
+ grpc_combiner* combiner);
-void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon.
Must be called from the combiner passed as a resolver_arg at construction
time.*/
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver);
/** Get the next result from the resolver. Expected to set \a *result with
new channel args and then schedule \a on_complete for execution.
@@ -87,9 +87,9 @@ void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
Must be called from the combiner passed as a resolver_arg at construction
time.*/
-void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **result,
- grpc_closure *on_complete);
+void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** result,
+ grpc_closure* on_complete);
#ifdef __cplusplus
}
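
A hedged sketch of the next_locked contract and the ref-counting macros above; the caller function and reason strings are hypothetical:

static void example_request_next(grpc_exec_ctx* exec_ctx,
                                 grpc_resolver* resolver,
                                 grpc_channel_args** result,
                                 grpc_closure* on_next) {
  GRPC_RESOLVER_REF(resolver, "next_request");
  /* Must run under the combiner the resolver was constructed with; the
     resolver sets *result and schedules on_next when it has a new result. */
  grpc_resolver_next_locked(exec_ctx, resolver, result, on_next);
  /* A matching GRPC_RESOLVER_UNREF(exec_ctx, resolver, "next_request")
     belongs wherever the caller is done with the resolver. */
}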
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index a1ddaee499..76f08281f7 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -53,38 +53,38 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** DNS server to use (if not system default) */
- char *dns_server;
+ char* dns_server;
/** name to resolve (usually the same as target_name) */
- char *name_to_resolve;
+ char* name_to_resolve;
/** default port to use */
- char *default_port;
+ char* default_port;
/** channel args. */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** whether to request the service config */
bool request_service_config;
/** pollset_set to drive the name resolution process */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** Closures used by the combiner */
grpc_closure dns_ares_on_retry_timer_locked;
grpc_closure dns_ares_on_resolved_locked;
/** Combiner guarding the rest of the state */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/** are we currently resolving? */
bool resolving;
/** the pending resolving request */
- grpc_ares_request *pending_request;
+ grpc_ares_request* pending_request;
/** which version of the result have we published? */
int published_version;
/** which version of the result is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
/** current (fully resolved) result */
- grpc_channel_args *resolved_result;
+ grpc_channel_args* resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -92,32 +92,32 @@ typedef struct {
grpc_backoff backoff_state;
/** currently resolving addresses */
- grpc_lb_addresses *lb_addresses;
+ grpc_lb_addresses* lb_addresses;
/** currently resolving service config */
- char *service_config_json;
+ char* service_config_json;
} ares_dns_resolver;
-static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r);
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r);
+static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r);
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r);
-static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable dns_ares_resolver_vtable = {
dns_ares_destroy, dns_ares_shutdown_locked,
dns_ares_channel_saw_error_locked, dns_ares_next_locked};
-static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -133,18 +133,18 @@ static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
if (!r->resolving) {
grpc_backoff_reset(&r->backoff_state);
dns_ares_start_resolving_locked(exec_ctx, r);
}
}
-static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- ares_dns_resolver *r = (ares_dns_resolver *)arg;
+static void dns_ares_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ ares_dns_resolver* r = (ares_dns_resolver*)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@@ -154,8 +154,8 @@ static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static bool value_in_json_array(grpc_json *array, const char *value) {
- for (grpc_json *entry = array->child; entry != NULL; entry = entry->next) {
+static bool value_in_json_array(grpc_json* array, const char* value) {
+ for (grpc_json* entry = array->child; entry != NULL; entry = entry->next) {
if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) {
return true;
}
@@ -163,21 +163,21 @@ static bool value_in_json_array(grpc_json *array, const char *value) {
return false;
}
-static char *choose_service_config(char *service_config_choice_json) {
- grpc_json *choices_json = grpc_json_parse_string(service_config_choice_json);
+static char* choose_service_config(char* service_config_choice_json) {
+ grpc_json* choices_json = grpc_json_parse_string(service_config_choice_json);
if (choices_json == NULL || choices_json->type != GRPC_JSON_ARRAY) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
return NULL;
}
- char *service_config = NULL;
- for (grpc_json *choice = choices_json->child; choice != NULL;
+ char* service_config = NULL;
+ for (grpc_json* choice = choices_json->child; choice != NULL;
choice = choice->next) {
if (choice->type != GRPC_JSON_OBJECT) {
gpr_log(GPR_ERROR, "cannot parse service config JSON string");
break;
}
- grpc_json *service_config_json = NULL;
- for (grpc_json *field = choice->child; field != NULL; field = field->next) {
+ grpc_json* service_config_json = NULL;
+ for (grpc_json* field = choice->child; field != NULL; field = field->next) {
// Check client language, if specified.
if (strcmp(field->key, "clientLanguage") == 0) {
if (field->type != GRPC_JSON_ARRAY ||
@@ -188,7 +188,7 @@ static char *choose_service_config(char *service_config_choice_json) {
}
// Check client hostname, if specified.
if (strcmp(field->key, "clientHostname") == 0) {
- char *hostname = grpc_gethostname();
+ char* hostname = grpc_gethostname();
if (hostname == NULL || field->type != GRPC_JSON_ARRAY ||
!value_in_json_array(field, hostname)) {
service_config_json = NULL;
@@ -225,22 +225,22 @@ static char *choose_service_config(char *service_config_choice_json) {
return service_config;
}
-static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- ares_dns_resolver *r = (ares_dns_resolver *)arg;
- grpc_channel_args *result = NULL;
+static void dns_ares_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ ares_dns_resolver* r = (ares_dns_resolver*)arg;
+ grpc_channel_args* result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
r->pending_request = NULL;
if (r->lb_addresses != NULL) {
- static const char *args_to_remove[2];
+ static const char* args_to_remove[2];
size_t num_args_to_remove = 0;
grpc_arg new_args[3];
size_t num_args_to_add = 0;
new_args[num_args_to_add++] =
grpc_lb_addresses_create_channel_arg(r->lb_addresses);
- grpc_service_config *service_config = NULL;
- char *service_config_string = NULL;
+ grpc_service_config* service_config = NULL;
+ char* service_config_string = NULL;
if (r->service_config_json != NULL) {
service_config_string = choose_service_config(r->service_config_json);
gpr_free(r->service_config_json);
@@ -249,15 +249,15 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
service_config_string);
args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string);
+ (char*)GRPC_ARG_SERVICE_CONFIG, service_config_string);
service_config = grpc_service_config_create(service_config_string);
if (service_config != NULL) {
- const char *lb_policy_name =
+ const char* lb_policy_name =
grpc_service_config_get_lb_policy_name(service_config);
if (lb_policy_name != NULL) {
args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args_to_add++] = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name);
+ (char*)GRPC_ARG_LB_POLICY_NAME, (char*)lb_policy_name);
}
}
}
@@ -269,7 +269,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(service_config_string);
grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses);
} else {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
@@ -296,12 +296,12 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
-static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
+static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
gpr_log(GPR_DEBUG, "dns_ares_next is called.");
- ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ ares_dns_resolver* r = (ares_dns_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -313,8 +313,8 @@ static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r) {
+static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
@@ -327,8 +327,8 @@ static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
r->request_service_config ? &r->service_config_json : NULL);
}
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- ares_dns_resolver *r) {
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ ares_dns_resolver* r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_result = r->resolved_result == NULL
@@ -341,9 +341,9 @@ static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
gpr_log(GPR_DEBUG, "dns_ares_destroy");
- ares_dns_resolver *r = (ares_dns_resolver *)gr;
+ ares_dns_resolver* r = (ares_dns_resolver*)gr;
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@@ -355,15 +355,15 @@ static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
gpr_free(r);
}
-static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- const char *default_port) {
+static grpc_resolver* dns_ares_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ const char* default_port) {
/* Get name from args. */
- const char *path = args->uri->path;
+ const char* path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
- ares_dns_resolver *r =
- (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
+ ares_dns_resolver* r =
+ (ares_dns_resolver*)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);
@@ -371,7 +371,7 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
r->channel_args = grpc_channel_args_copy(args->args);
- const grpc_arg *arg = grpc_channel_args_find(
+ const grpc_arg* arg = grpc_channel_args_find(
r->channel_args, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION);
r->request_service_config = !grpc_channel_arg_get_integer(
arg, (grpc_integer_options){false, false, true});
@@ -398,19 +398,19 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void dns_ares_factory_ref(grpc_resolver_factory *factory) {}
+static void dns_ares_factory_ref(grpc_resolver_factory* factory) {}
-static void dns_ares_factory_unref(grpc_resolver_factory *factory) {}
+static void dns_ares_factory_unref(grpc_resolver_factory* factory) {}
-static grpc_resolver *dns_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args) {
+static grpc_resolver* dns_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args) {
return dns_ares_create(exec_ctx, args, "https");
}
-static char *dns_ares_factory_get_default_host_name(
- grpc_resolver_factory *factory, grpc_uri *uri) {
- const char *path = uri->path;
+static char* dns_ares_factory_get_default_host_name(
+ grpc_resolver_factory* factory, grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
@@ -420,16 +420,16 @@ static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
dns_ares_factory_get_default_host_name, "dns"};
static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
-static grpc_resolver_factory *dns_ares_resolver_factory_create() {
+static grpc_resolver_factory* dns_ares_resolver_factory_create() {
return &dns_resolver_factory;
}
extern "C" void grpc_resolver_dns_ares_init(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
/* TODO(zyc): Turn on c-ares based resolver by default after the address
sorter and the CNAME support are added. */
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
- grpc_error *error = grpc_ares_init();
+ grpc_error* error = grpc_ares_init();
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
return;
@@ -441,7 +441,7 @@ extern "C" void grpc_resolver_dns_ares_init(void) {
}
extern "C" void grpc_resolver_dns_ares_shutdown(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
grpc_ares_cleanup();
}
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 3d4309f2fa..a5fb1f10e1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -31,31 +31,31 @@ typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
bound to its ares_channel when necessary. */
-void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
\a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
query. */
-ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver);
+ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver);
/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
created successfully. */
-grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
- grpc_pollset_set *pollset_set);
+grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set);
/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
will be cancelled and their on_done callbacks will be invoked with a status
of ARES_ECANCELLED. */
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver);
/* Shutdown all the grpc_fds used by \a ev_driver */
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
- */
+ */
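
A hedged lifecycle sketch assembled from the declarations above; the wrapper function and its error handling are assumptions, not code from this patch:

#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h"

static grpc_error* example_drive_lookup(grpc_exec_ctx* exec_ctx,
                                        grpc_pollset_set* pollset_set) {
  grpc_ares_ev_driver* ev_driver = NULL;
  grpc_error* error = grpc_ares_ev_driver_create(&ev_driver, pollset_set);
  if (error != GRPC_ERROR_NONE) return error;
  /* Bind c-ares queries to the driver's channel before starting it. */
  ares_channel* channel = grpc_ares_ev_driver_get_channel(ev_driver);
  (void)channel;
  grpc_ares_ev_driver_start(exec_ctx, ev_driver);
  /* Once no further lookups are needed; pending ones get ARES_ECANCELLED. */
  grpc_ares_ev_driver_destroy(ev_driver);
  return GRPC_ERROR_NONE;
}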
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index c30cc93b6f..2bb98c1a3f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -37,7 +37,7 @@
typedef struct fd_node {
/** the owner of this fd node */
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
/** a closure wrapping on_readable_cb, which should be invoked when the
grpc_fd in this node becomes readable. */
grpc_closure read_closure;
@@ -45,12 +45,12 @@ typedef struct fd_node {
grpc_fd in this node becomes writable. */
grpc_closure write_closure;
/** next fd node in the list */
- struct fd_node *next;
+ struct fd_node* next;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** the grpc_fd owned by this fd node */
- grpc_fd *fd;
+ grpc_fd* fd;
/** if the readable closure has been registered */
bool readable_registered;
/** if the writable closure has been registered */
@@ -63,31 +63,31 @@ struct grpc_ares_ev_driver {
/** the ares_channel owned by this event driver */
ares_channel channel;
/** pollset set for driving the IO events of the channel */
- grpc_pollset_set *pollset_set;
+ grpc_pollset_set* pollset_set;
/** refcount of the event driver */
gpr_refcount refs;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** a list of grpc_fd that this event driver is currently using. */
- fd_node *fds;
+ fd_node* fds;
/** is this event driver currently working? */
bool working;
/** is this event driver being shut down */
bool shutting_down;
};
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver);
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver);
-static grpc_ares_ev_driver *grpc_ares_ev_driver_ref(
- grpc_ares_ev_driver *ev_driver) {
+static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
+ grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
gpr_ref(&ev_driver->refs);
return ev_driver;
}
-static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
+static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) {
gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
if (gpr_unref(&ev_driver->refs)) {
gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
@@ -98,7 +98,7 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
}
}
-static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+static void fd_node_destroy(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
GPR_ASSERT(!fdn->readable_registered);
GPR_ASSERT(!fdn->writable_registered);
@@ -111,29 +111,30 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_free(fdn);
}
-static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+static void fd_node_shutdown(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
gpr_mu_lock(&fdn->mu);
fdn->shutting_down = true;
if (!fdn->readable_registered && !fdn->writable_registered) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
- grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "c-ares fd shutdown"));
+ grpc_fd_shutdown(
+ exec_ctx, fdn->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
-grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
- grpc_pollset_set *pollset_set) {
- *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
+grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver,
+ grpc_pollset_set* pollset_set) {
+ *ev_driver = (grpc_ares_ev_driver*)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
- char *err_msg;
+ char* err_msg;
gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
ares_strerror(status));
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
gpr_free(err_msg);
gpr_free(*ev_driver);
return err;
@@ -147,7 +148,7 @@ grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
return GRPC_ERROR_NONE;
}
-void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver) {
  // It's not safe to shut down remaining fds here directly, because
// ares_host_callback does not provide an exec_ctx. We mark the event driver
// as being shut down. If the event driver is working,
@@ -159,14 +160,15 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
grpc_ares_ev_driver_unref(ev_driver);
}
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
ev_driver->shutting_down = true;
- fd_node *fn = ev_driver->fds;
+ fd_node* fn = ev_driver->fds;
while (fn != NULL) {
- grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "grpc_ares_ev_driver_shutdown"));
+ grpc_fd_shutdown(
+ exec_ctx, fn->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);
@@ -174,13 +176,13 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
// Search for fd in the fd_node list pointed to by *head. This is an O(n) search;
// the max possible value of n is ARES_GETSOCK_MAXNUM (16). n is typically 1-2 in our tests.
-static fd_node *pop_fd_node(fd_node **head, int fd) {
+static fd_node* pop_fd_node(fd_node** head, int fd) {
fd_node dummy_head;
dummy_head.next = *head;
- fd_node *node = &dummy_head;
+ fd_node* node = &dummy_head;
while (node->next != NULL) {
if (grpc_fd_wrapped_fd(node->next->fd) == fd) {
- fd_node *ret = node->next;
+ fd_node* ret = node->next;
node->next = node->next->next;
*head = dummy_head.next;
return ret;
@@ -191,16 +193,16 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
}
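
pop_fd_node above relies on the classic dummy-head trick: a stack-allocated sentinel in front of the list lets the loop delete the first element through the same code path as any other element. A standalone sketch of the same technique with a simplified node type (all names below are illustrative, not from the tree):

#include <stddef.h>

typedef struct node {
  int fd;
  struct node* next;
} node;

/* Remove and return the first node whose fd matches, or NULL if absent. */
static node* pop_node(node** head, int fd) {
  node dummy;               /* sentinel in front of the real head */
  dummy.next = *head;
  node* prev = &dummy;
  while (prev->next != NULL) {
    if (prev->next->fd == fd) {
      node* hit = prev->next;
      prev->next = hit->next;  /* unlink */
      *head = dummy.next;      /* the head itself may have been removed */
      return hit;
    }
    prev = prev->next;
  }
  return NULL;
}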
/* Check if \a fd is still readable */
-static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver,
+static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver,
int fd) {
size_t bytes_available = 0;
return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
}
-static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- fd_node *fdn = (fd_node *)arg;
- grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+static void on_readable_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ fd_node* fdn = (fd_node*)arg;
+ grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->readable_registered = false;
@@ -232,10 +234,10 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
-static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- fd_node *fdn = (fd_node *)arg;
- grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+static void on_writable_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ fd_node* fdn = (fd_node*)arg;
+ grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
const int fd = grpc_fd_wrapped_fd(fdn->fd);
fdn->writable_registered = false;
@@ -265,15 +267,15 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_ares_ev_driver_unref(ev_driver);
}
-ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) {
+ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver) {
return &ev_driver->channel;
}
// Get the file descriptors used by the ev_driver's ares channel, and register
// driver_closure with these file descriptors.
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
- fd_node *new_list = NULL;
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
+ fd_node* new_list = NULL;
if (!ev_driver->shutting_down) {
ares_socket_t socks[ARES_GETSOCK_MAXNUM];
int socks_bitmask =
@@ -281,12 +283,12 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
- fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]);
+ fd_node* fdn = pop_fd_node(&ev_driver->fds, socks[i]);
      // Create a new fd_node if socks[i] is not in the fd_node list.
if (fdn == NULL) {
- char *fd_name;
+ char* fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
- fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
+ fdn = (fd_node*)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;
@@ -331,7 +333,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
// are therefore no longer in use, so they can be shut down and removed from
// the list.
while (ev_driver->fds != NULL) {
- fd_node *cur = ev_driver->fds;
+ fd_node* cur = ev_driver->fds;
ev_driver->fds = ev_driver->fds->next;
fd_node_shutdown(exec_ctx, cur);
}
@@ -343,8 +345,8 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
- grpc_ares_ev_driver *ev_driver) {
+void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
+ grpc_ares_ev_driver* ev_driver) {
gpr_mu_lock(&ev_driver->mu);
if (!ev_driver->working) {
ev_driver->working = true;
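
The event driver above follows the usual gpr_refcount idiom: take a ref for every outstanding callback, and destroy the object when the final unref reports zero. A self-contained sketch of that idiom using C11 atomics in place of gpr_ref/gpr_unref (those are the real helpers; everything below is illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  atomic_int refs;
  /* ... driver state ... */
} driver;

static driver* driver_create(void) {
  driver* d = (driver*)calloc(1, sizeof(driver));
  atomic_init(&d->refs, 1);            /* caller owns the initial ref */
  return d;
}

static driver* driver_ref(driver* d) {
  atomic_fetch_add_explicit(&d->refs, 1, memory_order_relaxed);
  return d;
}

static void driver_unref(driver* d) {
  /* Last unref (1 -> 0) tears the object down, mirroring gpr_unref's
     true-on-zero contract. */
  if (atomic_fetch_sub_explicit(&d->refs, 1, memory_order_acq_rel) == 1) {
    printf("destroy driver\n");
    free(d);
  }
}

int main(void) {
  driver* d = driver_create();
  driver_ref(d);    /* e.g. taken before scheduling a callback */
  driver_unref(d);  /* the callback finished */
  driver_unref(d);  /* owner's ref: this one destroys */
  return 0;
}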
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 04379975e1..9408b9d81d 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -51,13 +51,13 @@ struct grpc_ares_request {
struct ares_addr_port_node dns_server_addr;
/** following members are set in grpc_resolve_address_ares_impl */
/** closure to call when the request completes */
- grpc_closure *on_done;
+ grpc_closure* on_done;
/** the pointer to receive the resolved addresses */
- grpc_lb_addresses **lb_addrs_out;
+ grpc_lb_addresses** lb_addrs_out;
/** the pointer to receive the service config in JSON */
- char **service_config_json_out;
+ char** service_config_json_out;
  /** the event driver used by this request */
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
/** number of ongoing queries */
gpr_refcount pending_queries;
@@ -66,15 +66,15 @@ struct grpc_ares_request {
/** is there at least one successful query, set in on_done_cb */
bool success;
/** the errors explaining the request failure, set in on_done_cb */
- grpc_error *error;
+ grpc_error* error;
};
typedef struct grpc_ares_hostbyname_request {
/** following members are set in create_hostbyname_request */
/** the top-level request instance */
- grpc_ares_request *parent_request;
+ grpc_ares_request* parent_request;
/** host to resolve, parsed from the name to resolve */
- char *host;
+ char* host;
/** port to fill in sockaddr_in, parsed from the name to resolve */
uint16_t port;
/** is it a grpclb address */
@@ -83,7 +83,7 @@ typedef struct grpc_ares_hostbyname_request {
static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
-static uint16_t strhtons(const char *port) {
+static uint16_t strhtons(const char* port) {
if (strcmp(port, "http") == 0) {
return htons(80);
} else if (strcmp(port, "https") == 0) {
@@ -92,12 +92,12 @@ static uint16_t strhtons(const char *port) {
return htons((unsigned short)atoi(port));
}
-static void grpc_ares_request_ref(grpc_ares_request *r) {
+static void grpc_ares_request_ref(grpc_ares_request* r) {
gpr_ref(&r->pending_queries);
}
-static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
- grpc_ares_request *r) {
+static void grpc_ares_request_unref(grpc_exec_ctx* exec_ctx,
+ grpc_ares_request* r) {
/* If there are no pending queries, invoke on_done callback and destroy the
request */
if (gpr_unref(&r->pending_queries)) {
@@ -120,10 +120,10 @@ static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_ares_hostbyname_request *create_hostbyname_request(
- grpc_ares_request *parent_request, char *host, uint16_t port,
+static grpc_ares_hostbyname_request* create_hostbyname_request(
+ grpc_ares_request* parent_request, char* host, uint16_t port,
bool is_balancer) {
- grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc(
+ grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)gpr_zalloc(
sizeof(grpc_ares_hostbyname_request));
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
@@ -133,23 +133,23 @@ static grpc_ares_hostbyname_request *create_hostbyname_request(
return hr;
}
-static void destroy_hostbyname_request(grpc_exec_ctx *exec_ctx,
- grpc_ares_hostbyname_request *hr) {
+static void destroy_hostbyname_request(grpc_exec_ctx* exec_ctx,
+ grpc_ares_hostbyname_request* hr) {
grpc_ares_request_unref(exec_ctx, hr->parent_request);
gpr_free(hr->host);
gpr_free(hr);
}
-static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
- struct hostent *hostent) {
- grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)arg;
- grpc_ares_request *r = hr->parent_request;
+static void on_hostbyname_done_cb(void* arg, int status, int timeouts,
+ struct hostent* hostent) {
+ grpc_ares_hostbyname_request* hr = (grpc_ares_hostbyname_request*)arg;
+ grpc_ares_request* r = hr->parent_request;
gpr_mu_lock(&r->mu);
if (status == ARES_SUCCESS) {
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
r->success = true;
- grpc_lb_addresses **lb_addresses = r->lb_addrs_out;
+ grpc_lb_addresses** lb_addresses = r->lb_addrs_out;
if (*lb_addresses == NULL) {
*lb_addresses = grpc_lb_addresses_create(0, NULL);
}
@@ -158,7 +158,7 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
- (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
+ (*lb_addresses)->addresses = (grpc_lb_address*)gpr_realloc(
(*lb_addresses)->addresses,
sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
@@ -208,10 +208,10 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
}
}
} else if (!r->success) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -223,26 +223,26 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
destroy_hostbyname_request(NULL, hr);
}
-static void on_srv_query_done_cb(void *arg, int status, int timeouts,
- unsigned char *abuf, int alen) {
- grpc_ares_request *r = (grpc_ares_request *)arg;
+static void on_srv_query_done_cb(void* arg, int status, int timeouts,
+ unsigned char* abuf, int alen) {
+ grpc_ares_request* r = (grpc_ares_request*)arg;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
if (status == ARES_SUCCESS) {
gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
- struct ares_srv_reply *reply;
+ struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
if (parse_status == ARES_SUCCESS) {
- ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
- for (struct ares_srv_reply *srv_it = reply; srv_it != NULL;
+ ares_channel* channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ for (struct ares_srv_reply* srv_it = reply; srv_it != NULL;
srv_it = srv_it->next) {
if (grpc_ipv6_loopback_available()) {
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET6,
on_hostbyname_done_cb, hr);
}
- grpc_ares_hostbyname_request *hr = create_hostbyname_request(
+ grpc_ares_hostbyname_request* hr = create_hostbyname_request(
r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
hr);
@@ -253,10 +253,10 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
ares_free_data(reply);
}
} else if (!r->success) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
if (r->error == GRPC_ERROR_NONE) {
r->error = error;
@@ -270,15 +270,15 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts,
static const char g_service_config_attribute_prefix[] = "grpc_config=";
-static void on_txt_done_cb(void *arg, int status, int timeouts,
- unsigned char *buf, int len) {
+static void on_txt_done_cb(void* arg, int status, int timeouts,
+ unsigned char* buf, int len) {
gpr_log(GPR_DEBUG, "on_txt_done_cb");
- char *error_msg;
- grpc_ares_request *r = (grpc_ares_request *)arg;
+ char* error_msg;
+ grpc_ares_request* r = (grpc_ares_request*)arg;
const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1;
- struct ares_txt_ext *result = NULL;
- struct ares_txt_ext *reply = NULL;
- grpc_error *error = GRPC_ERROR_NONE;
+ struct ares_txt_ext* result = NULL;
+ struct ares_txt_ext* reply = NULL;
+ grpc_error* error = GRPC_ERROR_NONE;
gpr_mu_lock(&r->mu);
if (status != ARES_SUCCESS) goto fail;
status = ares_parse_txt_reply_ext(buf, len, &reply);
@@ -294,12 +294,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
- *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
+ *r->service_config_json_out = (char*)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
- *r->service_config_json_out = (char *)gpr_realloc(
+ *r->service_config_json_out = (char*)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@@ -326,15 +326,15 @@ done:
grpc_ares_request_unref(NULL, r);
}
-static grpc_ares_request *grpc_dns_lookup_ares_impl(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) {
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_ares_hostbyname_request *hr = NULL;
- grpc_ares_request *r = NULL;
- ares_channel *channel = NULL;
+static grpc_ares_request* grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_ares_hostbyname_request* hr = NULL;
+ grpc_ares_request* r = NULL;
+ ares_channel* channel = NULL;
/* TODO(zyc): Enable tracing after #9603 is checked in */
/* if (grpc_dns_trace) {
gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
@@ -342,8 +342,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} */
/* parse name, splitting it into host and port parts */
- char *host;
- char *port;
+ char* host;
+ char* port;
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
error = grpc_error_set_str(
@@ -360,11 +360,11 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
port = gpr_strdup(default_port);
}
- grpc_ares_ev_driver *ev_driver;
+ grpc_ares_ev_driver* ev_driver;
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
- r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
+ r = (grpc_ares_request*)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@@ -380,7 +380,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) {
r->dns_server_addr.family = AF_INET;
- struct sockaddr_in *in = (struct sockaddr_in *)addr.addr;
+ struct sockaddr_in* in = (struct sockaddr_in*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr,
sizeof(struct in_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -388,7 +388,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
} else if (grpc_parse_ipv6_hostport(dns_server, &addr,
false /* log_errors */)) {
r->dns_server_addr.family = AF_INET6;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr.addr;
+ struct sockaddr_in6* in6 = (struct sockaddr_in6*)addr.addr;
memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr,
sizeof(struct in6_addr));
r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr);
@@ -402,7 +402,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
}
int status = ares_set_servers_ports(*channel, &r->dns_server_addr);
if (status != ARES_SUCCESS) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
ares_strerror(status));
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
@@ -423,7 +423,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
if (check_grpclb) {
/* Query the SRV record */
grpc_ares_request_ref(r);
- char *service_name;
+ char* service_name;
gpr_asprintf(&service_name, "_grpclb._tcp.%s", host);
ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb,
r);
@@ -447,29 +447,29 @@ error_cleanup:
return NULL;
}
-grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) = grpc_dns_lookup_ares_impl;
+grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) = grpc_dns_lookup_ares_impl;
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {
if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver);
}
}
-grpc_error *grpc_ares_init(void) {
+grpc_error* grpc_ares_init(void) {
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
int status = ares_library_init(ARES_LIB_INIT_ALL);
gpr_mu_unlock(&g_init_mu);
if (status != ARES_SUCCESS) {
- char *error_msg;
+ char* error_msg;
gpr_asprintf(&error_msg, "ares_library_init failed: %s",
ares_strerror(status));
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
gpr_free(error_msg);
return error;
}
@@ -488,28 +488,28 @@ void grpc_ares_cleanup(void) {
typedef struct grpc_resolve_address_ares_request {
/** the pointer to receive the resolved addresses */
- grpc_resolved_addresses **addrs_out;
+ grpc_resolved_addresses** addrs_out;
/** currently resolving lb addresses */
- grpc_lb_addresses *lb_addrs;
+ grpc_lb_addresses* lb_addrs;
/** closure to call when the resolve_address_ares request completes */
- grpc_closure *on_resolve_address_done;
+ grpc_closure* on_resolve_address_done;
/** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the
grpc_dns_lookup_ares operation is done. */
grpc_closure on_dns_lookup_done;
} grpc_resolve_address_ares_request;
-static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_resolve_address_ares_request *r =
- (grpc_resolve_address_ares_request *)arg;
- grpc_resolved_addresses **resolved_addresses = r->addrs_out;
+static void on_dns_lookup_done_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_resolve_address_ares_request* r =
+ (grpc_resolve_address_ares_request*)arg;
+ grpc_resolved_addresses** resolved_addresses = r->addrs_out;
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
*resolved_addresses =
- (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
- (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
+ (*resolved_addresses)->addrs = (grpc_resolved_address*)gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
@@ -523,14 +523,14 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(r);
}
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
- grpc_resolve_address_ares_request *r =
- (grpc_resolve_address_ares_request *)gpr_zalloc(
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
+ grpc_resolve_address_ares_request* r =
+ (grpc_resolve_address_ares_request*)gpr_zalloc(
sizeof(grpc_resolve_address_ares_request));
r->addrs_out = addrs;
r->on_resolve_address_done = on_done;
@@ -543,8 +543,8 @@ static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
}
void (*grpc_resolve_address_ares)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
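
grpc_dns_lookup_ares and grpc_resolve_address_ares are exported as function pointers bound to the *_impl functions, so tests or alternative resolvers can swap the implementation without touching call sites (grpc_cancel_ares_request checks for exactly that). A minimal sketch of the pattern with invented names:

#include <stdio.h>

/* Real implementation. */
static int lookup_impl(const char* name) {
  printf("resolving %s\n", name);
  return 0;
}

/* The public entry point is a pointer, bound to the real implementation by
   default. */
int (*lookup)(const char* name) = lookup_impl;

/* A test can install a fake without touching any caller. */
static int lookup_fake(const char* name) { return 42; }

int main(void) {
  lookup("example.org");   /* calls lookup_impl */
  lookup = lookup_fake;    /* override, e.g. from a test fixture */
  return lookup("example.org") == 42 ? 0 : 1;
}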
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 38fbea9aac..6882b7b1d1 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -36,12 +36,12 @@ typedef struct grpc_ares_request grpc_ares_request;
must be called at least once before this function. \a on_done may be
called directly in this function without being scheduled with \a exec_ctx,
so it must not try to acquire locks that are being held by the caller. */
-extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addresses);
+extern void (*grpc_resolve_address_ares)(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses);
/* Asynchronously resolve \a name. It will try to resolve grpclb SRV records in
addition to the normal address records. For normal address records, it uses
@@ -50,19 +50,19 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
function. \a on_done may be called directly in this function without being
scheduled with \a exec_ctx, so it must not try to acquire locks that are
being held by the caller. */
-extern grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addresses, bool check_grpclb,
- char **service_config_json);
+extern grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addresses, bool check_grpclb,
+ char** service_config_json);
/* Cancel the pending grpc_ares_request \a request */
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx,
- grpc_ares_request *request);
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx,
+ grpc_ares_request* request);
/* Initialize gRPC ares wrapper. Must be called at least once before
grpc_resolve_address_ares(). */
-grpc_error *grpc_ares_init(void);
+grpc_error* grpc_ares_init(void);
/* Uninitialize the gRPC ares wrapper. If there was more than one previous call to
grpc_ares_init(), this function uninitializes the gRPC ares wrapper only if
@@ -74,4 +74,4 @@ void grpc_ares_cleanup(void);
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index f2587c4520..a68a7c47fb 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -25,36 +25,36 @@ struct grpc_ares_request {
char val;
};
-static grpc_ares_request *grpc_dns_lookup_ares_impl(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) {
+static grpc_ares_request* grpc_dns_lookup_ares_impl(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) {
return NULL;
}
-grpc_ares_request *(*grpc_dns_lookup_ares)(
- grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name,
- const char *default_port, grpc_pollset_set *interested_parties,
- grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb,
- char **service_config_json) = grpc_dns_lookup_ares_impl;
+grpc_ares_request* (*grpc_dns_lookup_ares)(
+ grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
+ const char* default_port, grpc_pollset_set* interested_parties,
+ grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+ char** service_config_json) = grpc_dns_lookup_ares_impl;
-void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {}
+void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {}
-grpc_error *grpc_ares_init(void) { return GRPC_ERROR_NONE; }
+grpc_error* grpc_ares_init(void) { return GRPC_ERROR_NONE; }
void grpc_ares_cleanup(void) {}
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx,
- const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {}
+static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
+ const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {}
void (*grpc_resolve_address_ares)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */
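
This fallback file is the mirror image of the real wrapper: when GRPC_ARES != 1 (or on libuv builds) the same symbols are compiled as no-op stubs, so callers never need their own #ifdefs. A reduced sketch of the guard pair using a hypothetical FEATURE_X flag:

/* feature_x.h - one declaration shared by both translation units. */
int feature_x_init(void);

/* feature_x.c - real implementation, built when the feature is enabled. */
#if defined(FEATURE_X)
int feature_x_init(void) { /* ... do real work ... */ return 0; }
#endif /* defined(FEATURE_X) */

/* feature_x_fallback.c - stub with the same signature, built otherwise. */
#if !defined(FEATURE_X)
int feature_x_init(void) { return 0; /* no-op */ }
#endif /* !defined(FEATURE_X) */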
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 62aead5517..a57ab66118 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -45,13 +45,13 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** name to resolve */
- char *name_to_resolve;
+ char* name_to_resolve;
/** default port to use */
- char *default_port;
+ char* default_port;
/** channel args. */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** pollset_set to drive the name resolution process */
- grpc_pollset_set *interested_parties;
+ grpc_pollset_set* interested_parties;
/** are we currently resolving? */
bool resolving;
@@ -60,11 +60,11 @@ typedef struct {
/** which version of the result is current? */
int resolved_version;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
/** current (fully resolved) result */
- grpc_channel_args *resolved_result;
+ grpc_channel_args* resolved_result;
/** retry timer */
bool have_retry_timer;
grpc_timer retry_timer;
@@ -73,30 +73,30 @@ typedef struct {
grpc_backoff backoff_state;
/** currently resolving addresses */
- grpc_resolved_addresses *addresses;
+ grpc_resolved_addresses* addresses;
} dns_resolver;
-static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r);
-static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r);
+static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r);
+static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r);
-static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable dns_resolver_vtable = {
dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
dns_next_locked};
-static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ dns_resolver* r = (dns_resolver*)resolver;
if (r->have_retry_timer) {
grpc_timer_cancel(exec_ctx, &r->retry_timer);
}
@@ -109,19 +109,19 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ dns_resolver* r = (dns_resolver*)resolver;
if (!r->resolving) {
grpc_backoff_reset(&r->backoff_state);
dns_start_resolving_locked(exec_ctx, r);
}
}
-static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
- dns_resolver *r = (dns_resolver *)resolver;
+static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ dns_resolver* r = (dns_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
@@ -133,9 +133,9 @@ static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
}
}
-static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- dns_resolver *r = (dns_resolver *)arg;
+static void dns_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ dns_resolver* r = (dns_resolver*)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
@@ -147,17 +147,17 @@ static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
}
-static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- dns_resolver *r = (dns_resolver *)arg;
- grpc_channel_args *result = NULL;
+static void dns_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ dns_resolver* r = (dns_resolver*)arg;
+ grpc_channel_args* result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
GRPC_ERROR_REF(error);
error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(r->name_to_resolve));
if (r->addresses != NULL) {
- grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ grpc_lb_addresses* addresses = grpc_lb_addresses_create(
r->addresses->naddrs, NULL /* user_data_vtable */);
for (size_t i = 0; i < r->addresses->naddrs; ++i) {
grpc_lb_addresses_set_address(
@@ -198,8 +198,8 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
}
-static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r) {
+static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r) {
GRPC_RESOLVER_REF(&r->base, "dns-resolving");
GPR_ASSERT(!r->resolving);
r->resolving = true;
@@ -211,8 +211,8 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx,
&r->addresses);
}
-static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- dns_resolver *r) {
+static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ dns_resolver* r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_result = r->resolved_result == NULL
@@ -224,8 +224,8 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
- dns_resolver *r = (dns_resolver *)gr;
+static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+ dns_resolver* r = (dns_resolver*)gr;
if (r->resolved_result != NULL) {
grpc_channel_args_destroy(exec_ctx, r->resolved_result);
}
@@ -236,18 +236,18 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
gpr_free(r);
}
-static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- const char *default_port) {
+static grpc_resolver* dns_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ const char* default_port) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based dns uri's not supported");
return NULL;
}
// Get name from args.
- char *path = args->uri->path;
+ char* path = args->uri->path;
if (path[0] == '/') ++path;
// Create resolver.
- dns_resolver *r = (dns_resolver *)gpr_zalloc(sizeof(dns_resolver));
+ dns_resolver* r = (dns_resolver*)gpr_zalloc(sizeof(dns_resolver));
grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner);
r->name_to_resolve = gpr_strdup(path);
r->default_port = gpr_strdup(default_port);
@@ -269,19 +269,19 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void dns_factory_ref(grpc_resolver_factory *factory) {}
+static void dns_factory_ref(grpc_resolver_factory* factory) {}
-static void dns_factory_unref(grpc_resolver_factory *factory) {}
+static void dns_factory_unref(grpc_resolver_factory* factory) {}
-static grpc_resolver *dns_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args) {
+static grpc_resolver* dns_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args) {
return dns_create(exec_ctx, args, "https");
}
-static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory,
- grpc_uri *uri) {
- const char *path = uri->path;
+static char* dns_factory_get_default_host_name(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
@@ -291,17 +291,17 @@ static const grpc_resolver_factory_vtable dns_factory_vtable = {
dns_factory_get_default_host_name, "dns"};
static grpc_resolver_factory dns_resolver_factory = {&dns_factory_vtable};
-static grpc_resolver_factory *dns_resolver_factory_create() {
+static grpc_resolver_factory* dns_resolver_factory_create() {
return &dns_resolver_factory;
}
extern "C" void grpc_resolver_dns_native_init(void) {
- char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
grpc_register_resolver_type(dns_resolver_factory_create());
} else {
- grpc_resolver_factory *existing_factory =
+ grpc_resolver_factory* existing_factory =
grpc_resolver_factory_lookup("dns");
if (existing_factory == NULL) {
gpr_log(GPR_DEBUG, "Using native dns resolver");
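
The native DNS resolver, like the sockaddr resolver below, implements grpc_resolver through a static vtable of function pointers, embedding the base struct as the first member so a grpc_resolver* can be cast back to the concrete type. A compact sketch of that C-style polymorphism with made-up names:

#include <stdlib.h>

typedef struct resolver resolver;
typedef struct {
  void (*destroy)(resolver* r);
  void (*next)(resolver* r);
} resolver_vtable;

struct resolver {
  const resolver_vtable* vtable;  /* base class: must be first */
};

typedef struct {
  resolver base;                  /* embedding makes the downcast valid */
  int version;
} toy_resolver;

static void toy_destroy(resolver* r) { free((toy_resolver*)r); }
static void toy_next(resolver* r) { ((toy_resolver*)r)->version++; }

static const resolver_vtable toy_vtable = {toy_destroy, toy_next};

static resolver* toy_create(void) {
  toy_resolver* r = (toy_resolver*)calloc(1, sizeof(toy_resolver));
  r->base.vtable = &toy_vtable;
  return &r->base;
}

int main(void) {
  resolver* r = toy_create();
  r->vtable->next(r);      /* dynamic dispatch through the vtable */
  r->vtable->destroy(r);
  return 0;
}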
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index 95c3bafed8..3f341fa8ed 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -65,4 +65,4 @@ void grpc_fake_resolver_response_generator_unref(
#endif
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
- */
+ */
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index dda9542325..9fc8dffea3 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -41,36 +41,36 @@ typedef struct {
/** base class: must be first */
grpc_resolver base;
/** the addresses that we've 'resolved' */
- grpc_lb_addresses *addresses;
+ grpc_lb_addresses* addresses;
/** channel args */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
/** have we published? */
bool published;
/** pending next completion, or NULL */
- grpc_closure *next_completion;
+ grpc_closure* next_completion;
/** target result address for next completion */
- grpc_channel_args **target_result;
+ grpc_channel_args** target_result;
} sockaddr_resolver;
-static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- sockaddr_resolver *r);
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ sockaddr_resolver* r);
-static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *r);
-static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
- grpc_channel_args **target_result,
- grpc_closure *on_complete);
+static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* r);
+static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
sockaddr_destroy, sockaddr_shutdown_locked,
sockaddr_channel_saw_error_locked, sockaddr_next_locked};
-static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
if (r->next_completion != NULL) {
*r->target_result = NULL;
GRPC_CLOSURE_SCHED(
@@ -80,26 +80,26 @@ static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
r->published = false;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
}
-static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx,
- grpc_resolver *resolver,
- grpc_channel_args **target_result,
- grpc_closure *on_complete) {
- sockaddr_resolver *r = (sockaddr_resolver *)resolver;
+static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx,
+ grpc_resolver* resolver,
+ grpc_channel_args** target_result,
+ grpc_closure* on_complete) {
+ sockaddr_resolver* r = (sockaddr_resolver*)resolver;
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_result = target_result;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
}
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
- sockaddr_resolver *r) {
+static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
+ sockaddr_resolver* r) {
if (r->next_completion != NULL && !r->published) {
r->published = true;
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
@@ -110,42 +110,42 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
- sockaddr_resolver *r = (sockaddr_resolver *)gr;
+static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+ sockaddr_resolver* r = (sockaddr_resolver*)gr;
grpc_lb_addresses_destroy(exec_ctx, r->addresses);
grpc_channel_args_destroy(exec_ctx, r->channel_args);
gpr_free(r);
}
-static char *ip_get_default_authority(grpc_uri *uri) {
- const char *path = uri->path;
+static char* ip_get_default_authority(grpc_uri* uri) {
+ const char* path = uri->path;
if (path[0] == '/') ++path;
return gpr_strdup(path);
}
-static char *ipv4_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+static char* ipv4_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return ip_get_default_authority(uri);
}
-static char *ipv6_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+static char* ipv6_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return ip_get_default_authority(uri);
}
#ifdef GRPC_HAVE_UNIX_SOCKET
-char *unix_get_default_authority(grpc_resolver_factory *factory,
- grpc_uri *uri) {
+char* unix_get_default_authority(grpc_resolver_factory* factory,
+ grpc_uri* uri) {
return gpr_strdup("localhost");
}
#endif
-static void do_nothing(void *ignored) {}
+static void do_nothing(void* ignored) {}
-static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
- grpc_resolver_args *args,
- bool parse(const grpc_uri *uri,
- grpc_resolved_address *dst)) {
+static grpc_resolver* sockaddr_create(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_args* args,
+ bool parse(const grpc_uri* uri,
+ grpc_resolved_address* dst)) {
if (0 != strcmp(args->uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
args->uri->scheme);
@@ -157,12 +157,12 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer path_parts;
grpc_slice_buffer_init(&path_parts);
grpc_slice_split(path_slice, ",", &path_parts);
- grpc_lb_addresses *addresses =
+ grpc_lb_addresses* addresses =
grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
bool errors_found = false;
for (size_t i = 0; i < addresses->num_addresses; i++) {
grpc_uri ith_uri = *args->uri;
- char *part_str = grpc_slice_to_c_string(path_parts.slices[i]);
+ char* part_str = grpc_slice_to_c_string(path_parts.slices[i]);
ith_uri.path = part_str;
if (!parse(&ith_uri, &addresses->addresses[i].address)) {
errors_found = true; /* GPR_TRUE */
@@ -177,8 +177,8 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
/* Instantiate resolver. */
- sockaddr_resolver *r =
- (sockaddr_resolver *)gpr_zalloc(sizeof(sockaddr_resolver));
+ sockaddr_resolver* r =
+ (sockaddr_resolver*)gpr_zalloc(sizeof(sockaddr_resolver));
r->addresses = addresses;
r->channel_args = grpc_channel_args_copy(args->args);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
@@ -189,14 +189,14 @@ static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
* FACTORY
*/
-static void sockaddr_factory_ref(grpc_resolver_factory *factory) {}
+static void sockaddr_factory_ref(grpc_resolver_factory* factory) {}
-static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
+static void sockaddr_factory_unref(grpc_resolver_factory* factory) {}
#define DECL_FACTORY(name) \
- static grpc_resolver *name##_factory_create_resolver( \
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \
- grpc_resolver_args *args) { \
+ static grpc_resolver* name##_factory_create_resolver( \
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory, \
+ grpc_resolver_args* args) { \
return sockaddr_create(exec_ctx, args, grpc_parse_##name); \
} \
static const grpc_resolver_factory_vtable name##_factory_vtable = { \
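
DECL_FACTORY stamps out a create_resolver function, a vtable and a factory object per URI scheme via token pasting. A tiny standalone sketch of the same ##-based code generation with hypothetical handler names:

#include <stdio.h>

#define DECL_HANDLER(name)                                  \
  static void name##_handle(int value) {                    \
    printf(#name ": %d\n", value); /* #name stringizes */   \
  }

DECL_HANDLER(ipv4)  /* expands to ipv4_handle */
DECL_HANDLER(ipv6)  /* expands to ipv6_handle */

int main(void) {
  ipv4_handle(4);
  ipv6_handle(6);
  return 0;
}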
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index c8b2c58db3..62555a4f01 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -32,44 +32,44 @@ typedef struct grpc_resolver_factory grpc_resolver_factory;
typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
struct grpc_resolver_factory {
- const grpc_resolver_factory_vtable *vtable;
+ const grpc_resolver_factory_vtable* vtable;
};
typedef struct grpc_resolver_args {
- grpc_uri *uri;
- const grpc_channel_args *args;
- grpc_pollset_set *pollset_set;
- grpc_combiner *combiner;
+ grpc_uri* uri;
+ const grpc_channel_args* args;
+ grpc_pollset_set* pollset_set;
+ grpc_combiner* combiner;
} grpc_resolver_args;
struct grpc_resolver_factory_vtable {
- void (*ref)(grpc_resolver_factory *factory);
- void (*unref)(grpc_resolver_factory *factory);
+ void (*ref)(grpc_resolver_factory* factory);
+ void (*unref)(grpc_resolver_factory* factory);
/** Implementation of grpc_resolver_factory_create_resolver */
- grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx,
- grpc_resolver_factory *factory,
- grpc_resolver_args *args);
+ grpc_resolver* (*create_resolver)(grpc_exec_ctx* exec_ctx,
+ grpc_resolver_factory* factory,
+ grpc_resolver_args* args);
/** Implementation of grpc_resolver_factory_get_default_authority */
- char *(*get_default_authority)(grpc_resolver_factory *factory, grpc_uri *uri);
+ char* (*get_default_authority)(grpc_resolver_factory* factory, grpc_uri* uri);
/** URI scheme that this factory implements */
- const char *scheme;
+ const char* scheme;
};
-void grpc_resolver_factory_ref(grpc_resolver_factory *resolver);
-void grpc_resolver_factory_unref(grpc_resolver_factory *resolver);
+void grpc_resolver_factory_ref(grpc_resolver_factory* resolver);
+void grpc_resolver_factory_unref(grpc_resolver_factory* resolver);
/** Create a resolver instance for a name */
-grpc_resolver *grpc_resolver_factory_create_resolver(
- grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
- grpc_resolver_args *args);
+grpc_resolver* grpc_resolver_factory_create_resolver(
+ grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
+ grpc_resolver_args* args);
/** Return a (freshly allocated with gpr_malloc) string representing
the default authority to use for this scheme. */
-char *grpc_resolver_factory_get_default_authority(
- grpc_resolver_factory *factory, grpc_uri *uri);
+char* grpc_resolver_factory_get_default_authority(
+ grpc_resolver_factory* factory, grpc_uri* uri);
#ifdef __cplusplus
}
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 1a0fb0bc3c..9e45887f35 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -27,7 +27,7 @@
#define MAX_RESOLVERS 10
#define DEFAULT_RESOLVER_PREFIX_MAX_LENGTH 32
-static grpc_resolver_factory *g_all_of_the_resolvers[MAX_RESOLVERS];
+static grpc_resolver_factory* g_all_of_the_resolvers[MAX_RESOLVERS];
static int g_number_of_resolvers = 0;
static char g_default_resolver_prefix[DEFAULT_RESOLVER_PREFIX_MAX_LENGTH] =
@@ -49,7 +49,7 @@ void grpc_resolver_registry_shutdown(void) {
}
void grpc_resolver_registry_set_default_prefix(
- const char *default_resolver_prefix) {
+ const char* default_resolver_prefix) {
const size_t len = strlen(default_resolver_prefix);
GPR_ASSERT(len < DEFAULT_RESOLVER_PREFIX_MAX_LENGTH &&
"default resolver prefix too long");
@@ -59,7 +59,7 @@ void grpc_resolver_registry_set_default_prefix(
strcpy(g_default_resolver_prefix, default_resolver_prefix);
}
-void grpc_register_resolver_type(grpc_resolver_factory *factory) {
+void grpc_register_resolver_type(grpc_resolver_factory* factory) {
int i;
for (i = 0; i < g_number_of_resolvers; i++) {
GPR_ASSERT(0 != strcmp(factory->vtable->scheme,
@@ -70,7 +70,7 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory) {
g_all_of_the_resolvers[g_number_of_resolvers++] = factory;
}
-static grpc_resolver_factory *lookup_factory(const char *name) {
+static grpc_resolver_factory* lookup_factory(const char* name) {
int i;
for (i = 0; i < g_number_of_resolvers; i++) {
@@ -81,22 +81,22 @@ static grpc_resolver_factory *lookup_factory(const char *name) {
return NULL;
}
-grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name) {
- grpc_resolver_factory *f = lookup_factory(name);
+grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name) {
+ grpc_resolver_factory* f = lookup_factory(name);
if (f) grpc_resolver_factory_ref(f);
return f;
}
-static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) {
+static grpc_resolver_factory* lookup_factory_by_uri(grpc_uri* uri) {
if (!uri) return NULL;
return lookup_factory(uri->scheme);
}
-static grpc_resolver_factory *resolve_factory(grpc_exec_ctx *exec_ctx,
- const char *target,
- grpc_uri **uri,
- char **canonical_target) {
- grpc_resolver_factory *factory = NULL;
+static grpc_resolver_factory* resolve_factory(grpc_exec_ctx* exec_ctx,
+ const char* target,
+ grpc_uri** uri,
+ char** canonical_target) {
+ grpc_resolver_factory* factory = NULL;
GPR_ASSERT(uri != NULL);
*uri = grpc_uri_parse(exec_ctx, target, 1);
@@ -116,15 +116,15 @@ static grpc_resolver_factory *resolve_factory(grpc_exec_ctx *exec_ctx,
return factory;
}
-grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *args,
- grpc_pollset_set *pollset_set,
- grpc_combiner *combiner) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
- grpc_resolver_factory *factory =
+grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
+ grpc_resolver_factory* factory =
resolve_factory(exec_ctx, target, &uri, &canonical_target);
- grpc_resolver *resolver;
+ grpc_resolver* resolver;
grpc_resolver_args resolver_args;
memset(&resolver_args, 0, sizeof(resolver_args));
resolver_args.uri = uri;
@@ -138,21 +138,21 @@ grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
return resolver;
}
-char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
- grpc_resolver_factory *factory =
+char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
+ grpc_resolver_factory* factory =
resolve_factory(exec_ctx, target, &uri, &canonical_target);
- char *authority = grpc_resolver_factory_get_default_authority(factory, uri);
+ char* authority = grpc_resolver_factory_get_default_authority(factory, uri);
grpc_uri_destroy(uri);
gpr_free(canonical_target);
return authority;
}
-char *grpc_resolver_factory_add_default_prefix_if_needed(
- grpc_exec_ctx *exec_ctx, const char *target) {
- grpc_uri *uri = NULL;
- char *canonical_target = NULL;
+char* grpc_resolver_factory_add_default_prefix_if_needed(
+ grpc_exec_ctx* exec_ctx, const char* target) {
+ grpc_uri* uri = NULL;
+ char* canonical_target = NULL;
resolve_factory(exec_ctx, target, &uri, &canonical_target);
grpc_uri_destroy(uri);
return canonical_target == NULL ? gpr_strdup(target) : canonical_target;
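
The registry itself is a fixed-size array of factory pointers searched linearly by scheme, with duplicate registration and overflow treated as programming errors. A toy version of the register/lookup shape with invented names:

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define MAX_ENTRIES 10

typedef struct {
  const char* scheme;
  /* ... creation hooks ... */
} factory;

static factory* g_entries[MAX_ENTRIES];
static int g_count = 0;

static void register_factory(factory* f) {
  for (int i = 0; i < g_count; i++) {
    /* duplicate schemes are a programming error, as in the real registry */
    assert(strcmp(f->scheme, g_entries[i]->scheme) != 0);
  }
  assert(g_count < MAX_ENTRIES);
  g_entries[g_count++] = f;
}

static factory* lookup_factory(const char* scheme) {
  for (int i = 0; i < g_count; i++) {
    if (strcmp(g_entries[i]->scheme, scheme) == 0) return g_entries[i];
  }
  return NULL;
}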
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 06d0b99a35..01a2d0b18b 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -30,14 +30,14 @@ void grpc_resolver_registry_init();
void grpc_resolver_registry_shutdown(void);
/** Set the default URI prefix to \a default_prefix. */
-void grpc_resolver_registry_set_default_prefix(const char *default_prefix);
+void grpc_resolver_registry_set_default_prefix(const char* default_prefix);
/** Register a resolver type.
URIs of \a scheme will be resolved with the given resolver.
If \a priority is greater than zero, then the resolver will be eligible
to resolve names that are passed in with no scheme. Higher priority
resolvers will be tried before lower priority ones. */
-void grpc_register_resolver_type(grpc_resolver_factory *factory);
+void grpc_register_resolver_type(grpc_resolver_factory* factory);
/** Create a resolver given \a target.
First tries to parse \a target as a URI. If this succeeds, tries
@@ -52,23 +52,23 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory);
(typically the set of arguments passed in from the client API).
\a pollset_set is used to drive IO in the name resolution process, it
should not be NULL. */
-grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *args,
- grpc_pollset_set *pollset_set,
- grpc_combiner *combiner);
+grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* args,
+ grpc_pollset_set* pollset_set,
+ grpc_combiner* combiner);
/** Find a resolver factory given a name and return an (owned-by-the-caller)
* reference to it */
-grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name);
+grpc_resolver_factory* grpc_resolver_factory_lookup(const char* name);
/** Given a target, return a (freshly allocated with gpr_malloc) string
representing the default authority to pass from a client. */
-char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target);
+char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target);
/** Returns a newly allocated string containing \a target, adding the
default prefix if needed. */
-char *grpc_resolver_factory_add_default_prefix_if_needed(
- grpc_exec_ctx *exec_ctx, const char *target);
+char* grpc_resolver_factory_add_default_prefix_if_needed(
+ grpc_exec_ctx* exec_ctx, const char* target);
#ifdef __cplusplus
}
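For reference, a minimal sketch of how the registry entry points above compose, assuming a caller that already owns an exec_ctx, combiner and pollset_set (nothing here creates them); the target string is illustrative only.

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/ext/filters/client_channel/resolver_registry.h"

/* Hedged illustration: normalize a bare target, derive its default
   :authority, then instantiate the resolver registered for its scheme. */
static grpc_resolver* resolve_target_example(grpc_exec_ctx* exec_ctx,
                                             const grpc_channel_args* args,
                                             grpc_pollset_set* pollset_set,
                                             grpc_combiner* combiner) {
  /* Prepends the default prefix (e.g. "dns:///") when the bare target does
     not already parse as a URI. */
  char* target = grpc_resolver_factory_add_default_prefix_if_needed(
      exec_ctx, "localhost:50051");
  char* authority = grpc_get_default_authority(exec_ctx, target);
  gpr_log(GPR_DEBUG, "target=%s default authority=%s", target, authority);
  grpc_resolver* resolver =
      grpc_resolver_create(exec_ctx, target, args, pollset_set, combiner);
  gpr_free(authority);
  gpr_free(target);
  return resolver; /* caller owns the reference */
}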
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index b954e1b879..427df743d6 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -52,27 +52,27 @@
#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
-#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
- ((grpc_connected_subchannel *)(gpr_atm_##barrier##_load( \
+#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \
+ ((grpc_connected_subchannel*)(gpr_atm_##barrier##_load( \
&(subchannel)->connected_subchannel)))
typedef struct {
grpc_closure closure;
- grpc_subchannel *subchannel;
+ grpc_subchannel* subchannel;
grpc_connectivity_state connectivity_state;
} state_watcher;
typedef struct external_state_watcher {
- grpc_subchannel *subchannel;
- grpc_pollset_set *pollset_set;
- grpc_closure *notify;
+ grpc_subchannel* subchannel;
+ grpc_pollset_set* pollset_set;
+ grpc_closure* notify;
grpc_closure closure;
- struct external_state_watcher *next;
- struct external_state_watcher *prev;
+ struct external_state_watcher* next;
+ struct external_state_watcher* prev;
} external_state_watcher;
struct grpc_subchannel {
- grpc_connector *connector;
+ grpc_connector* connector;
/** refcount
- lower INTERNAL_REF_BITS bits are for internal references:
@@ -82,12 +82,12 @@ struct grpc_subchannel {
gpr_atm ref_pair;
/** non-transport related channel filters */
- const grpc_channel_filter **filters;
+ const grpc_channel_filter** filters;
size_t num_filters;
/** channel arguments */
- grpc_channel_args *args;
+ grpc_channel_args* args;
- grpc_subchannel_key *key;
+ grpc_subchannel_key* key;
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -100,7 +100,7 @@ struct grpc_subchannel {
/** pollset_set tracking who's interested in a connection
being setup */
- grpc_pollset_set *pollset_set;
+ grpc_pollset_set* pollset_set;
/** active connection, or null; of type grpc_connected_subchannel */
gpr_atm connected_subchannel;
@@ -130,22 +130,22 @@ struct grpc_subchannel {
};
struct grpc_subchannel_call {
- grpc_connected_subchannel *connection;
- grpc_closure *schedule_closure_after_destroy;
+ grpc_connected_subchannel* connection;
+ grpc_closure* schedule_closure_after_destroy;
};
-#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
-#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)(con))
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack*)((call) + 1))
+#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack*)(con))
#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
- (((grpc_subchannel_call *)(callstack)) - 1)
+ (((grpc_subchannel_call*)(callstack)) - 1)
-static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
- grpc_error *error);
+static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* subchannel,
+ grpc_error* error);
#ifndef NDEBUG
#define REF_REASON reason
#define REF_MUTATE_EXTRA_ARGS \
- GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose
+ GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char* purpose
#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
#else
#define REF_REASON ""
@@ -157,21 +157,21 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
* connection implementation
*/
-static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
+static void connection_destroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_connected_subchannel* c = (grpc_connected_subchannel*)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
-grpc_connected_subchannel *grpc_connected_subchannel_ref(
- grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_connected_subchannel* grpc_connected_subchannel_ref(
+ grpc_connected_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
return c;
}
-void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *c
+void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c),
REF_REASON);
@@ -181,10 +181,10 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
* grpc_subchannel implementation
*/
-static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
- gpr_free((void *)c->filters);
+static void subchannel_destroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
+ gpr_free((void*)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
@@ -194,7 +194,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
+static gpr_atm ref_mutate(grpc_subchannel* c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
@@ -208,8 +208,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
return old_val;
}
-grpc_subchannel *grpc_subchannel_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
0 REF_MUTATE_PURPOSE("STRONG_REF"));
@@ -217,16 +217,16 @@ grpc_subchannel *grpc_subchannel_ref(
return c;
}
-grpc_subchannel *grpc_subchannel_weak_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_weak_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
GPR_ASSERT(old_refs != 0);
return c;
}
-grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
if (!c) return NULL;
for (;;) {
gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair);
@@ -241,8 +241,8 @@ grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
}
}
-static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
- grpc_connected_subchannel *con;
+static void disconnect(grpc_exec_ctx* exec_ctx, grpc_subchannel* c) {
+ grpc_connected_subchannel* con;
grpc_subchannel_index_unregister(exec_ctx, c->key, c);
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
@@ -258,8 +258,8 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
gpr_mu_unlock(&c->mu);
}
-void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
// add a weak ref and subtract a strong ref (atomically)
old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
@@ -270,56 +270,57 @@ void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref");
}
-void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c
+void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
}
-grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
- grpc_connector *connector,
- const grpc_subchannel_args *args) {
- grpc_subchannel_key *key = grpc_subchannel_key_create(args);
- grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key);
+grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector,
+ const grpc_subchannel_args* args) {
+ grpc_subchannel_key* key = grpc_subchannel_key_create(args);
+ grpc_subchannel* c = grpc_subchannel_index_find(exec_ctx, key);
if (c) {
grpc_subchannel_key_destroy(exec_ctx, key);
return c;
}
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
- c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
+ c = (grpc_subchannel*)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
- c->filters = (const grpc_channel_filter **)gpr_malloc(
- sizeof(grpc_channel_filter *) * c->num_filters);
- memcpy((void *)c->filters, args->filters,
- sizeof(grpc_channel_filter *) * c->num_filters);
+ c->filters = (const grpc_channel_filter**)gpr_malloc(
+ sizeof(grpc_channel_filter*) * c->num_filters);
+ memcpy((void*)c->filters, args->filters,
+ sizeof(grpc_channel_filter*) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
- grpc_resolved_address *addr =
- (grpc_resolved_address *)gpr_malloc(sizeof(*addr));
+ grpc_resolved_address* addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
- grpc_resolved_address *new_address = NULL;
- grpc_channel_args *new_args = NULL;
+ grpc_resolved_address* new_address = NULL;
+ grpc_channel_args* new_args = NULL;
if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address,
&new_args)) {
GPR_ASSERT(new_address != NULL);
gpr_free(addr);
addr = new_address;
}
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
+ static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
gpr_free(addr);
c->args = grpc_channel_args_copy_and_add_and_remove(
@@ -375,8 +376,8 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return grpc_subchannel_index_register(exec_ctx, key, c);
}
-static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
+static void continue_connect_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
@@ -390,8 +391,8 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
&c->connected);
}
-grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
- grpc_error **error) {
+grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c,
+ grpc_error** error) {
grpc_connectivity_state state;
gpr_mu_lock(&c->mu);
state = grpc_connectivity_state_get(&c->state_tracker, error);
@@ -399,10 +400,10 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
return state;
}
-static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- external_state_watcher *w = (external_state_watcher *)arg;
- grpc_closure *follow_up = w->notify;
+static void on_external_state_watcher_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ external_state_watcher* w = (external_state_watcher*)arg;
+ grpc_closure* follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
w->pollset_set);
@@ -416,8 +417,8 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
}
-static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
+static void on_alarm(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@@ -438,8 +439,8 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_ERROR_UNREF(error);
}
-static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
+static void maybe_start_connecting_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
if (c->disconnected) {
/* Don't try to connect if we're already disconnected */
return;
@@ -484,10 +485,10 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
}
void grpc_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify) {
- external_state_watcher *w;
+ grpc_exec_ctx* exec_ctx, grpc_subchannel* c,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify) {
+ external_state_watcher* w;
if (state == NULL) {
gpr_mu_lock(&c->mu);
@@ -500,7 +501,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
- w = (external_state_watcher *)gpr_malloc(sizeof(*w));
+ w = (external_state_watcher*)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@@ -523,18 +524,18 @@ void grpc_subchannel_notify_on_state_change(
}
void grpc_connected_subchannel_process_transport_op(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_transport_op *op) {
- grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
- grpc_channel_element *top_elem = grpc_channel_stack_element(channel_stack, 0);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ grpc_transport_op* op) {
+ grpc_channel_stack* channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
+ grpc_channel_element* top_elem = grpc_channel_stack_element(channel_stack, 0);
top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
}
-static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
- grpc_error *error) {
- state_watcher *sw = (state_watcher *)p;
- grpc_subchannel *c = sw->subchannel;
- gpr_mu *mu = &c->mu;
+static void subchannel_on_child_state_changed(grpc_exec_ctx* exec_ctx, void* p,
+ grpc_error* error) {
+ state_watcher* sw = (state_watcher*)p;
+ grpc_subchannel* c = sw->subchannel;
+ gpr_mu* mu = &c->mu;
gpr_mu_lock(mu);
@@ -559,13 +560,13 @@ static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
gpr_free(sw);
}
-static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *con,
- grpc_pollset_set *interested_parties,
- grpc_connectivity_state *state,
- grpc_closure *closure) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- grpc_channel_element *elem;
+static void connected_subchannel_state_op(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* con,
+ grpc_pollset_set* interested_parties,
+ grpc_connectivity_state* state,
+ grpc_closure* closure) {
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ grpc_channel_element* elem;
op->connectivity_state = state;
op->on_connectivity_state_change = closure;
op->bind_pollset_set = interested_parties;
@@ -574,31 +575,31 @@ static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx,
}
void grpc_connected_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *closure) {
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* closure) {
connected_subchannel_state_op(exec_ctx, con, interested_parties, state,
closure);
}
-void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *con,
- grpc_closure *closure) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- grpc_channel_element *elem;
+void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* con,
+ grpc_closure* closure) {
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ grpc_channel_element* elem;
op->send_ping = closure;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(exec_ctx, elem, op);
}
-static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *c) {
- grpc_connected_subchannel *con;
- grpc_channel_stack *stk;
- state_watcher *sw_subchannel;
+static bool publish_transport_locked(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* c) {
+ grpc_connected_subchannel* con;
+ grpc_channel_stack* stk;
+ state_watcher* sw_subchannel;
/* construct channel stack */
- grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_channel_arguments(
exec_ctx, builder, c->connecting_result.channel_args);
grpc_channel_stack_builder_set_transport(builder,
@@ -609,8 +610,8 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder_destroy(exec_ctx, builder);
return false;
}
- grpc_error *error = grpc_channel_stack_builder_finish(
- exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con);
+ grpc_error* error = grpc_channel_stack_builder_finish(
+ exec_ctx, builder, 0, 1, connection_destroy, NULL, (void**)&con);
if (error != GRPC_ERROR_NONE) {
grpc_transport_destroy(exec_ctx, c->connecting_result.transport);
gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
@@ -622,7 +623,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
- sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
+ sw_subchannel = (state_watcher*)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@@ -657,10 +658,10 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
return true;
}
-static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_subchannel *c = (grpc_subchannel *)arg;
- grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
+static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_subchannel* c = (grpc_subchannel*)arg;
+ grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
gpr_mu_lock(&c->mu);
@@ -678,7 +679,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
- const char *errmsg = grpc_error_string(error);
+ const char* errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
maybe_start_connecting_locked(exec_ctx, c);
@@ -693,65 +694,65 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
* grpc_subchannel_call implementation
*/
-static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
- grpc_error *error) {
- grpc_subchannel_call *c = (grpc_subchannel_call *)call;
+static void subchannel_call_destroy(grpc_exec_ctx* exec_ctx, void* call,
+ grpc_error* error) {
+ grpc_subchannel_call* c = (grpc_subchannel_call*)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
- grpc_connected_subchannel *connection = c->connection;
+ grpc_connected_subchannel* connection = c->connection;
grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL,
c->schedule_closure_after_destroy);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
-void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call,
- grpc_closure *closure) {
+void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
+ grpc_closure* closure) {
GPR_ASSERT(call->schedule_closure_after_destroy == NULL);
GPR_ASSERT(closure != NULL);
call->schedule_closure_after_destroy = closure;
}
void grpc_subchannel_call_ref(
- grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+ grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *c
+void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}
-void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call,
- grpc_transport_stream_op_batch *batch) {
+void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* call,
+ grpc_transport_stream_op_batch* batch) {
GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
- grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
- grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
+ grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
+ grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
}
-grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
- grpc_subchannel *c) {
+grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel* c) {
return GET_CONNECTED_SUBCHANNEL(c, acq);
}
-const grpc_subchannel_key *grpc_subchannel_get_key(
- const grpc_subchannel *subchannel) {
+const grpc_subchannel_key* grpc_subchannel_get_key(
+ const grpc_subchannel* subchannel) {
return subchannel->key;
}
-grpc_error *grpc_connected_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- const grpc_connected_subchannel_call_args *args,
- grpc_subchannel_call **call) {
- grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = (grpc_subchannel_call *)gpr_arena_alloc(
+grpc_error* grpc_connected_subchannel_create_call(
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+ const grpc_connected_subchannel_call_args* args,
+ grpc_subchannel_call** call) {
+ grpc_channel_stack* chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
+ *call = (grpc_subchannel_call*)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
- grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
+ grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
const grpc_call_element_args call_args = {
callstk, /* call_stack */
@@ -763,10 +764,10 @@ grpc_error *grpc_connected_subchannel_create_call(
args->arena, /* arena */
args->call_combiner /* call_combiner */
};
- grpc_error *error = grpc_call_stack_init(
+ grpc_error* error = grpc_call_stack_init(
exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
- const char *error_string = grpc_error_string(error);
+ const char* error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
return error;
}
@@ -774,39 +775,39 @@ grpc_error *grpc_connected_subchannel_create_call(
return GRPC_ERROR_NONE;
}
-grpc_call_stack *grpc_subchannel_call_get_call_stack(
- grpc_subchannel_call *subchannel_call) {
+grpc_call_stack* grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call* subchannel_call) {
return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
}
-static void grpc_uri_to_sockaddr(grpc_exec_ctx *exec_ctx, const char *uri_str,
- grpc_resolved_address *addr) {
- grpc_uri *uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */);
+static void grpc_uri_to_sockaddr(grpc_exec_ctx* exec_ctx, const char* uri_str,
+ grpc_resolved_address* addr) {
+ grpc_uri* uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */);
GPR_ASSERT(uri != NULL);
if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr));
grpc_uri_destroy(uri);
}
-void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx,
- const grpc_channel_args *args,
- grpc_resolved_address *addr) {
- const char *addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
+void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
+ const grpc_channel_args* args,
+ grpc_resolved_address* addr) {
+ const char* addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
memset(addr, 0, sizeof(*addr));
if (*addr_uri_str != '\0') {
grpc_uri_to_sockaddr(exec_ctx, addr_uri_str, addr);
}
}
-const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) {
- const grpc_arg *addr_arg =
+const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args) {
+ const grpc_arg* addr_arg =
grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS);
GPR_ASSERT(addr_arg != NULL); // Should have been set by LB policy.
GPR_ASSERT(addr_arg->type == GRPC_ARG_STRING);
return addr_arg->value.string;
}
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) {
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) {
return grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SUBCHANNEL_ADDRESS,
+ (char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
}
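A hedged sketch of round-tripping the subchannel address arg handled by the helpers above; grpc_channel_args_copy_and_add from channel_args.h is assumed, and the resolved address is supplied by the caller.

#include <grpc/support/alloc.h>
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"

/* Hedged illustration: an LB policy encodes the address as a string channel
   arg; the subchannel later decodes it when it is ready to connect. */
static void address_arg_example(grpc_exec_ctx* exec_ctx,
                                const grpc_resolved_address* resolved) {
  grpc_arg arg = grpc_create_subchannel_address_arg(resolved);
  grpc_channel_args* args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
  grpc_resolved_address round_trip;
  grpc_get_subchannel_address_arg(exec_ctx, args, &round_trip);
  gpr_free(arg.value.string); /* the encoder allocates; caller must free */
  grpc_channel_args_destroy(exec_ctx, args);
}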
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 1cd73f3ff4..970f182ff0 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -79,118 +79,118 @@ typedef struct grpc_subchannel_key grpc_subchannel_key;
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif
-grpc_subchannel *grpc_subchannel_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel *grpc_subchannel_ref_from_weak_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel
+grpc_subchannel* grpc_subchannel_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel *grpc_subchannel_weak_ref(
- grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel *channel
+grpc_subchannel* grpc_subchannel_weak_ref(
+ grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_connected_subchannel *grpc_connected_subchannel_ref(
- grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *channel
+grpc_connected_subchannel* grpc_connected_subchannel_ref(
+ grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
- grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *call
+ grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call */
typedef struct {
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
- gpr_arena *arena;
- grpc_call_context_element *context;
- grpc_call_combiner *call_combiner;
+ gpr_arena* arena;
+ grpc_call_context_element* context;
+ grpc_call_combiner* call_combiner;
} grpc_connected_subchannel_call_args;
-grpc_error *grpc_connected_subchannel_create_call(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- const grpc_connected_subchannel_call_args *args,
- grpc_subchannel_call **subchannel_call);
+grpc_error* grpc_connected_subchannel_create_call(
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* connected_subchannel,
+ const grpc_connected_subchannel_call_args* args,
+ grpc_subchannel_call** subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
- grpc_transport_op *op);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* subchannel,
+ grpc_transport_op* op);
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
- grpc_subchannel *channel, grpc_error **error);
+ grpc_subchannel* channel, grpc_error** error);
/** Calls notify when the connectivity state of a channel becomes different
from *state. Updates *state with the new state of the channel. */
void grpc_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify);
+ grpc_exec_ctx* exec_ctx, grpc_subchannel* channel,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify);
void grpc_connected_subchannel_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
- grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
- grpc_closure *notify);
-void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
- grpc_connected_subchannel *channel,
- grpc_closure *notify);
+ grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* channel,
+ grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+ grpc_closure* notify);
+void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
+ grpc_connected_subchannel* channel,
+ grpc_closure* notify);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
-grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
- grpc_subchannel *subchannel);
+grpc_connected_subchannel* grpc_subchannel_get_connected_subchannel(
+ grpc_subchannel* subchannel);
/** return the subchannel index key for \a subchannel */
-const grpc_subchannel_key *grpc_subchannel_get_key(
- const grpc_subchannel *subchannel);
+const grpc_subchannel_key* grpc_subchannel_get_key(
+ const grpc_subchannel* subchannel);
/** continue processing a transport op */
-void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_call *subchannel_call,
- grpc_transport_stream_op_batch *op);
+void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_call* subchannel_call,
+ grpc_transport_stream_op_batch* op);
/** Must be called once per call. Sets the 'then_schedule_closure' argument for
call stack destruction. */
void grpc_subchannel_call_set_cleanup_closure(
- grpc_subchannel_call *subchannel_call, grpc_closure *closure);
+ grpc_subchannel_call* subchannel_call, grpc_closure* closure);
-grpc_call_stack *grpc_subchannel_call_get_call_stack(
- grpc_subchannel_call *subchannel_call);
+grpc_call_stack* grpc_subchannel_call_get_call_stack(
+ grpc_subchannel_call* subchannel_call);
struct grpc_subchannel_args {
/* When updating this struct, also update subchannel_index.c */
/** Channel filters for this channel - wrapped factories will likely
want to mutate this */
- const grpc_channel_filter **filters;
+ const grpc_channel_filter** filters;
/** The number of filters in the above array */
size_t filter_count;
/** Channel arguments to be supplied to the newly created channel */
- const grpc_channel_args *args;
+ const grpc_channel_args* args;
};
/** create a subchannel given a connector */
-grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
- grpc_connector *connector,
- const grpc_subchannel_args *args);
+grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
+ grpc_connector* connector,
+ const grpc_subchannel_args* args);
/// Sets \a addr from \a args.
-void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx,
- const grpc_channel_args *args,
- grpc_resolved_address *addr);
+void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
+ const grpc_channel_args* args,
+ grpc_resolved_address* addr);
/// Returns the URI string for the address to connect to.
-const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args);
+const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args);
/// Returns a new channel arg encoding the subchannel address as a string.
/// Caller is responsible for freeing the string.
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr);
+grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr);
#ifdef __cplusplus
}
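A hedged sketch of the connectivity-watch pattern declared above; the closure and the state variable are assumed to be owned by the caller and to remain valid until the notification fires.

#include "src/core/ext/filters/client_channel/subchannel.h"

/* Hedged illustration: record the current state, then ask to be notified
   once the subchannel's state differs from that recorded value. */
static void watch_subchannel_example(grpc_exec_ctx* exec_ctx,
                                     grpc_subchannel* subchannel,
                                     grpc_pollset_set* interested_parties,
                                     grpc_connectivity_state* state,
                                     grpc_closure* on_change) {
  grpc_error* error = GRPC_ERROR_NONE;
  *state = grpc_subchannel_check_connectivity(subchannel, &error);
  GRPC_ERROR_UNREF(error);
  grpc_subchannel_notify_on_state_change(exec_ctx, subchannel,
                                         interested_parties, state, on_change);
}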
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 1f466ec0b8..0c4213cf77 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -42,15 +42,15 @@ struct grpc_subchannel_key {
static bool g_force_creation = false;
-static grpc_subchannel_key *create_key(
- const grpc_subchannel_args *args,
- grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
- grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
+static grpc_subchannel_key* create_key(
+ const grpc_subchannel_args* args,
+ grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) {
+ grpc_subchannel_key* k = (grpc_subchannel_key*)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
- k->args.filters = (const grpc_channel_filter **)gpr_malloc(
+ k->args.filters = (const grpc_channel_filter**)gpr_malloc(
sizeof(*k->args.filters) * k->args.filter_count);
- memcpy((grpc_channel_filter *)k->args.filters, args->filters,
+ memcpy((grpc_channel_filter*)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
k->args.filters = NULL;
@@ -59,17 +59,17 @@ static grpc_subchannel_key *create_key(
return k;
}
-grpc_subchannel_key *grpc_subchannel_key_create(
- const grpc_subchannel_args *args) {
+grpc_subchannel_key* grpc_subchannel_key_create(
+ const grpc_subchannel_args* args) {
return create_key(args, grpc_channel_args_normalize);
}
-static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) {
+static grpc_subchannel_key* subchannel_key_copy(grpc_subchannel_key* k) {
return create_key(&k->args, grpc_channel_args_copy);
}
-int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
- const grpc_subchannel_key *b) {
+int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
+ const grpc_subchannel_key* b) {
if (g_force_creation) return false;
int c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
@@ -81,35 +81,34 @@ int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
return grpc_channel_args_compare(a->args.args, b->args.args);
}
-void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *k) {
- gpr_free((grpc_channel_args *)k->args.filters);
- grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)k->args.args);
+void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* k) {
+ gpr_free((grpc_channel_args*)k->args.filters);
+ grpc_channel_args_destroy(exec_ctx, (grpc_channel_args*)k->args.args);
gpr_free(k);
}
-static void sck_avl_destroy(void *p, void *user_data) {
- grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p);
+static void sck_avl_destroy(void* p, void* user_data) {
+ grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
+ grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key*)p);
}
-static void *sck_avl_copy(void *p, void *unused) {
- return subchannel_key_copy((grpc_subchannel_key *)p);
+static void* sck_avl_copy(void* p, void* unused) {
+ return subchannel_key_copy((grpc_subchannel_key*)p);
}
-static long sck_avl_compare(void *a, void *b, void *unused) {
- return grpc_subchannel_key_compare((grpc_subchannel_key *)a,
- (grpc_subchannel_key *)b);
+static long sck_avl_compare(void* a, void* b, void* unused) {
+ return grpc_subchannel_key_compare((grpc_subchannel_key*)a,
+ (grpc_subchannel_key*)b);
}
-static void scv_avl_destroy(void *p, void *user_data) {
- grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data;
- GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
- "subchannel_index");
+static void scv_avl_destroy(void* p, void* user_data) {
+ grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
+ GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel*)p, "subchannel_index");
}
-static void *scv_avl_copy(void *p, void *unused) {
- GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index");
+static void* scv_avl_copy(void* p, void* unused) {
+ GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel*)p, "subchannel_index");
return p;
}
@@ -145,25 +144,25 @@ void grpc_subchannel_index_unref(void) {
void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
-grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key) {
+grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key) {
// Lock, and take a reference to the subchannel index.
// We don't need to do the search under a lock as avl's are immutable.
gpr_mu_lock(&g_mu);
gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
gpr_mu_unlock(&g_mu);
- grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
- (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
+ grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
+ (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
}
-grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed) {
- grpc_subchannel *c = NULL;
+grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed) {
+ grpc_subchannel* c = NULL;
bool need_to_unref_constructed;
while (c == NULL) {
@@ -176,7 +175,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
- c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
+ c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@@ -211,9 +210,9 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
return c;
}
-void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed) {
+void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed) {
bool done = false;
while (!done) {
// Compare and swap loop:
@@ -224,7 +223,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
- grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
+ grpc_subchannel* c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 05c3878379..47f9c7bb1e 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -29,34 +29,34 @@ extern "C" {
shared amongst channels */
/** Create a key that can be used to uniquely identify a subchannel */
-grpc_subchannel_key *grpc_subchannel_key_create(
- const grpc_subchannel_args *args);
+grpc_subchannel_key* grpc_subchannel_key_create(
+ const grpc_subchannel_args* args);
/** Destroy a subchannel key */
-void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key);
+void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key);
/** Given a subchannel key, find the subchannel registered for it.
Returns NULL if no such channel exists.
Thread-safe. */
-grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key);
+grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key);
/** Register a subchannel against a key.
Takes ownership of \a constructed.
Returns the registered subchannel. This may be different from
\a constructed in the case of a registration race. */
-grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed);
+grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed);
/** Remove \a constructed as the registered subchannel for \a key. */
-void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
- grpc_subchannel_key *key,
- grpc_subchannel *constructed);
+void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
+ grpc_subchannel_key* key,
+ grpc_subchannel* constructed);
-int grpc_subchannel_key_compare(const grpc_subchannel_key *a,
- const grpc_subchannel_key *b);
+int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
+ const grpc_subchannel_key* b);
/** Initialize the subchannel index (global) */
void grpc_subchannel_index_init(void);
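A hedged sketch of the find-or-register flow these declarations support; building `constructed` (connector plus args) is elided, and the key-ownership behaviour assumed here mirrors the grpc_subchannel_create path in subchannel.cc.

#include "src/core/ext/filters/client_channel/subchannel_index.h"

/* Hedged illustration: reuse an already-registered subchannel when one
   exists for the same key, otherwise register the freshly built one. */
static grpc_subchannel* find_or_register_example(grpc_exec_ctx* exec_ctx,
                                                 const grpc_subchannel_args* args,
                                                 grpc_subchannel* constructed) {
  grpc_subchannel_key* key = grpc_subchannel_key_create(args);
  grpc_subchannel* existing = grpc_subchannel_index_find(exec_ctx, key);
  if (existing != NULL) {
    grpc_subchannel_key_destroy(exec_ctx, key);
    /* caller must drop its own reference to `constructed` in this branch
       (grpc_subchannel_create avoids this by doing the lookup first) */
    return existing;
  }
  /* Takes ownership of constructed (and, judging by grpc_subchannel_create,
     of key); may still return a different subchannel if a race is lost. */
  return grpc_subchannel_index_register(exec_ctx, key, constructed);
}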
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index fb4fb8e694..917e65342b 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -34,9 +34,9 @@
/** a size_t default value... maps to all 1's */
#define NOT_SET (~(size_t)0)
-static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
+static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section,
bool suppress_errors) {
- char *line_prefix;
+ char* line_prefix;
size_t pfx_len;
if (!suppress_errors) {
@@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
- line_prefix = (char *)gpr_malloc(pfx_len + 1);
+ line_prefix = (char*)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@@ -56,13 +56,13 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
}
/** Returns a copy of percent decoded \a src[begin, end) */
-static char *decode_and_copy_component(grpc_exec_ctx *exec_ctx, const char *src,
+static char* decode_and_copy_component(grpc_exec_ctx* exec_ctx, const char* src,
size_t begin, size_t end) {
grpc_slice component =
grpc_slice_from_copied_buffer(src + begin, end - begin);
grpc_slice decoded_component =
grpc_permissive_percent_decode_slice(component);
- char *out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
+ char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
grpc_slice_unref_internal(exec_ctx, component);
grpc_slice_unref_internal(exec_ctx, decoded_component);
return out;
@@ -76,7 +76,7 @@ static bool valid_hex(char c) {
/** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar
* production. If \a uri_text[i] introduces an invalid \a pchar (such as percent
* sign not followed by two hex digits), NOT_SET is returned. */
-static size_t parse_pchar(const char *uri_text, size_t i) {
+static size_t parse_pchar(const char* uri_text, size_t i) {
/* pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
* unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
* pct-encoded = "%" HEXDIG HEXDIG
@@ -118,7 +118,7 @@ static size_t parse_pchar(const char *uri_text, size_t i) {
}
/* *( pchar / "?" / "/" ) */
-static int parse_fragment_or_query(const char *uri_text, size_t *i) {
+static int parse_fragment_or_query(const char* uri_text, size_t* i) {
char c;
while ((c = uri_text[*i]) != 0) {
const size_t advance = parse_pchar(uri_text, *i); /* pchar */
@@ -143,9 +143,9 @@ static int parse_fragment_or_query(const char *uri_text, size_t *i) {
return 1;
}
-static void parse_query_parts(grpc_uri *uri) {
- static const char *QUERY_PARTS_SEPARATOR = "&";
- static const char *QUERY_PARTS_VALUE_SEPARATOR = "=";
+static void parse_query_parts(grpc_uri* uri) {
+ static const char* QUERY_PARTS_SEPARATOR = "&";
+ static const char* QUERY_PARTS_VALUE_SEPARATOR = "=";
GPR_ASSERT(uri->query != NULL);
if (uri->query[0] == '\0') {
uri->query_parts = NULL;
@@ -157,11 +157,11 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
uri->query_parts_values =
- (char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
+ (char**)gpr_malloc(uri->num_query_parts * sizeof(char**));
for (size_t i = 0; i < uri->num_query_parts; i++) {
- char **query_param_parts;
+ char** query_param_parts;
size_t num_query_param_parts;
- char *full = uri->query_parts[i];
+ char* full = uri->query_parts[i];
gpr_string_split(full, QUERY_PARTS_VALUE_SEPARATOR, &query_param_parts,
&num_query_param_parts);
GPR_ASSERT(num_query_param_parts > 0);
@@ -182,9 +182,9 @@ static void parse_query_parts(grpc_uri *uri) {
}
}
-grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
+grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
bool suppress_errors) {
- grpc_uri *uri;
+ grpc_uri* uri;
size_t scheme_begin = 0;
size_t scheme_end = NOT_SET;
size_t authority_begin = NOT_SET;
@@ -270,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
- uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
+ uri = (grpc_uri*)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,
@@ -286,7 +286,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
return uri;
}
-const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key) {
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key) {
GPR_ASSERT(key != NULL);
if (key[0] == '\0') return NULL;
@@ -298,7 +298,7 @@ const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key) {
return NULL;
}
-void grpc_uri_destroy(grpc_uri *uri) {
+void grpc_uri_destroy(grpc_uri* uri) {
if (!uri) return;
gpr_free(uri->scheme);
gpr_free(uri->authority);
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index e78da5928b..cd877ade8d 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -27,29 +27,29 @@ extern "C" {
#endif
typedef struct {
- char *scheme;
- char *authority;
- char *path;
- char *query;
+ char* scheme;
+ char* authority;
+ char* path;
+ char* query;
/** Query substrings separated by '&' */
- char **query_parts;
+ char** query_parts;
/** Number of elements in \a query_parts and \a query_parts_values */
size_t num_query_parts;
/** Split each query part by '='. NULL if not present. */
- char **query_parts_values;
- char *fragment;
+ char** query_parts_values;
+ char* fragment;
} grpc_uri;
/** parse a uri, return NULL on failure */
-grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
+grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
bool suppress_errors);
/** return the part of a query string after the '=' in "?key=xxx&...", or NULL
* if key is not present */
-const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key);
+const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key);
/** destroy a uri */
-void grpc_uri_destroy(grpc_uri *uri);
+void grpc_uri_destroy(grpc_uri* uri);
#ifdef __cplusplus
}
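A hedged sketch of parsing a target URI and reading one query argument; the example URI and key are illustrative only.

#include <grpc/support/log.h>
#include "src/core/ext/filters/client_channel/uri_parser.h"

/* Hedged illustration: parse, inspect a "?key=value" pair, clean up. */
static void uri_query_example(grpc_exec_ctx* exec_ctx) {
  grpc_uri* uri = grpc_uri_parse(
      exec_ctx, "dns:///example.com:443?balancer=grpclb",
      false /* suppress_errors */);
  if (uri == NULL) return; /* parse errors were already logged */
  const char* balancer = grpc_uri_get_query_arg(uri, "balancer");
  gpr_log(GPR_INFO, "scheme=%s authority=%s balancer=%s", uri->scheme,
          uri->authority, balancer == NULL ? "(unset)" : balancer);
  grpc_uri_destroy(uri);
}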
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index 6208089f2e..590bd22b1e 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -36,7 +36,7 @@
static const size_t kMaxPayloadSizeForGet = 2048;
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
// State for handling send_initial_metadata ops.
grpc_linked_mdelem method;
grpc_linked_mdelem scheme;
@@ -45,20 +45,20 @@ typedef struct call_data {
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
// State for handling recv_initial_metadata ops.
- grpc_metadata_batch *recv_initial_metadata;
- grpc_closure *original_recv_initial_metadata_ready;
+ grpc_metadata_batch* recv_initial_metadata;
+ grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
// State for handling recv_trailing_metadata ops.
- grpc_metadata_batch *recv_trailing_metadata;
- grpc_closure *original_recv_trailing_metadata_on_complete;
+ grpc_metadata_batch* recv_trailing_metadata;
+ grpc_closure* original_recv_trailing_metadata_on_complete;
grpc_closure recv_trailing_metadata_on_complete;
// State for handling send_message ops.
- grpc_transport_stream_op_batch *send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch;
size_t send_message_bytes_read;
grpc_byte_stream_cache send_message_cache;
grpc_caching_byte_stream send_message_caching_stream;
grpc_closure on_send_message_next_done;
- grpc_closure *original_send_message_on_complete;
+ grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
} call_data;
@@ -68,18 +68,18 @@ typedef struct channel_data {
size_t max_payload_size_for_get;
} channel_data;
-static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
+static grpc_error* client_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
if (b->idx.named.status != NULL) {
if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.status);
} else {
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
GPR_DUMP_ASCII);
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Received http2 header with status: %s", val);
- grpc_error *e = grpc_error_set_str(
+ grpc_error* e = grpc_error_set_str(
grpc_error_set_int(
grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -125,7 +125,7 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
gpr_free(val);
@@ -137,10 +137,10 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
- void *user_data, grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
+ void* user_data, grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -151,11 +151,11 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
error);
}
-static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void recv_trailing_metadata_on_complete(grpc_exec_ctx* exec_ctx,
+ void* user_data,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@@ -166,10 +166,10 @@ static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
error);
}
-static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
grpc_byte_stream_cache_destroy(exec_ctx, &calld->send_message_cache);
GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
@@ -177,10 +177,10 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
// Pulls a slice from the send_message byte stream, updating
// calld->send_message_bytes_read.
-static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
grpc_slice incoming_slice;
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, &calld->send_message_caching_stream.base, &incoming_slice);
if (error == GRPC_ERROR_NONE) {
calld->send_message_bytes_read += GRPC_SLICE_LENGTH(incoming_slice);
@@ -194,12 +194,12 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// calld->send_message_caching_stream.base.length, then we have completed
// reading from the byte stream; otherwise, an async read has been dispatched
// and on_send_message_next_done() will be invoked when it is complete.
-static grpc_error *read_all_available_send_message_data(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* read_all_available_send_message_data(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
while (grpc_byte_stream_next(exec_ctx,
&calld->send_message_caching_stream.base,
~(size_t)0, &calld->on_send_message_next_done)) {
- grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
+ grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) return error;
if (calld->send_message_bytes_read ==
calld->send_message_caching_stream.base.length) {
@@ -210,10 +210,10 @@ static grpc_error *read_all_available_send_message_data(grpc_exec_ctx *exec_ctx,
}
// Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, error, calld->call_combiner);
@@ -233,8 +233,8 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
}
-static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
- char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
+static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) {
+ char* payload_bytes = (char*)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@@ -248,10 +248,10 @@ static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
// Modifies the path entry in the batch's send_initial_metadata to
// append the base64-encoded query for a GET request.
-static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* update_path_for_get(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice path_slice =
GRPC_MDVALUE(batch->payload->send_initial_metadata.send_initial_metadata
->idx.named.path->md);
@@ -264,33 +264,33 @@ static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
false /* multi_line */);
grpc_slice path_with_query_slice = GRPC_SLICE_MALLOC(estimated_len);
/* memcopy individual pieces into this slice */
- char *write_ptr = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
- char *original_path = (char *)GRPC_SLICE_START_PTR(path_slice);
+ char* write_ptr = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* original_path = (char*)GRPC_SLICE_START_PTR(path_slice);
memcpy(write_ptr, original_path, GRPC_SLICE_LENGTH(path_slice));
write_ptr += GRPC_SLICE_LENGTH(path_slice);
*write_ptr++ = '?';
- char *payload_bytes =
+ char* payload_bytes =
slice_buffer_to_string(&calld->send_message_cache.cache_buffer);
- grpc_base64_encode_core((char *)write_ptr, payload_bytes,
+ grpc_base64_encode_core((char*)write_ptr, payload_bytes,
batch->payload->send_message.send_message->length,
true /* url_safe */, false /* multi_line */);
gpr_free(payload_bytes);
/* remove trailing unused memory and add trailing 0 to terminate string */
- char *t = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
+ char* t = (char*)GRPC_SLICE_START_PTR(path_with_query_slice);
/* safe to use strlen since base64_encode will always add '\0' */
path_with_query_slice =
grpc_slice_sub_no_ref(path_with_query_slice, 0, strlen(t));
/* substitute previous path with the new path+query */
grpc_mdelem mdelem_path_and_query =
grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
- grpc_metadata_batch *b =
+ grpc_metadata_batch* b =
batch->payload->send_initial_metadata.send_initial_metadata;
return grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
mdelem_path_and_query);
}
-static void remove_if_present(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
+static void remove_if_present(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
grpc_metadata_batch_callouts_index idx) {
if (batch->idx.array[idx] != NULL) {
grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[idx]);
@@ -298,10 +298,10 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
}
static void hc_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
if (batch->recv_initial_metadata) {
@@ -322,7 +322,7 @@ static void hc_start_transport_stream_op_batch(
batch->on_complete = &calld->recv_trailing_metadata_on_complete;
}
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
bool batch_will_be_handled_asynchronously = false;
if (batch->send_initial_metadata) {
// Decide which HTTP VERB to use. We use GET if the request is marked
@@ -422,10 +422,10 @@ done:
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
@@ -441,11 +441,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {}
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {}
-static grpc_mdelem scheme_from_args(const grpc_channel_args *args) {
+static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
unsigned i;
size_t j;
grpc_mdelem valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP,
@@ -466,7 +466,7 @@ static grpc_mdelem scheme_from_args(const grpc_channel_args *args) {
return GRPC_MDELEM_SCHEME_HTTP;
}
-static size_t max_payload_size_from_args(const grpc_channel_args *args) {
+static size_t max_payload_size_from_args(const grpc_channel_args* args) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET)) {
@@ -482,12 +482,12 @@ static size_t max_payload_size_from_args(const grpc_channel_args *args) {
return kMaxPayloadSizeForGet;
}
-static grpc_slice user_agent_from_args(const grpc_channel_args *args,
- const char *transport_name) {
+static grpc_slice user_agent_from_args(const grpc_channel_args* args,
+ const char* transport_name) {
gpr_strvec v;
size_t i;
int is_first = 1;
- char *tmp;
+ char* tmp;
grpc_slice result;
gpr_strvec_init(&v);
@@ -533,10 +533,10 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@@ -550,9 +550,9 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}
diff --git a/src/core/ext/filters/http/http_filters_plugin.cc b/src/core/ext/filters/http/http_filters_plugin.cc
index 8f5b856317..064e66e323 100644
--- a/src/core/ext/filters/http/http_filters_plugin.cc
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -27,25 +27,25 @@
#include "src/core/lib/transport/transport_impl.h"
typedef struct {
- const grpc_channel_filter *filter;
- const char *control_channel_arg;
+ const grpc_channel_filter* filter;
+ const char* control_channel_arg;
} optional_filter;
static optional_filter compress_filter = {
&grpc_message_compress_filter, GRPC_ARG_ENABLE_PER_MESSAGE_COMPRESSION};
static bool is_building_http_like_transport(
- grpc_channel_stack_builder *builder) {
- grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ grpc_channel_stack_builder* builder) {
+ grpc_transport* t = grpc_channel_stack_builder_get_transport(builder);
return t != NULL && strstr(t->vtable->name, "http");
}
-static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_optional_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg) {
if (!is_building_http_like_transport(builder)) return true;
- optional_filter *filtarg = (optional_filter *)arg;
- const grpc_channel_args *channel_args =
+ optional_filter* filtarg = (optional_filter*)arg;
+ const grpc_channel_args* channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(
grpc_channel_args_find(channel_args, filtarg->control_channel_arg),
@@ -55,12 +55,12 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
: true;
}
-static bool maybe_add_required_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg) {
+static bool maybe_add_required_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg) {
return is_building_http_like_transport(builder)
? grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL)
+ builder, (const grpc_channel_filter*)arg, NULL, NULL)
: true;
}
@@ -77,13 +77,13 @@ extern "C" void grpc_http_filters_init(void) {
maybe_add_optional_filter, &compress_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_client_filter);
+ maybe_add_required_filter, (void*)&grpc_http_client_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_client_filter);
+ maybe_add_required_filter, (void*)&grpc_http_client_filter);
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- maybe_add_required_filter, (void *)&grpc_http_server_filter);
+ maybe_add_required_filter, (void*)&grpc_http_server_filter);
}
extern "C" void grpc_http_filters_shutdown(void) {}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index f785e1355d..949ff917d6 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -45,7 +45,7 @@ typedef enum {
} initial_metadata_state;
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
grpc_linked_mdelem compression_algorithm_storage;
grpc_linked_mdelem stream_compression_algorithm_storage;
grpc_linked_mdelem accept_encoding_storage;
@@ -54,12 +54,12 @@ typedef struct call_data {
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
initial_metadata_state send_initial_metadata_state;
- grpc_error *cancel_error;
+ grpc_error* cancel_error;
grpc_closure start_send_message_batch_in_call_combiner;
- grpc_transport_stream_op_batch *send_message_batch;
+ grpc_transport_stream_op_batch* send_message_batch;
grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_slice_buffer_stream replacement_stream;
- grpc_closure *original_send_message_on_complete;
+ grpc_closure* original_send_message_on_complete;
grpc_closure send_message_on_complete;
grpc_closure on_send_message_next_done;
} call_data;
@@ -80,10 +80,10 @@ typedef struct channel_data {
uint32_t supported_stream_compression_algorithms;
} channel_data;
-static bool skip_compression(grpc_call_element *elem, uint32_t flags,
+static bool skip_compression(grpc_call_element* elem, uint32_t flags,
bool has_compression_algorithm) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
@@ -99,15 +99,15 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags,
}
/** Filter initial metadata */
-static grpc_error *process_send_initial_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata,
- bool *has_compression_algorithm) GRPC_MUST_USE_RESULT;
-static grpc_error *process_send_initial_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *channeld = (channel_data *)elem->channel_data;
+static grpc_error* process_send_initial_metadata(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_metadata_batch* initial_metadata,
+ bool* has_compression_algorithm) GRPC_MUST_USE_RESULT;
+static grpc_error* process_send_initial_metadata(
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_metadata_batch* initial_metadata, bool* has_compression_algorithm) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* channeld = (channel_data*)elem->channel_data;
*has_compression_algorithm = false;
grpc_stream_compression_algorithm stream_compression_algorithm =
GRPC_STREAM_COMPRESS_NONE;
@@ -117,7 +117,7 @@ static grpc_error *process_send_initial_metadata(
initial_metadata->idx.named.grpc_internal_stream_encoding_request->md;
if (!grpc_stream_compression_algorithm_parse(
GRPC_MDVALUE(md), &stream_compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid stream compression algorithm: '%s' (unknown). Ignoring.",
val);
@@ -126,7 +126,7 @@ static grpc_error *process_send_initial_metadata(
}
if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset,
stream_compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(
GPR_ERROR,
"Invalid stream compression algorithm: '%s' (previously disabled). "
@@ -152,7 +152,7 @@ static grpc_error *process_send_initial_metadata(
initial_metadata->idx.named.grpc_internal_encoding_request->md;
if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md),
&calld->compression_algorithm)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid compression algorithm: '%s' (unknown). Ignoring.", val);
gpr_free(val);
@@ -177,7 +177,7 @@ static grpc_error *process_send_initial_metadata(
*has_compression_algorithm = true;
}
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
/* hint compression algorithm */
if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
error = grpc_metadata_batch_add_tail(
@@ -211,30 +211,30 @@ static grpc_error *process_send_initial_metadata(
return error;
}
-static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &calld->slices);
GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
GRPC_ERROR_REF(error));
}
-static void send_message_batch_continue(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void send_message_batch_continue(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
// Note: The call to grpc_call_next_op() results in yielding the
// call combiner, so we need to clear calld->send_message_batch
// before we do that.
- grpc_transport_stream_op_batch *send_message_batch =
+ grpc_transport_stream_op_batch* send_message_batch =
calld->send_message_batch;
calld->send_message_batch = NULL;
grpc_call_next_op(exec_ctx, elem, send_message_batch);
}
-static void finish_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void finish_send_message(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
// Compress the data if appropriate.
grpc_slice_buffer tmp;
grpc_slice_buffer_init(&tmp);
@@ -244,21 +244,22 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
&calld->slices, &tmp);
if (did_compress) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name;
+ const char* algo_name;
const size_t before_size = calld->slices.length;
const size_t after_size = tmp.length;
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
- gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
- " bytes (%.2f%% savings)",
+ gpr_log(GPR_DEBUG,
+ "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
+ " bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}
grpc_slice_buffer_swap(&calld->slices, &tmp);
send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
} else {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name;
+ const char* algo_name;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG,
@@ -282,10 +283,10 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
send_message_batch_continue(exec_ctx, elem);
}
-static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_error *error) {
- call_data *calld = (call_data *)arg;
+static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_error* error) {
+ call_data* calld = (call_data*)arg;
if (calld->send_message_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
@@ -295,10 +296,10 @@ static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
}
// Pulls a slice from the send_message byte stream and adds it to calld->slices.
-static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
- call_data *calld) {
+static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
+ call_data* calld) {
grpc_slice incoming_slice;
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
&incoming_slice);
if (error == GRPC_ERROR_NONE) {
@@ -311,13 +312,13 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx,
// If all data has been read, invokes finish_send_message(). Otherwise,
// an async call to grpc_byte_stream_next() has been started, which will
// eventually result in calling on_send_message_next_done().
-static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+static void continue_reading_send_message(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
while (grpc_byte_stream_next(
exec_ctx, calld->send_message_batch->payload->send_message.send_message,
~(size_t)0, &calld->on_send_message_next_done)) {
- grpc_error *error = pull_slice_from_send_message(exec_ctx, calld);
+ grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
@@ -333,10 +334,10 @@ static void continue_reading_send_message(grpc_exec_ctx *exec_ctx,
}
// Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
// Closure callback; does not take ownership of error.
fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
@@ -357,10 +358,10 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *unused) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void start_send_message_batch(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* unused) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (skip_compression(
elem,
calld->send_message_batch->payload->send_message.send_message->flags,
@@ -372,9 +373,9 @@ static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
}
static void compress_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) {
@@ -405,7 +406,7 @@ static void compress_start_transport_stream_op_batch(
if (batch->send_initial_metadata) {
GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
bool has_compression_algorithm;
- grpc_error *error = process_send_initial_metadata(
+ grpc_error* error = process_send_initial_metadata(
exec_ctx, elem,
batch->payload->send_initial_metadata.send_initial_metadata,
&has_compression_algorithm);
@@ -453,10 +454,10 @@ done:
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->call_combiner = args->call_combiner;
calld->cancel_error = GRPC_ERROR_NONE;
grpc_slice_buffer_init(&calld->slices);
@@ -470,19 +471,19 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error);
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *channeld = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* channeld = (channel_data*)elem->channel_data;
/* Configuration for message compression */
channeld->enabled_algorithms_bitset =
@@ -530,8 +531,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
const grpc_channel_filter grpc_message_compress_filter = {
compress_start_transport_stream_op_batch,
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.h b/src/core/ext/filters/http/message_compress/message_compress_filter.h
index 92771d9858..79a2815655 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.h
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.h
@@ -56,4 +56,4 @@ extern const grpc_channel_filter grpc_message_compress_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 03958136b4..5cfe5acced 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -32,7 +32,7 @@
#define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
typedef struct call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
@@ -42,15 +42,15 @@ typedef struct call_data {
/* flag to ensure payload_bin is delivered only once */
bool payload_bin_delivered;
- grpc_metadata_batch *recv_initial_metadata;
- uint32_t *recv_initial_metadata_flags;
+ grpc_metadata_batch* recv_initial_metadata;
+ uint32_t* recv_initial_metadata_flags;
/** Closure to call when finished with the hs_on_recv hook */
- grpc_closure *on_done_recv;
+ grpc_closure* on_done_recv;
/** Closure to call when we retrieve read message from the path URI
*/
- grpc_closure *recv_message_ready;
- grpc_closure *on_complete;
- grpc_byte_stream **pp_recv_message;
+ grpc_closure* recv_message_ready;
+ grpc_closure* on_complete;
+ grpc_byte_stream** pp_recv_message;
grpc_slice_buffer read_slice_buffer;
grpc_slice_buffer_stream read_stream;
@@ -62,11 +62,13 @@ typedef struct call_data {
grpc_closure hs_recv_message_ready;
} call_data;
-typedef struct channel_data { uint8_t unused; } channel_data;
+typedef struct channel_data {
+ uint8_t unused;
+} channel_data;
-static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
+static grpc_error* server_filter_outgoing_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
if (b->idx.named.grpc_message != NULL) {
grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
GRPC_MDVALUE(b->idx.named.grpc_message->md),
@@ -82,8 +84,8 @@ static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void add_error(const char *error_name, grpc_error **cumulative,
- grpc_error *new_err) {
+static void add_error(const char* error_name, grpc_error** cumulative,
+ grpc_error* new_err) {
if (new_err == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
*cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
@@ -91,12 +93,12 @@ static void add_error(const char *error_name, grpc_error **cumulative,
*cumulative = grpc_error_add_child(*cumulative, new_err);
}
-static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_metadata_batch *b) {
- call_data *calld = (call_data *)elem->call_data;
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *error_name = "Failed processing incoming headers";
+static grpc_error* server_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_metadata_batch* b) {
+ call_data* calld = (call_data*)elem->call_data;
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* error_name = "Failed processing incoming headers";
if (b->idx.named.method != NULL) {
if (grpc_mdelem_eq(b->idx.named.method->md, GRPC_MDELEM_METHOD_POST)) {
@@ -183,7 +185,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
} else {
/* TODO(klempner): We're currently allowing this, but we shouldn't
see it without a proxy so log for now. */
- char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
+ char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md),
GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "Unexpected content-type '%s'", val);
gpr_free(val);
@@ -203,7 +205,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
* query parameter which is base64 encoded request payload. */
const char k_query_separator = '?';
grpc_slice path_slice = GRPC_MDVALUE(b->idx.named.path->md);
- uint8_t *path_ptr = (uint8_t *)GRPC_SLICE_START_PTR(path_slice);
+ uint8_t* path_ptr = (uint8_t*)GRPC_SLICE_START_PTR(path_slice);
size_t path_length = GRPC_SLICE_LENGTH(path_slice);
/* offset of the character '?' */
size_t offset = 0;
@@ -226,7 +228,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_add(
&calld->read_slice_buffer,
grpc_base64_decode_with_len(
- exec_ctx, (const char *)GRPC_SLICE_START_PTR(query_slice),
+ exec_ctx, (const char*)GRPC_SLICE_START_PTR(query_slice),
GRPC_SLICE_LENGTH(query_slice), k_url_safe));
grpc_slice_buffer_stream_init(&calld->read_stream,
&calld->read_slice_buffer, 0);
@@ -238,15 +240,15 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
}
if (b->idx.named.host != NULL && b->idx.named.authority == NULL) {
- grpc_linked_mdelem *el = b->idx.named.host;
+ grpc_linked_mdelem* el = b->idx.named.host;
grpc_mdelem md = GRPC_MDELEM_REF(el->md);
grpc_metadata_batch_remove(exec_ctx, b, el);
- add_error(
- error_name, &error,
- grpc_metadata_batch_add_head(
- exec_ctx, b, el, grpc_mdelem_from_slices(
- exec_ctx, GRPC_MDSTR_AUTHORITY,
- grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
+ add_error(error_name, &error,
+ grpc_metadata_batch_add_head(
+ exec_ctx, b, el,
+ grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_AUTHORITY,
+ grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
GRPC_MDELEM_UNREF(exec_ctx, md);
}
@@ -261,10 +263,10 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
return error;
}
-static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_on_recv(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@@ -274,15 +276,15 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err);
}
-static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_on_complete(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
- : (grpc_byte_stream *)&calld->read_stream;
+ : (grpc_byte_stream*)&calld->read_stream;
// Re-enter call combiner for recv_message_ready, since the surface
// code will release the call combiner for each callback it receives.
GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
@@ -294,10 +296,10 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
}
-static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void hs_recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and payload will be
// returned in hs_on_complete callback.
@@ -310,15 +312,15 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
-static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+static grpc_error* hs_mutate_op(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
/* grab pointers to our data from the call element */
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
if (op->send_initial_metadata) {
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *error_name = "Failed sending initial metadata";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* error_name = "Failed sending initial metadata";
add_error(
error_name, &error,
grpc_metadata_batch_add_head(
@@ -364,7 +366,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
}
if (op->send_trailing_metadata) {
- grpc_error *error = server_filter_outgoing_metadata(
+ grpc_error* error = server_filter_outgoing_metadata(
exec_ctx, elem,
op->payload->send_trailing_metadata.send_trailing_metadata);
if (error != GRPC_ERROR_NONE) return error;
@@ -374,11 +376,11 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
}
static void hs_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ call_data* calld = (call_data*)elem->call_data;
GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
- grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
+ grpc_error* error = hs_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
calld->call_combiner);
@@ -389,11 +391,11 @@ static void hs_start_transport_stream_op_batch(
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
/* grab pointers to our data from the call element */
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
/* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
@@ -407,24 +409,24 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_stream_op_batch,
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index ca8a3b2a13..77b086c324 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -43,21 +43,21 @@ typedef struct call_data {
/* stores the recv_initial_metadata op's ready closure, which we wrap with our
* own (on_initial_md_ready) in order to capture the incoming initial metadata
* */
- grpc_closure *ops_recv_initial_metadata_ready;
+ grpc_closure* ops_recv_initial_metadata_ready;
/* to get notified of the availability of the incoming initial metadata. */
grpc_closure on_initial_md_ready;
- grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata;
} call_data;
typedef struct channel_data {
intptr_t id; /**< an id unique to the channel */
} channel_data;
-static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *err) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+static void on_initial_md_ready(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* err) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@@ -85,10 +85,10 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@@ -108,10 +108,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@@ -136,12 +136,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = (channel_data *)elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@@ -158,8 +158,8 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
grpc_load_reporting_call_data lr_call_data = {
@@ -173,11 +173,11 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
*/
}
-static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
- void *user_data,
+static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx* exec_ctx,
+ void* user_data,
grpc_mdelem md) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@@ -186,10 +186,10 @@ static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
}
static void lr_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
index 94d19cc249..356f8b8e66 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
@@ -33,4 +33,4 @@ extern const grpc_channel_filter grpc_server_load_reporting_filter;
#endif
#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \
- */
+ */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
index 223fb3ee8b..b26cbe3579 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
@@ -32,17 +32,17 @@
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
-static bool is_load_reporting_enabled(const grpc_channel_args *a) {
+static bool is_load_reporting_enabled(const grpc_channel_args* a) {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(a, GRPC_ARG_ENABLE_LOAD_REPORTING), false);
}
static bool maybe_add_server_load_reporting_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
- const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
- grpc_channel_stack_builder_iterator *it =
+ const grpc_channel_filter* filter = (const grpc_channel_filter*)arg;
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_iterator_find(builder, filter->name);
const bool already_has_load_reporting_filter =
!grpc_channel_stack_builder_iterator_is_end(it);
@@ -55,7 +55,7 @@ static bool maybe_add_server_load_reporting_filter(
}
grpc_arg grpc_load_reporting_enable_arg() {
- return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING,
+ return grpc_channel_arg_integer_create((char*)GRPC_ARG_ENABLE_LOAD_REPORTING,
1);
}
@@ -64,7 +64,7 @@ grpc_arg grpc_load_reporting_enable_arg() {
extern "C" void grpc_server_load_reporting_plugin_init(void) {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
maybe_add_server_load_reporting_filter,
- (void *)&grpc_server_load_reporting_filter);
+ (void*)&grpc_server_load_reporting_filter);
}
extern "C" void grpc_server_load_reporting_plugin_shutdown() {}
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
index 65e254eb53..a6448ce97e 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
@@ -49,11 +49,11 @@ typedef struct grpc_load_reporting_call_data {
/** Only valid when \a source is \a GRPC_LR_POINT_CALL_DESTRUCTION, that is,
* once the call has completed */
- const grpc_call_final_info *final_info;
+ const grpc_call_final_info* final_info;
- const char *initial_md_string; /**< value string for LR's initial md key */
- const char *trailing_md_string; /**< value string for LR's trailing md key */
- const char *method_name; /**< Corresponds to :path header */
+ const char* initial_md_string; /**< value string for LR's initial md key */
+ const char* trailing_md_string; /**< value string for LR's trailing md key */
+ const char* method_name; /**< Corresponds to :path header */
} grpc_load_reporting_call_data;
/** Return a \a grpc_arg enabling load reporting */
@@ -64,4 +64,4 @@ grpc_arg grpc_load_reporting_enable_arg();
#endif
#endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \
- */
+ */
diff --git a/src/core/ext/filters/workarounds/workaround_utils.cc b/src/core/ext/filters/workarounds/workaround_utils.cc
index e600fbee67..9db42fbeee 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.cc
+++ b/src/core/ext/filters/workarounds/workaround_utils.cc
@@ -21,26 +21,26 @@
user_agent_parser ua_parser[GRPC_MAX_WORKAROUND_ID];
-static void destroy_user_agent_md(void *user_agent_md) {
+static void destroy_user_agent_md(void* user_agent_md) {
gpr_free(user_agent_md);
}
-grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md) {
- grpc_workaround_user_agent_md *user_agent_md =
- (grpc_workaround_user_agent_md *)grpc_mdelem_get_user_data(
+grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md) {
+ grpc_workaround_user_agent_md* user_agent_md =
+ (grpc_workaround_user_agent_md*)grpc_mdelem_get_user_data(
md, destroy_user_agent_md);
if (NULL != user_agent_md) {
return user_agent_md;
}
- user_agent_md = (grpc_workaround_user_agent_md *)gpr_malloc(
+ user_agent_md = (grpc_workaround_user_agent_md*)gpr_malloc(
sizeof(grpc_workaround_user_agent_md));
for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) {
if (ua_parser[i]) {
user_agent_md->workaround_active[i] = ua_parser[i](md);
}
}
- grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void *)user_agent_md);
+ grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void*)user_agent_md);
return user_agent_md;
}
diff --git a/src/core/ext/filters/workarounds/workaround_utils.h b/src/core/ext/filters/workarounds/workaround_utils.h
index 3913cae6b2..a954ad4001 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.h
+++ b/src/core/ext/filters/workarounds/workaround_utils.h
@@ -32,7 +32,7 @@ typedef struct grpc_workaround_user_agent_md {
bool workaround_active[GRPC_MAX_WORKAROUND_ID];
} grpc_workaround_user_agent_md;
-grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md);
+grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md);
typedef bool (*user_agent_parser)(grpc_mdelem);
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.cc b/src/core/ext/transport/chttp2/alpn/alpn.cc
index ca2e801ec8..89892457d6 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.cc
+++ b/src/core/ext/transport/chttp2/alpn/alpn.cc
@@ -21,9 +21,9 @@
#include <grpc/support/useful.h>
/* in order of preference */
-static const char *const supported_versions[] = {"grpc-exp", "h2"};
+static const char* const supported_versions[] = {"grpc-exp", "h2"};
-int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) {
+int grpc_chttp2_is_alpn_version_supported(const char* version, size_t size) {
size_t i;
for (i = 0; i < GPR_ARRAY_SIZE(supported_versions); i++) {
if (!strncmp(version, supported_versions[i], size)) return 1;
@@ -35,7 +35,7 @@ size_t grpc_chttp2_num_alpn_versions(void) {
return GPR_ARRAY_SIZE(supported_versions);
}
-const char *grpc_chttp2_get_alpn_version_index(size_t i) {
+const char* grpc_chttp2_get_alpn_version_index(size_t i) {
GPR_ASSERT(i < GPR_ARRAY_SIZE(supported_versions));
return supported_versions[i];
}
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.h b/src/core/ext/transport/chttp2/alpn/alpn.h
index 99b928ea59..4a420e83e0 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.h
+++ b/src/core/ext/transport/chttp2/alpn/alpn.h
@@ -26,14 +26,14 @@ extern "C" {
#endif
/* Returns 1 if the version is supported, 0 otherwise. */
-int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size);
+int grpc_chttp2_is_alpn_version_supported(const char* version, size_t size);
/* Returns the number of protocol versions to advertise */
size_t grpc_chttp2_num_alpn_versions(void);
/* Returns the protocol version at index i (0 <= i <
* grpc_chttp2_num_alpn_versions()) */
-const char *grpc_chttp2_get_alpn_version_index(size_t i);
+const char* grpc_chttp2_get_alpn_version_index(size_t i);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.cc b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
index 74839f2156..6cd476f4ca 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.cc
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
@@ -45,25 +45,25 @@ typedef struct {
bool shutdown;
bool connecting;
- grpc_closure *notify;
+ grpc_closure* notify;
grpc_connect_in_args args;
- grpc_connect_out_args *result;
+ grpc_connect_out_args* result;
- grpc_endpoint *endpoint; // Non-NULL until handshaking starts.
+ grpc_endpoint* endpoint; // Non-NULL until handshaking starts.
grpc_closure connected;
- grpc_handshake_manager *handshake_mgr;
+ grpc_handshake_manager* handshake_mgr;
} chttp2_connector;
-static void chttp2_connector_ref(grpc_connector *con) {
- chttp2_connector *c = (chttp2_connector *)con;
+static void chttp2_connector_ref(grpc_connector* con) {
+ chttp2_connector* c = (chttp2_connector*)con;
gpr_ref(&c->refs);
}
-static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx,
- grpc_connector *con) {
- chttp2_connector *c = (chttp2_connector *)con;
+static void chttp2_connector_unref(grpc_exec_ctx* exec_ctx,
+ grpc_connector* con) {
+ chttp2_connector* c = (chttp2_connector*)con;
if (gpr_unref(&c->refs)) {
gpr_mu_destroy(&c->mu);
// If handshaking is not yet in progress, destroy the endpoint.
@@ -73,9 +73,9 @@ static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx,
}
}
-static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_connector *con, grpc_error *why) {
- chttp2_connector *c = (chttp2_connector *)con;
+static void chttp2_connector_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_connector* con, grpc_error* why) {
+ chttp2_connector* c = (chttp2_connector*)con;
gpr_mu_lock(&c->mu);
c->shutdown = true;
if (c->handshake_mgr != NULL) {
@@ -91,10 +91,10 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(why);
}
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
- chttp2_connector *c = (chttp2_connector *)args->user_data;
+static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
+ chttp2_connector* c = (chttp2_connector*)args->user_data;
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
@@ -124,17 +124,17 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
args->read_buffer);
c->result->channel_args = args->args;
}
- grpc_closure *notify = c->notify;
+ grpc_closure* notify = c->notify;
c->notify = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
c->handshake_mgr = NULL;
gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, (grpc_connector *)c);
+ chttp2_connector_unref(exec_ctx, (grpc_connector*)c);
}
-static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
- chttp2_connector *c) {
+static void start_handshake_locked(grpc_exec_ctx* exec_ctx,
+ chttp2_connector* c) {
c->handshake_mgr = grpc_handshake_manager_create();
grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args,
c->handshake_mgr);
@@ -146,8 +146,8 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->endpoint = NULL; // Endpoint handed off to handshake manager.
}
-static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- chttp2_connector *c = (chttp2_connector *)arg;
+static void connected(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ chttp2_connector* c = (chttp2_connector*)arg;
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting);
c->connecting = false;
@@ -158,14 +158,14 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
error = GRPC_ERROR_REF(error);
}
memset(c->result, 0, sizeof(*c->result));
- grpc_closure *notify = c->notify;
+ grpc_closure* notify = c->notify;
c->notify = NULL;
GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
if (c->endpoint != NULL) {
grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
}
gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, (grpc_connector *)arg);
+ chttp2_connector_unref(exec_ctx, (grpc_connector*)arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
start_handshake_locked(exec_ctx, c);
@@ -173,12 +173,12 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
}
-static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx,
- grpc_connector *con,
- const grpc_connect_in_args *args,
- grpc_connect_out_args *result,
- grpc_closure *notify) {
- chttp2_connector *c = (chttp2_connector *)con;
+static void chttp2_connector_connect(grpc_exec_ctx* exec_ctx,
+ grpc_connector* con,
+ const grpc_connect_in_args* args,
+ grpc_connect_out_args* result,
+ grpc_closure* notify) {
+ chttp2_connector* c = (chttp2_connector*)con;
grpc_resolved_address addr;
grpc_get_subchannel_address_arg(exec_ctx, args->channel_args, &addr);
gpr_mu_lock(&c->mu);
@@ -201,8 +201,8 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_ref, chttp2_connector_unref, chttp2_connector_shutdown,
chttp2_connector_connect};
-grpc_connector *grpc_chttp2_connector_create() {
- chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c));
+grpc_connector* grpc_chttp2_connector_create() {
+ chttp2_connector* c = (chttp2_connector*)gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
index 6410a6043d..26c7f0debf 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
@@ -31,37 +31,37 @@
#include "src/core/lib/surface/channel.h"
static void client_channel_factory_ref(
- grpc_client_channel_factory *cc_factory) {}
+ grpc_client_channel_factory* cc_factory) {}
static void client_channel_factory_unref(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {}
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory) {}
-static grpc_subchannel *client_channel_factory_create_subchannel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- const grpc_subchannel_args *args) {
- grpc_connector *connector = grpc_chttp2_connector_create();
- grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args);
+static grpc_subchannel* client_channel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
+ const grpc_subchannel_args* args) {
+ grpc_connector* connector = grpc_chttp2_connector_create();
+ grpc_subchannel* s = grpc_subchannel_create(exec_ctx, connector, args);
grpc_connector_unref(exec_ctx, connector);
return s;
}
-static grpc_channel *client_channel_factory_create_channel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- const char *target, grpc_client_channel_type type,
- const grpc_channel_args *args) {
+static grpc_channel* client_channel_factory_create_channel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
+ const char* target, grpc_client_channel_type type,
+ const grpc_channel_args* args) {
if (target == NULL) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return NULL;
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SERVER_URI,
+ (char*)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
- const char *to_remove[] = {GRPC_ARG_SERVER_URI};
- grpc_channel_args *new_args =
+ const char* to_remove[] = {GRPC_ARG_SERVER_URI};
+ grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
gpr_free(arg.value.string);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
+ grpc_channel* channel = grpc_channel_create(exec_ctx, target, new_args,
GRPC_CLIENT_CHANNEL, NULL);
grpc_channel_args_destroy(exec_ctx, new_args);
return channel;
@@ -79,9 +79,9 @@ static grpc_client_channel_factory client_channel_factory = {
Asynchronously: - resolve target
- connect to it (trying alternatives as presented)
- perform handshakes */
-grpc_channel *grpc_insecure_channel_create(const char *target,
- const grpc_channel_args *args,
- void *reserved) {
+grpc_channel* grpc_insecure_channel_create(const char* target,
+ const grpc_channel_args* args,
+ void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE(
"grpc_insecure_channel_create(target=%s, args=%p, reserved=%p)", 3,
@@ -90,15 +90,16 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
// Add channel arg containing the client channel factory.
grpc_arg arg =
grpc_client_channel_factory_create_channel_arg(&client_channel_factory);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
// Create channel.
- grpc_channel *channel = client_channel_factory_create_channel(
+ grpc_channel* channel = client_channel_factory_create_channel(
&exec_ctx, &client_channel_factory, target,
GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
// Clean up.
grpc_channel_args_destroy(&exec_ctx, new_args);
grpc_exec_ctx_finish(&exec_ctx);
- return channel != NULL ? channel : grpc_lame_client_channel_create(
- target, GRPC_STATUS_INTERNAL,
- "Failed to create client channel");
+ return channel != NULL ? channel
+ : grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INTERNAL,
+ "Failed to create client channel");
}
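Context for readers skimming the reformatted hunk above: grpc_insecure_channel_create is the public entry point whose comment notes that target resolution, connection, and handshaking all happen asynchronously after the call returns, and that a lame channel (rather than NULL) is handed back on failure. A minimal caller sketch follows; the target address is illustrative and not taken from this diff.

#include <grpc/grpc.h>

int main(void) {
  grpc_init();
  /* Returns immediately; resolution, connection and handshakes proceed
     asynchronously. A lame channel is returned if creation fails. */
  grpc_channel* channel =
      grpc_insecure_channel_create("localhost:50051", NULL /* args */, NULL);
  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}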
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
index dd88136f7b..0974a7c393 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
@@ -35,42 +35,43 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport.h"
-grpc_channel *grpc_insecure_channel_create_from_fd(
- const char *target, int fd, const grpc_channel_args *args) {
+grpc_channel* grpc_insecure_channel_create_from_fd(
+ const char* target, int fd, const grpc_channel_args* args) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3,
(target, fd, args));
grpc_arg default_authority_arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority");
- grpc_channel_args *final_args =
+ (char*)GRPC_ARG_DEFAULT_AUTHORITY, (char*)"test.authority");
+ grpc_channel_args* final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
int flags = fcntl(fd, F_GETFL, 0);
GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
- grpc_endpoint *client = grpc_tcp_client_create_from_fd(
+ grpc_endpoint* client = grpc_tcp_client_create_from_fd(
&exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
- grpc_transport *transport =
+ grpc_transport* transport =
grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
GPR_ASSERT(transport);
- grpc_channel *channel = grpc_channel_create(
+ grpc_channel* channel = grpc_channel_create(
&exec_ctx, target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
grpc_channel_args_destroy(&exec_ctx, final_args);
grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
grpc_exec_ctx_finish(&exec_ctx);
- return channel != NULL ? channel : grpc_lame_client_channel_create(
- target, GRPC_STATUS_INTERNAL,
- "Failed to create client channel");
+ return channel != NULL ? channel
+ : grpc_lame_client_channel_create(
+ target, GRPC_STATUS_INTERNAL,
+ "Failed to create client channel");
}
#else // !GPR_SUPPORT_CHANNELS_FROM_FD
-grpc_channel *grpc_insecure_channel_create_from_fd(
- const char *target, int fd, const grpc_channel_args *args) {
+grpc_channel* grpc_insecure_channel_create_from_fd(
+ const char* target, int fd, const grpc_channel_args* args) {
GPR_ASSERT(0);
return NULL;
}
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
index fe296cf4ff..68c1e1868c 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
@@ -38,14 +38,14 @@
#include "src/core/lib/surface/channel.h"
static void client_channel_factory_ref(
- grpc_client_channel_factory *cc_factory) {}
+ grpc_client_channel_factory* cc_factory) {}
static void client_channel_factory_unref(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {}
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory) {}
-static grpc_subchannel_args *get_secure_naming_subchannel_args(
- grpc_exec_ctx *exec_ctx, const grpc_subchannel_args *args) {
- grpc_channel_credentials *channel_credentials =
+static grpc_subchannel_args* get_secure_naming_subchannel_args(
+ grpc_exec_ctx* exec_ctx, const grpc_subchannel_args* args) {
+ grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args->args);
if (channel_credentials == NULL) {
gpr_log(GPR_ERROR,
@@ -61,33 +61,33 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
return NULL;
}
// To which address are we connecting? By default, use the server URI.
- const grpc_arg *server_uri_arg =
+ const grpc_arg* server_uri_arg =
grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
GPR_ASSERT(server_uri_arg != NULL);
GPR_ASSERT(server_uri_arg->type == GRPC_ARG_STRING);
- const char *server_uri_str = server_uri_arg->value.string;
+ const char* server_uri_str = server_uri_arg->value.string;
GPR_ASSERT(server_uri_str != NULL);
- grpc_uri *server_uri =
+ grpc_uri* server_uri =
      grpc_uri_parse(exec_ctx, server_uri_str, true /* suppress errors */);
GPR_ASSERT(server_uri != NULL);
- const char *server_uri_path;
+ const char* server_uri_path;
server_uri_path =
server_uri->path[0] == '/' ? server_uri->path + 1 : server_uri->path;
- const grpc_slice_hash_table *targets_info =
+ const grpc_slice_hash_table* targets_info =
grpc_lb_targets_info_find_in_args(args->args);
- char *target_name_to_check = NULL;
+ char* target_name_to_check = NULL;
if (targets_info != NULL) { // LB channel
// Find the balancer name for the target.
- const char *target_uri_str =
+ const char* target_uri_str =
grpc_get_subchannel_address_uri_arg(args->args);
- grpc_uri *target_uri =
+ grpc_uri* target_uri =
grpc_uri_parse(exec_ctx, target_uri_str, false /* suppress errors */);
GPR_ASSERT(target_uri != NULL);
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path);
- const char *value =
- (const char *)grpc_slice_hash_table_get(targets_info, key);
+ const char* value =
+ (const char*)grpc_slice_hash_table_get(targets_info, key);
if (value != NULL) target_name_to_check = gpr_strdup(value);
grpc_slice_unref_internal(exec_ctx, key);
}
@@ -102,9 +102,9 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
}
grpc_uri_destroy(server_uri);
GPR_ASSERT(target_name_to_check != NULL);
- grpc_channel_security_connector *subchannel_security_connector = NULL;
+ grpc_channel_security_connector* subchannel_security_connector = NULL;
// Create the security connector using the credentials and target name.
- grpc_channel_args *new_args_from_connector = NULL;
+ grpc_channel_args* new_args_from_connector = NULL;
const grpc_security_status security_status =
grpc_channel_credentials_create_security_connector(
exec_ctx, channel_credentials, target_name_to_check, args->args,
@@ -120,7 +120,7 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
grpc_arg new_security_connector_arg =
grpc_security_connector_to_arg(&subchannel_security_connector->base);
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != NULL ? new_args_from_connector : args->args,
&new_security_connector_arg, 1);
GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &subchannel_security_connector->base,
@@ -128,17 +128,17 @@ static grpc_subchannel_args *get_secure_naming_subchannel_args(
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(exec_ctx, new_args_from_connector);
}
- grpc_subchannel_args *final_sc_args =
- (grpc_subchannel_args *)gpr_malloc(sizeof(*final_sc_args));
+ grpc_subchannel_args* final_sc_args =
+ (grpc_subchannel_args*)gpr_malloc(sizeof(*final_sc_args));
memcpy(final_sc_args, args, sizeof(*args));
final_sc_args->args = new_args;
return final_sc_args;
}
-static grpc_subchannel *client_channel_factory_create_subchannel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- const grpc_subchannel_args *args) {
- grpc_subchannel_args *subchannel_args =
+static grpc_subchannel* client_channel_factory_create_subchannel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
+ const grpc_subchannel_args* args) {
+ grpc_subchannel_args* subchannel_args =
get_secure_naming_subchannel_args(exec_ctx, args);
if (subchannel_args == NULL) {
gpr_log(
@@ -146,33 +146,33 @@ static grpc_subchannel *client_channel_factory_create_subchannel(
"Failed to create subchannel arguments during subchannel creation.");
return NULL;
}
- grpc_connector *connector = grpc_chttp2_connector_create();
- grpc_subchannel *s =
+ grpc_connector* connector = grpc_chttp2_connector_create();
+ grpc_subchannel* s =
grpc_subchannel_create(exec_ctx, connector, subchannel_args);
grpc_connector_unref(exec_ctx, connector);
grpc_channel_args_destroy(exec_ctx,
- (grpc_channel_args *)subchannel_args->args);
+ (grpc_channel_args*)subchannel_args->args);
gpr_free(subchannel_args);
return s;
}
-static grpc_channel *client_channel_factory_create_channel(
- grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory,
- const char *target, grpc_client_channel_type type,
- const grpc_channel_args *args) {
+static grpc_channel* client_channel_factory_create_channel(
+ grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
+ const char* target, grpc_client_channel_type type,
+ const grpc_channel_args* args) {
if (target == NULL) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return NULL;
}
// Add channel arg containing the server URI.
grpc_arg arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_SERVER_URI,
+ (char*)GRPC_ARG_SERVER_URI,
grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
- const char *to_remove[] = {GRPC_ARG_SERVER_URI};
- grpc_channel_args *new_args =
+ const char* to_remove[] = {GRPC_ARG_SERVER_URI};
+ grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
gpr_free(arg.value.string);
- grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args,
+ grpc_channel* channel = grpc_channel_create(exec_ctx, target, new_args,
GRPC_CLIENT_CHANNEL, NULL);
grpc_channel_args_destroy(exec_ctx, new_args);
return channel;
@@ -190,24 +190,24 @@ static grpc_client_channel_factory client_channel_factory = {
// Asynchronously: - resolve target
// - connect to it (trying alternatives as presented)
// - perform handshakes
-grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
- const char *target,
- const grpc_channel_args *args,
- void *reserved) {
+grpc_channel* grpc_secure_channel_create(grpc_channel_credentials* creds,
+ const char* target,
+ const grpc_channel_args* args,
+ void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE(
"grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
"reserved=%p)",
- 4, ((void *)creds, target, (void *)args, (void *)reserved));
+ 4, ((void*)creds, target, (void*)args, (void*)reserved));
GPR_ASSERT(reserved == NULL);
- grpc_channel *channel = NULL;
+ grpc_channel* channel = NULL;
if (creds != NULL) {
// Add channel args containing the client channel factory and channel
// credentials.
grpc_arg args_to_add[] = {
grpc_client_channel_factory_create_channel_arg(&client_channel_factory),
grpc_channel_credentials_to_arg(creds)};
- grpc_channel_args *new_args = grpc_channel_args_copy_and_add(
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
// Create channel.
channel = client_channel_factory_create_channel(
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.cc b/src/core/ext/transport/chttp2/server/chttp2_server.cc
index 7ac7f4ece8..98683acc59 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.cc
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.cc
@@ -42,31 +42,31 @@
#include "src/core/lib/surface/server.h"
typedef struct {
- grpc_server *server;
- grpc_tcp_server *tcp_server;
- grpc_channel_args *args;
+ grpc_server* server;
+ grpc_tcp_server* tcp_server;
+ grpc_channel_args* args;
gpr_mu mu;
bool shutdown;
grpc_closure tcp_server_shutdown_complete;
- grpc_closure *server_destroy_listener_done;
- grpc_handshake_manager *pending_handshake_mgrs;
+ grpc_closure* server_destroy_listener_done;
+ grpc_handshake_manager* pending_handshake_mgrs;
} server_state;
typedef struct {
- server_state *svr_state;
- grpc_pollset *accepting_pollset;
- grpc_tcp_server_acceptor *acceptor;
- grpc_handshake_manager *handshake_mgr;
+ server_state* svr_state;
+ grpc_pollset* accepting_pollset;
+ grpc_tcp_server_acceptor* acceptor;
+ grpc_handshake_manager* handshake_mgr;
} server_connection_state;
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
- server_connection_state *connection_state =
- (server_connection_state *)args->user_data;
+static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
+ server_connection_state* connection_state =
+ (server_connection_state*)args->user_data;
gpr_mu_lock(&connection_state->svr_state->mu);
if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) {
- const char *error_str = grpc_error_string(error);
+ const char* error_str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);
if (error == GRPC_ERROR_NONE && args->endpoint != NULL) {
@@ -87,7 +87,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
// handshaker may have handed off the connection to some external
// code, so we can just clean up here without creating a transport.
if (args->endpoint != NULL) {
- grpc_transport *transport =
+ grpc_transport* transport =
grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
grpc_server_setup_transport(
exec_ctx, connection_state->svr_state->server, transport,
@@ -107,10 +107,10 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(connection_state);
}
-static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
- grpc_pollset *accepting_pollset,
- grpc_tcp_server_acceptor *acceptor) {
- server_state *state = (server_state *)arg;
+static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
+ grpc_pollset* accepting_pollset,
+ grpc_tcp_server_acceptor* acceptor) {
+ server_state* state = (server_state*)arg;
gpr_mu_lock(&state->mu);
if (state->shutdown) {
gpr_mu_unlock(&state->mu);
@@ -119,13 +119,13 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_free(acceptor);
return;
}
- grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create();
+ grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create();
grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs,
handshake_mgr);
gpr_mu_unlock(&state->mu);
grpc_tcp_server_ref(state->tcp_server);
- server_connection_state *connection_state =
- (server_connection_state *)gpr_malloc(sizeof(*connection_state));
+ server_connection_state* connection_state =
+ (server_connection_state*)gpr_malloc(sizeof(*connection_state));
connection_state->svr_state = state;
connection_state->accepting_pollset = accepting_pollset;
connection_state->acceptor = acceptor;
@@ -142,10 +142,10 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
}
/* Server callback: start listening on our ports */
-static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
- void *arg, grpc_pollset **pollsets,
+static void server_start_listener(grpc_exec_ctx* exec_ctx, grpc_server* server,
+ void* arg, grpc_pollset** pollsets,
size_t pollset_count) {
- server_state *state = (server_state *)arg;
+ server_state* state = (server_state*)arg;
gpr_mu_lock(&state->mu);
state->shutdown = false;
gpr_mu_unlock(&state->mu);
@@ -153,12 +153,12 @@ static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
on_accept, state);
}
-static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- server_state *state = (server_state *)arg;
+static void tcp_server_shutdown_complete(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ server_state* state = (server_state*)arg;
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
- grpc_closure *destroy_done = state->server_destroy_listener_done;
+ grpc_closure* destroy_done = state->server_destroy_listener_done;
GPR_ASSERT(state->shutdown);
grpc_handshake_manager_pending_list_shutdown_all(
exec_ctx, state->pending_handshake_mgrs, GRPC_ERROR_REF(error));
@@ -177,31 +177,31 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
/* Server callback: destroy the tcp listener (so we don't generate further
callbacks) */
-static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
- grpc_server *server, void *arg,
- grpc_closure *destroy_done) {
- server_state *state = (server_state *)arg;
+static void server_destroy_listener(grpc_exec_ctx* exec_ctx,
+ grpc_server* server, void* arg,
+ grpc_closure* destroy_done) {
+ server_state* state = (server_state*)arg;
gpr_mu_lock(&state->mu);
state->shutdown = true;
state->server_destroy_listener_done = destroy_done;
- grpc_tcp_server *tcp_server = state->tcp_server;
+ grpc_tcp_server* tcp_server = state->tcp_server;
gpr_mu_unlock(&state->mu);
grpc_tcp_server_shutdown_listeners(exec_ctx, tcp_server);
grpc_tcp_server_unref(exec_ctx, tcp_server);
}
-grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
- grpc_server *server, const char *addr,
- grpc_channel_args *args,
- int *port_num) {
- grpc_resolved_addresses *resolved = NULL;
- grpc_tcp_server *tcp_server = NULL;
+grpc_error* grpc_chttp2_server_add_port(grpc_exec_ctx* exec_ctx,
+ grpc_server* server, const char* addr,
+ grpc_channel_args* args,
+ int* port_num) {
+ grpc_resolved_addresses* resolved = NULL;
+ grpc_tcp_server* tcp_server = NULL;
size_t i;
size_t count = 0;
int port_temp;
- grpc_error *err = GRPC_ERROR_NONE;
- server_state *state = NULL;
- grpc_error **errors = NULL;
+ grpc_error* err = GRPC_ERROR_NONE;
+ server_state* state = NULL;
+ grpc_error** errors = NULL;
size_t naddrs = 0;
*port_num = -1;
@@ -211,7 +211,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
- state = (server_state *)gpr_zalloc(sizeof(*state));
+ state = (server_state*)gpr_zalloc(sizeof(*state));
GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
@@ -228,7 +228,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&state->mu);
naddrs = resolved->naddrs;
- errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
+ errors = (grpc_error**)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =
grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);
@@ -242,21 +242,22 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
}
}
if (count == 0) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
naddrs);
err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
goto error;
} else if (count != naddrs) {
- char *msg;
- gpr_asprintf(&msg, "Only %" PRIuPTR
- " addresses added out of total %" PRIuPTR " resolved",
+ char* msg;
+ gpr_asprintf(&msg,
+ "Only %" PRIuPTR " addresses added out of total %" PRIuPTR
+ " resolved",
count, naddrs);
err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
- const char *warning_message = grpc_error_string(err);
+ const char* warning_message = grpc_error_string(err);
gpr_log(GPR_INFO, "WARNING: %s", warning_message);
/* we managed to bind some addresses: continue */
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h
index 2ac155160f..4e0e7aa617 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.h
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.h
@@ -29,9 +29,9 @@ extern "C" {
/// Adds a port to \a server. Sets \a port_num to the port number.
/// Takes ownership of \a args.
-grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
- grpc_server *server, const char *addr,
- grpc_channel_args *args, int *port_num);
+grpc_error* grpc_chttp2_server_add_port(grpc_exec_ctx* exec_ctx,
+ grpc_server* server, const char* addr,
+ grpc_channel_args* args, int* port_num);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
index d42b2d123e..8984896538 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
@@ -25,16 +25,16 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
-int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
+int grpc_server_add_insecure_http2_port(grpc_server* server, const char* addr) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
int port_num = 0;
GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2,
(server, addr));
- grpc_error *err = grpc_chttp2_server_add_port(
+ grpc_error* err = grpc_chttp2_server_add_port(
&exec_ctx, server, addr,
grpc_channel_args_copy(grpc_server_get_channel_args(server)), &port_num);
if (err != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(err);
+ const char* msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
GRPC_ERROR_UNREF(err);
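For orientation, grpc_server_add_insecure_http2_port above is the thin public wrapper over grpc_chttp2_server_add_port. A minimal sketch of a caller follows; the listen address, completion-queue wiring, and the omitted shutdown path are illustrative assumptions, not taken from this diff.

#include <grpc/grpc.h>

int start_plaintext_server(grpc_completion_queue** out_cq) {
  grpc_init();
  grpc_server* server = grpc_server_create(NULL /* args */, NULL /* reserved */);
  /* Wraps grpc_chttp2_server_add_port(); returns the bound port or 0 on failure. */
  int port = grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
  *out_cq = grpc_completion_queue_create_for_next(NULL);
  grpc_server_register_completion_queue(server, *out_cq, NULL);
  grpc_server_start(server);
  return port;
}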
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
index e647067f73..e37d69e5e9 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
@@ -34,25 +34,25 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
-void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
- void *reserved, int fd) {
+void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
+ void* reserved, int fd) {
GPR_ASSERT(reserved == NULL);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- char *name;
+ char* name;
gpr_asprintf(&name, "fd:%d", fd);
- grpc_endpoint *server_endpoint =
+ grpc_endpoint* server_endpoint =
grpc_tcp_create(&exec_ctx, grpc_fd_create(fd, name),
grpc_server_get_channel_args(server), name);
gpr_free(name);
- const grpc_channel_args *server_args = grpc_server_get_channel_args(server);
- grpc_transport *transport = grpc_create_chttp2_transport(
+ const grpc_channel_args* server_args = grpc_server_get_channel_args(server);
+ grpc_transport* transport = grpc_create_chttp2_transport(
&exec_ctx, server_args, server_endpoint, 0 /* is_client */);
- grpc_pollset **pollsets;
+ grpc_pollset** pollsets;
size_t num_pollsets = 0;
grpc_server_get_pollsets(server, &pollsets, &num_pollsets);
@@ -67,8 +67,8 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
#else // !GPR_SUPPORT_CHANNELS_FROM_FD
-void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
- void *reserved, int fd) {
+void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
+ void* reserved, int fd) {
GPR_ASSERT(0);
}
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
index e74a138d23..4b2e348780 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
@@ -34,14 +34,14 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/server.h"
-int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
- grpc_server_credentials *creds) {
+int grpc_server_add_secure_http2_port(grpc_server* server, const char* addr,
+ grpc_server_credentials* creds) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *err = GRPC_ERROR_NONE;
- grpc_server_security_connector *sc = NULL;
+ grpc_error* err = GRPC_ERROR_NONE;
+ grpc_server_security_connector* sc = NULL;
int port_num = 0;
grpc_security_status status;
- grpc_channel_args *args = NULL;
+ grpc_channel_args* args = NULL;
GRPC_API_TRACE(
"grpc_server_add_secure_http2_port("
"server=%p, addr=%s, creds=%p)",
@@ -55,7 +55,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
status =
grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc);
if (status != GRPC_SECURITY_OK) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg,
"Unable to create secure server with credentials of type %s.",
creds->type);
@@ -79,7 +79,7 @@ done:
}
grpc_exec_ctx_finish(&exec_ctx);
if (err != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(err);
+ const char* msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
GRPC_ERROR_UNREF(err);
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.cc b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
index 5a99cbeffc..3ccae7afc3 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.cc
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
@@ -49,7 +49,7 @@ static uint8_t decode_table[] = {
static const uint8_t tail_xtra[4] = {0, 0, 1, 2};
-static bool input_is_valid(uint8_t *input_ptr, size_t length) {
+static bool input_is_valid(uint8_t* input_ptr, size_t length) {
size_t i;
for (i = 0; i < length; ++i) {
@@ -75,7 +75,7 @@ static bool input_is_valid(uint8_t *input_ptr, size_t length) {
#define COMPOSE_OUTPUT_BYTE_2(input_ptr) \
(uint8_t)((decode_table[input_ptr[2]] << 6) | decode_table[input_ptr[3]])
-bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) {
+bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx) {
size_t input_tail;
if (ctx->input_cur > ctx->input_end || ctx->output_cur > ctx->output_end) {
@@ -130,7 +130,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) {
return true;
}
-grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
+grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx* exec_ctx,
grpc_slice input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t output_length = input_length / 4 * 3;
@@ -147,7 +147,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
}
if (input_length > 0) {
- uint8_t *input_end = GRPC_SLICE_END_PTR(input);
+ uint8_t* input_end = GRPC_SLICE_END_PTR(input);
if (*(--input_end) == '=') {
output_length--;
if (*(--input_end) == '=') {
@@ -164,7 +164,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
ctx.contains_tail = false;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = grpc_slice_to_c_string(input);
+ char* s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
grpc_slice_unref_internal(exec_ctx, output);
@@ -175,7 +175,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx,
return output;
}
-grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx* exec_ctx,
grpc_slice input,
size_t output_length) {
size_t input_length = GRPC_SLICE_LENGTH(input);
@@ -210,7 +210,7 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
ctx.contains_tail = true;
if (!grpc_base64_decode_partial(&ctx)) {
- char *s = grpc_slice_to_c_string(input);
+ char* s = grpc_slice_to_c_string(input);
gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
gpr_free(s);
grpc_slice_unref_internal(exec_ctx, output);
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h
index 1c0b2b7e97..a9c4c9a0f6 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h
@@ -28,10 +28,10 @@ extern "C" {
struct grpc_base64_decode_context {
/* input/output: */
- uint8_t *input_cur;
- uint8_t *input_end;
- uint8_t *output_cur;
- uint8_t *output_end;
+ uint8_t* input_cur;
+ uint8_t* input_end;
+ uint8_t* output_cur;
+ uint8_t* output_end;
  /* Indicate if the decoder should handle the tail of input data */
bool contains_tail;
};
@@ -40,16 +40,16 @@ struct grpc_base64_decode_context {
or output_end is reached. When input_end is reached, (input_end - input_cur)
is less than 4. When output_end is reached, (output_end - output_cur) is less
   than 3. Returns false if decoding fails. */
-bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx);
+bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx);
/* base64 decode a slice with pad chars. Returns a new slice, does not take
   ownership of the input. Returns an empty slice if decoding fails. */
-grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, grpc_slice input);
+grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx* exec_ctx, grpc_slice input);
/* base64 decode a slice without pad chars, data length is needed. Returns a new
slice, does not take ownership of the input. Returns an empty slice if
   decoding fails. */
-grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx,
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx* exec_ctx,
grpc_slice input,
size_t output_length);
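The doc comments above describe grpc_base64_decode_partial as a resumable decoder that stops at whichever of input_end/output_end it reaches first, with the cursor fields acting as both input and output. A small sketch of driving it directly follows; the input literal and buffer handling are illustrative, and this is not how the transport itself invokes it.

#include <string.h>
#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"

static size_t decode_example(uint8_t* out, size_t out_len) {
  char in[] = "Z3JwYw"; /* unpadded base64 for "grpc" */
  struct grpc_base64_decode_context ctx;
  ctx.input_cur = (uint8_t*)in;
  ctx.input_end = (uint8_t*)in + strlen(in);
  ctx.output_cur = out;
  ctx.output_end = out + out_len;
  ctx.contains_tail = true; /* the two-character tail encodes one final byte */
  if (!grpc_base64_decode_partial(&ctx)) return 0; /* invalid base64 input */
  return (size_t)(ctx.output_cur - out); /* bytes produced; cursors advanced */
}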
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.cc b/src/core/ext/transport/chttp2/transport/bin_encoder.cc
index 42d481b3c0..09f984d7b2 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.cc
@@ -52,8 +52,8 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
size_t tail_case = input_length % 3;
size_t output_length = input_triplets * 4 + tail_xtra[tail_case];
grpc_slice output = GRPC_SLICE_MALLOC(output_length);
- uint8_t *in = GRPC_SLICE_START_PTR(input);
- char *out = (char *)GRPC_SLICE_START_PTR(output);
+ uint8_t* in = GRPC_SLICE_START_PTR(input);
+ char* out = (char*)GRPC_SLICE_START_PTR(output);
size_t i;
/* encode full triplets */
@@ -85,15 +85,15 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
break;
}
- GPR_ASSERT(out == (char *)GRPC_SLICE_END_PTR(output));
+ GPR_ASSERT(out == (char*)GRPC_SLICE_END_PTR(output));
GPR_ASSERT(in == GRPC_SLICE_END_PTR(input));
return output;
}
grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) {
size_t nbits;
- uint8_t *in;
- uint8_t *out;
+ uint8_t* in;
+ uint8_t* out;
grpc_slice output;
uint32_t temp = 0;
uint32_t temp_length = 0;
@@ -136,17 +136,17 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) {
typedef struct {
uint32_t temp;
uint32_t temp_length;
- uint8_t *out;
+ uint8_t* out;
} huff_out;
-static void enc_flush_some(huff_out *out) {
+static void enc_flush_some(huff_out* out) {
while (out->temp_length > 8) {
out->temp_length -= 8;
*out->out++ = (uint8_t)(out->temp >> out->temp_length);
}
}
-static void enc_add2(huff_out *out, uint8_t a, uint8_t b) {
+static void enc_add2(huff_out* out, uint8_t a, uint8_t b) {
b64_huff_sym sa = huff_alphabet[a];
b64_huff_sym sb = huff_alphabet[b];
out->temp = (out->temp << (sa.length + sb.length)) |
@@ -155,7 +155,7 @@ static void enc_add2(huff_out *out, uint8_t a, uint8_t b) {
enc_flush_some(out);
}
-static void enc_add1(huff_out *out, uint8_t a) {
+static void enc_add1(huff_out* out, uint8_t a) {
b64_huff_sym sa = huff_alphabet[a];
out->temp = (out->temp << sa.length) | sa.bits;
out->temp_length += sa.length;
@@ -170,8 +170,8 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
size_t max_output_bits = 11 * output_syms;
size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0);
grpc_slice output = GRPC_SLICE_MALLOC(max_output_length);
- uint8_t *in = GRPC_SLICE_START_PTR(input);
- uint8_t *start_out = GRPC_SLICE_START_PTR(output);
+ uint8_t* in = GRPC_SLICE_START_PTR(input);
+ uint8_t* start_out = GRPC_SLICE_START_PTR(output);
huff_out out;
size_t i;
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 02fc53122d..034e6ed8ca 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -99,94 +99,94 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
#endif
/* forward declarations of various callbacks that we'll build closures around */
-static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
+static void write_action_begin_locked(grpc_exec_ctx* exec_ctx, void* t,
+ grpc_error* error);
+static void write_action(grpc_exec_ctx* exec_ctx, void* t, grpc_error* error);
+static void write_action_end_locked(grpc_exec_ctx* exec_ctx, void* t,
+ grpc_error* error);
-static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
+static void read_action_locked(grpc_exec_ctx* exec_ctx, void* t,
+ grpc_error* error);
-static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
- grpc_error *error);
+static void complete_fetch_locked(grpc_exec_ctx* exec_ctx, void* gs,
+ grpc_error* error);
/** Set a transport level setting, and push it to our peer */
-static void queue_setting_update(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static void queue_setting_update(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_chttp2_setting_id id, uint32_t value);
-static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_error *error);
+static void close_from_api(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_error* error);
/** Start new streams that have been created if we can */
-static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
+static void maybe_start_some_streams(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
-static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static void connectivity_state_set(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_connectivity_state state,
- grpc_error *error, const char *reason);
+ grpc_error* error, const char* reason);
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
- void *byte_stream,
- grpc_error *error_ignored);
+static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
+ void* byte_stream,
+ grpc_error* error_ignored);
static void incoming_byte_stream_publish_error(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error);
-static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_incoming_byte_stream *bs);
-
-static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
- grpc_error *error);
-
-static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-
-static void close_transport_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_error *error);
-static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error);
-
-static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error);
-static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error);
-static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx *exec_ctx,
- void *tp, grpc_error *error);
-
-static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error);
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_closure *on_initiate,
- grpc_closure *on_complete);
-static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error);
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_error* error);
+static void incoming_byte_stream_unref(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_incoming_byte_stream* bs);
+
+static void benign_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* t,
+ grpc_error* error);
+static void destructive_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* t,
+ grpc_error* error);
+
+static void post_benign_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static void post_destructive_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+
+static void close_transport_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, grpc_error* error);
+static void end_all_the_calls(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error);
+
+static void schedule_bdp_ping_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static void start_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error);
+static void finish_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error);
+static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx* exec_ctx,
+ void* tp, grpc_error* error);
+
+static void cancel_pings(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error);
+static void send_ping_locked(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_closure* on_initiate,
+ grpc_closure* on_complete);
+static void retry_initiate_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error);
/** keepalive-relevant functions */
-static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
-
-static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void init_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void start_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void finish_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+static void keepalive_watchdog_fired_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
+
+static void reset_byte_stream(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
/*******************************************************************************
* CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/
-static void destruct_transport(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static void destruct_transport(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
size_t i;
grpc_endpoint_destroy(exec_ctx, t->ep);
@@ -216,7 +216,7 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
while (t->write_cb_pool) {
- grpc_chttp2_write_cb *next = t->write_cb_pool->next;
+ grpc_chttp2_write_cb* next = t->write_cb_pool->next;
gpr_free(t->write_cb_pool);
t->write_cb_pool = next;
}
@@ -230,9 +230,9 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
}
#ifndef NDEBUG
-void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
+void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
@@ -242,8 +242,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
destruct_transport(exec_ctx, t);
}
-void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line) {
+void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]",
@@ -252,20 +252,20 @@ void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
gpr_ref(&t->refs);
}
#else
-void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
if (!gpr_unref(&t->refs)) return;
destruct_transport(exec_ctx, t);
}
-void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
+void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
#endif
-static const grpc_transport_vtable *get_vtable(void);
+static const grpc_transport_vtable* get_vtable(void);
-static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- const grpc_channel_args *channel_args,
- grpc_endpoint *ep, bool is_client) {
+static void init_transport(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ const grpc_channel_args* channel_args,
+ grpc_endpoint* ep, bool is_client) {
size_t i;
int j;
@@ -494,7 +494,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
} else {
static const struct {
- const char *channel_arg_name;
+ const char* channel_arg_name;
grpc_chttp2_setting_id setting_id;
grpc_integer_options integer_options;
bool availability[2] /* server, client */;
@@ -580,9 +580,9 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
post_benign_reclaimer(exec_ctx, t);
}
-static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+static void destroy_transport_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
@@ -592,17 +592,17 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
-static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
GRPC_CLOSURE_SCHED(exec_ctx,
GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
}
-static void close_transport_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_error *error) {
+static void close_transport_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_error* error) {
end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
cancel_pings(exec_ctx, t, GRPC_ERROR_REF(error));
if (t->closed_with_error == GRPC_ERROR_NONE) {
@@ -645,7 +645,7 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
}
/* flush writable stream list to avoid dangling references */
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close");
}
@@ -656,28 +656,28 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
}
#ifndef NDEBUG
-void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) {
+void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason) {
grpc_stream_ref(s->refcount, reason);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
- const char *reason) {
+void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s,
+ const char* reason) {
grpc_stream_unref(exec_ctx, s->refcount, reason);
}
#else
-void grpc_chttp2_stream_ref(grpc_chttp2_stream *s) {
+void grpc_chttp2_stream_ref(grpc_chttp2_stream* s) {
grpc_stream_ref(s->refcount);
}
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) {
+void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s) {
grpc_stream_unref(exec_ctx, s->refcount);
}
#endif
-static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena) {
+static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena) {
GPR_TIMER_BEGIN("init_stream", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
s->t = t;
s->refcount = refcount;
@@ -717,10 +717,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
return 0;
}
-static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
- grpc_error *error) {
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
- grpc_chttp2_transport *t = s->t;
+static void destroy_stream_locked(grpc_exec_ctx* exec_ctx, void* sp,
+ grpc_error* error) {
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)sp;
+ grpc_chttp2_transport* t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@@ -771,12 +771,12 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_CLOSURE_SCHED(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
}
-static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_closure *then_schedule_closure) {
+static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_closure* then_schedule_closure) {
GPR_TIMER_BEGIN("destroy_stream", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
if (s->stream_compression_ctx != NULL) {
grpc_stream_compression_context_destroy(s->stream_compression_ctx);
@@ -789,29 +789,30 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = then_schedule_closure;
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
- grpc_combiner_scheduler(t->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
+ grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
}
-grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+grpc_chttp2_stream* grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport* t,
uint32_t id) {
- return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
+ return (grpc_chttp2_stream*)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
-grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
uint32_t id) {
if (t->channel_callback.accept_stream == NULL) {
return NULL;
}
- grpc_chttp2_stream *accepting;
+ grpc_chttp2_stream* accepting;
GPR_ASSERT(t->accepting_stream == NULL);
t->accepting_stream = &accepting;
t->channel_callback.accept_stream(exec_ctx,
t->channel_callback.accept_stream_user_data,
- &t->base, (void *)(uintptr_t)id);
+ &t->base, (void*)(uintptr_t)id);
t->accepting_stream = NULL;
return accepting;
}
@@ -820,7 +821,7 @@ grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
* OUTPUT PROCESSING
*/
-static const char *write_state_name(grpc_chttp2_write_state st) {
+static const char* write_state_name(grpc_chttp2_write_state st) {
switch (st) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
return "IDLE";
@@ -832,8 +833,8 @@ static const char *write_state_name(grpc_chttp2_write_state st) {
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_write_state st, const char *reason) {
+static void set_write_state(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
write_state_name(t->write_state),
@@ -842,7 +843,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
GRPC_CLOSURE_LIST_SCHED(exec_ctx, &t->run_after_write);
if (t->close_transport_on_writes_finished != NULL) {
- grpc_error *err = t->close_transport_on_writes_finished;
+ grpc_error* err = t->close_transport_on_writes_finished;
t->close_transport_on_writes_finished = NULL;
close_transport_locked(exec_ctx, t, err);
}
@@ -850,7 +851,7 @@ static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
static void inc_initiate_write_reason(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_initiate_write_reason reason) {
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_initiate_write_reason reason) {
switch (reason) {
case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx);
@@ -921,8 +922,8 @@ static void inc_initiate_write_reason(
}
}
-void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+void grpc_chttp2_initiate_write(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_chttp2_initiate_write_reason reason) {
GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
@@ -950,16 +951,16 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
-void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
if (t->closed_with_error == GRPC_ERROR_NONE &&
grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
}
-static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
+static grpc_closure_scheduler* write_scheduler(grpc_chttp2_transport* t,
bool early_results_scheduled,
bool partial_write) {
/* if it's not the first write in a batch, always offload to the executor:
@@ -987,7 +988,7 @@ static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
}
#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
-static const char *begin_writing_desc(bool partial, bool inlined) {
+static const char* begin_writing_desc(bool partial, bool inlined) {
switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
case WRITE_STATE_TUPLE_TO_INT(false, false):
return "begin write in background";
@@ -1001,10 +1002,10 @@ static const char *begin_writing_desc(bool partial, bool inlined) {
GPR_UNREACHABLE_CODE(return "bad state tuple");
}
-static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
- grpc_error *error_ignored) {
+static void write_action_begin_locked(grpc_exec_ctx* exec_ctx, void* gt,
+ grpc_error* error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
grpc_chttp2_begin_write_result r;
if (t->closed_with_error != GRPC_ERROR_NONE) {
@@ -1019,18 +1020,20 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
if (!t->is_first_write_in_batch) {
GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
}
- grpc_closure_scheduler *scheduler =
+ grpc_closure_scheduler* scheduler =
write_scheduler(t, r.early_results_scheduled, r.partial);
if (scheduler != grpc_schedule_on_exec_ctx) {
GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
}
set_write_state(
- exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
- : GRPC_CHTTP2_WRITE_STATE_WRITING,
+ exec_ctx, t,
+ r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
+ : GRPC_CHTTP2_WRITE_STATE_WRITING,
begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
- write_action, t, scheduler),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
+ GRPC_ERROR_NONE);
} else {
GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
@@ -1040,8 +1043,8 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
GPR_TIMER_END("write_action_begin_locked", 0);
}
-static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+static void write_action(grpc_exec_ctx* exec_ctx, void* gt, grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
@@ -1050,10 +1053,10 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
GPR_TIMER_END("write_action", 0);
}
-static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void write_action_end_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -1098,10 +1101,10 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
// Dirties an HTTP2 setting to be sent out next time a writing path occurs.
// If the change needs to occur immediately, manually initiate a write.
-static void queue_setting_update(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static void queue_setting_update(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_chttp2_setting_id id, uint32_t value) {
- const grpc_chttp2_setting_parameters *sp =
+ const grpc_chttp2_setting_parameters* sp =
&grpc_chttp2_settings_parameters[id];
uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value);
if (use_value != value) {
@@ -1114,8 +1117,8 @@ static void queue_setting_update(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text) {
// GRPC_CHTTP2_IF_TRACING(
@@ -1151,9 +1154,9 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
"got_goaway");
}
-static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_chttp2_stream *s;
+static void maybe_start_some_streams(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_chttp2_stream* s;
/* start streams where we have free grpc_chttp2_stream ids and free
* concurrency */
while (t->next_stream_id <= MAX_CLIENT_STREAM_ID &&
@@ -1204,24 +1207,24 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
bits being used for flags defined above) */
#define CLOSURE_BARRIER_FIRST_REF_BIT (1 << 16)
-static grpc_closure *add_closure_barrier(grpc_closure *closure) {
+static grpc_closure* add_closure_barrier(grpc_closure* closure) {
closure->next_data.scratch += CLOSURE_BARRIER_FIRST_REF_BIT;
return closure;
}
-static void null_then_run_closure(grpc_exec_ctx *exec_ctx,
- grpc_closure **closure, grpc_error *error) {
- grpc_closure *c = *closure;
+static void null_then_run_closure(grpc_exec_ctx* exec_ctx,
+ grpc_closure** closure, grpc_error* error) {
+ grpc_closure* c = *closure;
*closure = NULL;
GRPC_CLOSURE_RUN(exec_ctx, c, error);
}
-void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_closure **pclosure,
- grpc_error *error, const char *desc) {
- grpc_closure *closure = *pclosure;
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
+ grpc_closure** pclosure,
+ grpc_error* error, const char* desc) {
+ grpc_closure* closure = *pclosure;
*pclosure = NULL;
if (closure == NULL) {
GRPC_ERROR_UNREF(error);
@@ -1229,7 +1232,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
- const char *errstr = grpc_error_string(error);
+ const char* errstr = grpc_error_string(error);
gpr_log(
GPR_DEBUG,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
@@ -1265,7 +1268,7 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
}
-static bool contains_non_ok_status(grpc_metadata_batch *batch) {
+static bool contains_non_ok_status(grpc_metadata_batch* batch) {
if (batch->idx.named.grpc_status != NULL) {
return !grpc_mdelem_eq(batch->idx.named.grpc_status->md,
GRPC_MDELEM_GRPC_STATUS_0);
@@ -1273,9 +1276,9 @@ static bool contains_non_ok_status(grpc_metadata_batch *batch) {
return false;
}
-static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
@@ -1284,18 +1287,18 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
}
}
-static void add_fetched_slice_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+static void add_fetched_slice_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
s->fetched_send_message_length +=
(uint32_t)GRPC_SLICE_LENGTH(s->fetching_slice);
grpc_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
maybe_become_writable_due_to_send_msg(exec_ctx, t, s);
}
-static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+static void continue_fetching_send_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
for (;;) {
if (s->fetching_send_message == NULL) {
/* Stream was cancelled before message fetch completed */
@@ -1310,16 +1313,16 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE,
"fetching_send_message_finished");
} else {
- grpc_chttp2_write_cb *cb = t->write_cb_pool;
+ grpc_chttp2_write_cb* cb = t->write_cb_pool;
if (cb == NULL) {
- cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
+ cb = (grpc_chttp2_write_cb*)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
- grpc_chttp2_write_cb **list =
+ grpc_chttp2_write_cb** list =
s->fetching_send_message->flags & GRPC_WRITE_THROUGH
? &s->on_write_finished_cbs
: &s->on_flow_controlled_cbs;
@@ -1330,7 +1333,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
return; /* early out */
} else if (grpc_byte_stream_next(exec_ctx, s->fetching_send_message,
UINT32_MAX, &s->complete_fetch_locked)) {
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, s->fetching_send_message, &s->fetching_slice);
if (error != GRPC_ERROR_NONE) {
grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message);
@@ -1342,10 +1345,10 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
- grpc_error *error) {
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
- grpc_chttp2_transport *t = s->t;
+static void complete_fetch_locked(grpc_exec_ctx* exec_ctx, void* gs,
+ grpc_error* error) {
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
+ grpc_chttp2_transport* t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
&s->fetching_slice);
@@ -1360,14 +1363,14 @@ static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
}
}
-static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
-static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
+static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
bool is_client, bool is_initial) {
- for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL;
+ for (grpc_linked_mdelem* md = md_batch->list.head; md != NULL;
md = md->next) {
- char *key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
- char *value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md));
+ char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
+ char* value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md));
gpr_log(GPR_INFO, "HTTP:%d:%s:%s: %s: %s", id, is_initial ? "HDR" : "TRL",
is_client ? "CLI" : "SVR", key, value);
gpr_free(key);
@@ -1375,20 +1378,20 @@ static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
}
}
-static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
- grpc_error *error_ignored) {
+static void perform_stream_op_locked(grpc_exec_ctx* exec_ctx, void* stream_op,
+ grpc_error* error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
- grpc_transport_stream_op_batch *op =
- (grpc_transport_stream_op_batch *)stream_op;
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
- grpc_transport_stream_op_batch_payload *op_payload = op->payload;
- grpc_chttp2_transport *t = s->t;
+ grpc_transport_stream_op_batch* op =
+ (grpc_transport_stream_op_batch*)stream_op;
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)op->handler_private.extra_arg;
+ grpc_transport_stream_op_batch_payload* op_payload = op->payload;
+ grpc_chttp2_transport* t = s->t;
GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx);
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *str = grpc_transport_stream_op_batch_string(op);
+ char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete);
gpr_free(str);
@@ -1402,7 +1405,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
}
- grpc_closure *on_complete = op->on_complete;
+ grpc_closure* on_complete = op->on_complete;
if (on_complete == NULL) {
on_complete =
GRPC_CLOSURE_CREATE(do_nothing, NULL, grpc_schedule_on_exec_ctx);
@@ -1530,7 +1533,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
"fetching_send_message_finished");
} else {
GPR_ASSERT(s->fetching_send_message == NULL);
- uint8_t *frame_hdr = grpc_slice_buffer_tiny_add(
+ uint8_t* frame_hdr = grpc_slice_buffer_tiny_add(
&s->flow_controlled_buffer, GRPC_HEADER_SIZE_IN_BYTES);
uint32_t flags = op_payload->send_message.send_message->flags;
frame_hdr[0] = (flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0;
@@ -1657,12 +1660,12 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "perform_stream_op");
}
-static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_transport_stream_op_batch *op) {
+static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_transport_stream_op_batch* op) {
GPR_TIMER_BEGIN("perform_stream_op", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
if (!t->is_client) {
if (op->send_initial_metadata) {
@@ -1678,7 +1681,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *str = grpc_transport_stream_op_batch_string(op);
+ char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str);
gpr_free(str);
}
@@ -1693,11 +1696,11 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GPR_TIMER_END("perform_stream_op", 0);
}
-static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
+static void cancel_pings(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error) {
   /* call back remaining pings: they're not allowed to call into the transport,
      and they may hold resources that need to be freed */
- grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ grpc_chttp2_ping_queue* pq = &t->ping_queue;
GPR_ASSERT(error != GRPC_ERROR_NONE);
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
@@ -1706,24 +1709,24 @@ static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_closure *on_initiate, grpc_closure *on_ack) {
+static void send_ping_locked(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_closure* on_initiate, grpc_closure* on_ack) {
if (t->closed_with_error != GRPC_ERROR_NONE) {
GRPC_CLOSURE_SCHED(exec_ctx, on_initiate,
GRPC_ERROR_REF(t->closed_with_error));
GRPC_CLOSURE_SCHED(exec_ctx, on_ack, GRPC_ERROR_REF(t->closed_with_error));
return;
}
- grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ grpc_chttp2_ping_queue* pq = &t->ping_queue;
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate,
GRPC_ERROR_NONE);
grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack,
GRPC_ERROR_NONE);
}
-static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+static void retry_initiate_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
t->ping_state.is_delayed_ping_timer_set = false;
if (error == GRPC_ERROR_NONE) {
grpc_chttp2_initiate_write(exec_ctx, t,
@@ -1731,11 +1734,11 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
}
}
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+void grpc_chttp2_ack_ping(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
uint64_t id) {
- grpc_chttp2_ping_queue *pq = &t->ping_queue;
+ grpc_chttp2_ping_queue* pq = &t->ping_queue;
if (pq->inflight_id != id) {
- char *from = grpc_endpoint_get_peer(t->ep);
+ char* from = grpc_endpoint_get_peer(t->ep);
gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id);
gpr_free(from);
return;
@@ -1747,8 +1750,8 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
-static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
+static void send_goaway(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
grpc_slice slice;
@@ -1761,8 +1764,8 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+void grpc_chttp2_add_ping_strike(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
t->ping_recv_state.ping_strikes++;
if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes &&
t->ping_policy.max_ping_strikes != 0) {
@@ -1772,19 +1775,20 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
     /* The transport will be closed after the write is done */
close_transport_locked(
- exec_ctx, t, grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+ exec_ctx, t,
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
}
-static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
- void *stream_op,
- grpc_error *error_ignored) {
- grpc_transport_op *op = (grpc_transport_op *)stream_op;
- grpc_chttp2_transport *t =
- (grpc_chttp2_transport *)op->handler_private.extra_arg;
- grpc_error *close_transport = op->disconnect_with_error;
+static void perform_transport_op_locked(grpc_exec_ctx* exec_ctx,
+ void* stream_op,
+ grpc_error* error_ignored) {
+ grpc_transport_op* op = (grpc_transport_op*)stream_op;
+ grpc_chttp2_transport* t =
+ (grpc_chttp2_transport*)op->handler_private.extra_arg;
+ grpc_error* close_transport = op->disconnect_with_error;
if (op->goaway_error) {
send_goaway(exec_ctx, t, op->goaway_error);
@@ -1825,10 +1829,10 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
}
-static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_transport_op *op) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
- char *msg = grpc_transport_op_string(op);
+static void perform_transport_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_transport_op* op) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
+ char* msg = grpc_transport_op_string(op);
gpr_free(msg);
op->handler_private.extra_arg = gt;
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
@@ -1843,9 +1847,9 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
* INPUT PROCESSING - GENERAL
*/
-void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
if (s->recv_initial_metadata_ready != NULL &&
s->published_metadata[0] != GRPC_METADATA_NOT_PUBLISHED) {
if (s->seen_error) {
@@ -1862,10 +1866,10 @@ void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
- grpc_error *error = GRPC_ERROR_NONE;
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
+ grpc_error* error = GRPC_ERROR_NONE;
if (s->recv_message_ready != NULL) {
*s->recv_message = NULL;
if (s->final_metadata_requested && s->seen_error) {
@@ -1946,9 +1950,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
if (s->recv_trailing_metadata_finished != NULL && s->read_closed &&
s->write_closed) {
@@ -2000,10 +2004,10 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
}
}
-static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- uint32_t id, grpc_error *error) {
- grpc_chttp2_stream *s =
- (grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
+static void remove_stream(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ uint32_t id, grpc_error* error) {
+ grpc_chttp2_stream* s =
+ (grpc_chttp2_stream*)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@@ -2011,7 +2015,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
if (s->pending_byte_stream) {
if (s->on_next != NULL) {
- grpc_chttp2_incoming_byte_stream *bs = s->data_parser.parsing_frame;
+ grpc_chttp2_incoming_byte_stream* bs = s->data_parser.parsing_frame;
if (error == GRPC_ERROR_NONE) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
}
@@ -2042,9 +2046,9 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
maybe_start_some_streams(exec_ctx, t);
}
-void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_error *due_to_error) {
+void grpc_chttp2_cancel_stream(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+ grpc_error* due_to_error) {
if (!t->is_client && !s->sent_trailing_metadata &&
grpc_error_has_clear_grpc_status(due_to_error)) {
close_from_api(exec_ctx, t, s, due_to_error);
@@ -2069,8 +2073,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error);
}
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_error *error) {
+void grpc_chttp2_fake_status(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_error* error) {
grpc_status_code status;
grpc_slice slice;
grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, NULL);
@@ -2109,7 +2113,7 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
+static void add_error(grpc_error* error, grpc_error** refs, size_t* nrefs) {
if (error == GRPC_ERROR_NONE) return;
for (size_t i = 0; i < *nrefs; i++) {
if (error == refs[i]) {
@@ -2120,14 +2124,14 @@ static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) {
++*nrefs;
}
-static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
- const char *master_error_msg) {
- grpc_error *refs[3];
+static grpc_error* removal_error(grpc_error* extra_error, grpc_chttp2_stream* s,
+ const char* master_error_msg) {
+ grpc_error* refs[3];
size_t nrefs = 0;
add_error(s->read_closed_error, refs, &nrefs);
add_error(s->write_closed_error, refs, &nrefs);
add_error(extra_error, refs, &nrefs);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (nrefs > 0) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(master_error_msg,
refs, nrefs);
@@ -2136,11 +2140,11 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
-static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
- grpc_error *error) {
+static void flush_write_list(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_chttp2_write_cb** list,
+ grpc_error* error) {
while (*list) {
- grpc_chttp2_write_cb *cb = *list;
+ grpc_chttp2_write_cb* cb = *list;
*list = cb->next;
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
GRPC_ERROR_REF(error),
@@ -2151,9 +2155,9 @@ static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ERROR_UNREF(error);
}
-void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_error *error) {
+void grpc_chttp2_fail_pending_writes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_error* error) {
error =
removal_error(error, s, "Pending writes failed due to stream closure");
s->send_initial_metadata = NULL;
@@ -2175,10 +2179,10 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
-void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, int close_reads,
- int close_writes, grpc_error *error) {
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, int close_reads,
+ int close_writes, grpc_error* error) {
if (s->read_closed && s->write_closed) {
/* already closed */
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
@@ -2199,7 +2203,7 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
}
if (s->read_closed && s->write_closed) {
became_closed = true;
- grpc_error *overall_error =
+ grpc_error* overall_error =
removal_error(GRPC_ERROR_REF(error), s, "Stream removed");
if (s->id != 0) {
remove_stream(exec_ctx, t, s->id, GRPC_ERROR_REF(overall_error));
@@ -2227,14 +2231,14 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_error *error) {
+static void close_from_api(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_error* error) {
grpc_slice hdr;
grpc_slice status_hdr;
grpc_slice http_status_hdr;
grpc_slice content_type_hdr;
grpc_slice message_pfx;
- uint8_t *p;
+ uint8_t* p;
uint32_t len = 0;
grpc_status_code grpc_status;
grpc_slice slice;
@@ -2387,20 +2391,20 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
typedef struct {
- grpc_exec_ctx *exec_ctx;
- grpc_error *error;
- grpc_chttp2_transport *t;
+ grpc_exec_ctx* exec_ctx;
+ grpc_error* error;
+ grpc_chttp2_transport* t;
} cancel_stream_cb_args;
-static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
- cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
+static void cancel_stream_cb(void* user_data, uint32_t key, void* stream) {
+ cancel_stream_cb_args* args = (cancel_stream_cb_args*)user_data;
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
-static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
+static void end_all_the_calls(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error) {
cancel_stream_cb_args args = {exec_ctx, error, t};
grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args);
GRPC_ERROR_UNREF(error);
@@ -2411,7 +2415,7 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
*/
template <class F>
-static void WithUrgency(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+static void WithUrgency(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
grpc_core::chttp2::FlowControlAction::Urgency urgency,
grpc_chttp2_initiate_write_reason reason, F action) {
switch (urgency) {
@@ -2427,8 +2431,8 @@ static void WithUrgency(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
void grpc_chttp2_act_on_flowctl_action(
- grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s) {
+ grpc_exec_ctx* exec_ctx, const grpc_core::chttp2::FlowControlAction& action,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s) {
WithUrgency(
exec_ctx, t, action.send_stream_update(),
GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
@@ -2450,17 +2454,17 @@ void grpc_chttp2_act_on_flowctl_action(
});
}
-static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static grpc_error* try_http_parsing(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
grpc_http_parser parser;
size_t i = 0;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
- grpc_error *parse_error = GRPC_ERROR_NONE;
+ grpc_error* parse_error = GRPC_ERROR_NONE;
for (; i < t->read_buffer.count && parse_error == GRPC_ERROR_NONE; i++) {
parse_error =
grpc_http_parser_parse(&parser, t->read_buffer.slices[i], NULL);
@@ -2480,27 +2484,27 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
return error;
}
-static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
+static void read_action_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
GRPC_ERROR_REF(error);
- grpc_error *err = error;
+ grpc_error* err = error;
if (err != GRPC_ERROR_NONE) {
err = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Endpoint read failed", &err, 1),
GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
t->write_state);
}
- GPR_SWAP(grpc_error *, err, error);
+ GPR_SWAP(grpc_error*, err, error);
GRPC_ERROR_UNREF(err);
if (t->closed_with_error == GRPC_ERROR_NONE) {
GPR_TIMER_BEGIN("reading_action.parse", 0);
size_t i = 0;
- grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
+ grpc_error* errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
t->flow_control->bdp_estimator()->AddIncomingBytes(
@@ -2522,7 +2526,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_BEGIN("post_parse_locked", 0);
if (t->initial_window_update != 0) {
if (t->initial_window_update > 0) {
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
grpc_chttp2_initiate_write(
@@ -2569,16 +2573,16 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
// t is reffed prior to calling the first time, and once the callback chain
// that kicks off finishes, it's unreffed
-static void schedule_bdp_ping_locked(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static void schedule_bdp_ping_locked(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
t->flow_control->bdp_estimator()->SchedulePing();
send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
&t->finish_bdp_ping_locked);
}
-static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+static void start_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
grpc_error_string(error));
@@ -2590,9 +2594,9 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
t->flow_control->bdp_estimator()->StartPing();
}
-static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+static void finish_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
grpc_error_string(error));
@@ -2611,9 +2615,9 @@ static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
&t->next_bdp_ping_timer_expired_locked);
}
-static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx *exec_ctx,
- void *tp, grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
+static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx* exec_ctx,
+ void* tp, grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
GPR_ASSERT(t->have_next_bdp_ping_timer);
t->have_next_bdp_ping_timer = false;
if (error != GRPC_ERROR_NONE) {
@@ -2623,7 +2627,7 @@ static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx *exec_ctx,
schedule_bdp_ping_locked(exec_ctx, t);
}
-void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
+void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
bool is_client) {
size_t i;
if (args) {
@@ -2681,9 +2685,9 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
}
}
-static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void init_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || t->closed_with_error != GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2712,18 +2716,18 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
}
-static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void start_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
&t->keepalive_watchdog_fired_locked);
}
-static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void finish_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
@@ -2737,9 +2741,9 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
}
-static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void keepalive_watchdog_fired_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@@ -2764,10 +2768,10 @@ static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
* CALLBACK LOOP
*/
-static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static void connectivity_state_set(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_connectivity_state state,
- grpc_error *error, const char *reason) {
+ grpc_error* error, const char* reason) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
grpc_connectivity_state_set(exec_ctx, &t->channel_callback.state_tracker,
@@ -2778,15 +2782,15 @@ static void connectivity_state_set(grpc_exec_ctx *exec_ctx,
* POLLSET STUFF
*/
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset *pollset) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+static void set_pollset(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_pollset* pollset) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
grpc_endpoint_add_to_pollset(exec_ctx, t->ep, pollset);
}
-static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset_set *pollset_set) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
+static void set_pollset_set(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_pollset_set* pollset_set) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, pollset_set);
}
@@ -2794,9 +2798,9 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
* BYTE STREAM
*/
-static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)arg;
+static void reset_byte_stream(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)arg;
s->pending_byte_stream = false;
if (error == GRPC_ERROR_NONE) {
@@ -2813,20 +2817,20 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_incoming_byte_stream *bs) {
+static void incoming_byte_stream_unref(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_incoming_byte_stream* bs) {
if (gpr_unref(&bs->refs)) {
gpr_free(bs);
}
}
-static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
- void *argp,
- grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)argp;
- grpc_chttp2_transport *t = bs->transport;
- grpc_chttp2_stream *s = bs->stream;
+static void incoming_byte_stream_next_locked(grpc_exec_ctx* exec_ctx,
+ void* argp,
+ grpc_error* error_ignored) {
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)argp;
+ grpc_chttp2_transport* t = bs->transport;
+ grpc_chttp2_stream* s = bs->stream;
size_t cur_length = s->frame_storage.length;
if (!s->read_closed) {
@@ -2868,14 +2872,14 @@ static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
incoming_byte_stream_unref(exec_ctx, bs);
}
-static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
+static bool incoming_byte_stream_next(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
size_t max_size_hint,
- grpc_closure *on_complete) {
+ grpc_closure* on_complete) {
GPR_TIMER_BEGIN("incoming_byte_stream_next", 0);
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)byte_stream;
- grpc_chttp2_stream *s = bs->stream;
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)byte_stream;
+ grpc_chttp2_stream* s = bs->stream;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
GPR_TIMER_END("incoming_byte_stream_next", 0);
return true;
@@ -2894,14 +2898,14 @@ static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_slice *slice) {
+static grpc_error* incoming_byte_stream_pull(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_slice* slice) {
GPR_TIMER_BEGIN("incoming_byte_stream_pull", 0);
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)byte_stream;
- grpc_chttp2_stream *s = bs->stream;
- grpc_error *error;
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)byte_stream;
+ grpc_chttp2_stream* s = bs->stream;
+ grpc_error* error;
if (s->unprocessed_incoming_frames_buffer.length > 0) {
if (!s->unprocessed_incoming_frames_decompressed) {
@@ -2945,27 +2949,28 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
- void *byte_stream,
- grpc_error *error_ignored);
+static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
+ void* byte_stream,
+ grpc_error* error_ignored);
-static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream) {
+static void incoming_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream) {
GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)byte_stream;
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)byte_stream;
GRPC_CLOSURE_SCHED(
- exec_ctx, GRPC_CLOSURE_INIT(
- &bs->destroy_action, incoming_byte_stream_destroy_locked,
- bs, grpc_combiner_scheduler(bs->transport->combiner)),
+ exec_ctx,
+ GRPC_CLOSURE_INIT(&bs->destroy_action,
+ incoming_byte_stream_destroy_locked, bs,
+ grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}
static void incoming_byte_stream_publish_error(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error) {
- grpc_chttp2_stream *s = bs->stream;
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_error* error) {
+ grpc_chttp2_stream* s = bs->stream;
GPR_ASSERT(error != GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
@@ -2976,13 +2981,13 @@ static void incoming_byte_stream_publish_error(
GRPC_ERROR_REF(error));
}
-grpc_error *grpc_chttp2_incoming_byte_stream_push(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_slice slice, grpc_slice *slice_out) {
- grpc_chttp2_stream *s = bs->stream;
+grpc_error* grpc_chttp2_incoming_byte_stream_push(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_slice slice, grpc_slice* slice_out) {
+ grpc_chttp2_stream* s = bs->stream;
if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) {
- grpc_error *error =
+ grpc_error* error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream");
GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
@@ -2997,10 +3002,10 @@ grpc_error *grpc_chttp2_incoming_byte_stream_push(
}
}
-grpc_error *grpc_chttp2_incoming_byte_stream_finished(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, bool reset_on_error) {
- grpc_chttp2_stream *s = bs->stream;
+grpc_error* grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_error* error, bool reset_on_error) {
+ grpc_chttp2_stream* s = bs->stream;
if (error == GRPC_ERROR_NONE) {
if (bs->remaining_bytes != 0) {
@@ -3014,11 +3019,11 @@ grpc_error *grpc_chttp2_incoming_byte_stream_finished(
return error;
}
-static void incoming_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_error *error) {
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)byte_stream;
+static void incoming_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_error* error) {
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)byte_stream;
GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, bs, error, true /* reset_on_error */));
}
@@ -3027,13 +3032,13 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
incoming_byte_stream_next, incoming_byte_stream_pull,
incoming_byte_stream_shutdown, incoming_byte_stream_destroy};
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
- void *byte_stream,
- grpc_error *error_ignored) {
- grpc_chttp2_incoming_byte_stream *bs =
- (grpc_chttp2_incoming_byte_stream *)byte_stream;
- grpc_chttp2_stream *s = bs->stream;
- grpc_chttp2_transport *t = s->t;
+static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
+ void* byte_stream,
+ grpc_error* error_ignored) {
+ grpc_chttp2_incoming_byte_stream* bs =
+ (grpc_chttp2_incoming_byte_stream*)byte_stream;
+ grpc_chttp2_stream* s = bs->stream;
+ grpc_chttp2_transport* t = s->t;
GPR_ASSERT(bs->base.vtable == &grpc_chttp2_incoming_byte_stream_vtable);
incoming_byte_stream_unref(exec_ctx, bs);
@@ -3042,11 +3047,11 @@ static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
}
-grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+grpc_chttp2_incoming_byte_stream* grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t, grpc_chttp2_stream* s,
uint32_t frame_size, uint32_t flags) {
- grpc_chttp2_incoming_byte_stream *incoming_byte_stream =
- (grpc_chttp2_incoming_byte_stream *)gpr_malloc(
+ grpc_chttp2_incoming_byte_stream* incoming_byte_stream =
+ (grpc_chttp2_incoming_byte_stream*)gpr_malloc(
sizeof(*incoming_byte_stream));
incoming_byte_stream->base.length = frame_size;
incoming_byte_stream->remaining_bytes = frame_size;
@@ -3064,8 +3069,8 @@ grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
* RESOURCE QUOTAS
*/
-static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static void post_benign_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
if (!t->benign_reclaimer_registered) {
t->benign_reclaimer_registered = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
@@ -3075,8 +3080,8 @@ static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx,
}
}
-static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static void post_destructive_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
if (!t->destructive_reclaimer_registered) {
t->destructive_reclaimer_registered = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
@@ -3086,9 +3091,9 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
}
}
-static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void benign_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
@@ -3116,14 +3121,14 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
}
-static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
+static void destructive_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
- grpc_chttp2_stream *s =
- (grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
+ grpc_chttp2_stream* s =
+ (grpc_chttp2_stream*)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@@ -3152,7 +3157,7 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
* MONITORING
*/
-const char *grpc_chttp2_initiate_write_reason_string(
+const char* grpc_chttp2_initiate_write_reason_string(
grpc_chttp2_initiate_write_reason reason) {
switch (reason) {
case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
@@ -3199,9 +3204,9 @@ const char *grpc_chttp2_initiate_write_reason_string(
GPR_UNREACHABLE_CODE(return "unknown");
}
-static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx,
- grpc_transport *t) {
- return ((grpc_chttp2_transport *)t)->ep;
+static grpc_endpoint* chttp2_get_endpoint(grpc_exec_ctx* exec_ctx,
+ grpc_transport* t) {
+ return ((grpc_chttp2_transport*)t)->ep;
}
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
@@ -3215,21 +3220,21 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
destroy_transport,
chttp2_get_endpoint};
-static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
+static const grpc_transport_vtable* get_vtable(void) { return &vtable; }
-grpc_transport *grpc_create_chttp2_transport(
- grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
- grpc_endpoint *ep, int is_client) {
- grpc_chttp2_transport *t =
- (grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
+grpc_transport* grpc_create_chttp2_transport(
+ grpc_exec_ctx* exec_ctx, const grpc_channel_args* channel_args,
+ grpc_endpoint* ep, int is_client) {
+ grpc_chttp2_transport* t =
+ (grpc_chttp2_transport*)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}
-void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_slice_buffer *read_buffer) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_slice_buffer* read_buffer) {
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)transport;
GRPC_CHTTP2_REF_TRANSPORT(
t, "reading_action"); /* matches unref inside reading_action */
if (read_buffer != NULL) {
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 321fca4c82..972104f62c 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -35,15 +35,15 @@ extern grpc_tracer_flag grpc_trace_http2_stream_state;
extern grpc_tracer_flag grpc_trace_chttp2_refcount;
#endif
-grpc_transport *grpc_create_chttp2_transport(
- grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
- grpc_endpoint *ep, int is_client);
+grpc_transport* grpc_create_chttp2_transport(
+ grpc_exec_ctx* exec_ctx, const grpc_channel_args* channel_args,
+ grpc_endpoint* ep, int is_client);
/// Takes ownership of \a read_buffer, which (if non-NULL) contains
/// leftover bytes previously read from the endpoint (e.g., by handshakers).
-void grpc_chttp2_transport_start_reading(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_slice_buffer *read_buffer);
+void grpc_chttp2_transport_start_reading(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_slice_buffer* read_buffer);
#ifdef __cplusplus
}
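
The header above declares the transport's two public entry points. A minimal usage sketch follows, assuming the caller already has an exec_ctx, channel args, a connected endpoint, and (optionally) leftover handshake bytes; the helper name and its parameters are illustrative and are not part of this change:

    /* Illustrative only: wires up the two functions declared above.
     * `args`, `endpoint`, and `leftover_bytes` are assumed to be supplied by
     * the caller, e.g. from a completed handshake. */
    static grpc_transport* start_client_transport(
        grpc_exec_ctx* exec_ctx, const grpc_channel_args* args,
        grpc_endpoint* endpoint, grpc_slice_buffer* leftover_bytes) {
      /* Create the HTTP/2 transport over an already-connected endpoint. */
      grpc_transport* t =
          grpc_create_chttp2_transport(exec_ctx, args, endpoint, 1 /* is_client */);
      /* Hand over any bytes read during handshaking; the transport takes
       * ownership of the buffer (NULL is allowed when there are none). */
      grpc_chttp2_transport_start_reading(exec_ctx, t, leftover_bytes);
      return t;
    }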
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc
index 40545bc74b..64f6b3c917 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.cc
+++ b/src/core/ext/transport/chttp2/transport/flow_control.cc
@@ -224,9 +224,9 @@ grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
incoming_frame_size, acked_stream_window, sent_stream_window);
} else {
char* msg;
- gpr_asprintf(&msg, "frame of size %" PRId64
- " overflows local window of %" PRId64,
- incoming_frame_size, acked_stream_window);
+ gpr_asprintf(
+ &msg, "frame of size %" PRId64 " overflows local window of %" PRId64,
+ incoming_frame_size, acked_stream_window);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.cc b/src/core/ext/transport/chttp2/transport/frame_data.cc
index 73aaab1802..7d2c7f5ab9 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_data.cc
@@ -30,14 +30,14 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/transport.h"
-grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) {
+grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser) {
parser->state = GRPC_CHTTP2_DATA_FH_0;
parser->parsing_frame = NULL;
return GRPC_ERROR_NONE;
}
-void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_data_parser *parser) {
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_data_parser* parser) {
if (parser->parsing_frame != NULL) {
GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished(
exec_ctx, parser->parsing_frame,
@@ -46,14 +46,14 @@ void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(parser->error);
}
-grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
+grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
uint8_t flags,
uint32_t stream_id,
- grpc_chttp2_stream *s) {
+ grpc_chttp2_stream* s) {
if (flags & ~GRPC_CHTTP2_DATA_FLAG_END_STREAM) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "unsupported data flags: 0x%02x", flags);
- grpc_error *err =
+ grpc_error* err =
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg),
GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id);
gpr_free(msg);
@@ -69,12 +69,12 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
return GRPC_ERROR_NONE;
}
-void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf,
+void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,
- grpc_transport_one_way_stats *stats,
- grpc_slice_buffer *outbuf) {
+ grpc_transport_one_way_stats* stats,
+ grpc_slice_buffer* outbuf) {
grpc_slice hdr;
- uint8_t *p;
+ uint8_t* p;
static const size_t header_size = 9;
hdr = GRPC_SLICE_MALLOC(header_size);
@@ -97,17 +97,17 @@ void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf,
stats->data_bytes += write_bytes;
}
-grpc_error *grpc_deframe_unprocessed_incoming_frames(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_data_parser *p, grpc_chttp2_stream *s,
- grpc_slice_buffer *slices, grpc_slice *slice_out,
- grpc_byte_stream **stream_out) {
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_chttp2_transport *t = s->t;
+grpc_error* grpc_deframe_unprocessed_incoming_frames(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
+ grpc_slice_buffer* slices, grpc_slice* slice_out,
+ grpc_byte_stream** stream_out) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_chttp2_transport* t = s->t;
while (slices->count > 0) {
- uint8_t *beg = NULL;
- uint8_t *end = NULL;
- uint8_t *cur = NULL;
+ uint8_t* beg = NULL;
+ uint8_t* end = NULL;
+ uint8_t* cur = NULL;
grpc_slice slice = grpc_slice_buffer_take_first(slices);
@@ -115,7 +115,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
end = GRPC_SLICE_END_PTR(slice);
cur = beg;
uint32_t message_flags;
- char *msg;
+ char* msg;
if (cur == end) {
grpc_slice_unref_internal(exec_ctx, slice);
@@ -289,9 +289,9 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames(
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_data_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
if (!s->pending_byte_stream) {
grpc_slice_ref_internal(slice);
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index 81ec5361a3..96f823a0ad 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -49,40 +49,40 @@ typedef struct {
grpc_chttp2_stream_state state;
uint8_t frame_type;
uint32_t frame_size;
- grpc_error *error;
+ grpc_error* error;
bool is_frame_compressed;
- grpc_chttp2_incoming_byte_stream *parsing_frame;
+ grpc_chttp2_incoming_byte_stream* parsing_frame;
} grpc_chttp2_data_parser;
/* initialize per-stream state for data frame parsing */
-grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser);
+grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser);
-void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_data_parser *parser);
+void grpc_chttp2_data_parser_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_data_parser* parser);
/* start processing a new data frame */
-grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
+grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
uint8_t flags,
uint32_t stream_id,
- grpc_chttp2_stream *s);
+ grpc_chttp2_stream* s);
/* handle a slice of a data frame - is_last indicates the last slice of a
frame */
-grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_data_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
-void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf,
+void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,
- grpc_transport_one_way_stats *stats,
- grpc_slice_buffer *outbuf);
+ grpc_transport_one_way_stats* stats,
+ grpc_slice_buffer* outbuf);
-grpc_error *grpc_deframe_unprocessed_incoming_frames(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_data_parser *p, grpc_chttp2_stream *s,
- grpc_slice_buffer *slices, grpc_slice *slice_out,
- grpc_byte_stream **stream_out);
+grpc_error* grpc_deframe_unprocessed_incoming_frames(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
+ grpc_slice_buffer* slices, grpc_slice* slice_out,
+ grpc_byte_stream** stream_out);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.cc b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
index 78ec08e177..6be1d0e0f0 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
@@ -25,42 +25,42 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p) {
+void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser* p) {
p->debug_data = NULL;
}
-void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p) {
+void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser* p) {
gpr_free(p->debug_data);
}
-grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
+grpc_error* grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser* p,
uint32_t length,
uint8_t flags) {
if (length < 8) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "goaway frame too short (%d bytes)", length);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
gpr_free(p->debug_data);
p->debug_length = length - 8;
- p->debug_data = (char *)gpr_malloc(p->debug_length);
+ p->debug_data = (char*)gpr_malloc(p->debug_length);
p->debug_pos = 0;
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
- void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_goaway_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
- uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
- uint8_t *const end = GRPC_SLICE_END_PTR(slice);
- uint8_t *cur = beg;
- grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser;
+ uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t* const end = GRPC_SLICE_END_PTR(slice);
+ uint8_t* cur = beg;
+ grpc_chttp2_goaway_parser* p = (grpc_chttp2_goaway_parser*)parser;
switch (p->state) {
case GRPC_CHTTP2_GOAWAY_LSI0:
@@ -147,9 +147,9 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
- grpc_slice_buffer *slice_buffer) {
+ grpc_slice_buffer* slice_buffer) {
grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4);
- uint8_t *p = GRPC_SLICE_START_PTR(header);
+ uint8_t* p = GRPC_SLICE_START_PTR(header);
uint32_t frame_length;
GPR_ASSERT(GRPC_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
frame_length = 4 + 4 + (uint32_t)GRPC_SLICE_LENGTH(debug_data);
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index 7b3aa45f3f..9790d0b08d 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -45,24 +45,24 @@ typedef struct {
grpc_chttp2_goaway_parse_state state;
uint32_t last_stream_id;
uint32_t error_code;
- char *debug_data;
+ char* debug_data;
uint32_t debug_length;
uint32_t debug_pos;
} grpc_chttp2_goaway_parser;
-void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
-void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
-grpc_error *grpc_chttp2_goaway_parser_begin_frame(
- grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
- void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser* p);
+void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser* p);
+grpc_error* grpc_chttp2_goaway_parser_begin_frame(
+ grpc_chttp2_goaway_parser* parser, uint32_t length, uint8_t flags);
+grpc_error* grpc_chttp2_goaway_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
- grpc_slice_buffer *slice_buffer);
+ grpc_slice_buffer* slice_buffer);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.cc b/src/core/ext/transport/chttp2/transport/frame_ping.cc
index 1cfa883ee1..d0feb51922 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc
@@ -29,7 +29,7 @@ static bool g_disable_ping_ack = false;
grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes) {
grpc_slice slice = GRPC_SLICE_MALLOC(9 + 8);
- uint8_t *p = GRPC_SLICE_START_PTR(slice);
+ uint8_t* p = GRPC_SLICE_START_PTR(slice);
*p++ = 0;
*p++ = 0;
@@ -52,13 +52,13 @@ grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes) {
return slice;
}
-grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
+grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
uint32_t length,
uint8_t flags) {
if (flags & 0xfe || length != 8) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "invalid ping: length=%d, flags=%02x", length, flags);
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
@@ -68,14 +68,14 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_ping_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
- uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
- uint8_t *const end = GRPC_SLICE_END_PTR(slice);
- uint8_t *cur = beg;
- grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser;
+ uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t* const end = GRPC_SLICE_END_PTR(slice);
+ uint8_t* cur = beg;
+ grpc_chttp2_ping_parser* p = (grpc_chttp2_ping_parser*)parser;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
@@ -112,7 +112,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (!g_disable_ping_ack) {
if (t->ping_ack_count == t->ping_ack_capacity) {
t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3);
- t->ping_acks = (uint64_t *)gpr_realloc(
+ t->ping_acks = (uint64_t*)gpr_realloc(
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index ffc2f0cf2f..034aad002e 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -35,11 +35,11 @@ typedef struct {
grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes);
-grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
+grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_ping_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
/* Test-only function for disabling ping ack */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
index 0133b6efa2..05a7f056a4 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@@ -27,11 +27,11 @@
#include "src/core/lib/transport/http2_errors.h"
grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
- grpc_transport_one_way_stats *stats) {
+ grpc_transport_one_way_stats* stats) {
static const size_t frame_size = 13;
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
stats->framing_bytes += frame_size;
- uint8_t *p = GRPC_SLICE_START_PTR(slice);
+ uint8_t* p = GRPC_SLICE_START_PTR(slice);
// Frame size.
*p++ = 0;
@@ -55,13 +55,13 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code,
return slice;
}
-grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
- grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags) {
+grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
+ grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags) {
if (length != 4) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "invalid rst_stream: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -69,15 +69,15 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
- void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
- uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
- uint8_t *const end = GRPC_SLICE_END_PTR(slice);
- uint8_t *cur = beg;
- grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser;
+ uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t* const end = GRPC_SLICE_END_PTR(slice);
+ uint8_t* cur = beg;
+ grpc_chttp2_rst_stream_parser* p = (grpc_chttp2_rst_stream_parser*)parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;
@@ -92,9 +92,9 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
(((uint32_t)p->reason_bytes[1]) << 16) |
(((uint32_t)p->reason_bytes[2]) << 8) |
(((uint32_t)p->reason_bytes[3]));
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (reason != GRPC_HTTP2_NO_ERROR || s->metadata_buffer[1].size == 0) {
- char *message;
+ char* message;
gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason);
error = grpc_error_set_int(
grpc_error_set_str(GRPC_ERROR_CREATE_FROM_STATIC_STRING("RST_STREAM"),
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index 102ffdb3f3..3f5417e993 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -34,14 +34,14 @@ typedef struct {
} grpc_chttp2_rst_stream_parser;
grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code,
- grpc_transport_one_way_stats *stats);
-
-grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
- grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
- void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+ grpc_transport_one_way_stats* stats);
+
+grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
+ grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags);
+grpc_error* grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
#ifdef __cplusplus
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.cc b/src/core/ext/transport/chttp2/transport/frame_settings.cc
index db0245bb57..d33da721a5 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc
@@ -31,7 +31,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/transport/http2_errors.h"
-static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
+static uint8_t* fill_header(uint8_t* out, uint32_t length, uint8_t flags) {
*out++ = (uint8_t)(length >> 16);
*out++ = (uint8_t)(length >> 8);
*out++ = (uint8_t)(length);
@@ -44,13 +44,13 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) {
return out;
}
-grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings,
- const uint32_t *new_settings,
+grpc_slice grpc_chttp2_settings_create(uint32_t* old_settings,
+ const uint32_t* new_settings,
uint32_t force_mask, size_t count) {
size_t i;
uint32_t n = 0;
grpc_slice output;
- uint8_t *p;
+ uint8_t* p;
for (i = 0; i < count; i++) {
n += (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0);
@@ -82,9 +82,9 @@ grpc_slice grpc_chttp2_settings_ack_create(void) {
return output;
}
-grpc_error *grpc_chttp2_settings_parser_begin_frame(
- grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
- uint32_t *settings) {
+grpc_error* grpc_chttp2_settings_parser_begin_frame(
+ grpc_chttp2_settings_parser* parser, uint32_t length, uint8_t flags,
+ uint32_t* settings) {
parser->target_settings = settings;
memcpy(parser->incoming_settings, settings,
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
@@ -108,14 +108,14 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
}
}
-grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_settings_parser_parse(grpc_exec_ctx* exec_ctx, void* p,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
- grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p;
- const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
- const uint8_t *end = GRPC_SLICE_END_PTR(slice);
- char *msg;
+ grpc_chttp2_settings_parser* parser = (grpc_chttp2_settings_parser*)p;
+ const uint8_t* cur = GRPC_SLICE_START_PTR(slice);
+ const uint8_t* end = GRPC_SLICE_END_PTR(slice);
+ char* msg;
grpc_chttp2_setting_id id;
if (parser->is_ack) {
@@ -180,7 +180,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
cur++;
if (grpc_wire_id_to_setting_id(parser->id, &id)) {
- const grpc_chttp2_setting_parameters *sp =
+ const grpc_chttp2_setting_parameters* sp =
&grpc_chttp2_settings_parameters[id];
if (parser->value < sp->min_value || parser->value > sp->max_value) {
switch (sp->invalid_value_behavior) {
@@ -195,7 +195,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index 3364da1520..18bde92815 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -40,7 +40,7 @@ typedef enum {
typedef struct {
grpc_chttp2_settings_parse_state state;
- uint32_t *target_settings;
+ uint32_t* target_settings;
uint8_t is_ack;
uint16_t id;
uint32_t value;
@@ -48,18 +48,18 @@ typedef struct {
} grpc_chttp2_settings_parser;
/* Create a settings frame by diffing old & new, and updating old to be new */
-grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *newval,
+grpc_slice grpc_chttp2_settings_create(uint32_t* old, const uint32_t* newval,
uint32_t force_mask, size_t count);
/* Create an ack settings frame */
grpc_slice grpc_chttp2_settings_ack_create(void);
-grpc_error *grpc_chttp2_settings_parser_begin_frame(
- grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags,
- uint32_t *settings);
-grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx,
- void *parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_settings_parser_begin_frame(
+ grpc_chttp2_settings_parser* parser, uint32_t length, uint8_t flags,
+ uint32_t* settings);
+grpc_error* grpc_chttp2_settings_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
#ifdef __cplusplus
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.cc b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
index 15eaf59285..62a4587ac6 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
@@ -24,11 +24,11 @@
#include <grpc/support/string_util.h>
grpc_slice grpc_chttp2_window_update_create(
- uint32_t id, uint32_t window_update, grpc_transport_one_way_stats *stats) {
+ uint32_t id, uint32_t window_update, grpc_transport_one_way_stats* stats) {
static const size_t frame_size = 13;
grpc_slice slice = GRPC_SLICE_MALLOC(frame_size);
stats->header_bytes += frame_size;
- uint8_t *p = GRPC_SLICE_START_PTR(slice);
+ uint8_t* p = GRPC_SLICE_START_PTR(slice);
GPR_ASSERT(window_update);
@@ -49,13 +49,13 @@ grpc_slice grpc_chttp2_window_update_create(
return slice;
}
-grpc_error *grpc_chttp2_window_update_parser_begin_frame(
- grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags) {
+grpc_error* grpc_chttp2_window_update_parser_begin_frame(
+ grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags) {
if (flags || length != 4) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "invalid window update: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -64,14 +64,14 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_slice slice, int is_last) {
- uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
- uint8_t *const end = GRPC_SLICE_END_PTR(slice);
- uint8_t *cur = beg;
- grpc_chttp2_window_update_parser *p =
- (grpc_chttp2_window_update_parser *)parser;
+grpc_error* grpc_chttp2_window_update_parser_parse(
+ grpc_exec_ctx* exec_ctx, void* parser, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_slice slice, int is_last) {
+ uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t* const end = GRPC_SLICE_END_PTR(slice);
+ uint8_t* cur = beg;
+ grpc_chttp2_window_update_parser* p =
+ (grpc_chttp2_window_update_parser*)parser;
while (p->byte != 4 && cur != end) {
p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
@@ -86,9 +86,9 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
if (p->byte == 4) {
uint32_t received_update = p->amount;
if (received_update == 0 || (received_update & 0x80000000u)) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "invalid window update bytes: %d", p->amount);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index 400f9f5398..daf7d2da6b 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -35,13 +35,13 @@ typedef struct {
} grpc_chttp2_window_update_parser;
grpc_slice grpc_chttp2_window_update_create(
- uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats *stats);
+ uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats* stats);
-grpc_error *grpc_chttp2_window_update_parser_begin_frame(
- grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags);
-grpc_error *grpc_chttp2_window_update_parser_parse(
- grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_slice slice, int is_last);
+grpc_error* grpc_chttp2_window_update_parser_begin_frame(
+ grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags);
+grpc_error* grpc_chttp2_window_update_parser_parse(
+ grpc_exec_ctx* exec_ctx, void* parser, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_slice slice, int is_last);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index 0ea50e394b..3636440905 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -70,15 +70,15 @@ typedef struct {
uint8_t seen_regular_header;
/* output stream id */
uint32_t stream_id;
- grpc_slice_buffer *output;
- grpc_transport_one_way_stats *stats;
+ grpc_slice_buffer* output;
+ grpc_transport_one_way_stats* stats;
/* maximum size of a frame */
size_t max_frame_size;
bool use_true_binary_metadata;
} framer_state;
/* fills p (which is expected to be 9 bytes long) with a data frame header */
-static void fill_header(uint8_t *p, uint8_t type, uint32_t id, size_t len,
+static void fill_header(uint8_t* p, uint8_t type, uint32_t id, size_t len,
uint8_t flags) {
GPR_ASSERT(len < 16777316);
*p++ = (uint8_t)(len >> 16);
@@ -93,7 +93,7 @@ static void fill_header(uint8_t *p, uint8_t type, uint32_t id, size_t len,
}
/* finish a frame - fill in the previously reserved header */
-static void finish_frame(framer_state *st, int is_header_boundary,
+static void finish_frame(framer_state* st, int is_header_boundary,
int is_last_in_stream) {
uint8_t type = 0xff;
type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
@@ -109,7 +109,7 @@ static void finish_frame(framer_state *st, int is_header_boundary,
/* begin a new frame: reserve off header space, remember how many bytes we'd
output before beginning */
-static void begin_frame(framer_state *st) {
+static void begin_frame(framer_state* st) {
st->header_idx =
grpc_slice_buffer_add_indexed(st->output, GRPC_SLICE_MALLOC(9));
st->output_length_at_start_of_frame = st->output->length;
@@ -118,7 +118,7 @@ static void begin_frame(framer_state *st) {
/* make sure that the current frame is of the type desired, and has sufficient
space to add at least about_to_add bytes -- finishes the current frame if
needed */
-static void ensure_space(framer_state *st, size_t need_bytes) {
+static void ensure_space(framer_state* st, size_t need_bytes) {
if (st->output->length - st->output_length_at_start_of_frame + need_bytes <=
st->max_frame_size) {
return;
@@ -128,7 +128,7 @@ static void ensure_space(framer_state *st, size_t need_bytes) {
}
/* increment a filter count, halve all counts if one element reaches max */
-static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) {
+static void inc_filter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
elems[idx]++;
if (elems[idx] < 255) {
(*sum)++;
@@ -142,7 +142,7 @@ static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) {
}
}
-static void add_header_data(framer_state *st, grpc_slice slice) {
+static void add_header_data(framer_state* st, grpc_slice slice) {
size_t len = GRPC_SLICE_LENGTH(slice);
size_t remaining;
if (len == 0) return;
@@ -160,13 +160,13 @@ static void add_header_data(framer_state *st, grpc_slice slice) {
}
}
-static uint8_t *add_tiny_header_data(framer_state *st, size_t len) {
+static uint8_t* add_tiny_header_data(framer_state* st, size_t len) {
ensure_space(st, len);
st->stats->header_bytes += len;
return grpc_slice_buffer_tiny_add(st->output, len);
}
-static void evict_entry(grpc_chttp2_hpack_compressor *c) {
+static void evict_entry(grpc_chttp2_hpack_compressor* c) {
c->tail_remote_index++;
GPR_ASSERT(c->tail_remote_index > 0);
GPR_ASSERT(c->table_size >=
@@ -181,7 +181,7 @@ static void evict_entry(grpc_chttp2_hpack_compressor *c) {
// Reserve space in table for the new element, evict entries if needed.
// Return the new index of the element. Return 0 to indicate not adding to
// table.
-static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor *c,
+static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c,
size_t elem_size) {
uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
GPR_ASSERT(elem_size < 65536);
@@ -208,14 +208,14 @@ static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor *c,
}
/* dummy function */
-static void add_nothing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c, grpc_mdelem elem,
+static void add_nothing(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
size_t elem_size) {}
// Add a key to the dynamic table. Both key and value will be added to table at
// the decoder.
-static void add_key_with_index(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void add_key_with_index(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
grpc_mdelem elem, uint32_t new_index) {
if (new_index == 0) {
return;
@@ -257,8 +257,8 @@ static void add_key_with_index(grpc_exec_ctx *exec_ctx,
}
/* add an element to the decoder table */
-static void add_elem_with_index(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void add_elem_with_index(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
grpc_mdelem elem, uint32_t new_index) {
if (new_index == 0) {
return;
@@ -301,21 +301,21 @@ static void add_elem_with_index(grpc_exec_ctx *exec_ctx,
add_key_with_index(exec_ctx, c, elem, new_index);
}
-static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
+static void add_elem(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
grpc_mdelem elem, size_t elem_size) {
uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
add_elem_with_index(exec_ctx, c, elem, new_index);
}
-static void add_key(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
+static void add_key(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
grpc_mdelem elem, size_t elem_size) {
uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
add_key_with_index(exec_ctx, c, elem, new_index);
}
-static void emit_indexed(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c, uint32_t elem_index,
- framer_state *st) {
+static void emit_indexed(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c, uint32_t elem_index,
+ framer_state* st) {
GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx);
uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
@@ -328,7 +328,7 @@ typedef struct {
bool insert_null_before_wire_value;
} wire_value;
-static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem,
+static wire_value get_wire_value(grpc_exec_ctx* exec_ctx, grpc_mdelem elem,
bool true_binary_enabled) {
wire_value wire_val;
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
@@ -359,15 +359,15 @@ static size_t wire_value_length(wire_value v) {
return GPR_SLICE_LENGTH(v.data) + v.insert_null_before_wire_value;
}
-static void add_wire_value(framer_state *st, wire_value v) {
+static void add_wire_value(framer_state* st, wire_value v) {
if (v.insert_null_before_wire_value) *add_tiny_header_data(st, 1) = 0;
add_header_data(st, v.data);
}
-static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_incidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
uint32_t key_index, grpc_mdelem elem,
- framer_state *st) {
+ framer_state* st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
wire_value value =
@@ -383,10 +383,10 @@ static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_noidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
uint32_t key_index, grpc_mdelem elem,
- framer_state *st) {
+ framer_state* st) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx);
uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
wire_value value =
@@ -402,10 +402,10 @@ static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx,
add_wire_value(st, value);
}
-static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
uint32_t unused_index, grpc_mdelem elem,
- framer_state *st) {
+ framer_state* st) {
GPR_ASSERT(unused_index == 0);
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx);
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
@@ -426,10 +426,10 @@ static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
add_wire_value(st, value);
}
-static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
+static void emit_lithdr_noidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
uint32_t unused_index, grpc_mdelem elem,
- framer_state *st) {
+ framer_state* st) {
GPR_ASSERT(unused_index == 0);
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx);
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
@@ -450,22 +450,22 @@ static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx,
add_wire_value(st, value);
}
-static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c,
- framer_state *st) {
+static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor* c,
+ framer_state* st) {
uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3);
GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20,
add_tiny_header_data(st, len), len);
c->advertise_table_size_change = 0;
}
-static uint32_t dynidx(grpc_chttp2_hpack_compressor *c, uint32_t elem_index) {
+static uint32_t dynidx(grpc_chttp2_hpack_compressor* c, uint32_t elem_index) {
return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
c->table_elems - elem_index;
}
/* encode an mdelem */
-static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
- grpc_mdelem elem, framer_state *st) {
+static void hpack_enc(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
+ grpc_mdelem elem, framer_state* st) {
GPR_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)) > 0);
if (GRPC_SLICE_START_PTR(GRPC_MDKEY(elem))[0] != ':') { /* regular header */
st->seen_regular_header = 1;
@@ -476,8 +476,8 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
}
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
- char *v = NULL;
+ char* k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
+ char* v = NULL;
if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
v = grpc_dump_slice(GRPC_MDVALUE(elem), GPR_DUMP_HEX);
} else {
@@ -540,11 +540,10 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
- void (*maybe_add)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *,
- grpc_mdelem, size_t) =
- should_add_elem ? add_elem : add_nothing;
- void (*emit)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *, uint32_t,
- grpc_mdelem, framer_state *) =
+ void (*maybe_add)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, grpc_mdelem,
+ size_t) = should_add_elem ? add_elem : add_nothing;
+ void (*emit)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, uint32_t,
+ grpc_mdelem, framer_state*) =
should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;
/* no hits for the elem... maybe there's a key? */
@@ -582,9 +581,9 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
#define STRLEN_LIT(x) (sizeof(x) - 1)
#define TIMEOUT_KEY "grpc-timeout"
-static void deadline_enc(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c, grpc_millis deadline,
- framer_state *st) {
+static void deadline_enc(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
+ framer_state* st) {
char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_mdelem mdelem;
grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx),
@@ -597,14 +596,14 @@ static void deadline_enc(grpc_exec_ctx *exec_ctx,
static uint32_t elems_for_bytes(uint32_t bytes) { return (bytes + 31) / 32; }
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c) {
memset(c, 0, sizeof(*c));
c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->cap_table_elems = elems_for_bytes(c->max_table_size);
c->max_table_elems = c->cap_table_elems;
c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->table_elem_size =
- (uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
+ (uint16_t*)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
@@ -612,8 +611,8 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
}
}
-void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c) {
+void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c) {
int i;
for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
if (c->entries_keys[i].refcount != &terminal_slice_refcount) {
@@ -625,15 +624,15 @@ void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx,
}
void grpc_chttp2_hpack_compressor_set_max_usable_size(
- grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) {
+ grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
c->max_usable_size = max_table_size;
grpc_chttp2_hpack_compressor_set_max_table_size(
c, GPR_MIN(c->max_table_size, max_table_size));
}
-static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
- uint16_t *table_elem_size =
- (uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap);
+static void rebuild_elems(grpc_chttp2_hpack_compressor* c, uint32_t new_cap) {
+ uint16_t* table_elem_size =
+ (uint16_t*)gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint32_t i;
memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
@@ -651,7 +650,7 @@ static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
}
void grpc_chttp2_hpack_compressor_set_max_table_size(
- grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) {
+ grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
max_table_size = GPR_MIN(max_table_size, c->max_usable_size);
if (max_table_size == c->max_table_size) {
return;
@@ -675,13 +674,13 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
}
}
-void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
- grpc_mdelem **extra_headers,
+void grpc_chttp2_encode_header(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
+ grpc_mdelem** extra_headers,
size_t extra_headers_size,
- grpc_metadata_batch *metadata,
- const grpc_encode_header_options *options,
- grpc_slice_buffer *outbuf) {
+ grpc_metadata_batch* metadata,
+ const grpc_encode_header_options* options,
+ grpc_slice_buffer* outbuf) {
GPR_ASSERT(options->stream_id != 0);
framer_state st;
@@ -705,7 +704,7 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
hpack_enc(exec_ctx, c, *extra_headers[i], &st);
}
grpc_metadata_batch_assert_ok(metadata);
- for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) {
+ for (grpc_linked_mdelem* l = metadata->list.head; l; l = l->next) {
hpack_enc(exec_ctx, c, l->md, &st);
}
grpc_millis deadline = metadata->deadline;
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 16316b63f7..fd01d1621a 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -68,32 +68,32 @@ typedef struct {
uint32_t indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES];
uint32_t indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];
- uint16_t *table_elem_size;
+ uint16_t* table_elem_size;
} grpc_chttp2_hpack_compressor;
-void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c);
-void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c);
+void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c);
+void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c);
void grpc_chttp2_hpack_compressor_set_max_table_size(
- grpc_chttp2_hpack_compressor *c, uint32_t max_table_size);
+ grpc_chttp2_hpack_compressor* c, uint32_t max_table_size);
void grpc_chttp2_hpack_compressor_set_max_usable_size(
- grpc_chttp2_hpack_compressor *c, uint32_t max_table_size);
+ grpc_chttp2_hpack_compressor* c, uint32_t max_table_size);
typedef struct {
uint32_t stream_id;
bool is_eof;
bool use_true_binary_metadata;
size_t max_frame_size;
- grpc_transport_one_way_stats *stats;
+ grpc_transport_one_way_stats* stats;
} grpc_encode_header_options;
-void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_compressor *c,
- grpc_mdelem **extra_headers,
+void grpc_chttp2_encode_header(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_compressor* c,
+ grpc_mdelem** extra_headers,
size_t extra_headers_size,
- grpc_metadata_batch *metadata,
- const grpc_encode_header_options *options,
- grpc_slice_buffer *outbuf);
+ grpc_metadata_batch* metadata,
+ const grpc_encode_header_options* options,
+ grpc_slice_buffer* outbuf);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index 7c17229122..1181402918 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -61,97 +61,97 @@ typedef enum {
a set of indirect jumps, and so not waste stack space. */
/* forward declarations for parsing states */
-static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end, grpc_error *error);
-static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-
-static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_value_string_with_indexed_key(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value_string_with_literal_key(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-
-static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-
-static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end);
-static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
-static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end);
+static grpc_error* parse_begin(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_error(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end, grpc_error* error);
+static grpc_error* still_parse_error(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_illegal_op(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+
+static grpc_error* parse_string_prefix(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_key_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_value_string_with_indexed_key(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value_string_with_literal_key(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+
+static grpc_error* parse_value0(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value1(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value2(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value3(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value4(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_value5up(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+
+static grpc_error* parse_indexed_field(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_indexed_field_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_incidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_lithdr_incidx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_notidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_lithdr_notidx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_lithdr_nvridx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end);
+static grpc_error* parse_max_tbl_size(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
+static grpc_error* parse_max_tbl_size_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end);
/* we translate the first byte of a hpack field into one of these decoding
cases, then use a lookup table to jump directly to the appropriate parser.
@@ -649,11 +649,11 @@ static const uint8_t inverse_base64[256] = {
};
/* emission helpers */
-static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
+static grpc_error* on_hdr(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p,
grpc_mdelem md, int add_to_table) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *v = NULL;
+ char* k = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char* v = NULL;
if (grpc_is_binary_header(GRPC_MDKEY(md))) {
v = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX);
} else {
@@ -671,7 +671,7 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
if (add_to_table) {
GPR_ASSERT(GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_INTERNED ||
GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC);
- grpc_error *err = grpc_chttp2_hptbl_add(exec_ctx, &p->table, md);
+ grpc_error* err = grpc_chttp2_hptbl_add(exec_ctx, &p->table, md);
if (err != GRPC_ERROR_NONE) return err;
}
if (p->on_header == NULL) {
@@ -682,9 +682,9 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
return GRPC_ERROR_NONE;
}
-static grpc_slice take_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- grpc_chttp2_hpack_parser_string *str,
+static grpc_slice take_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ grpc_chttp2_hpack_parser_string* str,
bool intern) {
grpc_slice s;
if (!str->copied) {
@@ -708,18 +708,18 @@ static grpc_slice take_string(grpc_exec_ctx *exec_ctx,
}
/* jump to the next state */
-static grpc_error *parse_next(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_next(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
p->state = *p->next_state++;
return p->state(exec_ctx, p, cur, end);
}
/* begin parsing a header: all functionality is encoded into lookup tables
above */
-static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_begin(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
if (cur == end) {
p->state = parse_begin;
return GRPC_ERROR_NONE;
@@ -729,9 +729,9 @@ static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx,
}
/* stream dependency and prioritization data: we just skip it */
-static grpc_error *parse_stream_weight(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_stream_weight(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_stream_weight;
return GRPC_ERROR_NONE;
@@ -740,9 +740,9 @@ static grpc_error *parse_stream_weight(grpc_exec_ctx *exec_ctx,
return p->after_prioritization(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep3(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_stream_dep3(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_stream_dep3;
return GRPC_ERROR_NONE;
@@ -751,9 +751,9 @@ static grpc_error *parse_stream_dep3(grpc_exec_ctx *exec_ctx,
return parse_stream_weight(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep2(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_stream_dep2(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_stream_dep2;
return GRPC_ERROR_NONE;
@@ -762,9 +762,9 @@ static grpc_error *parse_stream_dep2(grpc_exec_ctx *exec_ctx,
return parse_stream_dep3(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep1(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_stream_dep1(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_stream_dep1;
return GRPC_ERROR_NONE;
@@ -773,9 +773,9 @@ static grpc_error *parse_stream_dep1(grpc_exec_ctx *exec_ctx,
return parse_stream_dep2(exec_ctx, p, cur + 1, end);
}
-static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_stream_dep0(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_stream_dep0;
return GRPC_ERROR_NONE;
@@ -785,10 +785,10 @@ static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx,
}
/* emit an indexed field; jumps to begin the next field on completion */
-static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_indexed_field(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(md)) {
return grpc_error_set_int(
@@ -799,25 +799,25 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
}
GRPC_MDELEM_REF(md);
GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
- grpc_error *err = on_hdr(exec_ctx, p, md, 0);
+ grpc_error* err = on_hdr(exec_ctx, p, md, 0);
if (err != GRPC_ERROR_NONE) return err;
return parse_begin(exec_ctx, p, cur, end);
}
/* parse an indexed field with index < 127 */
-static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_indexed_field(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
return finish_indexed_field(exec_ctx, p, cur + 1, end);
}
/* parse an indexed field with index >= 127 */
-static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_indexed_field_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_indexed_field};
p->dynamic_table_update_allowed = 0;
@@ -828,14 +828,14 @@ static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header with incremental indexing */
-static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_incidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(exec_ctx, p, &p->value, true)),
@@ -845,12 +845,12 @@ static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header with incremental indexing with no index */
-static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
take_string(exec_ctx, p, &p->value, true)),
@@ -860,9 +860,9 @@ static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header with incremental indexing; index < 63 */
-static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_lithdr_incidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
p->dynamic_table_update_allowed = 0;
@@ -872,10 +872,10 @@ static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header with incremental indexing; index >= 63 */
-static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_incidx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_incidx};
@@ -887,10 +887,10 @@ static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header with incremental indexing; index = 0 */
-static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
@@ -900,14 +900,14 @@ static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header without incremental indexing */
-static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_notidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(exec_ctx, p, &p->value, false)),
@@ -917,12 +917,12 @@ static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header without incremental indexing with index = 0 */
-static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
take_string(exec_ctx, p, &p->value, false)),
@@ -932,9 +932,9 @@ static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header without incremental indexing; index < 15 */
-static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_lithdr_notidx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
p->dynamic_table_update_allowed = 0;
@@ -944,10 +944,10 @@ static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header without incremental indexing; index >= 15 */
-static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_notidx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_notidx};
@@ -959,10 +959,10 @@ static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header without incremental indexing; index == 0 */
-static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
@@ -972,14 +972,14 @@ static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header that is never indexed */
-static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(exec_ctx, p, &p->value, false)),
@@ -989,12 +989,12 @@ static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
}
/* finish a literal header that is never indexed with an extra value */
-static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx);
- grpc_error *err = on_hdr(
+ grpc_error* err = on_hdr(
exec_ctx, p,
grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
take_string(exec_ctx, p, &p->value, false)),
@@ -1004,9 +1004,9 @@ static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header that is never indexed; index < 15 */
-static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
p->dynamic_table_update_allowed = 0;
@@ -1016,10 +1016,10 @@ static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header that is never indexed; index >= 15 */
-static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_nvridx_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_nvridx};
@@ -1031,10 +1031,10 @@ static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx,
}
/* parse a literal header that is never indexed; index == 0 */
-static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
@@ -1044,22 +1044,22 @@ static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx,
}
/* finish parsing a max table size change */
-static grpc_error *finish_max_tbl_size(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* finish_max_tbl_size(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
}
- grpc_error *err =
+ grpc_error* err =
grpc_chttp2_hptbl_set_current_table_size(exec_ctx, &p->table, p->index);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_begin(exec_ctx, p, cur, end);
}
/* parse a max table size change, max size < 15 */
-static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_max_tbl_size(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
exec_ctx, p, cur, end,
@@ -1072,10 +1072,10 @@ static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
}
/* parse a max table size change, max size >= 15 */
-static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_max_tbl_size_x(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur,
+ const uint8_t* end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
if (p->dynamic_table_update_allowed == 0) {
@@ -1092,9 +1092,9 @@ static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
}
/* a parse error: jam the parse state into parse_error, and return error */
-static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end, grpc_error *err) {
+static grpc_error* parse_error(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end, grpc_error* err) {
GPR_ASSERT(err != GRPC_ERROR_NONE);
if (p->last_error == GRPC_ERROR_NONE) {
p->last_error = GRPC_ERROR_REF(err);
@@ -1103,28 +1103,28 @@ static grpc_error *parse_error(grpc_exec_ctx *exec_ctx,
return err;
}
-static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* still_parse_error(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
return GRPC_ERROR_REF(p->last_error);
}
-static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_illegal_op(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
GPR_ASSERT(cur != end);
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
/* parse the 1st byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value0(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
if (cur == end) {
p->state = parse_value0;
return GRPC_ERROR_NONE;
@@ -1141,9 +1141,9 @@ static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx,
/* parse the 2nd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value1(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
if (cur == end) {
p->state = parse_value1;
return GRPC_ERROR_NONE;
@@ -1160,9 +1160,9 @@ static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx,
/* parse the 3rd byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value2(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
if (cur == end) {
p->state = parse_value2;
return GRPC_ERROR_NONE;
@@ -1179,9 +1179,9 @@ static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx,
/* parse the 4th byte of a varint into p->parsing.value
no overflow is possible */
-static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value3(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
if (cur == end) {
p->state = parse_value3;
return GRPC_ERROR_NONE;
@@ -1198,13 +1198,13 @@ static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx,
/* parse the 5th byte of a varint into p->parsing.value
depending on the byte, we may overflow, and care must be taken */
-static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value4(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
uint8_t c;
uint32_t cur_value;
uint32_t add_value;
- char *msg;
+ char* msg;
if (cur == end) {
p->state = parse_value4;
@@ -1235,7 +1235,7 @@ error:
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1243,9 +1243,9 @@ error:
/* parse any trailing bytes in a varint: it's possible to append an arbitrary
number of 0x80's and not affect the value - a zero will terminate - and
anything else will overflow */
-static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_value5up(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
while (cur != end && *cur == 0x80) {
++cur;
}
@@ -1259,20 +1259,20 @@ static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
return parse_next(exec_ctx, p, cur + 1, end);
}
- char *msg;
+ char* msg;
gpr_asprintf(&msg,
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x sometime after byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
/* parse a string prefix */
-static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_string_prefix(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (cur == end) {
p->state = parse_string_prefix;
return GRPC_ERROR_NONE;
@@ -1289,24 +1289,24 @@ static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx,
}
/* append some bytes to a string */
-static void append_bytes(grpc_chttp2_hpack_parser_string *str,
- const uint8_t *data, size_t length) {
+static void append_bytes(grpc_chttp2_hpack_parser_string* str,
+ const uint8_t* data, size_t length) {
if (length == 0) return;
if (length + str->data.copied.length > str->data.copied.capacity) {
GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
str->data.copied.str =
- (char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
+ (char*)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
memcpy(str->data.copied.str + str->data.copied.length, data, length);
GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
str->data.copied.length += (uint32_t)length;
}
-static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
- grpc_chttp2_hpack_parser_string *str = p->parsing.str;
+static grpc_error* append_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
+ grpc_chttp2_hpack_parser_string* str = p->parsing.str;
uint32_t bits;
uint8_t decoded[3];
switch ((binary_state)p->binary) {
@@ -1403,12 +1403,12 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here")));
}
-static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* finish_str(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
uint8_t decoded[2];
uint32_t bits;
- grpc_chttp2_hpack_parser_string *str = p->parsing.str;
+ grpc_chttp2_hpack_parser_string* str = p->parsing.str;
switch ((binary_state)p->binary) {
case NOT_BINARY:
break;
@@ -1423,10 +1423,10 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
case B64_BYTE2:
bits = p->base64_buffer;
if (bits & 0xffff) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%04x",
bits & 0xffff);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1436,10 +1436,10 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
case B64_BYTE3:
bits = p->base64_buffer;
if (bits & 0xff) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%02x",
bits & 0xff);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1452,14 +1452,14 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
}
/* decode a nibble from a huffman encoded stream */
-static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, uint8_t nibble) {
+static grpc_error* huff_nibble(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, uint8_t nibble) {
int16_t emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
if (emit != -1) {
if (emit >= 0 && emit < 256) {
uint8_t c = (uint8_t)emit;
- grpc_error *err = append_string(exec_ctx, p, &c, (&c) + 1);
+ grpc_error* err = append_string(exec_ctx, p, &c, (&c) + 1);
if (err != GRPC_ERROR_NONE) return err;
} else {
assert(emit == 256);
@@ -1470,11 +1470,11 @@ static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx,
}
/* decode full bytes from a huffman encoded stream */
-static grpc_error *add_huff_bytes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* add_huff_bytes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
for (; cur != end; ++cur) {
- grpc_error *err = huff_nibble(exec_ctx, p, *cur >> 4);
+ grpc_error* err = huff_nibble(exec_ctx, p, *cur >> 4);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
err = huff_nibble(exec_ctx, p, *cur & 0xf);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
@@ -1484,9 +1484,9 @@ static grpc_error *add_huff_bytes(grpc_exec_ctx *exec_ctx,
/* decode some string bytes based on the current decoding mode
(huffman or not) */
-static grpc_error *add_str_bytes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* add_str_bytes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
if (p->huff) {
return add_huff_bytes(exec_ctx, p, cur, end);
} else {
@@ -1495,19 +1495,19 @@ static grpc_error *add_str_bytes(grpc_exec_ctx *exec_ctx,
}
/* parse a string - tries to do large chunks at a time */
-static grpc_error *parse_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
size_t remaining = p->strlen - p->strgot;
size_t given = (size_t)(end - cur);
if (remaining <= given) {
- grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + remaining);
+ grpc_error* err = add_str_bytes(exec_ctx, p, cur, cur + remaining);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
err = finish_str(exec_ctx, p, cur + remaining, end);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_next(exec_ctx, p, cur + remaining, end);
} else {
- grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + given);
+ grpc_error* err = add_str_bytes(exec_ctx, p, cur, cur + given);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
GPR_ASSERT(given <= UINT32_MAX - p->strgot);
p->strgot += (uint32_t)given;
@@ -1517,17 +1517,17 @@ static grpc_error *parse_string(grpc_exec_ctx *exec_ctx,
}
/* begin parsing a string - performs setup, calls parse_string */
-static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end,
+static grpc_error* begin_parse_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end,
uint8_t binary,
- grpc_chttp2_hpack_parser_string *str) {
+ grpc_chttp2_hpack_parser_string* str) {
if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
p->current_slice_refcount != NULL) {
GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
str->copied = false;
str->data.referenced.refcount = p->current_slice_refcount;
- str->data.referenced.data.refcounted.bytes = (uint8_t *)cur;
+ str->data.referenced.data.refcounted.bytes = (uint8_t*)cur;
str->data.referenced.data.refcounted.length = p->strlen;
grpc_slice_ref_internal(str->data.referenced);
return parse_next(exec_ctx, p, cur + p->strlen, end);
@@ -1556,23 +1556,23 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx,
}
/* parse the key string */
-static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end) {
+static grpc_error* parse_key_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end) {
return begin_parse_string(exec_ctx, p, cur, end, NOT_BINARY, &p->key);
}
/* check if a key represents a binary header or not */
-static bool is_binary_literal_header(grpc_chttp2_hpack_parser *p) {
+static bool is_binary_literal_header(grpc_chttp2_hpack_parser* p) {
return grpc_is_binary_header(
p->key.copied ? grpc_slice_from_static_buffer(p->key.data.copied.str,
p->key.data.copied.length)
: p->key.data.referenced);
}
-static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
- bool *is) {
+static grpc_error* is_binary_indexed_header(grpc_chttp2_hpack_parser* p,
+ bool* is) {
grpc_mdelem elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(elem)) {
return grpc_error_set_int(
@@ -1586,33 +1586,33 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
}
/* parse the value string */
-static grpc_error *parse_value_string(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
- const uint8_t *cur, const uint8_t *end,
+static grpc_error* parse_value_string(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
+ const uint8_t* cur, const uint8_t* end,
bool is_binary) {
return begin_parse_string(exec_ctx, p, cur, end,
is_binary ? BINARY_BEGIN : NOT_BINARY, &p->value);
}
-static grpc_error *parse_value_string_with_indexed_key(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value_string_with_indexed_key(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
bool is_binary = false;
- grpc_error *err = is_binary_indexed_header(p, &is_binary);
+ grpc_error* err = is_binary_indexed_header(p, &is_binary);
if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
return parse_value_string(exec_ctx, p, cur, end, is_binary);
}
-static grpc_error *parse_value_string_with_literal_key(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur,
- const uint8_t *end) {
+static grpc_error* parse_value_string_with_literal_key(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+ const uint8_t* end) {
return parse_value_string(exec_ctx, p, cur, end, is_binary_literal_header(p));
}
/* PUBLIC INTERFACE */
-void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p) {
+void grpc_chttp2_hpack_parser_init(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p) {
p->on_header = NULL;
p->on_header_user_data = NULL;
p->state = parse_begin;
@@ -1629,13 +1629,13 @@ void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hptbl_init(exec_ctx, &p->table);
}
-void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p) {
+void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p) {
p->after_prioritization = p->state;
p->state = parse_stream_dep0;
}
-void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p) {
+void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p) {
grpc_chttp2_hptbl_destroy(exec_ctx, &p->table);
GRPC_ERROR_UNREF(p->last_error);
grpc_slice_unref_internal(exec_ctx, p->key.data.referenced);
@@ -1644,18 +1644,18 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(p->value.data.copied.str);
}
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
+grpc_error* grpc_chttp2_hpack_parser_parse(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
grpc_slice slice) {
/* max number of bytes to parse at a time... limits call stack depth on
* compilers without TCO */
#define MAX_PARSE_LENGTH 1024
p->current_slice_refcount = slice.refcount;
- uint8_t *start = GRPC_SLICE_START_PTR(slice);
- uint8_t *end = GRPC_SLICE_END_PTR(slice);
- grpc_error *error = GRPC_ERROR_NONE;
+ uint8_t* start = GRPC_SLICE_START_PTR(slice);
+ uint8_t* end = GRPC_SLICE_END_PTR(slice);
+ grpc_error* error = GRPC_ERROR_NONE;
while (start != end && error == GRPC_ERROR_NONE) {
- uint8_t *target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
+ uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
error = p->state(exec_ctx, p, start, target);
start = target;
}
@@ -1663,17 +1663,17 @@ grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
return error;
}
-typedef void (*maybe_complete_func_type)(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+typedef void (*maybe_complete_func_type)(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
static const maybe_complete_func_type maybe_complete_funcs[] = {
grpc_chttp2_maybe_complete_recv_initial_metadata,
grpc_chttp2_maybe_complete_recv_trailing_metadata};
-static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
- grpc_error *error) {
- grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
- grpc_chttp2_transport *t = s->t;
+static void force_client_rst_stream(grpc_exec_ctx* exec_ctx, void* sp,
+ grpc_error* error) {
+ grpc_chttp2_stream* s = (grpc_chttp2_stream*)sp;
+ grpc_chttp2_transport* t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
@@ -1685,10 +1685,10 @@ static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
}
-static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_metadata_batch *initial_metadata) {
+static void parse_stream_compression_md(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
+ grpc_metadata_batch* initial_metadata) {
if (initial_metadata->idx.named.content_encoding == NULL ||
grpc_stream_compression_method_parse(
GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false,
@@ -1698,17 +1698,17 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
}
}
-grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
- void *hpack_parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_header_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* hpack_parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
- grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser;
+ grpc_chttp2_hpack_parser* parser = (grpc_chttp2_hpack_parser*)hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
}
- grpc_error *error = grpc_chttp2_hpack_parser_parse(exec_ctx, parser, slice);
+ grpc_error* error = grpc_chttp2_hpack_parser_parse(exec_ctx, parser, slice);
if (error != GRPC_ERROR_NONE) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return error;
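The parse_value0 through parse_value5up states above implement HPACK's prefix-integer decoding (RFC 7541, section 5.1): continuation bytes contribute 7 bits each, least-significant group first, with explicit 32-bit overflow checks, and the parser suspends by storing the current state function whenever a slice runs out. A minimal, non-resumable sketch of the same decoding rule follows; the function and parameter names are illustrative only and are not part of gRPC's API, and unlike the real parser it rejects rather than tolerates long runs of redundant 0x80 padding bytes.

#include <stdbool.h>
#include <stdint.h>

/* Decode the continuation bytes of an HPACK integer whose prefix has already
   been read into *value (the caller masked the first byte with the N-bit
   prefix mask). Returns false on overflow or truncated input. */
static bool hpack_decode_int_rest(const uint8_t* cur, const uint8_t* end,
                                  uint32_t prefix_bits, uint32_t* value,
                                  const uint8_t** out_cur) {
  const uint32_t max_prefix = (1u << prefix_bits) - 1;
  if (*value < max_prefix) {  /* value fit entirely in the prefix bits */
    *out_cur = cur;
    return true;
  }
  uint32_t shift = 0;
  while (cur != end) {
    uint8_t b = *cur++;
    uint32_t group = (uint32_t)(b & 0x7f);
    if (shift >= 32 || group > (UINT32_MAX >> shift)) return false;
    uint32_t add = group << shift;
    if (*value > UINT32_MAX - add) return false;  /* 32-bit overflow */
    *value += add;
    if ((b & 0x80) == 0) {  /* high bit clear: this was the last byte */
      *out_cur = cur;
      return true;
    }
    shift += 7;
  }
  return false;  /* input exhausted (the real parser suspends instead) */
}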
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 52014175a0..838c482e4a 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -33,16 +33,16 @@ extern "C" {
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
-typedef grpc_error *(*grpc_chttp2_hpack_parser_state)(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg,
- const uint8_t *end);
+typedef grpc_error* (*grpc_chttp2_hpack_parser_state)(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* beg,
+ const uint8_t* end);
typedef struct {
bool copied;
struct {
grpc_slice referenced;
struct {
- char *str;
+ char* str;
uint32_t length;
uint32_t capacity;
} copied;
@@ -51,23 +51,23 @@ typedef struct {
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
- void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem md);
- void *on_header_user_data;
+ void (*on_header)(grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem md);
+ void* on_header_user_data;
- grpc_error *last_error;
+ grpc_error* last_error;
/* current parse state - or a function that implements it */
grpc_chttp2_hpack_parser_state state;
/* future states dependent on the opening op code */
- const grpc_chttp2_hpack_parser_state *next_state;
+ const grpc_chttp2_hpack_parser_state* next_state;
/* what to do after skipping prioritization data */
grpc_chttp2_hpack_parser_state after_prioritization;
/* the refcount of the slice that we're currently parsing */
- grpc_slice_refcount *current_slice_refcount;
+ grpc_slice_refcount* current_slice_refcount;
/* the value we're currently parsing */
union {
- uint32_t *value;
- grpc_chttp2_hpack_parser_string *str;
+ uint32_t* value;
+ grpc_chttp2_hpack_parser_string* str;
} parsing;
/* string parameters for each chunk */
grpc_chttp2_hpack_parser_string key;
@@ -96,23 +96,23 @@ struct grpc_chttp2_hpack_parser {
grpc_chttp2_hptbl table;
};
-void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p);
-void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p);
+void grpc_chttp2_hpack_parser_init(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p);
+void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p);
-void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p);
+void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p);
-grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hpack_parser *p,
+grpc_error* grpc_chttp2_hpack_parser_parse(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hpack_parser* p,
grpc_slice slice);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
-grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
- void *hpack_parser,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+grpc_error* grpc_chttp2_header_parser_parse(grpc_exec_ctx* exec_ctx,
+ void* hpack_parser,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
#ifdef __cplusplus
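hpack_parser.h above models every parse step as a grpc_chttp2_hpack_parser_state function pointer over the current byte range; when a state runs out of input it stores itself in p->state and returns, so the next slice handed to grpc_chttp2_hpack_parser_parse resumes exactly where decoding stopped. A stripped-down sketch of that suspend-and-resume pattern, using hypothetical toy_* names rather than gRPC types, looks like this:

#include <stddef.h>
#include <stdint.h>

typedef struct toy_parser toy_parser;
/* A state consumes bytes from [cur, end) and returns how far it got. */
typedef const uint8_t* (*toy_state_fn)(toy_parser* p, const uint8_t* cur,
                                       const uint8_t* end);

struct toy_parser {
  toy_state_fn state;  /* where to resume when the next buffer arrives */
  uint32_t value;      /* the 16-bit field currently being accumulated */
};

static const uint8_t* toy_parse_lo(toy_parser* p, const uint8_t* cur,
                                   const uint8_t* end);

static const uint8_t* toy_parse_hi(toy_parser* p, const uint8_t* cur,
                                   const uint8_t* end) {
  if (cur == end) {           /* out of input: remember this state ... */
    p->state = toy_parse_hi;
    return cur;               /* ... and pick it up on the next buffer */
  }
  p->value = (uint32_t)(*cur) << 8;
  return toy_parse_lo(p, cur + 1, end);
}

static const uint8_t* toy_parse_lo(toy_parser* p, const uint8_t* cur,
                                   const uint8_t* end) {
  if (cur == end) {
    p->state = toy_parse_lo;
    return cur;
  }
  p->value |= *cur;
  p->state = toy_parse_hi;    /* field complete; next byte starts a new one */
  return cur + 1;
}

/* Feed one buffer; parsing continues from wherever the last buffer stopped. */
static void toy_parser_feed(toy_parser* p, const uint8_t* data, size_t len) {
  const uint8_t* cur = data;
  const uint8_t* end = data + len;
  while (cur != end) cur = p->state(p, cur, end);
}

A caller initializes p->state to toy_parse_hi once and can then hand the parser input slices of any size across any number of feed calls.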
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.cc b/src/core/ext/transport/chttp2/transport/hpack_table.cc
index 82c284b36e..1a2bc9224e 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.cc
@@ -31,8 +31,8 @@
extern "C" grpc_tracer_flag grpc_http_trace;
static struct {
- const char *key;
- const char *value;
+ const char* key;
+ const char* value;
} static_table[] = {
/* 0: */
{NULL, NULL},
@@ -165,7 +165,7 @@ static uint32_t entries_for_bytes(uint32_t bytes) {
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
}
-void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
+void grpc_chttp2_hptbl_init(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl) {
size_t i;
memset(tbl, 0, sizeof(*tbl));
@@ -173,7 +173,7 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
tbl->max_entries = tbl->cap_entries =
entries_for_bytes(tbl->current_table_bytes);
- tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
+ tbl->ents = (grpc_mdelem*)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
@@ -184,8 +184,8 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
}
-void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl) {
+void grpc_chttp2_hptbl_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl) {
size_t i;
for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
GRPC_MDELEM_UNREF(exec_ctx, tbl->static_ents[i]);
@@ -197,7 +197,7 @@ void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(tbl->ents);
}
-grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
+grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl* tbl,
uint32_t tbl_index) {
/* Static table comes first, just return an entry from it */
if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
@@ -215,7 +215,7 @@ grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
}
/* Evict one element from the table */
-static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
+static void evict1(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl) {
grpc_mdelem first_ent = tbl->ents[tbl->first_ent];
size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(first_ent)) +
GRPC_SLICE_LENGTH(GRPC_MDVALUE(first_ent)) +
@@ -227,8 +227,8 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_MDELEM_UNREF(exec_ctx, first_ent);
}
-static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
- grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap);
+static void rebuild_ents(grpc_chttp2_hptbl* tbl, uint32_t new_cap) {
+ grpc_mdelem* ents = (grpc_mdelem*)gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {
@@ -240,8 +240,8 @@ static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
tbl->first_ent = 0;
}
-void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl,
+void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl,
uint32_t max_bytes) {
if (tbl->max_bytes == max_bytes) {
return;
@@ -255,18 +255,18 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx,
tbl->max_bytes = max_bytes;
}
-grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl,
+grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl,
uint32_t bytes) {
if (tbl->current_table_bytes == bytes) {
return GRPC_ERROR_NONE;
}
if (bytes > tbl->max_bytes) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg,
"Attempt to make hpack table %d bytes when max is %d bytes",
bytes, tbl->max_bytes);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -289,21 +289,21 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl, grpc_mdelem md) {
+grpc_error* grpc_chttp2_hptbl_add(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl, grpc_mdelem md) {
/* determine how many bytes of buffer this entry represents */
size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(md)) +
GRPC_SLICE_LENGTH(GRPC_MDVALUE(md)) +
GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
if (tbl->current_table_bytes > tbl->max_bytes) {
- char *msg;
+ char* msg;
gpr_asprintf(
&msg,
"HPACK max table size reduced to %d but not reflected by hpack "
"stream (still at %d)",
tbl->max_bytes, tbl->current_table_bytes);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -341,7 +341,7 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
}
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
- const grpc_chttp2_hptbl *tbl, grpc_mdelem md) {
+ const grpc_chttp2_hptbl* tbl, grpc_mdelem md) {
grpc_chttp2_hptbl_find_result r = {0, 0};
uint32_t i;
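The hpack table code above keeps the dynamic entries in a circular buffer and charges each entry its key length plus value length plus GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD (the fixed 32-byte overhead from RFC 7541, section 4.1), evicting from the oldest end whenever an insertion or a table-size update would exceed the current byte budget. A minimal standalone sketch of that accounting, with hypothetical toy_* types in place of grpc_mdelem and grpc_chttp2_hptbl, is:

#include <stddef.h>
#include <stdint.h>

#define TOY_HPACK_ENTRY_OVERHEAD 32u  /* per-entry charge, RFC 7541 sec. 4.1 */
#define TOY_CAP 128u                  /* fixed slot count for the sketch */

typedef struct {
  size_t key_len;
  size_t value_len;
} toy_entry;

typedef struct {
  toy_entry ents[TOY_CAP];  /* circular buffer, oldest entry at `first` */
  uint32_t first;
  uint32_t count;
  size_t mem_used;          /* bytes currently charged to the table */
  size_t max_bytes;         /* current table size limit */
} toy_hptbl;

static size_t toy_entry_size(toy_entry e) {
  return e.key_len + e.value_len + TOY_HPACK_ENTRY_OVERHEAD;
}

static void toy_evict_oldest(toy_hptbl* tbl) {
  tbl->mem_used -= toy_entry_size(tbl->ents[tbl->first]);
  tbl->first = (tbl->first + 1) % TOY_CAP;
  tbl->count--;
}

/* Shrinking the limit evicts immediately, like an HPACK table-size update. */
static void toy_set_max_bytes(toy_hptbl* tbl, size_t max_bytes) {
  tbl->max_bytes = max_bytes;
  while (tbl->count > 0 && tbl->mem_used > tbl->max_bytes) {
    toy_evict_oldest(tbl);
  }
}

/* Insert an entry, evicting oldest entries until it fits; an entry larger
   than the whole table just empties the table and is not stored. */
static void toy_add(toy_hptbl* tbl, toy_entry e) {
  size_t sz = toy_entry_size(e);
  while (tbl->count == TOY_CAP ||
         (tbl->count > 0 && tbl->mem_used + sz > tbl->max_bytes)) {
    toy_evict_oldest(tbl);
  }
  if (sz > tbl->max_bytes) return;  /* can never fit: leave the table empty */
  tbl->ents[(tbl->first + tbl->count) % TOY_CAP] = e;
  tbl->count++;
  tbl->mem_used += sz;
}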
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.h b/src/core/ext/transport/chttp2/transport/hpack_table.h
index a3ce2730a8..ddc8888f86 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.h
@@ -68,26 +68,26 @@ typedef struct {
/* a circular buffer of headers - this is stored in the opposite order to
what hpack specifies, in order to simplify table management a little...
meaning lookups need to SUBTRACT from the end position */
- grpc_mdelem *ents;
+ grpc_mdelem* ents;
grpc_mdelem static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY];
} grpc_chttp2_hptbl;
/* initialize a hpack table */
-void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl);
-void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl);
-void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl,
+void grpc_chttp2_hptbl_init(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl);
+void grpc_chttp2_hptbl_destroy(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl);
+void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl,
uint32_t max_bytes);
-grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl,
+grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl,
uint32_t bytes);
/* lookup a table entry based on its hpack index */
-grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
+grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl* tbl,
uint32_t index);
/* add a table entry to the index */
-grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_hptbl *tbl,
+grpc_error* grpc_chttp2_hptbl_add(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_hptbl* tbl,
grpc_mdelem md) GRPC_MUST_USE_RESULT;
/* Find a key/value pair in the table... returns the index in the table of the
most similar entry, or 0 if the value was not found */
@@ -96,7 +96,7 @@ typedef struct {
int has_value;
} grpc_chttp2_hptbl_find_result;
grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
- const grpc_chttp2_hptbl *tbl, grpc_mdelem md);
+ const grpc_chttp2_hptbl* tbl, grpc_mdelem md);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/http2_settings.cc b/src/core/ext/transport/chttp2/transport/http2_settings.cc
index 46b7c0c49c..0aab864400 100644
--- a/src/core/ext/transport/chttp2/transport/http2_settings.cc
+++ b/src/core/ext/transport/chttp2/transport/http2_settings.cc
@@ -25,7 +25,7 @@
const uint16_t grpc_setting_id_to_wire_id[] = {1, 2, 3, 4, 5, 6, 65027};
-bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) {
+bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id* out) {
uint32_t i = wire_id - 1;
uint32_t x = i % 256;
uint32_t y = i / 256;
diff --git a/src/core/ext/transport/chttp2/transport/http2_settings.h b/src/core/ext/transport/chttp2/transport/http2_settings.h
index 0f76106dce..86069b498b 100644
--- a/src/core/ext/transport/chttp2/transport/http2_settings.h
+++ b/src/core/ext/transport/chttp2/transport/http2_settings.h
@@ -41,7 +41,7 @@ extern "C" {
#endif
extern const uint16_t grpc_setting_id_to_wire_id[];
-bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);
+bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id* out);
typedef enum {
GRPC_CHTTP2_CLAMP_INVALID_VALUE,
@@ -49,7 +49,7 @@ typedef enum {
} grpc_chttp2_invalid_value_behavior;
typedef struct {
- const char *name;
+ const char* name;
uint32_t default_value;
uint32_t min_value;
uint32_t max_value;
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index 187ce0ea87..15f80fb8a1 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -26,31 +26,32 @@
#include <grpc/support/log.h>
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
+ grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena) {
buffer->arena = arena;
grpc_metadata_batch_init(&buffer->batch);
buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer) {
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer) {
grpc_metadata_batch_destroy(exec_ctx, &buffer->batch);
}
-grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
- exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
- buffer->arena, sizeof(grpc_linked_mdelem)),
+ exec_ctx, &buffer->batch,
+ (grpc_linked_mdelem*)gpr_arena_alloc(buffer->arena,
+ sizeof(grpc_linked_mdelem)),
elem);
}
-grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
grpc_mdelem elem) {
- for (grpc_linked_mdelem *l = buffer->batch.list.head; l != NULL;
+ for (grpc_linked_mdelem* l = buffer->batch.list.head; l != NULL;
l = l->next) {
if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) {
GRPC_MDELEM_UNREF(exec_ctx, l->md);
@@ -62,13 +63,13 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline) {
+ grpc_chttp2_incoming_metadata_buffer* buffer, grpc_millis deadline) {
buffer->batch.deadline = deadline;
}
void grpc_chttp2_incoming_metadata_buffer_publish(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
- grpc_metadata_batch *batch) {
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
+ grpc_metadata_batch* batch) {
*batch = buffer->batch;
grpc_metadata_batch_init(&buffer->batch);
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index a0e01f2c4d..7ccb4a0126 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -26,28 +26,28 @@ extern "C" {
#endif
typedef struct {
- gpr_arena *arena;
+ gpr_arena* arena;
grpc_metadata_batch batch;
size_t size; // total size of metadata
} grpc_chttp2_incoming_metadata_buffer;
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena);
+ grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena);
void grpc_chttp2_incoming_metadata_buffer_destroy(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer);
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer);
void grpc_chttp2_incoming_metadata_buffer_publish(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
- grpc_metadata_batch *batch);
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
+ grpc_metadata_batch* batch);
-grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
grpc_mdelem elem) GRPC_MUST_USE_RESULT;
-grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_millis deadline);
+ grpc_chttp2_incoming_metadata_buffer* buffer, grpc_millis deadline);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 9e0796e820..a5a0a804a2 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -100,7 +100,7 @@ typedef enum {
GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM,
} grpc_chttp2_initiate_write_reason;
-const char *grpc_chttp2_initiate_write_reason_string(
+const char* grpc_chttp2_initiate_write_reason_string(
grpc_chttp2_initiate_write_reason reason);
typedef struct {
@@ -171,13 +171,13 @@ typedef enum {
} grpc_chttp2_deframe_transport_state;
typedef struct {
- grpc_chttp2_stream *head;
- grpc_chttp2_stream *tail;
+ grpc_chttp2_stream* head;
+ grpc_chttp2_stream* tail;
} grpc_chttp2_stream_list;
typedef struct {
- grpc_chttp2_stream *next;
- grpc_chttp2_stream *prev;
+ grpc_chttp2_stream* next;
+ grpc_chttp2_stream* prev;
} grpc_chttp2_stream_link;
/* We keep several sets of connection wide parameters */
@@ -201,8 +201,8 @@ typedef enum {
typedef struct grpc_chttp2_write_cb {
int64_t call_at_byte;
- grpc_closure *closure;
- struct grpc_chttp2_write_cb *next;
+ grpc_closure* closure;
+ struct grpc_chttp2_write_cb* next;
} grpc_chttp2_write_cb;
/* forward declared in frame_data.h */
@@ -210,8 +210,8 @@ struct grpc_chttp2_incoming_byte_stream {
grpc_byte_stream base;
gpr_refcount refs;
- grpc_chttp2_transport *transport; /* immutable */
- grpc_chttp2_stream *stream; /* immutable */
+ grpc_chttp2_transport* transport; /* immutable */
+ grpc_chttp2_stream* stream; /* immutable */
/* Accessed only by transport thread when stream->pending_byte_stream == false
* Accessed only by application thread when stream->pending_byte_stream ==
@@ -224,7 +224,7 @@ struct grpc_chttp2_incoming_byte_stream {
struct {
grpc_closure closure;
size_t max_size_hint;
- grpc_closure *on_complete;
+ grpc_closure* on_complete;
} next_action;
grpc_closure destroy_action;
grpc_closure finished_action;
@@ -240,10 +240,10 @@ typedef enum {
struct grpc_chttp2_transport {
grpc_transport base; /* must be first */
gpr_refcount refs;
- grpc_endpoint *ep;
- char *peer_string;
+ grpc_endpoint* ep;
+ char* peer_string;
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
@@ -255,7 +255,7 @@ struct grpc_chttp2_transport {
/** is the transport destroying itself? */
uint8_t destroying;
/** has the upper layer closed the transport? */
- grpc_error *closed_with_error;
+ grpc_error* closed_with_error;
/** is there a read request to the endpoint outstanding? */
uint8_t endpoint_reading;
@@ -280,13 +280,13 @@ struct grpc_chttp2_transport {
/** address to place a newly accepted stream - set and unset by
grpc_chttp2_parsing_accept_stream; used by init_stream to
publish the accepted server stream */
- grpc_chttp2_stream **accepting_stream;
+ grpc_chttp2_stream** accepting_stream;
struct {
/* accept stream callback */
- void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_transport *transport, const void *server_data);
- void *accept_stream_user_data;
+ void (*accept_stream)(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_transport* transport, const void* server_data);
+ void* accept_stream_user_data;
/** connectivity tracking */
grpc_connectivity_state_tracker state_tracker;
@@ -337,7 +337,7 @@ struct grpc_chttp2_transport {
/** ping acks */
size_t ping_ack_count;
size_t ping_ack_capacity;
- uint64_t *ping_acks;
+ uint64_t* ping_acks;
grpc_chttp2_server_ping_recv_state ping_recv_state;
/** parser for headers */
@@ -370,10 +370,10 @@ struct grpc_chttp2_transport {
uint32_t incoming_stream_id;
/* active parser */
- void *parser_data;
- grpc_chttp2_stream *incoming_stream;
- grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+ void* parser_data;
+ grpc_chttp2_stream* incoming_stream;
+ grpc_error* (*parser)(grpc_exec_ctx* exec_ctx, void* parser_user_data,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
/* goaway data */
@@ -381,7 +381,7 @@ struct grpc_chttp2_transport {
uint32_t goaway_last_stream_index;
grpc_slice goaway_text;
- grpc_chttp2_write_cb *write_cb_pool;
+ grpc_chttp2_write_cb* write_cb_pool;
/* bdp estimator */
grpc_closure next_bdp_ping_timer_expired_locked;
@@ -390,7 +390,7 @@ struct grpc_chttp2_transport {
/* if non-NULL, close the transport with this error when writes are finished
*/
- grpc_error *close_transport_on_writes_finished;
+ grpc_error* close_transport_on_writes_finished;
/* a list of closures to run after writes are finished */
grpc_closure_list run_after_write;
@@ -440,11 +440,11 @@ typedef enum {
} grpc_published_metadata_method;
struct grpc_chttp2_stream {
- grpc_chttp2_transport *t;
- grpc_stream_refcount *refcount;
+ grpc_chttp2_transport* t;
+ grpc_stream_refcount* refcount;
grpc_closure destroy_stream;
- grpc_closure *destroy_stream_arg;
+ grpc_closure* destroy_stream_arg;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
@@ -453,29 +453,29 @@ struct grpc_chttp2_stream {
uint32_t id;
/** things the upper layers would like to send */
- grpc_metadata_batch *send_initial_metadata;
- grpc_closure *send_initial_metadata_finished;
- grpc_metadata_batch *send_trailing_metadata;
- grpc_closure *send_trailing_metadata_finished;
+ grpc_metadata_batch* send_initial_metadata;
+ grpc_closure* send_initial_metadata_finished;
+ grpc_metadata_batch* send_trailing_metadata;
+ grpc_closure* send_trailing_metadata_finished;
- grpc_byte_stream *fetching_send_message;
+ grpc_byte_stream* fetching_send_message;
uint32_t fetched_send_message_length;
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
- grpc_closure *fetching_send_message_finished;
+ grpc_closure* fetching_send_message_finished;
- grpc_metadata_batch *recv_initial_metadata;
- grpc_closure *recv_initial_metadata_ready;
- bool *trailing_metadata_available;
- grpc_byte_stream **recv_message;
- grpc_closure *recv_message_ready;
- grpc_metadata_batch *recv_trailing_metadata;
- grpc_closure *recv_trailing_metadata_finished;
+ grpc_metadata_batch* recv_initial_metadata;
+ grpc_closure* recv_initial_metadata_ready;
+ bool* trailing_metadata_available;
+ grpc_byte_stream** recv_message;
+ grpc_closure* recv_message_ready;
+ grpc_metadata_batch* recv_trailing_metadata;
+ grpc_closure* recv_trailing_metadata_finished;
- grpc_transport_stream_stats *collecting_stats;
+ grpc_transport_stream_stats* collecting_stats;
grpc_transport_stream_stats stats;
/** Is this stream closed for writing. */
@@ -494,9 +494,9 @@ struct grpc_chttp2_stream {
bool received_trailing_metadata;
/** the error that resulted in this stream being read-closed */
- grpc_error *read_closed_error;
+ grpc_error* read_closed_error;
/** the error that resulted in this stream being write-closed */
- grpc_error *write_closed_error;
+ grpc_error* write_closed_error;
grpc_published_metadata_method published_metadata[2];
bool final_metadata_requested;
@@ -509,16 +509,16 @@ struct grpc_chttp2_stream {
* Accessed only by application thread when stream->pending_byte_stream ==
* true */
grpc_slice_buffer unprocessed_incoming_frames_buffer;
- grpc_closure *on_next; /* protected by t combiner */
+ grpc_closure* on_next; /* protected by t combiner */
bool pending_byte_stream; /* protected by t combiner */
grpc_closure reset_byte_stream;
- grpc_error *byte_stream_error; /* protected by t combiner */
+ grpc_error* byte_stream_error; /* protected by t combiner */
bool received_last_frame; /* protected by t combiner */
grpc_millis deadline;
/** saw some stream level error */
- grpc_error *forced_close_error;
+ grpc_error* forced_close_error;
/** how many header frames have we received? */
uint8_t header_frames_received;
/** parsing state for data frames */
@@ -537,9 +537,9 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
- grpc_chttp2_write_cb *on_flow_controlled_cbs;
- grpc_chttp2_write_cb *on_write_finished_cbs;
- grpc_chttp2_write_cb *finish_after_write;
+ grpc_chttp2_write_cb* on_flow_controlled_cbs;
+ grpc_chttp2_write_cb* on_write_finished_cbs;
+ grpc_chttp2_write_cb* finish_after_write;
size_t sending_bytes;
/* Stream compression method to be used. */
@@ -547,9 +547,9 @@ struct grpc_chttp2_stream {
/* Stream decompression method to be used. */
grpc_stream_compression_method stream_decompression_method;
/** Stream compression decompress context */
- grpc_stream_compression_context *stream_decompression_ctx;
+ grpc_stream_compression_context* stream_decompression_ctx;
/** Stream compression compress context */
- grpc_stream_compression_context *stream_compression_ctx;
+ grpc_stream_compression_context* stream_compression_ctx;
/** Buffer storing data that is compressed but not sent */
grpc_slice_buffer compressed_data_buffer;
@@ -577,8 +577,8 @@ struct grpc_chttp2_stream {
The actual call chain is documented in the implementation of this function.
*/
-void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+void grpc_chttp2_initiate_write(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_chttp2_initiate_write_reason reason);
typedef struct {
@@ -591,85 +591,85 @@ typedef struct {
} grpc_chttp2_begin_write_result;
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
-void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error);
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t);
+void grpc_chttp2_end_write(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
-grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+grpc_error* grpc_chttp2_perform_read(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_slice slice);
-bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
/** Get a writable stream
returns non-zero if there was a stream available */
-bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-
-bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t);
-bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-
-void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-bool grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-
-void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-
-void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-
-void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s);
-bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
+bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport* t);
+bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+
+void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+bool grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+
+void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s);
+bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
/********* Flow Control ***************/
// Takes in a flow control action and performs all the needed operations.
void grpc_chttp2_act_on_flowctl_action(
- grpc_exec_ctx *exec_ctx, const grpc_core::chttp2::FlowControlAction &action,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s);
+ grpc_exec_ctx* exec_ctx, const grpc_core::chttp2::FlowControlAction& action,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s);
/********* End of Flow Control ***************/
-grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
+grpc_chttp2_stream* grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport* t,
uint32_t id);
-grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
uint32_t id);
-void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text);
-void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
-void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
- grpc_closure **pclosure,
- grpc_error *error, const char *desc);
+void grpc_chttp2_complete_closure_step(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
+ grpc_closure** pclosure,
+ grpc_error* error, const char* desc);
#define GRPC_HEADER_SIZE_IN_BYTES 5
#define MAX_SIZE_T (~(size_t)0)
@@ -685,31 +685,31 @@ extern grpc_tracer_flag grpc_flowctl_trace;
if (!(GRPC_TRACER_ON(grpc_http_trace))) \
; \
else \
- stmt
+ stmt
-void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *stream, grpc_error *error);
-void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, int close_reads,
- int close_writes, grpc_error *error);
-void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
+void grpc_chttp2_fake_status(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* stream, grpc_error* error);
+void grpc_chttp2_mark_stream_closed(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, int close_reads,
+ int close_writes, grpc_error* error);
+void grpc_chttp2_start_writing(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
#ifndef NDEBUG
#define GRPC_CHTTP2_STREAM_REF(stream, reason) \
grpc_chttp2_stream_ref(stream, reason)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream, reason)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s,
- const char *reason);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason);
+void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s,
+ const char* reason);
#else
#define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_chttp2_stream_unref(exec_ctx, stream)
-void grpc_chttp2_stream_ref(grpc_chttp2_stream *s);
-void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
+void grpc_chttp2_stream_ref(grpc_chttp2_stream* s);
+void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s);
#endif
#ifndef NDEBUG
@@ -717,69 +717,69 @@ void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s);
grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
-void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, const char *reason,
- const char *file, int line);
-void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason,
- const char *file, int line);
+void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, const char* reason,
+ const char* file, int line);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
+ const char* file, int line);
#else
#define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
-void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-void grpc_chttp2_ref_transport(grpc_chttp2_transport *t);
+void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+void grpc_chttp2_ref_transport(grpc_chttp2_transport* t);
#endif
-grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+grpc_chttp2_incoming_byte_stream* grpc_chttp2_incoming_byte_stream_create(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t, grpc_chttp2_stream* s,
uint32_t frame_size, uint32_t flags);
-grpc_error *grpc_chttp2_incoming_byte_stream_push(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_slice slice, grpc_slice *slice_out);
-grpc_error *grpc_chttp2_incoming_byte_stream_finished(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error, bool reset_on_error);
+grpc_error* grpc_chttp2_incoming_byte_stream_push(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_slice slice, grpc_slice* slice_out);
+grpc_error* grpc_chttp2_incoming_byte_stream_finished(
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_error* error, bool reset_on_error);
void grpc_chttp2_incoming_byte_stream_notify(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
- grpc_error *error);
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
+ grpc_error* error);
-void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
+void grpc_chttp2_ack_ping(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
uint64_t id);
/** Add a new ping strike to ping_recv_state.ping_strikes. If
ping_recv_state.ping_strikes > ping_policy.max_ping_strikes, it sends GOAWAY
with error code ENHANCE_YOUR_CALM and additional debug data resembling
"too_many_pings" followed by immediately closing the connection. */
-void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
+void grpc_chttp2_add_ping_strike(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
-void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-
-void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- grpc_error *due_to_error);
-
-void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s);
-
-void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_error *error);
+void grpc_chttp2_mark_stream_writable(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+
+void grpc_chttp2_cancel_stream(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+ grpc_error* due_to_error);
+
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s);
+
+void grpc_chttp2_fail_pending_writes(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_error* error);
/** Set the default keepalive configurations, must only be called at
initialization */
-void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
+void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
bool is_client);
#ifdef __cplusplus
diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc
index efa5791d3f..8a3774d688 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.cc
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -31,38 +31,38 @@
#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/timeout_encoding.h"
-static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static grpc_error* init_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_header_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
int is_continuation);
-static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t);
-static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static grpc_error* init_data_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_rst_stream_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_settings_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_window_update_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_ping_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_goaway_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t);
+static grpc_error* init_skip_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
int is_header);
-static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_slice slice,
+static grpc_error* parse_frame_slice(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, grpc_slice slice,
int is_last);
-grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+grpc_error* grpc_chttp2_perform_read(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
grpc_slice slice) {
- uint8_t *beg = GRPC_SLICE_START_PTR(slice);
- uint8_t *end = GRPC_SLICE_END_PTR(slice);
- uint8_t *cur = beg;
- grpc_error *err;
+ uint8_t* beg = GRPC_SLICE_START_PTR(slice);
+ uint8_t* end = GRPC_SLICE_END_PTR(slice);
+ uint8_t* cur = beg;
+ grpc_error* err;
if (cur == end) return GRPC_ERROR_NONE;
@@ -93,7 +93,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
case GRPC_DTS_CLIENT_PREFIX_23:
while (cur != end && t->deframe_state != GRPC_DTS_FH_0) {
if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state]) {
- char *msg;
+ char* msg;
gpr_asprintf(
&msg,
"Connect string mismatch: expected '%c' (%d) got '%c' (%d) "
@@ -200,7 +200,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
} else if (t->incoming_frame_size >
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d",
t->incoming_frame_size,
t->settings[GRPC_ACKED_SETTINGS]
@@ -216,10 +216,11 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
if ((uint32_t)(end - cur) == t->incoming_frame_size) {
- err = parse_frame_slice(
- exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
- (size_t)(end - beg)),
- 1);
+ err =
+ parse_frame_slice(exec_ctx, t,
+ grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 1);
if (err != GRPC_ERROR_NONE) {
return err;
}
@@ -240,10 +241,11 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
- err = parse_frame_slice(
- exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
- (size_t)(end - beg)),
- 0);
+ err =
+ parse_frame_slice(exec_ctx, t,
+ grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+ (size_t)(end - beg)),
+ 0);
if (err != GRPC_ERROR_NONE) {
return err;
}
@@ -256,36 +258,36 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GPR_UNREACHABLE_CODE(return 0);
}
-static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static grpc_error* init_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
if (t->is_first_frame &&
t->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
- char *msg;
+ char* msg;
gpr_asprintf(
&msg, "Expected SETTINGS frame as the first frame, got frame type %d",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
t->is_first_frame = false;
if (t->expect_continuation_stream_id != 0) {
if (t->incoming_frame_type != GRPC_CHTTP2_FRAME_CONTINUATION) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
if (t->expect_continuation_stream_id != t->incoming_stream_id) {
- char *msg;
+ char* msg;
gpr_asprintf(
&msg,
"Expected CONTINUATION frame for grpc_chttp2_stream %08x, got "
"grpc_chttp2_stream %08x",
t->expect_continuation_stream_id, t->incoming_stream_id);
- grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -317,18 +319,18 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser,
- grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+static grpc_error* skip_parser(grpc_exec_ctx* exec_ctx, void* parser,
+ grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
return GRPC_ERROR_NONE;
}
-static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem md) {
+static void skip_header(grpc_exec_ctx* exec_ctx, void* tp, grpc_mdelem md) {
GRPC_MDELEM_UNREF(exec_ctx, md);
}
-static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static grpc_error* init_skip_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
int is_header) {
if (is_header) {
uint8_t is_eoh = t->expect_continuation_stream_id != 0;
@@ -344,17 +346,17 @@ static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
init_skip_frame_parser(exec_ctx, t,
t->parser == grpc_chttp2_header_parser_parse);
}
-static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_chttp2_stream *s =
+static grpc_error* init_data_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_chttp2_stream* s =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
- grpc_error *err = GRPC_ERROR_NONE;
+ grpc_error* err = GRPC_ERROR_NONE;
grpc_core::chttp2::FlowControlAction action;
if (s == nullptr) {
err = t->flow_control->RecvData(t->incoming_frame_size);
@@ -404,20 +406,20 @@ error_handler:
}
}
-static void free_timeout(void *p) { gpr_free(p); }
+static void free_timeout(void* p) { gpr_free(p); }
-static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
+static void on_initial_header(grpc_exec_ctx* exec_ctx, void* tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
- grpc_chttp2_stream *s = t->incoming_stream;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
+ grpc_chttp2_stream* s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
GPR_ASSERT(s != NULL);
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *value =
+ char* key = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char* value =
grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id,
t->is_client ? "CLI" : "SVR", key, value);
@@ -432,21 +434,21 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
}
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
- grpc_millis *cached_timeout =
- static_cast<grpc_millis *>(grpc_mdelem_get_user_data(md, free_timeout));
+ grpc_millis* cached_timeout =
+ static_cast<grpc_millis*>(grpc_mdelem_get_user_data(md, free_timeout));
grpc_millis timeout;
if (cached_timeout != NULL) {
timeout = *cached_timeout;
} else {
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) {
- char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
gpr_free(val);
timeout = GRPC_MILLIS_INF_FUTURE;
}
if (GRPC_MDELEM_IS_INTERNED(md)) {
/* store the result */
- cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
+ cached_timeout = (grpc_millis*)gpr_malloc(sizeof(grpc_millis));
*cached_timeout = timeout;
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}
@@ -476,7 +478,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_error* error = grpc_chttp2_incoming_metadata_buffer_add(
exec_ctx, &s->metadata_buffer[0], md);
if (error != GRPC_ERROR_NONE) {
grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
@@ -490,18 +492,18 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("on_initial_header", 0);
}
-static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
+static void on_trailing_header(grpc_exec_ctx* exec_ctx, void* tp,
grpc_mdelem md) {
- grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
- grpc_chttp2_stream *s = t->incoming_stream;
+ grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
+ grpc_chttp2_stream* s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);
GPR_ASSERT(s != NULL);
if (GRPC_TRACER_ON(grpc_http_trace)) {
- char *key = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *value =
+ char* key = grpc_slice_to_c_string(GRPC_MDKEY(md));
+ char* value =
grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id,
t->is_client ? "CLI" : "SVR", key, value);
@@ -534,7 +536,7 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_error* error = grpc_chttp2_incoming_metadata_buffer_add(
exec_ctx, &s->metadata_buffer[1], md);
if (error != GRPC_ERROR_NONE) {
grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
@@ -547,12 +549,12 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_END("on_trailing_header", 0);
}
-static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t,
+static grpc_error* init_header_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t,
int is_continuation) {
uint8_t is_eoh =
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
/* TODO(ctiller): when to increment header_frames_received? */
@@ -662,14 +664,14 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_error *err = grpc_chttp2_window_update_parser_begin_frame(
+static grpc_error* init_window_update_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_error* err = grpc_chttp2_window_update_parser_begin_frame(
&t->simple.window_update, t->incoming_frame_size,
t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
if (t->incoming_stream_id != 0) {
- grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == NULL) {
return init_skip_frame_parser(exec_ctx, t, 0);
@@ -681,9 +683,9 @@ static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_error *err = grpc_chttp2_ping_parser_begin_frame(
+static grpc_error* init_ping_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_error* err = grpc_chttp2_ping_parser_begin_frame(
&t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
t->parser = grpc_chttp2_ping_parser_parse;
@@ -691,12 +693,12 @@ static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_error *err = grpc_chttp2_rst_stream_parser_begin_frame(
+static grpc_error* init_rst_stream_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_error* err = grpc_chttp2_rst_stream_parser_begin_frame(
&t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
- grpc_chttp2_stream *s = t->incoming_stream =
+ grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (!t->incoming_stream) {
return init_skip_frame_parser(exec_ctx, t, 0);
@@ -707,9 +709,9 @@ static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_error *err = grpc_chttp2_goaway_parser_begin_frame(
+static grpc_error* init_goaway_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_error* err = grpc_chttp2_goaway_parser_begin_frame(
&t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags);
if (err != GRPC_ERROR_NONE) return err;
t->parser = grpc_chttp2_goaway_parser_parse;
@@ -717,14 +719,14 @@ static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
+static grpc_error* init_settings_frame_parser(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
if (t->incoming_stream_id != 0) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Settings frame received for grpc_chttp2_stream");
}
- grpc_error *err = grpc_chttp2_settings_parser_begin_frame(
+ grpc_error* err = grpc_chttp2_settings_parser_begin_frame(
&t->simple.settings, t->incoming_frame_size, t->incoming_frame_flags,
t->settings[GRPC_PEER_SETTINGS]);
if (err != GRPC_ERROR_NONE) {
@@ -744,16 +746,16 @@ static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t, grpc_slice slice,
+static grpc_error* parse_frame_slice(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t, grpc_slice slice,
int is_last) {
- grpc_chttp2_stream *s = t->incoming_stream;
- grpc_error *err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
+ grpc_chttp2_stream* s = t->incoming_stream;
+ grpc_error* err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
if (err == GRPC_ERROR_NONE) {
return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
if (GRPC_TRACER_ON(grpc_http_trace)) {
- const char *msg = grpc_error_string(err);
+ const char* msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "%s", msg);
}
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.cc b/src/core/ext/transport/chttp2/transport/stream_lists.cc
index 9f731a397f..8d25ab277d 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.cc
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -21,7 +21,7 @@
#include <grpc/support/log.h>
-static const char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
+static const char* stream_list_id_string(grpc_chttp2_stream_list_id id) {
switch (id) {
case GRPC_CHTTP2_LIST_WRITABLE:
return "writable";
@@ -44,17 +44,17 @@ grpc_tracer_flag grpc_trace_http2_stream_state =
/* core list management */
-static bool stream_list_empty(grpc_chttp2_transport *t,
+static bool stream_list_empty(grpc_chttp2_transport* t,
grpc_chttp2_stream_list_id id) {
return t->lists[id].head == NULL;
}
-static bool stream_list_pop(grpc_chttp2_transport *t,
- grpc_chttp2_stream **stream,
+static bool stream_list_pop(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** stream,
grpc_chttp2_stream_list_id id) {
- grpc_chttp2_stream *s = t->lists[id].head;
+ grpc_chttp2_stream* s = t->lists[id].head;
if (s) {
- grpc_chttp2_stream *new_head = s->links[id].next;
+ grpc_chttp2_stream* new_head = s->links[id].next;
GPR_ASSERT(s->included[id]);
if (new_head) {
t->lists[id].head = new_head;
@@ -73,7 +73,7 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
return s != 0;
}
-static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_chttp2_stream_list_id id) {
GPR_ASSERT(s->included[id]);
s->included[id] = 0;
@@ -94,8 +94,8 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
}
}
-static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+static bool stream_list_maybe_remove(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
stream_list_remove(t, s, id);
@@ -105,10 +105,10 @@ static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
}
}
-static void stream_list_add_tail(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s,
+static void stream_list_add_tail(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s,
grpc_chttp2_stream_list_id id) {
- grpc_chttp2_stream *old_tail;
+ grpc_chttp2_stream* old_tail;
GPR_ASSERT(!s->included[id]);
old_tail = t->lists[id].tail;
s->links[id].next = NULL;
@@ -126,7 +126,7 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
}
}
-static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
+static bool stream_list_add(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_chttp2_stream_list_id id) {
if (s->included[id]) {
return false;
@@ -137,77 +137,77 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
/* wrappers for specializations */
-bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
GPR_ASSERT(s->id != 0);
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE);
}
-bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) {
+bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport* t) {
return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING);
}
-bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING);
}
-void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY);
}
-void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
-void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
-bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream **s) {
+bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream** s) {
return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
-bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
- grpc_chttp2_stream *s) {
+bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s) {
return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.cc b/src/core/ext/transport/chttp2/transport/stream_map.cc
index d6079a9a33..c863191795 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.cc
+++ b/src/core/ext/transport/chttp2/transport/stream_map.cc
@@ -24,22 +24,22 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
-void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
+void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map* map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
- map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity);
- map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity);
+ map->keys = (uint32_t*)gpr_malloc(sizeof(uint32_t) * initial_capacity);
+ map->values = (void**)gpr_malloc(sizeof(void*) * initial_capacity);
map->count = 0;
map->free = 0;
map->capacity = initial_capacity;
}
-void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map) {
+void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map* map) {
gpr_free(map->keys);
gpr_free(map->values);
}
-static size_t compact(uint32_t *keys, void **values, size_t count) {
+static size_t compact(uint32_t* keys, void** values, size_t count) {
size_t i, out;
for (i = 0, out = 0; i < count; i++) {
@@ -53,12 +53,12 @@ static size_t compact(uint32_t *keys, void **values, size_t count) {
return out;
}
-void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
- void *value) {
+void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map* map, uint32_t key,
+ void* value) {
size_t count = map->count;
size_t capacity = map->capacity;
- uint32_t *keys = map->keys;
- void **values = map->values;
+ uint32_t* keys = map->keys;
+ void** values = map->values;
GPR_ASSERT(count == 0 || keys[count - 1] < key);
GPR_ASSERT(value);
@@ -73,9 +73,9 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
won't help much */
map->capacity = capacity = 3 * capacity / 2;
map->keys = keys =
- (uint32_t *)gpr_realloc(keys, capacity * sizeof(uint32_t));
+ (uint32_t*)gpr_realloc(keys, capacity * sizeof(uint32_t));
map->values = values =
- (void **)gpr_realloc(values, capacity * sizeof(void *));
+ (void**)gpr_realloc(values, capacity * sizeof(void*));
}
}
@@ -84,12 +84,12 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
map->count = count + 1;
}
-static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
+static void** find(grpc_chttp2_stream_map* map, uint32_t key) {
size_t min_idx = 0;
size_t max_idx = map->count;
size_t mid_idx;
- uint32_t *keys = map->keys;
- void **values = map->values;
+ uint32_t* keys = map->keys;
+ void** values = map->values;
uint32_t mid_key;
if (max_idx == 0) return NULL;
@@ -112,9 +112,9 @@ static void **find(grpc_chttp2_stream_map *map, uint32_t key) {
return NULL;
}
-void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
- void **pvalue = find(map, key);
- void *out = NULL;
+void* grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map* map, uint32_t key) {
+ void** pvalue = find(map, key);
+ void* out = NULL;
if (pvalue != NULL) {
out = *pvalue;
*pvalue = NULL;
@@ -129,16 +129,16 @@ void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) {
return out;
}
-void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key) {
- void **pvalue = find(map, key);
+void* grpc_chttp2_stream_map_find(grpc_chttp2_stream_map* map, uint32_t key) {
+ void** pvalue = find(map, key);
return pvalue != NULL ? *pvalue : NULL;
}
-size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) {
+size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map* map) {
return map->count - map->free;
}
-void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
+void* grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map* map) {
if (map->count == map->free) {
return NULL;
}
@@ -149,10 +149,10 @@ void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) {
return map->values[((size_t)rand()) % map->count];
}
-void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
- void (*f)(void *user_data, uint32_t key,
- void *value),
- void *user_data) {
+void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map* map,
+ void (*f)(void* user_data, uint32_t key,
+ void* value),
+ void* user_data) {
size_t i;
for (i = 0; i < map->count; i++) {
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index 7ab6a4f5ed..c89d20047c 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -34,40 +34,40 @@ extern "C" {
Adds are restricted to strictly higher keys than previously seen (this is
guaranteed by http2). */
typedef struct {
- uint32_t *keys;
- void **values;
+ uint32_t* keys;
+ void** values;
size_t count;
size_t free;
size_t capacity;
} grpc_chttp2_stream_map;
-void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
+void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map* map,
size_t initial_capacity);
-void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map);
+void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map* map);
/* Add a new key: given http2 semantics, new keys must always be greater than
existing keys - this is asserted */
-void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key,
- void *value);
+void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map* map, uint32_t key,
+ void* value);
/* Delete an existing key - returns the previous value of the key if it existed,
or NULL otherwise */
-void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key);
+void* grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map* map, uint32_t key);
/* Return an existing key, or NULL if it does not exist */
-void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key);
+void* grpc_chttp2_stream_map_find(grpc_chttp2_stream_map* map, uint32_t key);
/* Return a random entry */
-void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map);
+void* grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map* map);
/* How many (populated) entries are in the stream map? */
-size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map);
+size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map* map);
/* Callback on each stream */
-void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map,
- void (*f)(void *user_data, uint32_t key,
- void *value),
- void *user_data);
+void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map* map,
+ void (*f)(void* user_data, uint32_t key,
+ void* value),
+ void* user_data);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index ff76a5fcdb..6154bdb682 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -27,24 +27,24 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/http2_errors.h"
-static void add_to_write_list(grpc_chttp2_write_cb **list,
- grpc_chttp2_write_cb *cb) {
+static void add_to_write_list(grpc_chttp2_write_cb** list,
+ grpc_chttp2_write_cb* cb) {
cb->next = *list;
*list = cb;
}
-static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb,
- grpc_error *error) {
+static void finish_write_cb(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, grpc_chttp2_write_cb* cb,
+ grpc_error* error) {
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
"finish_write_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
-static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
- grpc_chttp2_transport *t) {
- grpc_chttp2_ping_queue *pq = &t->ping_queue;
+static void maybe_initiate_ping(grpc_exec_ctx* exec_ctx,
+ grpc_chttp2_transport* t) {
+ grpc_chttp2_ping_queue* pq = &t->ping_queue;
if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
/* no ping needed: wait */
return;
@@ -114,16 +114,16 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
(t->ping_state.pings_before_data_required != 0);
}
-static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s, int64_t send_bytes,
- grpc_chttp2_write_cb **list, int64_t *ctr,
- grpc_error *error) {
+static bool update_list(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s, int64_t send_bytes,
+ grpc_chttp2_write_cb** list, int64_t* ctr,
+ grpc_error* error) {
bool sched_any = false;
- grpc_chttp2_write_cb *cb = *list;
+ grpc_chttp2_write_cb* cb = *list;
*list = NULL;
*ctr += send_bytes;
while (cb) {
- grpc_chttp2_write_cb *next = cb->next;
+ grpc_chttp2_write_cb* next = cb->next;
if (cb->call_at_byte <= *ctr) {
sched_any = true;
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
@@ -136,8 +136,8 @@ static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
return sched_any;
}
-static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
- const char *staller) {
+static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+ const char* staller) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR ":flowed=%" PRId64
@@ -155,7 +155,7 @@ static void report_stall(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
s->flow_control->remote_window_delta());
}
-static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
+static bool stream_ref_if_not_destroyed(gpr_refcount* r) {
gpr_atm count;
do {
count = gpr_atm_acq_load(&r->count);
@@ -165,12 +165,12 @@ static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
}
/* How many bytes would we like to put on the wire during a single syscall */
-static uint32_t target_write_size(grpc_chttp2_transport *t) {
+static uint32_t target_write_size(grpc_chttp2_transport* t) {
return 1024 * 1024;
}
// Returns true if initial_metadata contains only default headers.
-static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) {
+static bool is_default_initial_metadata(grpc_metadata_batch* initial_metadata) {
return initial_metadata->list.default_count == initial_metadata->list.count;
}
@@ -179,13 +179,13 @@ class StreamWriteContext;
class WriteContext {
public:
- WriteContext(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) : t_(t) {
+ WriteContext(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t) : t_(t) {
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
}
// TODO(ctiller): make this the destructor
- void FlushStats(grpc_exec_ctx *exec_ctx) {
+ void FlushStats(grpc_exec_ctx* exec_ctx) {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
exec_ctx, initial_metadata_writes_);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes_);
@@ -194,7 +194,7 @@ class WriteContext {
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, flow_control_writes_);
}
- void FlushSettings(grpc_exec_ctx *exec_ctx) {
+ void FlushSettings(grpc_exec_ctx* exec_ctx) {
if (t_->dirtied_local_settings && !t_->sent_local_settings) {
grpc_slice_buffer_add(
&t_->outbuf, grpc_chttp2_settings_create(
@@ -208,13 +208,13 @@ class WriteContext {
}
}
- void FlushQueuedBuffers(grpc_exec_ctx *exec_ctx) {
+ void FlushQueuedBuffers(grpc_exec_ctx* exec_ctx) {
/* simple writes are queued to qbuf, and flushed here */
grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
GPR_ASSERT(t_->qbuf.count == 0);
}
- void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
+ void FlushWindowUpdates(grpc_exec_ctx* exec_ctx) {
uint32_t transport_announce =
t_->flow_control->MaybeSendUpdate(t_->outbuf.count > 0);
if (transport_announce) {
@@ -234,7 +234,7 @@ class WriteContext {
t_->ping_ack_count = 0;
}
- void EnactHpackSettings(grpc_exec_ctx *exec_ctx) {
+ void EnactHpackSettings(grpc_exec_ctx* exec_ctx) {
grpc_chttp2_hpack_compressor_set_max_table_size(
&t_->hpack_compressor,
t_->settings[GRPC_PEER_SETTINGS]
@@ -242,7 +242,7 @@ class WriteContext {
}
void UpdateStreamsNoLongerStalled() {
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
while (grpc_chttp2_list_pop_stalled_by_transport(t_, &s)) {
if (t_->closed_with_error == GRPC_ERROR_NONE &&
grpc_chttp2_list_add_writable_stream(t_, s)) {
@@ -253,13 +253,13 @@ class WriteContext {
}
}
- grpc_chttp2_stream *NextStream() {
+ grpc_chttp2_stream* NextStream() {
if (t_->outbuf.length > target_write_size(t_)) {
result_.partial = true;
return nullptr;
}
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
if (!grpc_chttp2_list_pop_writable_stream(t_, &s)) {
return nullptr;
}
@@ -281,7 +281,7 @@ class WriteContext {
void NoteScheduledResults() { result_.early_results_scheduled = true; }
- grpc_chttp2_transport *transport() const { return t_; }
+ grpc_chttp2_transport* transport() const { return t_; }
grpc_chttp2_begin_write_result Result() {
result_.writing = t_->outbuf.count > 0;
@@ -289,7 +289,7 @@ class WriteContext {
}
private:
- grpc_chttp2_transport *const t_;
+ grpc_chttp2_transport* const t_;
/* stats histogram counters: we increment these throughout this function,
and at the end publish to the central stats histograms */
@@ -302,8 +302,8 @@ class WriteContext {
class DataSendContext {
public:
- DataSendContext(WriteContext *write_context, grpc_chttp2_transport *t,
- grpc_chttp2_stream *s)
+ DataSendContext(WriteContext* write_context, grpc_chttp2_transport* t,
+ grpc_chttp2_stream* s)
: write_context_(write_context),
t_(t),
s_(s),
@@ -373,7 +373,7 @@ class DataSendContext {
bool is_last_frame() const { return is_last_frame_; }
- void CallCallbacks(grpc_exec_ctx *exec_ctx) {
+ void CallCallbacks(grpc_exec_ctx* exec_ctx) {
if (update_list(exec_ctx, t_, s_,
(int64_t)(s_->sending_bytes - sending_bytes_before_),
&s_->on_flow_controlled_cbs,
@@ -383,16 +383,16 @@ class DataSendContext {
}
private:
- WriteContext *write_context_;
- grpc_chttp2_transport *t_;
- grpc_chttp2_stream *s_;
+ WriteContext* write_context_;
+ grpc_chttp2_transport* t_;
+ grpc_chttp2_stream* s_;
const size_t sending_bytes_before_;
bool is_last_frame_ = false;
};
class StreamWriteContext {
public:
- StreamWriteContext(WriteContext *write_context, grpc_chttp2_stream *s)
+ StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
: write_context_(write_context), t_(write_context->transport()), s_(s) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
@@ -402,7 +402,7 @@ class StreamWriteContext {
s->flow_control->announced_window_delta())));
}
- void FlushInitialMetadata(grpc_exec_ctx *exec_ctx) {
+ void FlushInitialMetadata(grpc_exec_ctx* exec_ctx) {
/* send initial metadata if it's available */
if (s_->sent_initial_metadata) return;
if (s_->send_initial_metadata == nullptr) return;
@@ -443,7 +443,7 @@ class StreamWriteContext {
"send_initial_metadata_finished");
}
- void FlushWindowUpdates(grpc_exec_ctx *exec_ctx) {
+ void FlushWindowUpdates(grpc_exec_ctx* exec_ctx) {
/* send any window updates */
const uint32_t stream_announce = s_->flow_control->MaybeSendUpdate();
if (stream_announce == 0) return;
@@ -455,7 +455,7 @@ class StreamWriteContext {
write_context_->IncWindowUpdateWrites();
}
- void FlushData(grpc_exec_ctx *exec_ctx) {
+ void FlushData(grpc_exec_ctx* exec_ctx) {
if (!s_->sent_initial_metadata) return;
if (s_->flow_controlled_buffer.length == 0 &&
@@ -499,7 +499,7 @@ class StreamWriteContext {
write_context_->IncMessageWrites();
}
- void FlushTrailingMetadata(grpc_exec_ctx *exec_ctx) {
+ void FlushTrailingMetadata(grpc_exec_ctx* exec_ctx) {
if (!s_->sent_initial_metadata) return;
if (s_->send_trailing_metadata == NULL) return;
@@ -555,7 +555,7 @@ class StreamWriteContext {
}
}
- void SentLastFrame(grpc_exec_ctx *exec_ctx) {
+ void SentLastFrame(grpc_exec_ctx* exec_ctx) {
s_->send_trailing_metadata = NULL;
s_->sent_trailing_metadata = true;
@@ -568,17 +568,17 @@ class StreamWriteContext {
GRPC_ERROR_NONE);
}
- WriteContext *const write_context_;
- grpc_chttp2_transport *const t_;
- grpc_chttp2_stream *const s_;
+ WriteContext* const write_context_;
+ grpc_chttp2_transport* const t_;
+ grpc_chttp2_stream* const s_;
bool stream_became_writable_ = false;
- grpc_mdelem *extra_headers_for_trailing_metadata_[2];
+ grpc_mdelem* extra_headers_for_trailing_metadata_[2];
size_t num_extra_headers_for_trailing_metadata_ = 0;
};
} // namespace
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
- grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
+ grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t) {
WriteContext ctx(exec_ctx, t);
ctx.FlushSettings(exec_ctx);
ctx.FlushPingAcks();
@@ -591,7 +591,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
/* for each grpc_chttp2_stream that's become writable, frame it's data
(according to available window sizes) and add to the output buffer */
- while (grpc_chttp2_stream *s = ctx.NextStream()) {
+ while (grpc_chttp2_stream* s = ctx.NextStream()) {
StreamWriteContext stream_ctx(&ctx, s);
stream_ctx.FlushInitialMetadata(exec_ctx);
stream_ctx.FlushWindowUpdates(exec_ctx);
@@ -619,10 +619,10 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
return ctx.Result();
}
-void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
- grpc_error *error) {
+void grpc_chttp2_end_write(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+ grpc_error* error) {
GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
- grpc_chttp2_stream *s;
+ grpc_chttp2_stream* s;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sending_bytes != 0) {
diff --git a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
index b280487ca3..d590ba0371 100644
--- a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
+++ b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
@@ -33,20 +33,20 @@
// Cronet transport object
typedef struct cronet_transport {
grpc_transport base; // must be first element in this structure
- void *engine;
- char *host;
+ void* engine;
+ char* host;
} cronet_transport;
extern grpc_transport_vtable grpc_cronet_vtable;
-GRPCAPI grpc_channel *grpc_cronet_secure_channel_create(
- void *engine, const char *target, const grpc_channel_args *args,
- void *reserved) {
+GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
+ void* engine, const char* target, const grpc_channel_args* args,
+ void* reserved) {
gpr_log(GPR_DEBUG,
"grpc_create_cronet_transport: stream_engine = %p, target=%s", engine,
target);
- grpc_transport *ct =
+ grpc_transport* ct =
grpc_create_cronet_transport(engine, target, args, reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index 97e4f7d72b..0b1ddf8839 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -75,17 +75,17 @@ enum e_op_id {
/* Cronet callbacks. See cronet_c_for_grpc.h for documentation for each. */
-static void on_stream_ready(bidirectional_stream *);
+static void on_stream_ready(bidirectional_stream*);
static void on_response_headers_received(
- bidirectional_stream *, const bidirectional_stream_header_array *,
- const char *);
-static void on_write_completed(bidirectional_stream *, const char *);
-static void on_read_completed(bidirectional_stream *, char *, int);
+ bidirectional_stream*, const bidirectional_stream_header_array*,
+ const char*);
+static void on_write_completed(bidirectional_stream*, const char*);
+static void on_read_completed(bidirectional_stream*, char*, int);
static void on_response_trailers_received(
- bidirectional_stream *, const bidirectional_stream_header_array *);
-static void on_succeeded(bidirectional_stream *);
-static void on_failed(bidirectional_stream *, int);
-static void on_canceled(bidirectional_stream *);
+ bidirectional_stream*, const bidirectional_stream_header_array*);
+static void on_succeeded(bidirectional_stream*);
+static void on_failed(bidirectional_stream*, int);
+static void on_canceled(bidirectional_stream*);
static bidirectional_stream_callback cronet_callbacks = {
on_stream_ready,
on_response_headers_received,
@@ -99,8 +99,8 @@ static bidirectional_stream_callback cronet_callbacks = {
/* Cronet transport object */
struct grpc_cronet_transport {
grpc_transport base; /* must be first element in this structure */
- stream_engine *engine;
- char *host;
+ stream_engine* engine;
+ char* host;
bool use_packet_coalescing;
};
typedef struct grpc_cronet_transport grpc_cronet_transport;
@@ -109,14 +109,14 @@ typedef struct grpc_cronet_transport grpc_cronet_transport;
http://www.catb.org/esr/structure-packing/#_structure_reordering: */
struct read_state {
/* vars to store data coming from server */
- char *read_buffer;
+ char* read_buffer;
bool length_field_received;
int received_bytes;
int remaining_bytes;
int length_field;
bool compressed;
char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES];
- char *payload_field;
+ char* payload_field;
bool read_stream_closed;
/* vars for holding data destined for the application */
@@ -132,7 +132,7 @@ struct read_state {
};
struct write_state {
- char *write_buffer;
+ char* write_buffer;
};
/* track state of one stream op */
@@ -150,7 +150,7 @@ struct op_state {
bool pending_recv_trailing_metadata;
/* Cronet has not issued a callback of a bidirectional read */
bool pending_read_from_cronet;
- grpc_error *cancel_error;
+ grpc_error* cancel_error;
/* data structure for storing data coming from server */
struct read_state rs;
/* data structure for storing data going to the server */
@@ -161,22 +161,22 @@ struct op_and_state {
grpc_transport_stream_op_batch op;
struct op_state state;
bool done;
- struct stream_obj *s; /* Pointer back to the stream object */
- struct op_and_state *next; /* next op_and_state in the linked list */
+ struct stream_obj* s; /* Pointer back to the stream object */
+ struct op_and_state* next; /* next op_and_state in the linked list */
};
struct op_storage {
int num_pending_ops;
- struct op_and_state *head;
+ struct op_and_state* head;
};
struct stream_obj {
- gpr_arena *arena;
- struct op_and_state *oas;
- grpc_transport_stream_op_batch *curr_op;
- grpc_cronet_transport *curr_ct;
- grpc_stream *curr_gs;
- bidirectional_stream *cbs;
+ gpr_arena* arena;
+ struct op_and_state* oas;
+ grpc_transport_stream_op_batch* curr_op;
+ grpc_cronet_transport* curr_ct;
+ grpc_stream* curr_gs;
+ bidirectional_stream* cbs;
bidirectional_stream_header_array header_array;
/* Stream level state. Some state will be tracked both at stream and stream_op
@@ -190,7 +190,7 @@ struct stream_obj {
gpr_mu mu;
/* Refcount object of the stream */
- grpc_stream_refcount *refcount;
+ grpc_stream_refcount* refcount;
};
typedef struct stream_obj stream_obj;
@@ -199,30 +199,30 @@ typedef struct stream_obj stream_obj;
grpc_cronet_stream_ref((stream), (reason))
#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_cronet_stream_unref((exec_ctx), (stream), (reason))
-void grpc_cronet_stream_ref(stream_obj *s, const char *reason) {
+void grpc_cronet_stream_ref(stream_obj* s, const char* reason) {
grpc_stream_ref(s->refcount, reason);
}
-void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s,
- const char *reason) {
+void grpc_cronet_stream_unref(grpc_exec_ctx* exec_ctx, stream_obj* s,
+ const char* reason) {
grpc_stream_unref(exec_ctx, s->refcount, reason);
}
#else
#define GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream))
#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
grpc_cronet_stream_unref((exec_ctx), (stream))
-void grpc_cronet_stream_ref(stream_obj *s) { grpc_stream_ref(s->refcount); }
-void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s) {
+void grpc_cronet_stream_ref(stream_obj* s) { grpc_stream_ref(s->refcount); }
+void grpc_cronet_stream_unref(grpc_exec_ctx* exec_ctx, stream_obj* s) {
grpc_stream_unref(exec_ctx, s->refcount);
}
#endif
-static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
- struct op_and_state *oas);
+static enum e_op_result execute_stream_op(grpc_exec_ctx* exec_ctx,
+ struct op_and_state* oas);
/*
Utility function to translate enum into string for printing
*/
-static const char *op_result_string(enum e_op_result i) {
+static const char* op_result_string(enum e_op_result i) {
switch (i) {
case ACTION_TAKEN_WITH_CALLBACK:
return "ACTION_TAKEN_WITH_CALLBACK";
@@ -234,7 +234,7 @@ static const char *op_result_string(enum e_op_result i) {
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-static const char *op_id_string(enum e_op_id i) {
+static const char* op_id_string(enum e_op_id i) {
switch (i) {
case OP_SEND_INITIAL_METADATA:
return "OP_SEND_INITIAL_METADATA";
@@ -268,7 +268,7 @@ static const char *op_id_string(enum e_op_id i) {
return "UNKNOWN";
}
-static void null_and_maybe_free_read_buffer(stream_obj *s) {
+static void null_and_maybe_free_read_buffer(stream_obj* s) {
if (s->state.rs.read_buffer &&
s->state.rs.read_buffer != s->state.rs.grpc_header_bytes) {
gpr_free(s->state.rs.read_buffer);
@@ -276,7 +276,7 @@ static void null_and_maybe_free_read_buffer(stream_obj *s) {
s->state.rs.read_buffer = NULL;
}
-static void maybe_flush_read(stream_obj *s) {
+static void maybe_flush_read(stream_obj* s) {
/* To enter flush read state (discarding all the buffered messages in
* transport layer), two conditions must be satisfied: 1) non-zero grpc status
* has been received, and 2) an op requesting the status code
@@ -289,7 +289,7 @@ static void maybe_flush_read(stream_obj *s) {
CRONET_LOG(GPR_DEBUG, "%p: Flush read", s);
s->state.flush_read = true;
null_and_maybe_free_read_buffer(s);
- s->state.rs.read_buffer = (char *)gpr_malloc(GRPC_FLUSH_READ_SIZE);
+ s->state.rs.read_buffer = (char*)gpr_malloc(GRPC_FLUSH_READ_SIZE);
if (!s->state.pending_read_from_cronet) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
@@ -300,8 +300,8 @@ static void maybe_flush_read(stream_obj *s) {
}
}
-static grpc_error *make_error_with_desc(int error_code, const char *desc) {
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
+static grpc_error* make_error_with_desc(int error_code, const char* desc) {
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code);
return error;
}
@@ -309,13 +309,13 @@ static grpc_error *make_error_with_desc(int error_code, const char *desc) {
/*
Add a new stream op to op storage.
*/
-static void add_to_storage(struct stream_obj *s,
- grpc_transport_stream_op_batch *op) {
- struct op_storage *storage = &s->storage;
+static void add_to_storage(struct stream_obj* s,
+ grpc_transport_stream_op_batch* op) {
+ struct op_storage* storage = &s->storage;
/* add new op at the beginning of the linked list. The memory is freed
in remove_from_storage */
- struct op_and_state *new_op =
- (struct op_and_state *)gpr_malloc(sizeof(struct op_and_state));
+ struct op_and_state* new_op =
+ (struct op_and_state*)gpr_malloc(sizeof(struct op_and_state));
memcpy(&new_op->op, op, sizeof(grpc_transport_stream_op_batch));
memset(&new_op->state, 0, sizeof(new_op->state));
new_op->s = s;
@@ -339,9 +339,9 @@ static void add_to_storage(struct stream_obj *s,
/*
Traverse the linked list and delete op and free memory
*/
-static void remove_from_storage(struct stream_obj *s,
- struct op_and_state *oas) {
- struct op_and_state *curr;
+static void remove_from_storage(struct stream_obj* s,
+ struct op_and_state* oas) {
+ struct op_and_state* curr;
if (s->storage.head == NULL || oas == NULL) {
return;
}
@@ -373,9 +373,9 @@ static void remove_from_storage(struct stream_obj *s,
This can get executed from the Cronet network thread via cronet callback
or on the application supplied thread via the perform_stream_op function.
*/
-static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) {
+static void execute_from_storage(grpc_exec_ctx* exec_ctx, stream_obj* s) {
gpr_mu_lock(&s->mu);
- for (struct op_and_state *curr = s->storage.head; curr != NULL;) {
+ for (struct op_and_state* curr = s->storage.head; curr != NULL;) {
CRONET_LOG(GPR_DEBUG, "calling op at %p. done = %d", curr, curr->done);
GPR_ASSERT(curr->done == 0);
enum e_op_result result = execute_stream_op(exec_ctx, curr);
@@ -383,7 +383,7 @@ static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) {
op_result_string(result));
/* if this op is done, then remove it and free memory */
if (curr->done) {
- struct op_and_state *next = curr->next;
+ struct op_and_state* next = curr->next;
remove_from_storage(s, curr);
curr = next;
}
@@ -400,11 +400,11 @@ static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) {
/*
Cronet callback
*/
-static void on_failed(bidirectional_stream *stream, int net_error) {
+static void on_failed(bidirectional_stream* stream, int net_error) {
CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_FAILED] = true;
@@ -427,11 +427,11 @@ static void on_failed(bidirectional_stream *stream, int net_error) {
/*
Cronet callback
*/
-static void on_canceled(bidirectional_stream *stream) {
+static void on_canceled(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_CANCELED] = true;
@@ -454,11 +454,11 @@ static void on_canceled(bidirectional_stream *stream) {
/*
Cronet callback
*/
-static void on_succeeded(bidirectional_stream *stream) {
+static void on_succeeded(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
gpr_mu_lock(&s->mu);
bidirectional_stream_destroy(s->cbs);
s->state.state_callback_received[OP_SUCCEEDED] = true;
@@ -473,11 +473,11 @@ static void on_succeeded(bidirectional_stream *stream) {
/*
Cronet callback
*/
-static void on_stream_ready(bidirectional_stream *stream) {
+static void on_stream_ready(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
- grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
+ stream_obj* s = (stream_obj*)stream->annotation;
+ grpc_cronet_transport* t = (grpc_cronet_transport*)s->curr_ct;
gpr_mu_lock(&s->mu);
s->state.state_op_done[OP_SEND_INITIAL_METADATA] = true;
s->state.state_callback_received[OP_SEND_INITIAL_METADATA] = true;
@@ -503,13 +503,13 @@ static void on_stream_ready(bidirectional_stream *stream) {
Cronet callback
*/
static void on_response_headers_received(
- bidirectional_stream *stream,
- const bidirectional_stream_header_array *headers,
- const char *negotiated_protocol) {
+ bidirectional_stream* stream,
+ const bidirectional_stream_header_array* headers,
+ const char* negotiated_protocol) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
headers, negotiated_protocol);
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
/* Identify if this is a header or a trailer (in a trailer-only response case)
*/
@@ -526,15 +526,15 @@ static void on_response_headers_received(
grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
s->arena);
for (size_t i = 0; i < headers->count; i++) {
- GRPC_LOG_IF_ERROR(
- "on_response_headers_received",
- grpc_chttp2_incoming_metadata_buffer_add(
- &exec_ctx, &s->state.rs.initial_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- headers->headers[i].key)),
- grpc_slice_intern(grpc_slice_from_static_string(
- headers->headers[i].value)))));
+ GRPC_LOG_IF_ERROR("on_response_headers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.initial_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx,
+ grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].value)))));
}
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -559,9 +559,9 @@ static void on_response_headers_received(
/*
Cronet callback
*/
-static void on_write_completed(bidirectional_stream *stream, const char *data) {
+static void on_write_completed(bidirectional_stream* stream, const char* data) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
gpr_mu_lock(&s->mu);
if (s->state.ws.write_buffer) {
@@ -577,10 +577,10 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) {
/*
Cronet callback
*/
-static void on_read_completed(bidirectional_stream *stream, char *data,
+static void on_read_completed(bidirectional_stream* stream, char* data,
int count) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- stream_obj *s = (stream_obj *)stream->annotation;
+ stream_obj* s = (stream_obj*)stream->annotation;
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
count);
gpr_mu_lock(&s->mu);
@@ -620,13 +620,13 @@ static void on_read_completed(bidirectional_stream *stream, char *data,
Cronet callback
*/
static void on_response_trailers_received(
- bidirectional_stream *stream,
- const bidirectional_stream_header_array *trailers) {
+ bidirectional_stream* stream,
+ const bidirectional_stream_header_array* trailers) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
CRONET_LOG(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)", stream,
trailers);
- stream_obj *s = (stream_obj *)stream->annotation;
- grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
+ stream_obj* s = (stream_obj*)stream->annotation;
+ grpc_cronet_transport* t = (grpc_cronet_transport*)s->curr_ct;
gpr_mu_lock(&s->mu);
memset(&s->state.rs.trailing_metadata, 0,
sizeof(s->state.rs.trailing_metadata));
@@ -636,15 +636,15 @@ static void on_response_trailers_received(
for (size_t i = 0; i < trailers->count; i++) {
CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key,
trailers->headers[i].value);
- GRPC_LOG_IF_ERROR(
- "on_response_trailers_received",
- grpc_chttp2_incoming_metadata_buffer_add(
- &exec_ctx, &s->state.rs.trailing_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- trailers->headers[i].key)),
- grpc_slice_intern(grpc_slice_from_static_string(
- trailers->headers[i].value)))));
+ GRPC_LOG_IF_ERROR("on_response_trailers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.trailing_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx,
+ grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].value)))));
s->state.rs.trailing_metadata_valid = true;
if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
0 != strcmp(trailers->headers[i].value, "0")) {
@@ -679,17 +679,17 @@ static void on_response_trailers_received(
Utility function that takes the data from s->write_slice_buffer and assembles
into a contiguous byte stream with 5 byte gRPC header prepended.
*/
-static void create_grpc_frame(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *write_slice_buffer,
- char **pp_write_buffer,
- size_t *p_write_buffer_size, uint32_t flags) {
+static void create_grpc_frame(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* write_slice_buffer,
+ char** pp_write_buffer,
+ size_t* p_write_buffer_size, uint32_t flags) {
grpc_slice slice = grpc_slice_buffer_take_first(write_slice_buffer);
size_t length = GRPC_SLICE_LENGTH(slice);
*p_write_buffer_size = length + GRPC_HEADER_SIZE_IN_BYTES;
/* This is freed in the on_write_completed callback */
- char *write_buffer = (char *)gpr_malloc(length + GRPC_HEADER_SIZE_IN_BYTES);
+ char* write_buffer = (char*)gpr_malloc(length + GRPC_HEADER_SIZE_IN_BYTES);
*pp_write_buffer = write_buffer;
- uint8_t *p = (uint8_t *)write_buffer;
+ uint8_t* p = (uint8_t*)write_buffer;
/* Append 5 byte header */
/* Compressed flag */
*p++ = (uint8_t)((flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0);
@@ -707,10 +707,10 @@ static void create_grpc_frame(grpc_exec_ctx *exec_ctx,
Convert metadata in a format that Cronet can consume
*/
static void convert_metadata_to_cronet_headers(
- grpc_linked_mdelem *head, const char *host, char **pp_url,
- bidirectional_stream_header **pp_headers, size_t *p_num_headers,
- const char **method) {
- grpc_linked_mdelem *curr = head;
+ grpc_linked_mdelem* head, const char* host, char** pp_url,
+ bidirectional_stream_header** pp_headers, size_t* p_num_headers,
+ const char** method) {
+ grpc_linked_mdelem* curr = head;
/* Walk the linked list and get number of header fields */
size_t num_headers_available = 0;
while (curr != NULL) {
@@ -719,8 +719,8 @@ static void convert_metadata_to_cronet_headers(
}
/* Allocate enough memory. It is freed in the on_stream_ready callback
*/
- bidirectional_stream_header *headers =
- (bidirectional_stream_header *)gpr_malloc(
+ bidirectional_stream_header* headers =
+ (bidirectional_stream_header*)gpr_malloc(
sizeof(bidirectional_stream_header) * num_headers_available);
*pp_headers = headers;
@@ -734,8 +734,8 @@ static void convert_metadata_to_cronet_headers(
while (num_headers < num_headers_available) {
grpc_mdelem mdelem = curr->md;
curr = curr->next;
- char *key = grpc_slice_to_c_string(GRPC_MDKEY(mdelem));
- char *value = grpc_slice_to_c_string(GRPC_MDVALUE(mdelem));
+ char* key = grpc_slice_to_c_string(GRPC_MDKEY(mdelem));
+ char* value = grpc_slice_to_c_string(GRPC_MDVALUE(mdelem));
if (grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_SCHEME) ||
grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_AUTHORITY)) {
/* Cronet populates these fields on its own */
@@ -772,10 +772,10 @@ static void convert_metadata_to_cronet_headers(
*p_num_headers = (size_t)num_headers;
}
-static void parse_grpc_header(const uint8_t *data, int *length,
- bool *compressed) {
+static void parse_grpc_header(const uint8_t* data, int* length,
+ bool* compressed) {
const uint8_t c = *data;
- const uint8_t *p = data + 1;
+ const uint8_t* p = data + 1;
*compressed = ((c & 0x01) == 0x01);
*length = 0;
*length |= ((uint8_t)*p++) << 24;
@@ -784,7 +784,7 @@ static void parse_grpc_header(const uint8_t *data, int *length,
*length |= ((uint8_t)*p++);
}
-static bool header_has_authority(grpc_linked_mdelem *head) {
+static bool header_has_authority(grpc_linked_mdelem* head) {
while (head != NULL) {
if (grpc_slice_eq(GRPC_MDKEY(head->md), GRPC_MDSTR_AUTHORITY)) {
return true;
@@ -798,11 +798,11 @@ static bool header_has_authority(grpc_linked_mdelem *head) {
Op Execution: Decide if one of the actions contained in the stream op can be
executed. This is the heart of the state machine.
*/
-static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
- struct stream_obj *s, struct op_state *op_state,
+static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
+ struct stream_obj* s, struct op_state* op_state,
enum e_op_id op_id) {
- struct op_state *stream_state = &s->state;
- grpc_cronet_transport *t = s->curr_ct;
+ struct op_state* stream_state = &s->state;
+ grpc_cronet_transport* t = s->curr_ct;
bool result = true;
/* When call is canceled, every op can be run, except under following
conditions
@@ -981,12 +981,12 @@ static bool op_can_be_run(grpc_transport_stream_op_batch *curr_op,
/*
TODO (makdharma): Break down this function in smaller chunks for readability.
*/
-static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
- struct op_and_state *oas) {
- grpc_transport_stream_op_batch *stream_op = &oas->op;
- struct stream_obj *s = oas->s;
- grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct;
- struct op_state *stream_state = &s->state;
+static enum e_op_result execute_stream_op(grpc_exec_ctx* exec_ctx,
+ struct op_and_state* oas) {
+ grpc_transport_stream_op_batch* stream_op = &oas->op;
+ struct stream_obj* s = oas->s;
+ grpc_cronet_transport* t = (grpc_cronet_transport*)s->curr_ct;
+ struct op_state* stream_state = &s->state;
enum e_op_result result = NO_ACTION_POSSIBLE;
if (stream_op->send_initial_metadata &&
op_can_be_run(stream_op, s, &oas->state, OP_SEND_INITIAL_METADATA)) {
@@ -1002,8 +1002,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
bidirectional_stream_disable_auto_flush(s->cbs, true);
bidirectional_stream_delay_request_headers_until_flush(s->cbs, true);
}
- char *url = NULL;
- const char *method = "POST";
+ char* url = NULL;
+ const char* method = "POST";
s->header_array.headers = NULL;
convert_metadata_to_cronet_headers(stream_op->payload->send_initial_metadata
.send_initial_metadata->list.head,
@@ -1018,8 +1018,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
unsigned int header_index;
for (header_index = 0; header_index < s->header_array.count;
header_index++) {
- gpr_free((void *)s->header_array.headers[header_index].key);
- gpr_free((void *)s->header_array.headers[header_index].value);
+ gpr_free((void*)s->header_array.headers[header_index].key);
+ gpr_free((void*)s->header_array.headers[header_index].value);
}
stream_state->state_op_done[OP_SEND_INITIAL_METADATA] = true;
if (t->use_packet_coalescing) {
@@ -1177,14 +1177,14 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.remaining_bytes == 0) {
/* Start a read operation for data */
stream_state->rs.length_field_received = true;
- parse_grpc_header((const uint8_t *)stream_state->rs.read_buffer,
+ parse_grpc_header((const uint8_t*)stream_state->rs.read_buffer,
&stream_state->rs.length_field,
&stream_state->rs.compressed);
CRONET_LOG(GPR_DEBUG, "length field = %d",
stream_state->rs.length_field);
if (stream_state->rs.length_field > 0) {
stream_state->rs.read_buffer =
- (char *)gpr_malloc((size_t)stream_state->rs.length_field);
+ (char*)gpr_malloc((size_t)stream_state->rs.length_field);
GPR_ASSERT(stream_state->rs.read_buffer);
stream_state->rs.remaining_bytes = stream_state->rs.length_field;
stream_state->rs.received_bytes = 0;
@@ -1207,9 +1207,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (stream_state->rs.compressed) {
stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
- *((grpc_byte_buffer **)
- stream_op->payload->recv_message.recv_message) =
- (grpc_byte_buffer *)&stream_state->rs.sbs;
+ *((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
+ (grpc_byte_buffer*)&stream_state->rs.sbs;
GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@@ -1250,7 +1249,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
CRONET_LOG(GPR_DEBUG, "read operation complete");
grpc_slice read_data_slice =
GRPC_SLICE_MALLOC((uint32_t)stream_state->rs.length_field);
- uint8_t *dst_p = GRPC_SLICE_START_PTR(read_data_slice);
+ uint8_t* dst_p = GRPC_SLICE_START_PTR(read_data_slice);
memcpy(dst_p, stream_state->rs.read_buffer,
(size_t)stream_state->rs.length_field);
null_and_maybe_free_read_buffer(s);
@@ -1265,8 +1264,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (stream_state->rs.compressed) {
stream_state->rs.sbs.base.flags = GRPC_WRITE_INTERNAL_COMPRESS;
}
- *((grpc_byte_buffer **)stream_op->payload->recv_message.recv_message) =
- (grpc_byte_buffer *)&stream_state->rs.sbs;
+ *((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
+ (grpc_byte_buffer*)&stream_state->rs.sbs;
GRPC_CLOSURE_SCHED(exec_ctx,
stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@@ -1351,10 +1350,10 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
Functions used by upper layers to access transport functionality.
*/
-static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena) {
- stream_obj *s = (stream_obj *)gs;
+static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena) {
+ stream_obj* s = (stream_obj*)gs;
s->refcount = refcount;
GRPC_CRONET_STREAM_REF(s, "cronet transport");
@@ -1377,23 +1376,23 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->state.pending_read_from_cronet = false;
s->curr_gs = gs;
- s->curr_ct = (grpc_cronet_transport *)gt;
+ s->curr_ct = (grpc_cronet_transport*)gt;
s->arena = arena;
gpr_mu_init(&s->mu);
return 0;
}
-static void set_pollset_do_nothing(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset *pollset) {}
+static void set_pollset_do_nothing(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_pollset* pollset) {}
-static void set_pollset_set_do_nothing(grpc_exec_ctx *exec_ctx,
- grpc_transport *gt, grpc_stream *gs,
- grpc_pollset_set *pollset_set) {}
+static void set_pollset_set_do_nothing(grpc_exec_ctx* exec_ctx,
+ grpc_transport* gt, grpc_stream* gs,
+ grpc_pollset_set* pollset_set) {}
-static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_transport_stream_op_batch *op) {
+static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_transport_stream_op_batch* op) {
CRONET_LOG(GPR_DEBUG, "perform_stream_op");
if (op->send_initial_metadata &&
header_has_authority(op->payload->send_initial_metadata
@@ -1413,15 +1412,15 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
return;
}
- stream_obj *s = (stream_obj *)gs;
+ stream_obj* s = (stream_obj*)gs;
add_to_storage(s, op);
execute_from_storage(exec_ctx, s);
}
-static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_closure *then_schedule_closure) {
- stream_obj *s = (stream_obj *)gs;
+static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_closure* then_schedule_closure) {
+ stream_obj* s = (stream_obj*)gs;
null_and_maybe_free_read_buffer(s);
/* Clean up read_slice_buffer in case there is unread data. */
grpc_slice_buffer_destroy_internal(exec_ctx, &s->state.rs.read_slice_buffer);
@@ -1429,15 +1428,15 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
-static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
+static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {}
-static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx,
- grpc_transport *gt) {
+static grpc_endpoint* get_endpoint(grpc_exec_ctx* exec_ctx,
+ grpc_transport* gt) {
return NULL;
}
-static void perform_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_transport_op *op) {}
+static void perform_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_transport_op* op) {}
static const grpc_transport_vtable grpc_cronet_vtable = {
sizeof(stream_obj),
@@ -1451,17 +1450,17 @@ static const grpc_transport_vtable grpc_cronet_vtable = {
destroy_transport,
get_endpoint};
-grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
- const grpc_channel_args *args,
- void *reserved) {
- grpc_cronet_transport *ct =
- (grpc_cronet_transport *)gpr_malloc(sizeof(grpc_cronet_transport));
+grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
+ const grpc_channel_args* args,
+ void* reserved) {
+ grpc_cronet_transport* ct =
+ (grpc_cronet_transport*)gpr_malloc(sizeof(grpc_cronet_transport));
if (!ct) {
goto error;
}
ct->base.vtable = &grpc_cronet_vtable;
- ct->engine = (stream_engine *)engine;
- ct->host = (char *)gpr_malloc(strlen(target) + 1);
+ ct->engine = (stream_engine*)engine;
+ ct->host = (char*)gpr_malloc(strlen(target) + 1);
if (!ct->host) {
goto error;
}
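For context on the hunks above: create_grpc_frame and parse_grpc_header implement the standard gRPC length-prefixed message framing, i.e. a 1-byte compressed flag followed by a 4-byte big-endian message length in front of the payload. A minimal standalone sketch of that prefix handling (hypothetical helper names, not part of this transport, written in the left-aligned pointer style this change adopts):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Write the 5-byte gRPC message prefix: compressed flag + big-endian length.
   Returns the number of prefix bytes written. */
static size_t write_grpc_prefix(uint8_t* out, bool compressed,
                                uint32_t length) {
  out[0] = compressed ? 1 : 0;
  out[1] = (uint8_t)(length >> 24);
  out[2] = (uint8_t)(length >> 16);
  out[3] = (uint8_t)(length >> 8);
  out[4] = (uint8_t)(length);
  return 5;
}

/* Inverse of the above, mirroring what parse_grpc_header does. */
static void read_grpc_prefix(const uint8_t* in, bool* compressed,
                             uint32_t* length) {
  *compressed = (in[0] & 0x01) == 0x01;
  *length = ((uint32_t)in[1] << 24) | ((uint32_t)in[2] << 16) |
            ((uint32_t)in[3] << 8) | (uint32_t)in[4];
}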
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.h b/src/core/ext/transport/cronet/transport/cronet_transport.h
index 43ff391f79..7643fdb585 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.h
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.h
@@ -25,9 +25,9 @@
extern "C" {
#endif
-grpc_transport *grpc_create_cronet_transport(void *engine, const char *target,
- const grpc_channel_args *args,
- void *reserved);
+grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
+ const grpc_channel_args* args,
+ void* reserved);
#ifdef __cplusplus
}
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 1551f5e988..a7a6db8bc2 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -50,20 +50,20 @@ typedef struct {
typedef struct inproc_transport {
grpc_transport base;
- shared_mu *mu;
+ shared_mu* mu;
gpr_refcount refs;
bool is_client;
grpc_connectivity_state_tracker connectivity;
- void (*accept_stream_cb)(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_transport *transport, const void *server_data);
- void *accept_stream_data;
+ void (*accept_stream_cb)(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_transport* transport, const void* server_data);
+ void* accept_stream_data;
bool is_closed;
- struct inproc_transport *other_side;
- struct inproc_stream *stream_list;
+ struct inproc_transport* other_side;
+ struct inproc_stream* stream_list;
} inproc_transport;
typedef struct inproc_stream {
- inproc_transport *t;
+ inproc_transport* t;
grpc_metadata_batch to_read_initial_md;
uint32_t to_read_initial_md_flags;
bool to_read_initial_md_filled;
@@ -80,21 +80,21 @@ typedef struct inproc_stream {
grpc_millis write_buffer_deadline;
grpc_metadata_batch write_buffer_trailing_md;
bool write_buffer_trailing_md_filled;
- grpc_error *write_buffer_cancel_error;
+ grpc_error* write_buffer_cancel_error;
- struct inproc_stream *other_side;
+ struct inproc_stream* other_side;
bool other_side_closed; // won't talk anymore
bool write_buffer_other_side_closed; // on hold
- grpc_stream_refcount *refs;
- grpc_closure *closure_at_destroy;
+ grpc_stream_refcount* refs;
+ grpc_closure* closure_at_destroy;
- gpr_arena *arena;
+ gpr_arena* arena;
- grpc_transport_stream_op_batch *send_message_op;
- grpc_transport_stream_op_batch *send_trailing_md_op;
- grpc_transport_stream_op_batch *recv_initial_md_op;
- grpc_transport_stream_op_batch *recv_message_op;
- grpc_transport_stream_op_batch *recv_trailing_md_op;
+ grpc_transport_stream_op_batch* send_message_op;
+ grpc_transport_stream_op_batch* send_trailing_md_op;
+ grpc_transport_stream_op_batch* recv_initial_md_op;
+ grpc_transport_stream_op_batch* recv_message_op;
+ grpc_transport_stream_op_batch* recv_trailing_md_op;
grpc_slice_buffer recv_message;
grpc_slice_buffer_stream recv_stream;
@@ -107,29 +107,29 @@ typedef struct inproc_stream {
bool closed;
- grpc_error *cancel_self_error;
- grpc_error *cancel_other_error;
+ grpc_error* cancel_self_error;
+ grpc_error* cancel_other_error;
grpc_millis deadline;
bool listed;
- struct inproc_stream *stream_list_prev;
- struct inproc_stream *stream_list_next;
+ struct inproc_stream* stream_list_prev;
+ struct inproc_stream* stream_list_next;
} inproc_stream;
static grpc_closure do_nothing_closure;
-static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- grpc_error *error);
-static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static bool cancel_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ grpc_error* error);
+static void op_state_machine(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-static void ref_transport(inproc_transport *t) {
+static void ref_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
gpr_ref(&t->refs);
}
-static void really_destroy_transport(grpc_exec_ctx *exec_ctx,
- inproc_transport *t) {
+static void really_destroy_transport(grpc_exec_ctx* exec_ctx,
+ inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t);
grpc_connectivity_state_destroy(exec_ctx, &t->connectivity);
if (gpr_unref(&t->mu->refs)) {
@@ -138,7 +138,7 @@ static void really_destroy_transport(grpc_exec_ctx *exec_ctx,
gpr_free(t);
}
-static void unref_transport(grpc_exec_ctx *exec_ctx, inproc_transport *t) {
+static void unref_transport(grpc_exec_ctx* exec_ctx, inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "unref_transport %p", t);
if (gpr_unref(&t->refs)) {
really_destroy_transport(exec_ctx, t);
@@ -153,18 +153,18 @@ static void unref_transport(grpc_exec_ctx *exec_ctx, inproc_transport *t) {
#define STREAM_UNREF(e, refs, reason) grpc_stream_unref(e, refs)
#endif
-static void ref_stream(inproc_stream *s, const char *reason) {
+static void ref_stream(inproc_stream* s, const char* reason) {
INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason);
STREAM_REF(s->refs, reason);
}
-static void unref_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- const char *reason) {
+static void unref_stream(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ const char* reason) {
INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason);
STREAM_UNREF(exec_ctx, s->refs, reason);
}
-static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
+static void really_destroy_stream(grpc_exec_ctx* exec_ctx, inproc_stream* s) {
INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
@@ -182,12 +182,12 @@ static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
}
}
-static void log_metadata(const grpc_metadata_batch *md_batch, bool is_client,
+static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
bool is_initial) {
- for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL;
+ for (grpc_linked_mdelem* md = md_batch->list.head; md != NULL;
md = md->next) {
- char *key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
- char *value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md));
+ char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
+ char* value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md));
gpr_log(GPR_INFO, "INPROC:%s:%s: %s: %s", is_initial ? "HDR" : "TRL",
is_client ? "CLI" : "SVR", key, value);
gpr_free(key);
@@ -195,10 +195,10 @@ static void log_metadata(const grpc_metadata_batch *md_batch, bool is_client,
}
}
-static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- const grpc_metadata_batch *metadata,
- uint32_t flags, grpc_metadata_batch *out_md,
- uint32_t *outflags, bool *markfilled) {
+static grpc_error* fill_in_metadata(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ const grpc_metadata_batch* metadata,
+ uint32_t flags, grpc_metadata_batch* out_md,
+ uint32_t* outflags, bool* markfilled) {
if (GRPC_TRACER_ON(grpc_inproc_trace)) {
log_metadata(metadata, s->t->is_client, outflags != NULL);
}
@@ -209,11 +209,11 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
if (markfilled != NULL) {
*markfilled = true;
}
- grpc_error *error = GRPC_ERROR_NONE;
- for (grpc_linked_mdelem *elem = metadata->list.head;
+ grpc_error* error = GRPC_ERROR_NONE;
+ for (grpc_linked_mdelem* elem = metadata->list.head;
(elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
- grpc_linked_mdelem *nelem =
- (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem));
+ grpc_linked_mdelem* nelem =
+ (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*nelem));
nelem->md = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@@ -223,12 +223,12 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
return error;
}
-static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena) {
+static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena) {
INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
- inproc_transport *t = (inproc_transport *)gt;
- inproc_stream *s = (inproc_stream *)gs;
+ inproc_transport* t = (inproc_transport*)gt;
+ inproc_stream* s = (inproc_stream*)gs;
s->arena = arena;
s->refs = refcount;
@@ -277,7 +277,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
if (!server_data) {
ref_transport(t);
- inproc_transport *st = t->other_side;
+ inproc_transport* st = t->other_side;
ref_transport(st);
s->other_side = NULL; // will get filled in soon
// Pass the client-side stream address to the server-side for a ref
@@ -286,10 +286,10 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
st->accept_stream_cb, st->accept_stream_data);
(*st->accept_stream_cb)(exec_ctx, st->accept_stream_data, &st->base,
- (void *)s);
+ (void*)s);
} else {
// This is the server-side and is being called through accept_stream_cb
- inproc_stream *cs = (inproc_stream *)server_data;
+ inproc_stream* cs = (inproc_stream*)server_data;
s->other_side = cs;
// Ref the server-side stream on behalf of the client now
ref_stream(s, "inproc_init_stream:srv");
@@ -326,15 +326,15 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
return 0; // return value is not important
}
-static void close_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
+static void close_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s) {
if (!s->closed) {
// Release the metadata that we would have written out
grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_initial_md);
grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_trailing_md);
if (s->listed) {
- inproc_stream *p = s->stream_list_prev;
- inproc_stream *n = s->stream_list_next;
+ inproc_stream* p = s->stream_list_prev;
+ inproc_stream* n = s->stream_list_next;
if (p != NULL) {
p->stream_list_next = n;
} else {
@@ -352,8 +352,8 @@ static void close_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
}
// This function means that we are done talking/listening to the other side
-static void close_other_side_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- const char *reason) {
+static void close_other_side_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ const char* reason) {
if (s->other_side != NULL) {
// First release the metadata that came from the other side's arena
grpc_metadata_batch_destroy(exec_ctx, &s->to_read_initial_md);
@@ -371,10 +371,10 @@ static void close_other_side_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// this stream_op_batch is only one of the pending operations for this
// stream. This is called when one of the pending operations for the stream
// is done and about to be NULLed out
-static void complete_if_batch_end_locked(grpc_exec_ctx *exec_ctx,
- inproc_stream *s, grpc_error *error,
- grpc_transport_stream_op_batch *op,
- const char *msg) {
+static void complete_if_batch_end_locked(grpc_exec_ctx* exec_ctx,
+ inproc_stream* s, grpc_error* error,
+ grpc_transport_stream_op_batch* op,
+ const char* msg) {
int is_sm = (int)(op == s->send_message_op);
int is_stm = (int)(op == s->send_trailing_md_op);
int is_rim = (int)(op == s->recv_initial_md_op);
@@ -387,9 +387,9 @@ static void complete_if_batch_end_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void maybe_schedule_op_closure_locked(grpc_exec_ctx *exec_ctx,
- inproc_stream *s,
- grpc_error *error) {
+static void maybe_schedule_op_closure_locked(grpc_exec_ctx* exec_ctx,
+ inproc_stream* s,
+ grpc_error* error) {
if (s && s->ops_needed && !s->op_closure_scheduled) {
GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_REF(error));
s->op_closure_scheduled = true;
@@ -397,8 +397,8 @@ static void maybe_schedule_op_closure_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- grpc_error *error) {
+static void fail_helper_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ grpc_error* error) {
INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata
@@ -409,10 +409,10 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
- inproc_stream *other = s->other_side;
- grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
+ inproc_stream* other = s->other_side;
+ grpc_metadata_batch* dest = (other == NULL) ? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
- bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
+ bool* destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
: &other->to_read_trailing_md_filled;
fill_in_metadata(exec_ctx, s, &fake_md, 0, dest, NULL, destfilled);
grpc_metadata_batch_destroy(exec_ctx, &fake_md);
@@ -427,20 +427,20 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
}
}
if (s->recv_initial_md_op) {
- grpc_error *err;
+ grpc_error* err;
if (!s->t->is_client) {
// If this is a server, provide initial metadata with a path and authority
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
- grpc_linked_mdelem *path_md =
- (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md));
+ grpc_linked_mdelem* path_md =
+ (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*path_md));
path_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
GRPC_ERROR_NONE);
- grpc_linked_mdelem *auth_md =
- (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md));
+ grpc_linked_mdelem* auth_md =
+ (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*auth_md));
auth_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
@@ -509,9 +509,9 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
GRPC_ERROR_UNREF(error);
}
-static void message_transfer_locked(grpc_exec_ctx *exec_ctx,
- inproc_stream *sender,
- inproc_stream *receiver) {
+static void message_transfer_locked(grpc_exec_ctx* exec_ctx,
+ inproc_stream* sender,
+ inproc_stream* receiver) {
size_t remaining =
sender->send_message_op->payload->send_message.send_message->length;
if (receiver->recv_inited) {
@@ -525,7 +525,7 @@ static void message_transfer_locked(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(grpc_byte_stream_next(
exec_ctx, sender->send_message_op->payload->send_message.send_message,
SIZE_MAX, &unused));
- grpc_error *error = grpc_byte_stream_pull(
+ grpc_error* error = grpc_byte_stream_pull(
exec_ctx, sender->send_message_op->payload->send_message.send_message,
&message_slice);
if (error != GRPC_ERROR_NONE) {
@@ -558,8 +558,8 @@ static void message_transfer_locked(grpc_exec_ctx *exec_ctx,
sender->send_message_op = NULL;
}
-static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void op_state_machine(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
// This function gets called when we have contents in the unprocessed reads
// Get what we want based on our ops wanted
// Schedule our appropriate closures
@@ -567,17 +567,17 @@ static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
// Since this is a closure directly invoked by the combiner, it should not
// unref the error parameter explicitly; the combiner will do that implicitly
- grpc_error *new_err = GRPC_ERROR_NONE;
+ grpc_error* new_err = GRPC_ERROR_NONE;
bool needs_close = false;
INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg);
- inproc_stream *s = (inproc_stream *)arg;
- gpr_mu *mu = &s->t->mu->mu; // keep aside in case s gets closed
+ inproc_stream* s = (inproc_stream*)arg;
+ gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu);
s->op_closure_scheduled = false;
// cancellation takes precedence
- inproc_stream *other = s->other_side;
+ inproc_stream* other = s->other_side;
if (s->cancel_self_error != GRPC_ERROR_NONE) {
fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error));
@@ -612,9 +612,9 @@ static void op_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
(!s->send_message_op ||
(s->t->is_client &&
(s->trailing_md_recvd || s->to_read_trailing_md_filled)))) {
- grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
+ grpc_metadata_batch* dest = (other == NULL) ? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
- bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
+ bool* destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
: &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error!
@@ -810,8 +810,8 @@ done:
GRPC_ERROR_UNREF(new_err);
}
-static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
- grpc_error *error) {
+static bool cancel_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+ grpc_error* error) {
bool ret = false; // was the cancel accepted
INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
grpc_error_string(error));
@@ -826,10 +826,10 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_metadata_batch cancel_md;
grpc_metadata_batch_init(&cancel_md);
- inproc_stream *other = s->other_side;
- grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
+ inproc_stream* other = s->other_side;
+ grpc_metadata_batch* dest = (other == NULL) ? &s->write_buffer_trailing_md
: &other->to_read_trailing_md;
- bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
+ bool* destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
: &other->to_read_trailing_md_filled;
fill_in_metadata(exec_ctx, s, &cancel_md, 0, dest, NULL, destfilled);
grpc_metadata_batch_destroy(exec_ctx, &cancel_md);
@@ -862,12 +862,12 @@ static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
return ret;
}
-static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_transport_stream_op_batch *op) {
+static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op);
- inproc_stream *s = (inproc_stream *)gs;
- gpr_mu *mu = &s->t->mu->mu; // save aside in case s gets closed
+ inproc_stream* s = (inproc_stream*)gs;
+ gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu);
if (GRPC_TRACER_ON(grpc_inproc_trace)) {
@@ -880,8 +880,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->t->is_client, false);
}
}
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_closure *on_complete = op->on_complete;
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_closure* on_complete = op->on_complete;
if (on_complete == NULL) {
on_complete = &do_nothing_closure;
}
@@ -907,18 +907,18 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
bool needs_close = false;
- inproc_stream *other = s->other_side;
+ inproc_stream* other = s->other_side;
if (error == GRPC_ERROR_NONE &&
(op->send_initial_metadata || op->send_trailing_metadata)) {
if (s->t->is_closed) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
}
if (error == GRPC_ERROR_NONE && op->send_initial_metadata) {
- grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_initial_md
+ grpc_metadata_batch* dest = (other == NULL) ? &s->write_buffer_initial_md
: &other->to_read_initial_md;
- uint32_t *destflags = (other == NULL) ? &s->write_buffer_initial_md_flags
+ uint32_t* destflags = (other == NULL) ? &s->write_buffer_initial_md_flags
: &other->to_read_initial_md_flags;
- bool *destfilled = (other == NULL) ? &s->write_buffer_initial_md_filled
+ bool* destfilled = (other == NULL) ? &s->write_buffer_initial_md_filled
: &other->to_read_initial_md_filled;
if (*destfilled || s->initial_md_sent) {
// The buffer is already in use; that's an error!
@@ -933,7 +933,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
dest, destflags, destfilled);
}
if (s->t->is_client) {
- grpc_millis *dl =
+ grpc_millis* dl =
(other == NULL) ? &s->write_buffer_deadline : &other->deadline;
*dl = GPR_MIN(*dl, op->payload->send_initial_metadata
.send_initial_metadata->deadline);
@@ -972,8 +972,9 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// 4. We want to receive a message and there is a message ready
// 5. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the receive message as well
- if ((op->send_message && other && ((other->recv_message_op != NULL) ||
- (other->recv_trailing_md_op != NULL))) ||
+ if ((op->send_message && other &&
+ ((other->recv_message_op != NULL) ||
+ (other->recv_trailing_md_op != NULL))) ||
(op->send_trailing_metadata && !op->send_message) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||
(op->recv_message && other && (other->send_message_op != NULL)) ||
@@ -1020,8 +1021,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
GRPC_ERROR_UNREF(error);
}
-static void close_transport_locked(grpc_exec_ctx *exec_ctx,
- inproc_transport *t) {
+static void close_transport_locked(grpc_exec_ctx* exec_ctx,
+ inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed);
grpc_connectivity_state_set(
exec_ctx, &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
@@ -1041,9 +1042,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_transport_op *op) {
- inproc_transport *t = (inproc_transport *)gt;
+static void perform_transport_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_transport_op* op) {
+ inproc_transport* t = (inproc_transport*)gt;
INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu);
if (op->on_connectivity_state_change) {
@@ -1075,17 +1076,17 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_mu_unlock(&t->mu->mu);
}
-static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs,
- grpc_closure *then_schedule_closure) {
+static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs,
+ grpc_closure* then_schedule_closure) {
INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure);
- inproc_stream *s = (inproc_stream *)gs;
+ inproc_stream* s = (inproc_stream*)gs;
s->closure_at_destroy = then_schedule_closure;
really_destroy_stream(exec_ctx, s);
}
-static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
- inproc_transport *t = (inproc_transport *)gt;
+static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {
+ inproc_transport* t = (inproc_transport*)gt;
INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu);
close_transport_locked(exec_ctx, t);
@@ -1098,24 +1099,24 @@ static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
* INTEGRATION GLUE
*/
-static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset *pollset) {
+static void set_pollset(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_pollset* pollset) {
// Nothing to do here
}
-static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, grpc_pollset_set *pollset_set) {
+static void set_pollset_set(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
+ grpc_stream* gs, grpc_pollset_set* pollset_set) {
// Nothing to do here
}
-static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) {
+static grpc_endpoint* get_endpoint(grpc_exec_ctx* exec_ctx, grpc_transport* t) {
return NULL;
}
/*******************************************************************************
* GLOBAL INIT AND DESTROY
*/
-static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
+static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
void grpc_inproc_transport_init(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -1146,16 +1147,16 @@ static const grpc_transport_vtable inproc_vtable = {
/*******************************************************************************
* Main inproc transport functions
*/
-static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
- grpc_transport **server_transport,
- const grpc_channel_args *server_args,
- grpc_transport **client_transport,
- const grpc_channel_args *client_args) {
+static void inproc_transports_create(grpc_exec_ctx* exec_ctx,
+ grpc_transport** server_transport,
+ const grpc_channel_args* server_args,
+ grpc_transport** client_transport,
+ const grpc_channel_args* client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
- inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
- inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
+ inproc_transport* st = (inproc_transport*)gpr_zalloc(sizeof(*st));
+ inproc_transport* ct = (inproc_transport*)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
- st->mu = ct->mu = (shared_mu *)gpr_malloc(sizeof(*st->mu));
+ st->mu = ct->mu = (shared_mu*)gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);
gpr_ref_init(&st->mu->refs, 2);
st->base.vtable = &inproc_vtable;
@@ -1174,37 +1175,37 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
ct->other_side = st;
st->stream_list = NULL;
ct->stream_list = NULL;
- *server_transport = (grpc_transport *)st;
- *client_transport = (grpc_transport *)ct;
+ *server_transport = (grpc_transport*)st;
+ *client_transport = (grpc_transport*)ct;
}
-grpc_channel *grpc_inproc_channel_create(grpc_server *server,
- grpc_channel_args *args,
- void *reserved) {
+grpc_channel* grpc_inproc_channel_create(grpc_server* server,
+ grpc_channel_args* args,
+ void* reserved) {
GRPC_API_TRACE("grpc_inproc_channel_create(server=%p, args=%p)", 2,
(server, args));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- const grpc_channel_args *server_args = grpc_server_get_channel_args(server);
+ const grpc_channel_args* server_args = grpc_server_get_channel_args(server);
// Add a default authority channel argument for the client
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
- default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY;
- default_authority_arg.value.string = (char *)"inproc.authority";
- grpc_channel_args *client_args =
+ default_authority_arg.key = (char*)GRPC_ARG_DEFAULT_AUTHORITY;
+ default_authority_arg.value.string = (char*)"inproc.authority";
+ grpc_channel_args* client_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);
- grpc_transport *server_transport;
- grpc_transport *client_transport;
+ grpc_transport* server_transport;
+ grpc_transport* client_transport;
inproc_transports_create(&exec_ctx, &server_transport, server_args,
&client_transport, client_args);
grpc_server_setup_transport(&exec_ctx, server, server_transport, NULL,
server_args);
- grpc_channel *channel =
+ grpc_channel* channel =
grpc_channel_create(&exec_ctx, "inproc", client_args,
GRPC_CLIENT_DIRECT_CHANNEL, client_transport);
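The reformatted inproc_transports_create above preserves the existing ownership pattern: the client and server transports are allocated as a pair, point at each other through other_side, and share one refcounted mutex that is freed only after both sides release it. A stripped-down sketch of that pattern, using hypothetical names rather than the real gRPC types:

#include <pthread.h>
#include <stdlib.h>

/* One lock shared by both endpoints; freed when the second side drops it. */
typedef struct {
  pthread_mutex_t mu;
  int refs;
} shared_lock;

typedef struct endpoint {
  shared_lock* lock;
  struct endpoint* other_side;
} endpoint;

/* Create a connected pair: each side keeps a back-pointer to its peer and a
   reference on the single shared lock (refs starts at 2, one per side). */
static void endpoint_pair_create(endpoint** client, endpoint** server) {
  endpoint* c = (endpoint*)calloc(1, sizeof(*c));
  endpoint* s = (endpoint*)calloc(1, sizeof(*s));
  shared_lock* lock = (shared_lock*)malloc(sizeof(*lock));
  pthread_mutex_init(&lock->mu, NULL);
  lock->refs = 2;
  c->lock = s->lock = lock;
  c->other_side = s;
  s->other_side = c;
  *client = c;
  *server = s;
}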
diff --git a/src/core/ext/transport/inproc/inproc_transport.h b/src/core/ext/transport/inproc/inproc_transport.h
index 37e6d99e99..6e83af3b4c 100644
--- a/src/core/ext/transport/inproc/inproc_transport.h
+++ b/src/core/ext/transport/inproc/inproc_transport.h
@@ -25,9 +25,9 @@
extern "C" {
#endif
-grpc_channel *grpc_inproc_channel_create(grpc_server *server,
- grpc_channel_args *args,
- void *reserved);
+grpc_channel* grpc_inproc_channel_create(grpc_server* server,
+ grpc_channel_args* args,
+ void* reserved);
extern grpc_tracer_flag grpc_inproc_trace;
diff --git a/src/core/lib/backoff/backoff.cc b/src/core/lib/backoff/backoff.cc
index 5dd91da4f3..dc754ddd82 100644
--- a/src/core/lib/backoff/backoff.cc
+++ b/src/core/lib/backoff/backoff.cc
@@ -20,7 +20,7 @@
#include <grpc/support/useful.h>
-void grpc_backoff_init(grpc_backoff *backoff, grpc_millis initial_backoff,
+void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
double multiplier, double jitter,
grpc_millis min_connect_timeout,
grpc_millis max_backoff) {
@@ -32,8 +32,8 @@ void grpc_backoff_init(grpc_backoff *backoff, grpc_millis initial_backoff,
backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
-grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx *exec_ctx,
- grpc_backoff *backoff) {
+grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
+ grpc_backoff* backoff) {
backoff->current_backoff = backoff->initial_backoff;
const grpc_millis initial_timeout =
GPR_MAX(backoff->initial_backoff, backoff->min_connect_timeout);
@@ -44,12 +44,12 @@ grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx *exec_ctx,
}
/* Generate a random number between 0 and 1. */
-static double generate_uniform_random_number(uint32_t *rng_state) {
+static double generate_uniform_random_number(uint32_t* rng_state) {
*rng_state = (1103515245 * *rng_state + 12345) % ((uint32_t)1 << 31);
return *rng_state / (double)((uint32_t)1 << 31);
}
-static double generate_uniform_random_number_between(uint32_t *rng_state,
+static double generate_uniform_random_number_between(uint32_t* rng_state,
double a, double b) {
if (a == b) return a;
if (a > b) GPR_SWAP(double, a, b); // make sure a < b
@@ -57,8 +57,8 @@ static double generate_uniform_random_number_between(uint32_t *rng_state,
return a + generate_uniform_random_number(rng_state) * range;
}
-grpc_backoff_result grpc_backoff_step(grpc_exec_ctx *exec_ctx,
- grpc_backoff *backoff) {
+grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
+ grpc_backoff* backoff) {
backoff->current_backoff = (grpc_millis)(GPR_MIN(
backoff->current_backoff * backoff->multiplier, backoff->max_backoff));
const double jitter = generate_uniform_random_number_between(
@@ -75,6 +75,6 @@ grpc_backoff_result grpc_backoff_step(grpc_exec_ctx *exec_ctx,
return result;
}
-void grpc_backoff_reset(grpc_backoff *backoff) {
+void grpc_backoff_reset(grpc_backoff* backoff) {
backoff->current_backoff = backoff->initial_backoff;
}
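The backoff.cc hunks above are formatting-only; the underlying algorithm is unchanged: each step multiplies the current backoff by the configured multiplier, clamps it to max_backoff, and perturbs the result by a uniformly distributed jitter proportional to the backoff. A minimal sketch of that step under those assumptions, with hypothetical names and plain rand() in place of the module's own PRNG:

#include <stdlib.h>

/* One backoff step: grow geometrically, clamp to the maximum, then scale by
   a factor drawn uniformly from [1 - jitter, 1 + jitter]. */
static double backoff_step_ms(double current_ms, double multiplier,
                              double jitter, double max_ms) {
  double next = current_ms * multiplier;
  if (next > max_ms) next = max_ms;
  double r = (double)rand() / (double)RAND_MAX;        /* uniform in [0, 1] */
  double factor = (1.0 - jitter) + r * (2.0 * jitter); /* in [1-j, 1+j] */
  return next * factor;
}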
diff --git a/src/core/lib/backoff/backoff.h b/src/core/lib/backoff/backoff.h
index 8becf4aab8..1067281403 100644
--- a/src/core/lib/backoff/backoff.h
+++ b/src/core/lib/backoff/backoff.h
@@ -57,24 +57,24 @@ typedef struct {
} grpc_backoff_result;
/// Initialize backoff machinery - does not need to be destroyed
-void grpc_backoff_init(grpc_backoff *backoff, grpc_millis initial_backoff,
+void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
double multiplier, double jitter,
grpc_millis min_connect_timeout,
grpc_millis max_backoff);
/// Begin retry loop: returns the deadlines to be used for the current attempt
/// and the subsequent retry, if any.
-grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx *exec_ctx,
- grpc_backoff *backoff);
+grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
+ grpc_backoff* backoff);
/// Step a retry loop: returns the deadlines to be used for the current attempt
/// and the subsequent retry, if any.
-grpc_backoff_result grpc_backoff_step(grpc_exec_ctx *exec_ctx,
- grpc_backoff *backoff);
+grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
+ grpc_backoff* backoff);
/// Reset the backoff, so the next grpc_backoff_step will be a
/// grpc_backoff_begin.
-void grpc_backoff_reset(grpc_backoff *backoff);
+void grpc_backoff_reset(grpc_backoff* backoff);
#ifdef __cplusplus
}
diff --git a/src/core/lib/channel/channel_args.cc b/src/core/lib/channel/channel_args.cc
index 30248b3c60..23a604301e 100644
--- a/src/core/lib/channel/channel_args.cc
+++ b/src/core/lib/channel/channel_args.cc
@@ -31,7 +31,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/support/string.h"
-static grpc_arg copy_arg(const grpc_arg *src) {
+static grpc_arg copy_arg(const grpc_arg* src) {
grpc_arg dst;
dst.type = src->type;
dst.key = gpr_strdup(src->key);
@@ -51,21 +51,21 @@ static grpc_arg copy_arg(const grpc_arg *src) {
return dst;
}
-grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
- const grpc_arg *to_add,
+grpc_channel_args* grpc_channel_args_copy_and_add(const grpc_channel_args* src,
+ const grpc_arg* to_add,
size_t num_to_add) {
return grpc_channel_args_copy_and_add_and_remove(src, NULL, 0, to_add,
num_to_add);
}
-grpc_channel_args *grpc_channel_args_copy_and_remove(
- const grpc_channel_args *src, const char **to_remove,
+grpc_channel_args* grpc_channel_args_copy_and_remove(
+ const grpc_channel_args* src, const char** to_remove,
size_t num_to_remove) {
return grpc_channel_args_copy_and_add_and_remove(src, to_remove,
num_to_remove, NULL, 0);
}
-static bool should_remove_arg(const grpc_arg *arg, const char **to_remove,
+static bool should_remove_arg(const grpc_arg* arg, const char** to_remove,
size_t num_to_remove) {
for (size_t i = 0; i < num_to_remove; ++i) {
if (strcmp(arg->key, to_remove[i]) == 0) return true;
@@ -73,9 +73,9 @@ static bool should_remove_arg(const grpc_arg *arg, const char **to_remove,
return false;
}
-grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
- const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
- const grpc_arg *to_add, size_t num_to_add) {
+grpc_channel_args* grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args* src, const char** to_remove, size_t num_to_remove,
+ const grpc_arg* to_add, size_t num_to_add) {
// Figure out how many args we'll be copying.
size_t num_args_to_copy = 0;
if (src != NULL) {
@@ -86,14 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
- grpc_channel_args *dst =
- (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args* dst =
+ (grpc_channel_args*)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
- dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
+ dst->args = (grpc_arg*)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -111,30 +111,30 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
return dst;
}
-grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
+grpc_channel_args* grpc_channel_args_copy(const grpc_channel_args* src) {
return grpc_channel_args_copy_and_add(src, NULL, 0);
}
-grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
- const grpc_channel_args *b) {
+grpc_channel_args* grpc_channel_args_union(const grpc_channel_args* a,
+ const grpc_channel_args* b) {
const size_t max_out = (a->num_args + b->num_args);
- grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
+ grpc_arg* uniques = (grpc_arg*)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
for (size_t i = 0; i < b->num_args; ++i) {
- const char *b_key = b->args[i].key;
+ const char* b_key = b->args[i].key;
if (grpc_channel_args_find(a, b_key) == NULL) { // not found
uniques[uniques_idx++] = b->args[i];
}
}
- grpc_channel_args *result =
+ grpc_channel_args* result =
grpc_channel_args_copy_and_add(NULL, uniques, uniques_idx);
gpr_free(uniques);
return result;
}
-static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
+static int cmp_arg(const grpc_arg* a, const grpc_arg* b) {
int c = GPR_ICMP(a->type, b->type);
if (c != 0) return c;
c = strcmp(a->key, b->key);
@@ -160,26 +160,26 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
-static int cmp_key_stable(const void *ap, const void *bp) {
- const grpc_arg *const *a = (const grpc_arg *const *)ap;
- const grpc_arg *const *b = (const grpc_arg *const *)bp;
+static int cmp_key_stable(const void* ap, const void* bp) {
+ const grpc_arg* const* a = (const grpc_arg* const*)ap;
+ const grpc_arg* const* b = (const grpc_arg* const*)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
-grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
- grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
+grpc_channel_args* grpc_channel_args_normalize(const grpc_channel_args* a) {
+ grpc_arg** args = (grpc_arg**)gpr_malloc(sizeof(grpc_arg*) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
- qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
+ qsort(args, a->num_args, sizeof(grpc_arg*), cmp_key_stable);
- grpc_channel_args *b =
- (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
+ grpc_channel_args* b =
+ (grpc_channel_args*)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
- b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
+ b->args = (grpc_arg*)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}
@@ -188,7 +188,7 @@ grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
return b;
}
-void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
+void grpc_channel_args_destroy(grpc_exec_ctx* exec_ctx, grpc_channel_args* a) {
size_t i;
if (!a) return;
for (i = 0; i < a->num_args; i++) {
@@ -210,7 +210,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) {
}
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
- const grpc_channel_args *a) {
+ const grpc_channel_args* a) {
size_t i;
if (a == NULL) return GRPC_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
@@ -224,7 +224,7 @@ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
}
grpc_stream_compression_algorithm
-grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args* a) {
size_t i;
if (a == NULL) return GRPC_STREAM_COMPRESS_NONE;
for (i = 0; i < a->num_args; ++i) {
@@ -238,22 +238,22 @@ grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) {
return GRPC_STREAM_COMPRESS_NONE;
}
-grpc_channel_args *grpc_channel_args_set_compression_algorithm(
- grpc_channel_args *a, grpc_compression_algorithm algorithm) {
+grpc_channel_args* grpc_channel_args_set_compression_algorithm(
+ grpc_channel_args* a, grpc_compression_algorithm algorithm) {
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
-grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
- grpc_channel_args *a, grpc_stream_compression_algorithm algorithm) {
+grpc_channel_args* grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args* a, grpc_stream_compression_algorithm algorithm) {
GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
+ tmp.key = (char*)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@@ -261,8 +261,8 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
/** Returns 1 if the argument for compression algorithm's enabled states bitset
* was found in \a a, returning the arg's value in \a states. Otherwise, returns
* 0. */
-static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
- int **states_arg) {
+static int find_compression_algorithm_states_bitset(const grpc_channel_args* a,
+ int** states_arg) {
if (a != NULL) {
size_t i;
for (i = 0; i < a->num_args; ++i) {
@@ -282,7 +282,7 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a,
* was found in \a a, returning the arg's value in \a states. Otherwise, returns
* 0. */
static int find_stream_compression_algorithm_states_bitset(
- const grpc_channel_args *a, int **states_arg) {
+ const grpc_channel_args* a, int** states_arg) {
if (a != NULL) {
size_t i;
for (i = 0; i < a->num_args; ++i) {
@@ -298,17 +298,17 @@ static int find_stream_compression_algorithm_states_bitset(
return 0; /* GPR_FALSE */
}
-grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
+ grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
grpc_compression_algorithm algorithm, int state) {
- int *states_arg = NULL;
- grpc_channel_args *result = *a;
+ int* states_arg = NULL;
+ grpc_channel_args* result = *a;
const int states_arg_found =
find_compression_algorithm_states_bitset(*a, &states_arg);
if (grpc_channel_args_get_compression_algorithm(*a) == algorithm &&
state == 0) {
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0);
gpr_log(GPR_ERROR,
"Tried to disable default compression algorithm '%s'. The "
@@ -316,21 +316,21 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
algo_name);
} else if (states_arg_found) {
if (state != 0) {
- GPR_BITSET((unsigned *)states_arg, algorithm);
+ GPR_BITSET((unsigned*)states_arg, algorithm);
} else if (algorithm != GRPC_COMPRESS_NONE) {
- GPR_BITCLEAR((unsigned *)states_arg, algorithm);
+ GPR_BITCLEAR((unsigned*)states_arg, algorithm);
}
} else {
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
- GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
+ GPR_BITSET((unsigned*)&tmp.value.integer, algorithm);
} else if (algorithm != GRPC_COMPRESS_NONE) {
- GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
+ GPR_BITCLEAR((unsigned*)&tmp.value.integer, algorithm);
}
result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
grpc_channel_args_destroy(exec_ctx, *a);
@@ -339,17 +339,17 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
return result;
}
-grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+grpc_channel_args* grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
grpc_stream_compression_algorithm algorithm, int state) {
- int *states_arg = NULL;
- grpc_channel_args *result = *a;
+ int* states_arg = NULL;
+ grpc_channel_args* result = *a;
const int states_arg_found =
find_stream_compression_algorithm_states_bitset(*a, &states_arg);
if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm &&
state == 0) {
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) !=
0);
gpr_log(GPR_ERROR,
@@ -358,21 +358,21 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
algo_name);
} else if (states_arg_found) {
if (state != 0) {
- GPR_BITSET((unsigned *)states_arg, algorithm);
+ GPR_BITSET((unsigned*)states_arg, algorithm);
} else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
- GPR_BITCLEAR((unsigned *)states_arg, algorithm);
+ GPR_BITCLEAR((unsigned*)states_arg, algorithm);
}
} else {
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
- tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
+ tmp.key = (char*)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
/* all enabled by default */
tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {
- GPR_BITSET((unsigned *)&tmp.value.integer, algorithm);
+ GPR_BITSET((unsigned*)&tmp.value.integer, algorithm);
} else if (algorithm != GRPC_STREAM_COMPRESS_NONE) {
- GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm);
+ GPR_BITCLEAR((unsigned*)&tmp.value.integer, algorithm);
}
result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
grpc_channel_args_destroy(exec_ctx, *a);
@@ -382,8 +382,8 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
}
uint32_t grpc_channel_args_compression_algorithm_get_states(
- const grpc_channel_args *a) {
- int *states_arg;
+ const grpc_channel_args* a) {
+ int* states_arg;
if (find_compression_algorithm_states_bitset(a, &states_arg)) {
return (uint32_t)*states_arg;
} else {
@@ -392,8 +392,8 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
}
uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
- const grpc_channel_args *a) {
- int *states_arg;
+ const grpc_channel_args* a) {
+ int* states_arg;
if (find_stream_compression_algorithm_states_bitset(a, &states_arg)) {
return (uint32_t)*states_arg;
} else {
@@ -402,14 +402,14 @@ uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
}
}
-grpc_channel_args *grpc_channel_args_set_socket_mutator(
- grpc_channel_args *a, grpc_socket_mutator *mutator) {
+grpc_channel_args* grpc_channel_args_set_socket_mutator(
+ grpc_channel_args* a, grpc_socket_mutator* mutator) {
grpc_arg tmp = grpc_socket_mutator_to_arg(mutator);
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
-int grpc_channel_args_compare(const grpc_channel_args *a,
- const grpc_channel_args *b) {
+int grpc_channel_args_compare(const grpc_channel_args* a,
+ const grpc_channel_args* b) {
int c = GPR_ICMP(a->num_args, b->num_args);
if (c != 0) return c;
for (size_t i = 0; i < a->num_args; i++) {
@@ -419,8 +419,8 @@ int grpc_channel_args_compare(const grpc_channel_args *a,
return 0;
}
-const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
- const char *name) {
+const grpc_arg* grpc_channel_args_find(const grpc_channel_args* args,
+ const char* name) {
if (args != NULL) {
for (size_t i = 0; i < args->num_args; ++i) {
if (strcmp(args->args[i].key, name) == 0) {
@@ -431,7 +431,7 @@ const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
return NULL;
}
-int grpc_channel_arg_get_integer(const grpc_arg *arg,
+int grpc_channel_arg_get_integer(const grpc_arg* arg,
const grpc_integer_options options) {
if (arg == NULL) return options.default_value;
if (arg->type != GRPC_ARG_INTEGER) {
@@ -451,7 +451,7 @@ int grpc_channel_arg_get_integer(const grpc_arg *arg,
return arg->value.integer;
}
-bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value) {
+bool grpc_channel_arg_get_bool(const grpc_arg* arg, bool default_value) {
if (arg == NULL) return default_value;
if (arg->type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key);
@@ -469,12 +469,12 @@ bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value) {
}
}
-bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args) {
+bool grpc_channel_args_want_minimal_stack(const grpc_channel_args* args) {
return grpc_channel_arg_get_bool(
grpc_channel_args_find(args, GRPC_ARG_MINIMAL_STACK), false);
}
-grpc_arg grpc_channel_arg_string_create(char *name, char *value) {
+grpc_arg grpc_channel_arg_string_create(char* name, char* value) {
grpc_arg arg;
arg.type = GRPC_ARG_STRING;
arg.key = name;
@@ -482,7 +482,7 @@ grpc_arg grpc_channel_arg_string_create(char *name, char *value) {
return arg;
}
-grpc_arg grpc_channel_arg_integer_create(char *name, int value) {
+grpc_arg grpc_channel_arg_integer_create(char* name, int value) {
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = name;
@@ -491,7 +491,7 @@ grpc_arg grpc_channel_arg_integer_create(char *name, int value) {
}
grpc_arg grpc_channel_arg_pointer_create(
- char *name, void *value, const grpc_arg_pointer_vtable *vtable) {
+ char* name, void* value, const grpc_arg_pointer_vtable* vtable) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = name;
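A short usage sketch of the channel-args helpers touched above (illustrative, not part of this change). The key name "grpc.example.my_int" is hypothetical; the exec_ctx init/finish pattern matches the rest of core at this revision.

    #include "src/core/lib/channel/channel_args.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static int read_my_int(const grpc_channel_args* base) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_arg to_add =
          grpc_channel_arg_integer_create((char*)"grpc.example.my_int", 42);
      /* Copy base (which may be NULL) and append the new arg. */
      grpc_channel_args* args = grpc_channel_args_copy_and_add(base, &to_add, 1);
      const grpc_arg* found = grpc_channel_args_find(args, "grpc.example.my_int");
      int value = (found != NULL && found->type == GRPC_ARG_INTEGER)
                      ? found->value.integer
                      : -1;
      grpc_channel_args_destroy(&exec_ctx, args);
      grpc_exec_ctx_finish(&exec_ctx);
      return value; /* 42 in this sketch */
    }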
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 1896d35cf4..d36761da57 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -30,56 +30,56 @@ extern "C" {
// Channel args are intentionally immutable, to avoid the need for locking.
/** Copy the arguments in \a src into a new instance */
-grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src);
+grpc_channel_args* grpc_channel_args_copy(const grpc_channel_args* src);
/** Copy the arguments in \a src into a new instance, stably sorting keys */
-grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *src);
+grpc_channel_args* grpc_channel_args_normalize(const grpc_channel_args* src);
/** Copy the arguments in \a src and append \a to_add. If \a to_add is NULL, it
* is equivalent to calling \a grpc_channel_args_copy. */
-grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src,
- const grpc_arg *to_add,
+grpc_channel_args* grpc_channel_args_copy_and_add(const grpc_channel_args* src,
+ const grpc_arg* to_add,
size_t num_to_add);
/** Copies the arguments in \a src except for those whose keys are in
\a to_remove. */
-grpc_channel_args *grpc_channel_args_copy_and_remove(
- const grpc_channel_args *src, const char **to_remove, size_t num_to_remove);
+grpc_channel_args* grpc_channel_args_copy_and_remove(
+ const grpc_channel_args* src, const char** to_remove, size_t num_to_remove);
/** Copies the arguments from \a src except for those whose keys are in
\a to_remove and appends the arguments in \a to_add. */
-grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
- const grpc_channel_args *src, const char **to_remove, size_t num_to_remove,
- const grpc_arg *to_add, size_t num_to_add);
+grpc_channel_args* grpc_channel_args_copy_and_add_and_remove(
+ const grpc_channel_args* src, const char** to_remove, size_t num_to_remove,
+ const grpc_arg* to_add, size_t num_to_add);
/** Perform the union of \a a and \a b, prioritizing \a a entries */
-grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
- const grpc_channel_args *b);
+grpc_channel_args* grpc_channel_args_union(const grpc_channel_args* a,
+ const grpc_channel_args* b);
/** Destroy arguments created by \a grpc_channel_args_copy */
-void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a);
+void grpc_channel_args_destroy(grpc_exec_ctx* exec_ctx, grpc_channel_args* a);
/** Returns the compression algorithm set in \a a. */
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
- const grpc_channel_args *a);
+ const grpc_channel_args* a);
/** Returns the stream compression algorithm set in \a a. */
grpc_stream_compression_algorithm
-grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a);
+grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args* a);
/** Returns a channel arg instance with compression enabled. If \a a is
* non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression
* for the channel. */
-grpc_channel_args *grpc_channel_args_set_compression_algorithm(
- grpc_channel_args *a, grpc_compression_algorithm algorithm);
+grpc_channel_args* grpc_channel_args_set_compression_algorithm(
+ grpc_channel_args* a, grpc_compression_algorithm algorithm);
/** Returns a channel arg instance with stream compression enabled. If \a a is
* non-NULL, its args are copied. N.B. GRPC_STREAM_COMPRESS_NONE disables
* stream compression for the channel. If a value other than
* GRPC_STREAM_COMPRESS_NONE is set, it takes precedence over message-wise
* compression algorithms. */
-grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
- grpc_channel_args *a, grpc_stream_compression_algorithm algorithm);
+grpc_channel_args* grpc_channel_args_set_stream_compression_algorithm(
+ grpc_channel_args* a, grpc_stream_compression_algorithm algorithm);
/** Sets the support for the given compression algorithm. By default, all
* compression algorithms are enabled. It's an error to disable an algorithm set
@@ -88,8 +88,8 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm(
* Returns an instance with the updated algorithm states. The \a a pointer is
* modified to point to the returned instance (which may be different from the
* input value of \a a). */
-grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
+ grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
grpc_compression_algorithm algorithm, int enabled);
/** Sets the support for the given stream compression algorithm. By default, all
@@ -99,8 +99,8 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
* Returns an instance with the updated algorithm states. The \a a pointer is
* modified to point to the returned instance (which may be different from the
* input value of \a a). */
-grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
- grpc_exec_ctx *exec_ctx, grpc_channel_args **a,
+grpc_channel_args* grpc_channel_args_stream_compression_algorithm_set_state(
+ grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
grpc_stream_compression_algorithm algorithm, int enabled);
/** Returns the bitset representing the support state (true for enabled, false
@@ -109,7 +109,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state(
* The i-th bit of the returned bitset corresponds to the i-th entry in the
* grpc_compression_algorithm enum. */
uint32_t grpc_channel_args_compression_algorithm_get_states(
- const grpc_channel_args *a);
+ const grpc_channel_args* a);
/** Returns the bitset representing the support state (true for enabled, false
* for disabled) for stream compression algorithms.
@@ -117,23 +117,23 @@ uint32_t grpc_channel_args_compression_algorithm_get_states(
* The i-th bit of the returned bitset corresponds to the i-th entry in the
* grpc_stream_compression_algorithm enum. */
uint32_t grpc_channel_args_stream_compression_algorithm_get_states(
- const grpc_channel_args *a);
+ const grpc_channel_args* a);
-int grpc_channel_args_compare(const grpc_channel_args *a,
- const grpc_channel_args *b);
+int grpc_channel_args_compare(const grpc_channel_args* a,
+ const grpc_channel_args* b);
/** Returns a channel arg instance with socket mutator added. The socket mutator
* will perform its mutate_fd method on all file descriptors used by the
* channel.
 * If \a a is non-NULL, its args are copied. */
-grpc_channel_args *grpc_channel_args_set_socket_mutator(
- grpc_channel_args *a, grpc_socket_mutator *mutator);
+grpc_channel_args* grpc_channel_args_set_socket_mutator(
+ grpc_channel_args* a, grpc_socket_mutator* mutator);
/** Returns the value of argument \a name from \a args, or NULL if not found. */
-const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args,
- const char *name);
+const grpc_arg* grpc_channel_args_find(const grpc_channel_args* args,
+ const char* name);
-bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args);
+bool grpc_channel_args_want_minimal_stack(const grpc_channel_args* args);
typedef struct grpc_integer_options {
int default_value; // Return this if value is outside of expected bounds.
@@ -142,16 +142,16 @@ typedef struct grpc_integer_options {
} grpc_integer_options;
/** Returns the value of \a arg, subject to the constraints in \a options. */
-int grpc_channel_arg_get_integer(const grpc_arg *arg,
+int grpc_channel_arg_get_integer(const grpc_arg* arg,
const grpc_integer_options options);
-bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value);
+bool grpc_channel_arg_get_bool(const grpc_arg* arg, bool default_value);
// Helpers for creating channel args.
-grpc_arg grpc_channel_arg_string_create(char *name, char *value);
-grpc_arg grpc_channel_arg_integer_create(char *name, int value);
-grpc_arg grpc_channel_arg_pointer_create(char *name, void *value,
- const grpc_arg_pointer_vtable *vtable);
+grpc_arg grpc_channel_arg_string_create(char* name, char* value);
+grpc_arg grpc_channel_arg_integer_create(char* name, int value);
+grpc_arg grpc_channel_arg_pointer_create(char* name, void* value,
+ const grpc_arg_pointer_vtable* vtable);
#ifdef __cplusplus
}
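A sketch of the replacement contract of grpc_channel_args_compression_algorithm_set_state declared in this header (illustrative, not part of this change; starts from an empty args instance).

    #include <grpc/compression.h>
    #include "src/core/lib/channel/channel_args.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static grpc_channel_args* args_with_gzip_disabled(void) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_channel_args* args = grpc_channel_args_copy_and_add(NULL, NULL, 0);
      /* Takes grpc_channel_args**: *args is replaced by (and may differ from)
         the returned instance, and the old instance is destroyed. */
      grpc_channel_args* updated =
          grpc_channel_args_compression_algorithm_set_state(
              &exec_ctx, &args, GRPC_COMPRESS_GZIP, 0 /* disabled */);
      grpc_exec_ctx_finish(&exec_ctx);
      return updated; /* caller releases with grpc_channel_args_destroy */
    }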
diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc
index 775c8bc667..3ab2e33d6d 100644
--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -45,7 +45,7 @@ grpc_tracer_flag grpc_trace_channel = GRPC_TRACER_INITIALIZER(false, "channel");
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
(((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
-size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
+size_t grpc_channel_stack_size(const grpc_channel_filter** filters,
size_t filter_count) {
/* always need the header, and size for the channel elements */
size_t size =
@@ -64,52 +64,51 @@ size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
return size;
}
-#define CHANNEL_ELEMS_FROM_STACK(stk) \
- ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
- sizeof(grpc_channel_stack))))
+#define CHANNEL_ELEMS_FROM_STACK(stk) \
+ ((grpc_channel_element*)((char*)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
+ sizeof(grpc_channel_stack))))
-#define CALL_ELEMS_FROM_STACK(stk) \
- ((grpc_call_element *)((char *)(stk) + \
- ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
+#define CALL_ELEMS_FROM_STACK(stk) \
+ ((grpc_call_element*)((char*)(stk) + \
+ ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
-grpc_channel_element *grpc_channel_stack_element(
- grpc_channel_stack *channel_stack, size_t index) {
+grpc_channel_element* grpc_channel_stack_element(
+ grpc_channel_stack* channel_stack, size_t index) {
return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index;
}
-grpc_channel_element *grpc_channel_stack_last_element(
- grpc_channel_stack *channel_stack) {
+grpc_channel_element* grpc_channel_stack_last_element(
+ grpc_channel_stack* channel_stack) {
return grpc_channel_stack_element(channel_stack, channel_stack->count - 1);
}
-grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
+grpc_call_element* grpc_call_stack_element(grpc_call_stack* call_stack,
size_t index) {
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
-grpc_error *grpc_channel_stack_init(
- grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count,
- const grpc_channel_args *channel_args, grpc_transport *optional_transport,
- const char *name, grpc_channel_stack *stack) {
+grpc_error* grpc_channel_stack_init(
+ grpc_exec_ctx* exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
+ void* destroy_arg, const grpc_channel_filter** filters, size_t filter_count,
+ const grpc_channel_args* channel_args, grpc_transport* optional_transport,
+ const char* name, grpc_channel_stack* stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
- grpc_channel_element *elems;
+ grpc_channel_element* elems;
grpc_channel_element_args args;
- char *user_data;
+ char* user_data;
size_t i;
stack->count = filter_count;
GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
- user_data =
- ((char *)elems) +
- ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
+ user_data = ((char*)elems) + ROUND_UP_TO_ALIGNMENT_SIZE(
+ filter_count * sizeof(grpc_channel_element));
/* init per-filter data */
- grpc_error *first_error = GRPC_ERROR_NONE;
+ grpc_error* first_error = GRPC_ERROR_NONE;
for (i = 0; i < filter_count; i++) {
args.channel_stack = stack;
args.channel_args = channel_args;
@@ -118,7 +117,7 @@ grpc_error *grpc_channel_stack_init(
args.is_last = i == (filter_count - 1);
elems[i].filter = filters[i];
elems[i].channel_data = user_data;
- grpc_error *error =
+ grpc_error* error =
elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
if (error != GRPC_ERROR_NONE) {
if (first_error == GRPC_ERROR_NONE) {
@@ -131,17 +130,17 @@ grpc_error *grpc_channel_stack_init(
call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
}
- GPR_ASSERT(user_data > (char *)stack);
- GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
+ GPR_ASSERT(user_data > (char*)stack);
+ GPR_ASSERT((uintptr_t)(user_data - (char*)stack) ==
grpc_channel_stack_size(filters, filter_count));
stack->call_stack_size = call_size;
return first_error;
}
-void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *stack) {
- grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
+void grpc_channel_stack_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack* stack) {
+ grpc_channel_element* channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
@@ -151,31 +150,31 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
}
}
-grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *channel_stack,
+grpc_error* grpc_call_stack_init(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack* channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg,
- const grpc_call_element_args *elem_args) {
- grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
+ void* destroy_arg,
+ const grpc_call_element_args* elem_args) {
+ grpc_channel_element* channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
size_t count = channel_stack->count;
- grpc_call_element *call_elems;
- char *user_data;
+ grpc_call_element* call_elems;
+ char* user_data;
size_t i;
elem_args->call_stack->count = count;
GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack);
- user_data = ((char *)call_elems) +
+ user_data = ((char*)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
- grpc_error *first_error = GRPC_ERROR_NONE;
+ grpc_error* first_error = GRPC_ERROR_NONE;
for (i = 0; i < count; i++) {
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- grpc_error *error = call_elems[i].filter->init_call_elem(
+ grpc_error* error = call_elems[i].filter->init_call_elem(
exec_ctx, &call_elems[i], elem_args);
if (error != GRPC_ERROR_NONE) {
if (first_error == GRPC_ERROR_NONE) {
@@ -190,16 +189,16 @@ grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
return first_error;
}
-void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_stack *call_stack,
- grpc_polling_entity *pollent) {
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_stack* call_stack,
+ grpc_polling_entity* pollent) {
size_t count = call_stack->count;
- grpc_call_element *call_elems;
- char *user_data;
+ grpc_call_element* call_elems;
+ char* user_data;
size_t i;
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
- user_data = ((char *)call_elems) +
+ user_data = ((char*)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
@@ -212,13 +211,13 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
}
void grpc_call_stack_ignore_set_pollset_or_pollset_set(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_polling_entity *pollent) {}
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_polling_entity* pollent) {}
-void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure) {
- grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
+void grpc_call_stack_destroy(grpc_exec_ctx* exec_ctx, grpc_call_stack* stack,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure) {
+ grpc_call_element* elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
@@ -230,33 +229,33 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
}
}
-void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- grpc_call_element *next_elem = elem + 1;
+void grpc_call_next_op(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ grpc_call_element* next_elem = elem + 1;
GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
}
-void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *channel_info) {
- grpc_channel_element *next_elem = elem + 1;
+void grpc_channel_next_get_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {
+ grpc_channel_element* next_elem = elem + 1;
next_elem->filter->get_channel_info(exec_ctx, next_elem, channel_info);
}
-void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_transport_op *op) {
- grpc_channel_element *next_elem = elem + 1;
+void grpc_channel_next_op(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_transport_op* op) {
+ grpc_channel_element* next_elem = elem + 1;
next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
}
-grpc_channel_stack *grpc_channel_stack_from_top_element(
- grpc_channel_element *elem) {
- return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
+grpc_channel_stack* grpc_channel_stack_from_top_element(
+ grpc_channel_element* elem) {
+ return (grpc_channel_stack*)((char*)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_channel_stack)));
}
-grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
- return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
+grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) {
+ return (grpc_call_stack*)((char*)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE(
sizeof(grpc_call_stack)));
}
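The element accessors above index into per-filter arrays laid out directly after each stack header (that is what CHANNEL_ELEMS_FROM_STACK and CALL_ELEMS_FROM_STACK encode). A sketch that walks an already-initialized channel stack and logs its filter names (illustrative, not part of this change):

    #include <grpc/support/log.h>
    #include "src/core/lib/channel/channel_stack.h"

    static void log_filter_names(grpc_channel_stack* stack) {
      for (size_t i = 0; i < stack->count; i++) {
        grpc_channel_element* elem = grpc_channel_stack_element(stack, i);
        gpr_log(GPR_INFO, "filter %d: %s", (int)i, elem->filter->name);
      }
    }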
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 5c00c09889..aa993112a0 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -56,23 +56,23 @@ typedef struct grpc_channel_stack grpc_channel_stack;
typedef struct grpc_call_stack grpc_call_stack;
typedef struct {
- grpc_channel_stack *channel_stack;
- const grpc_channel_args *channel_args;
+ grpc_channel_stack* channel_stack;
+ const grpc_channel_args* channel_args;
/** Transport, iff it is known */
- grpc_transport *optional_transport;
+ grpc_transport* optional_transport;
int is_first;
int is_last;
} grpc_channel_element_args;
typedef struct {
- grpc_call_stack *call_stack;
- const void *server_transport_data;
- grpc_call_context_element *context;
+ grpc_call_stack* call_stack;
+ const void* server_transport_data;
+ grpc_call_context_element* context;
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
- gpr_arena *arena;
- grpc_call_combiner *call_combiner;
+ gpr_arena* arena;
+ grpc_call_combiner* call_combiner;
} grpc_call_element_args;
typedef struct {
@@ -99,14 +99,14 @@ typedef struct {
typedef struct {
/* Called to eg. send/receive data on a call.
See grpc_call_next_op on how to call the next element in the stack */
- void (*start_transport_stream_op_batch)(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op);
+ void (*start_transport_stream_op_batch)(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op);
/* Called to handle channel level operations - e.g. new calls, or transport
closure.
See grpc_channel_next_op on how to call the next element in the stack */
- void (*start_transport_op)(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem, grpc_transport_op *op);
+ void (*start_transport_op)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem, grpc_transport_op* op);
/* sizeof(per call data) */
size_t sizeof_call_data;
@@ -119,21 +119,21 @@ typedef struct {
transport and is on the server. Most filters want to ignore this
argument.
Implementations may assume that elem->call_data is all zeros. */
- grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args);
- void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent);
+ grpc_error* (*init_call_elem)(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args);
+ void (*set_pollset_or_pollset_set)(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent);
/* Destroy per call data.
The filter does not need to do any chaining.
The bottom filter of a stack will be passed a non-NULL pointer to
\a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when
destruction is complete. \a final_info contains data about the completed
call, mainly for reporting purposes. */
- void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure);
+ void (*destroy_call_elem)(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure);
/* sizeof(per channel data) */
size_t sizeof_channel_data;
@@ -144,36 +144,36 @@ typedef struct {
useful for asserting correct configuration by upper layer code.
The filter does not need to do any chaining.
Implementations may assume that elem->call_data is all zeros. */
- grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args);
+ grpc_error* (*init_channel_elem)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args);
/* Destroy per channel data.
The filter does not need to do any chaining */
- void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem);
+ void (*destroy_channel_elem)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem);
/* Implement grpc_channel_get_info() */
- void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- const grpc_channel_info *channel_info);
+ void (*get_channel_info)(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ const grpc_channel_info* channel_info);
/* The name of this filter */
- const char *name;
+ const char* name;
} grpc_channel_filter;
/* A channel_element tracks its filter and the filter requested memory within
a channel allocation */
struct grpc_channel_element {
- const grpc_channel_filter *filter;
- void *channel_data;
+ const grpc_channel_filter* filter;
+ void* channel_data;
};
/* A call_element tracks its filter, the filter requested memory within
a channel allocation, and the filter requested memory within a call
allocation */
struct grpc_call_element {
- const grpc_channel_filter *filter;
- void *channel_data;
- void *call_data;
+ const grpc_channel_filter* filter;
+ void* channel_data;
+ void* call_data;
};
/* A channel stack tracks a set of related filters for one channel, and
@@ -198,40 +198,40 @@ struct grpc_call_stack {
};
/* Get a channel element given a channel stack and its index */
-grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
+grpc_channel_element* grpc_channel_stack_element(grpc_channel_stack* stack,
size_t i);
/* Get the last channel element in a channel stack */
-grpc_channel_element *grpc_channel_stack_last_element(
- grpc_channel_stack *stack);
+grpc_channel_element* grpc_channel_stack_last_element(
+ grpc_channel_stack* stack);
/* Get a call stack element given a call stack and an index */
-grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
+grpc_call_element* grpc_call_stack_element(grpc_call_stack* stack, size_t i);
/* Determine memory required for a channel stack containing a set of filters */
-size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
+size_t grpc_channel_stack_size(const grpc_channel_filter** filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
-grpc_error *grpc_channel_stack_init(
- grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count,
- const grpc_channel_args *args, grpc_transport *optional_transport,
- const char *name, grpc_channel_stack *stack);
+grpc_error* grpc_channel_stack_init(
+ grpc_exec_ctx* exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
+ void* destroy_arg, const grpc_channel_filter** filters, size_t filter_count,
+ const grpc_channel_args* args, grpc_transport* optional_transport,
+ const char* name, grpc_channel_stack* stack);
/* Destroy a channel stack */
-void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *stack);
+void grpc_channel_stack_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack* stack);
/* Initialize a call stack given a channel stack. transport_server_data is
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
-grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack *channel_stack,
+grpc_error* grpc_call_stack_init(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack* channel_stack,
int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg,
- const grpc_call_element_args *elem_args);
+ void* destroy_arg,
+ const grpc_call_element_args* elem_args);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
-void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_stack *call_stack,
- grpc_polling_entity *pollent);
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_stack* call_stack,
+ grpc_polling_entity* pollent);
#ifndef NDEBUG
#define GRPC_CALL_STACK_REF(call_stack, reason) \
@@ -254,36 +254,36 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
#endif
/* Destroy a call stack */
-void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure);
+void grpc_call_stack_destroy(grpc_exec_ctx* exec_ctx, grpc_call_stack* stack,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure);
/* Ignore set pollset{_set} - used by filters if they don't care about pollsets
* at all. Does nothing. */
void grpc_call_stack_ignore_set_pollset_or_pollset_set(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_polling_entity *pollent);
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_polling_entity* pollent);
/* Call the next operation in a call stack */
-void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op);
+void grpc_call_next_op(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op);
/* Call the next operation (depending on call directionality) in a channel
stack */
-void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
- grpc_transport_op *op);
+void grpc_channel_next_op(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+ grpc_transport_op* op);
/* Pass through a request to get_channel_info() to the next child element */
-void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *channel_info);
+void grpc_channel_next_get_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* channel_info);
/* Given the top element of a channel stack, get the channel stack itself */
-grpc_channel_stack *grpc_channel_stack_from_top_element(
- grpc_channel_element *elem);
+grpc_channel_stack* grpc_channel_stack_from_top_element(
+ grpc_channel_element* elem);
/* Given the top element of a call stack, get the call stack itself */
-grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
+grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem);
-void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op);
+void grpc_call_log_op(const char* file, int line, gpr_log_severity severity,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op);
extern grpc_tracer_flag grpc_trace_channel;
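Putting the vtable above together: a minimal, hypothetical pass-through filter that forwards every batch and op to the next element (illustrative, not part of this change). Field order follows the struct as declared in this header; both sizes are zero because the filter keeps no per-call or per-channel state, and the filter is assumed not to be the terminal (bottom) element, which would have to schedule then_schedule_closure.

    #include "src/core/lib/channel/channel_stack.h"

    static void passthru_start_transport_stream_op_batch(
        grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
        grpc_transport_stream_op_batch* op) {
      /* No processing: hand the batch to the next element in the stack. */
      grpc_call_next_op(exec_ctx, elem, op);
    }

    static void passthru_start_transport_op(grpc_exec_ctx* exec_ctx,
                                            grpc_channel_element* elem,
                                            grpc_transport_op* op) {
      grpc_channel_next_op(exec_ctx, elem, op);
    }

    static grpc_error* passthru_init_call_elem(
        grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
        const grpc_call_element_args* args) {
      return GRPC_ERROR_NONE;
    }

    static void passthru_destroy_call_elem(
        grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
        const grpc_call_final_info* final_info,
        grpc_closure* then_schedule_closure) {
      /* Nothing to tear down; a bottom-of-stack filter would schedule
         then_schedule_closure here. */
    }

    static grpc_error* passthru_init_channel_elem(grpc_exec_ctx* exec_ctx,
                                                  grpc_channel_element* elem,
                                                  grpc_channel_element_args* args) {
      return GRPC_ERROR_NONE;
    }

    static void passthru_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
                                              grpc_channel_element* elem) {}

    static const grpc_channel_filter passthru_filter = {
        passthru_start_transport_stream_op_batch,
        passthru_start_transport_op,
        0, /* sizeof_call_data */
        passthru_init_call_elem,
        grpc_call_stack_ignore_set_pollset_or_pollset_set,
        passthru_destroy_call_elem,
        0, /* sizeof_channel_data */
        passthru_init_channel_elem,
        passthru_destroy_channel_elem,
        grpc_channel_next_get_info,
        "passthru_example",
    };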
diff --git a/src/core/lib/channel/channel_stack_builder.cc b/src/core/lib/channel/channel_stack_builder.cc
index b663ebfb52..83748db3c2 100644
--- a/src/core/lib/channel/channel_stack_builder.cc
+++ b/src/core/lib/channel/channel_stack_builder.cc
@@ -27,11 +27,11 @@ grpc_tracer_flag grpc_trace_channel_stack_builder =
GRPC_TRACER_INITIALIZER(false, "channel_stack_builder");
typedef struct filter_node {
- struct filter_node *next;
- struct filter_node *prev;
- const grpc_channel_filter *filter;
+ struct filter_node* next;
+ struct filter_node* prev;
+ const grpc_channel_filter* filter;
grpc_post_filter_create_init_func init;
- void *init_arg;
+ void* init_arg;
} filter_node;
struct grpc_channel_stack_builder {
@@ -39,20 +39,20 @@ struct grpc_channel_stack_builder {
filter_node begin;
filter_node end;
// various set/get-able parameters
- grpc_channel_args *args;
- grpc_transport *transport;
- char *target;
- const char *name;
+ grpc_channel_args* args;
+ grpc_transport* transport;
+ char* target;
+ const char* name;
};
struct grpc_channel_stack_builder_iterator {
- grpc_channel_stack_builder *builder;
- filter_node *node;
+ grpc_channel_stack_builder* builder;
+ filter_node* node;
};
-grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
- grpc_channel_stack_builder *b =
- (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
+grpc_channel_stack_builder* grpc_channel_stack_builder_create(void) {
+ grpc_channel_stack_builder* b =
+ (grpc_channel_stack_builder*)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -64,76 +64,76 @@ grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
return b;
}
-void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder *b,
- const char *target) {
+void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder* b,
+ const char* target) {
gpr_free(b->target);
b->target = gpr_strdup(target);
}
-const char *grpc_channel_stack_builder_get_target(
- grpc_channel_stack_builder *b) {
+const char* grpc_channel_stack_builder_get_target(
+ grpc_channel_stack_builder* b) {
return b->target;
}
-static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
- grpc_channel_stack_builder *builder, filter_node *node) {
- grpc_channel_stack_builder_iterator *it =
- (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
+static grpc_channel_stack_builder_iterator* create_iterator_at_filter_node(
+ grpc_channel_stack_builder* builder, filter_node* node) {
+ grpc_channel_stack_builder_iterator* it =
+ (grpc_channel_stack_builder_iterator*)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
}
void grpc_channel_stack_builder_iterator_destroy(
- grpc_channel_stack_builder_iterator *it) {
+ grpc_channel_stack_builder_iterator* it) {
gpr_free(it);
}
-grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_iterator*
grpc_channel_stack_builder_create_iterator_at_first(
- grpc_channel_stack_builder *builder) {
+ grpc_channel_stack_builder* builder) {
return create_iterator_at_filter_node(builder, &builder->begin);
}
-grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_iterator*
grpc_channel_stack_builder_create_iterator_at_last(
- grpc_channel_stack_builder *builder) {
+ grpc_channel_stack_builder* builder) {
return create_iterator_at_filter_node(builder, &builder->end);
}
bool grpc_channel_stack_builder_iterator_is_end(
- grpc_channel_stack_builder_iterator *iterator) {
+ grpc_channel_stack_builder_iterator* iterator) {
return iterator->node == &iterator->builder->end;
}
-const char *grpc_channel_stack_builder_iterator_filter_name(
- grpc_channel_stack_builder_iterator *iterator) {
+const char* grpc_channel_stack_builder_iterator_filter_name(
+ grpc_channel_stack_builder_iterator* iterator) {
if (iterator->node->filter == NULL) return NULL;
return iterator->node->filter->name;
}
bool grpc_channel_stack_builder_move_next(
- grpc_channel_stack_builder_iterator *iterator) {
+ grpc_channel_stack_builder_iterator* iterator) {
if (iterator->node == &iterator->builder->end) return false;
iterator->node = iterator->node->next;
return true;
}
bool grpc_channel_stack_builder_move_prev(
- grpc_channel_stack_builder_iterator *iterator) {
+ grpc_channel_stack_builder_iterator* iterator) {
if (iterator->node == &iterator->builder->begin) return false;
iterator->node = iterator->node->prev;
return true;
}
-grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
- grpc_channel_stack_builder *builder, const char *filter_name) {
+grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder* builder, const char* filter_name) {
GPR_ASSERT(filter_name != NULL);
- grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_create_iterator_at_first(builder);
while (grpc_channel_stack_builder_move_next(it)) {
if (grpc_channel_stack_builder_iterator_is_end(it)) break;
- const char *filter_name_at_it =
+ const char* filter_name_at_it =
grpc_channel_stack_builder_iterator_filter_name(it);
if (strcmp(filter_name, filter_name_at_it) == 0) break;
}
@@ -141,17 +141,17 @@ grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
}
bool grpc_channel_stack_builder_move_prev(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
-void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder,
- const char *name) {
+void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder* builder,
+ const char* name) {
GPR_ASSERT(builder->name == NULL);
builder->name = name;
}
void grpc_channel_stack_builder_set_channel_arguments(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
- const grpc_channel_args *args) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
+ const grpc_channel_args* args) {
if (builder->args != NULL) {
grpc_channel_args_destroy(exec_ctx, builder->args);
}
@@ -159,25 +159,25 @@ void grpc_channel_stack_builder_set_channel_arguments(
}
void grpc_channel_stack_builder_set_transport(
- grpc_channel_stack_builder *builder, grpc_transport *transport) {
+ grpc_channel_stack_builder* builder, grpc_transport* transport) {
GPR_ASSERT(builder->transport == NULL);
builder->transport = transport;
}
-grpc_transport *grpc_channel_stack_builder_get_transport(
- grpc_channel_stack_builder *builder) {
+grpc_transport* grpc_channel_stack_builder_get_transport(
+ grpc_channel_stack_builder* builder) {
return builder->transport;
}
-const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments(
- grpc_channel_stack_builder *builder) {
+const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder* builder) {
return builder->args;
}
bool grpc_channel_stack_builder_append_filter(
- grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
- grpc_post_filter_create_init_func post_init_func, void *user_data) {
- grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder* builder, const grpc_channel_filter* filter,
+ grpc_post_filter_create_init_func post_init_func, void* user_data) {
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_create_iterator_at_last(builder);
bool ok = grpc_channel_stack_builder_add_filter_before(
it, filter, post_init_func, user_data);
@@ -186,8 +186,8 @@ bool grpc_channel_stack_builder_append_filter(
}
bool grpc_channel_stack_builder_remove_filter(
- grpc_channel_stack_builder *builder, const char *filter_name) {
- grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder* builder, const char* filter_name) {
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_iterator_find(builder, filter_name);
if (grpc_channel_stack_builder_iterator_is_end(it)) {
grpc_channel_stack_builder_iterator_destroy(it);
@@ -201,9 +201,9 @@ bool grpc_channel_stack_builder_remove_filter(
}
bool grpc_channel_stack_builder_prepend_filter(
- grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
- grpc_post_filter_create_init_func post_init_func, void *user_data) {
- grpc_channel_stack_builder_iterator *it =
+ grpc_channel_stack_builder* builder, const grpc_channel_filter* filter,
+ grpc_post_filter_create_init_func post_init_func, void* user_data) {
+ grpc_channel_stack_builder_iterator* it =
grpc_channel_stack_builder_create_iterator_at_first(builder);
bool ok = grpc_channel_stack_builder_add_filter_after(
it, filter, post_init_func, user_data);
@@ -211,10 +211,10 @@ bool grpc_channel_stack_builder_prepend_filter(
return ok;
}
-static void add_after(filter_node *before, const grpc_channel_filter *filter,
+static void add_after(filter_node* before, const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func,
- void *user_data) {
- filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node));
+ void* user_data) {
+ filter_node* new_node = (filter_node*)gpr_malloc(sizeof(*new_node));
new_node->next = before->next;
new_node->prev = before;
new_node->next->prev = new_node->prev->next = new_node;
@@ -224,28 +224,28 @@ static void add_after(filter_node *before, const grpc_channel_filter *filter,
}
bool grpc_channel_stack_builder_add_filter_before(
- grpc_channel_stack_builder_iterator *iterator,
- const grpc_channel_filter *filter,
- grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ grpc_channel_stack_builder_iterator* iterator,
+ const grpc_channel_filter* filter,
+ grpc_post_filter_create_init_func post_init_func, void* user_data) {
if (iterator->node == &iterator->builder->begin) return false;
add_after(iterator->node->prev, filter, post_init_func, user_data);
return true;
}
bool grpc_channel_stack_builder_add_filter_after(
- grpc_channel_stack_builder_iterator *iterator,
- const grpc_channel_filter *filter,
- grpc_post_filter_create_init_func post_init_func, void *user_data) {
+ grpc_channel_stack_builder_iterator* iterator,
+ const grpc_channel_filter* filter,
+ grpc_post_filter_create_init_func post_init_func, void* user_data) {
if (iterator->node == &iterator->builder->end) return false;
add_after(iterator->node, filter, post_init_func, user_data);
return true;
}
-void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder) {
- filter_node *p = builder->begin.next;
+void grpc_channel_stack_builder_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder) {
+ filter_node* p = builder->begin.next;
while (p != &builder->end) {
- filter_node *next = p->next;
+ filter_node* next = p->next;
gpr_free(p);
p = next;
}
@@ -256,21 +256,21 @@ void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(builder);
}
-grpc_error *grpc_channel_stack_builder_finish(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+grpc_error* grpc_channel_stack_builder_finish(
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg, void **result) {
+ void* destroy_arg, void** result) {
// count the number of filters
size_t num_filters = 0;
- for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
+ for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) {
num_filters++;
}
// create an array of filters
- const grpc_channel_filter **filters =
- (const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters);
+ const grpc_channel_filter** filters =
+ (const grpc_channel_filter**)gpr_malloc(sizeof(*filters) * num_filters);
size_t i = 0;
- for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) {
+ for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) {
filters[i++] = p->filter;
}
@@ -280,10 +280,10 @@ grpc_error *grpc_channel_stack_builder_finish(
// allocate memory, with prefix_bytes followed by channel_stack_size
*result = gpr_zalloc(prefix_bytes + channel_stack_size);
// fetch a pointer to the channel stack
- grpc_channel_stack *channel_stack =
- (grpc_channel_stack *)((char *)(*result) + prefix_bytes);
+ grpc_channel_stack* channel_stack =
+ (grpc_channel_stack*)((char*)(*result) + prefix_bytes);
// and initialize it
- grpc_error *error = grpc_channel_stack_init(
+ grpc_error* error = grpc_channel_stack_init(
exec_ctx, initial_refs, destroy,
destroy_arg == NULL ? *result : destroy_arg, filters, num_filters,
builder->args, builder->transport, builder->name, channel_stack);
@@ -295,7 +295,7 @@ grpc_error *grpc_channel_stack_builder_finish(
} else {
// run post-initialization functions
i = 0;
- for (filter_node *p = builder->begin.next; p != &builder->end;
+ for (filter_node* p = builder->begin.next; p != &builder->end;
p = p->next) {
if (p->init != NULL) {
p->init(channel_stack, grpc_channel_stack_element(channel_stack, i),
@@ -306,7 +306,7 @@ grpc_error *grpc_channel_stack_builder_finish(
}
grpc_channel_stack_builder_destroy(exec_ctx, builder);
- gpr_free((grpc_channel_filter **)filters);
+ gpr_free((grpc_channel_filter**)filters);
return error;
}
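Builder usage sketch (illustrative, not part of this change), wiring the hypothetical passthru_filter from the sketch above through create/append/remove/destroy; real callers normally drive the builder indirectly through registered channel-init stages rather than by hand.

    #include "src/core/lib/channel/channel_stack_builder.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static void builder_sketch(const grpc_channel_filter* filter) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
      /* The name must be statically allocated (see the header below). */
      grpc_channel_stack_builder_set_name(builder, "example_stack");
      grpc_channel_stack_builder_append_filter(builder, filter,
                                               NULL /* post_init_func */,
                                               NULL /* user_data */);
      /* Filters can later be located and removed by name. */
      grpc_channel_stack_builder_remove_filter(builder, filter->name);
      /* Discard the builder without finishing it into a channel stack;
         grpc_channel_stack_builder_finish would build and initialize one. */
      grpc_channel_stack_builder_destroy(&exec_ctx, builder);
      grpc_exec_ctx_finish(&exec_ctx);
    }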
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index fdff2a2b6d..23134b7d10 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -35,130 +35,130 @@ typedef struct grpc_channel_stack_builder_iterator
grpc_channel_stack_builder_iterator;
/// Create a new channel stack builder
-grpc_channel_stack_builder *grpc_channel_stack_builder_create(void);
+grpc_channel_stack_builder* grpc_channel_stack_builder_create(void);
/// Assign a name to the channel stack: \a name must be statically allocated
-void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder,
- const char *name);
+void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder* builder,
+ const char* name);
/// Set the target uri
-void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder *b,
- const char *target);
+void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder* b,
+ const char* target);
-const char *grpc_channel_stack_builder_get_target(
- grpc_channel_stack_builder *b);
+const char* grpc_channel_stack_builder_get_target(
+ grpc_channel_stack_builder* b);
/// Attach \a transport to the builder (does not take ownership)
void grpc_channel_stack_builder_set_transport(
- grpc_channel_stack_builder *builder, grpc_transport *transport);
+ grpc_channel_stack_builder* builder, grpc_transport* transport);
/// Fetch attached transport
-grpc_transport *grpc_channel_stack_builder_get_transport(
- grpc_channel_stack_builder *builder);
+grpc_transport* grpc_channel_stack_builder_get_transport(
+ grpc_channel_stack_builder* builder);
/// Set channel arguments: copies args
void grpc_channel_stack_builder_set_channel_arguments(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
- const grpc_channel_args *args);
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
+ const grpc_channel_args* args);
/// Return a borrowed pointer to the channel arguments
-const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments(
- grpc_channel_stack_builder *builder);
+const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
+ grpc_channel_stack_builder* builder);
/// Begin iterating over already defined filters in the builder at the beginning
-grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_iterator*
grpc_channel_stack_builder_create_iterator_at_first(
- grpc_channel_stack_builder *builder);
+ grpc_channel_stack_builder* builder);
/// Begin iterating over already defined filters in the builder at the end
-grpc_channel_stack_builder_iterator *
+grpc_channel_stack_builder_iterator*
grpc_channel_stack_builder_create_iterator_at_last(
- grpc_channel_stack_builder *builder);
+ grpc_channel_stack_builder* builder);
/// Is an iterator at the first element?
bool grpc_channel_stack_builder_iterator_is_first(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
/// Is an iterator at the end?
bool grpc_channel_stack_builder_iterator_is_end(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
/// What is the name of the filter at this iterator position?
-const char *grpc_channel_stack_builder_iterator_filter_name(
- grpc_channel_stack_builder_iterator *iterator);
+const char* grpc_channel_stack_builder_iterator_filter_name(
+ grpc_channel_stack_builder_iterator* iterator);
/// Move an iterator to the next item
bool grpc_channel_stack_builder_move_next(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
/// Move an iterator to the previous item
bool grpc_channel_stack_builder_move_prev(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
/// Return an iterator at \a filter_name, or at the end of the list if not
/// found.
-grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find(
- grpc_channel_stack_builder *builder, const char *filter_name);
+grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_iterator_find(
+ grpc_channel_stack_builder* builder, const char* filter_name);
typedef void (*grpc_post_filter_create_init_func)(
- grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg);
+ grpc_channel_stack* channel_stack, grpc_channel_element* elem, void* arg);
/// Add \a filter to the stack, after \a iterator.
/// Call \a post_init_func(..., \a user_data) once the channel stack is
/// created.
bool grpc_channel_stack_builder_add_filter_after(
- grpc_channel_stack_builder_iterator *iterator,
- const grpc_channel_filter *filter,
+ grpc_channel_stack_builder_iterator* iterator,
+ const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func,
- void *user_data) GRPC_MUST_USE_RESULT;
+ void* user_data) GRPC_MUST_USE_RESULT;
/// Add \a filter to the stack, before \a iterator.
/// Call \a post_init_func(..., \a user_data) once the channel stack is
/// created.
bool grpc_channel_stack_builder_add_filter_before(
- grpc_channel_stack_builder_iterator *iterator,
- const grpc_channel_filter *filter,
+ grpc_channel_stack_builder_iterator* iterator,
+ const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func,
- void *user_data) GRPC_MUST_USE_RESULT;
+ void* user_data) GRPC_MUST_USE_RESULT;
/// Add \a filter to the beginning of the filter list.
/// Call \a post_init_func(..., \a user_data) once the channel stack is
/// created.
bool grpc_channel_stack_builder_prepend_filter(
- grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_channel_stack_builder* builder, const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func,
- void *user_data) GRPC_MUST_USE_RESULT;
+ void* user_data) GRPC_MUST_USE_RESULT;
/// Add \a filter to the end of the filter list.
/// Call \a post_init_func(..., \a user_data) once the channel stack is
/// created.
bool grpc_channel_stack_builder_append_filter(
- grpc_channel_stack_builder *builder, const grpc_channel_filter *filter,
+ grpc_channel_stack_builder* builder, const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func,
- void *user_data) GRPC_MUST_USE_RESULT;
+ void* user_data) GRPC_MUST_USE_RESULT;
/// Remove any filter whose name is \a filter_name from \a builder. Returns true
/// if \a filter_name was not found.
bool grpc_channel_stack_builder_remove_filter(
- grpc_channel_stack_builder *builder, const char *filter_name);
+ grpc_channel_stack_builder* builder, const char* filter_name);
/// Terminate iteration and destroy \a iterator
void grpc_channel_stack_builder_iterator_destroy(
- grpc_channel_stack_builder_iterator *iterator);
+ grpc_channel_stack_builder_iterator* iterator);
/// Destroy the builder, return the freshly minted channel stack in \a result.
/// Allocates \a prefix_bytes bytes before the channel stack
/// Returns the base pointer of the allocated block
/// \a initial_refs, \a destroy, \a destroy_arg are as per
/// grpc_channel_stack_init
-grpc_error *grpc_channel_stack_builder_finish(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+grpc_error* grpc_channel_stack_builder_finish(
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
- void *destroy_arg, void **result);
+ void* destroy_arg, void** result);
/// Destroy the builder without creating a channel stack
-void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder);
+void grpc_channel_stack_builder_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder);
extern grpc_tracer_flag grpc_trace_channel_stack_builder;
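
(Illustrative sketch, not part of the patch above: a minimal example of how the builder/iterator declarations just reformatted are typically driven. `my_debug_filter` and `dump_builder_filters` are hypothetical names introduced only for this sketch; every API call is taken from the declarations shown in channel_stack_builder.h.)

#include <grpc/support/log.h>

#include "src/core/lib/channel/channel_stack_builder.h"

// Hypothetical, statically allocated filter used purely for illustration.
extern const grpc_channel_filter my_debug_filter;

static void dump_builder_filters(grpc_exec_ctx* exec_ctx) {
  grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
  grpc_channel_stack_builder_set_name(builder, "example_stack");
  // Append a filter to the (currently empty) list; the result must be checked.
  GPR_ASSERT(grpc_channel_stack_builder_append_filter(builder, &my_debug_filter,
                                                      NULL, NULL));
  // Walk the already-defined filters from the front and log their names.
  grpc_channel_stack_builder_iterator* it =
      grpc_channel_stack_builder_create_iterator_at_first(builder);
  while (grpc_channel_stack_builder_move_next(it)) {
    if (grpc_channel_stack_builder_iterator_is_end(it)) break;
    gpr_log(GPR_DEBUG, "filter: %s",
            grpc_channel_stack_builder_iterator_filter_name(it));
  }
  grpc_channel_stack_builder_iterator_destroy(it);
  // Discard the builder without producing a channel stack.
  grpc_channel_stack_builder_destroy(exec_ctx, builder);
}
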
diff --git a/src/core/lib/channel/connected_channel.cc b/src/core/lib/channel/connected_channel.cc
index 4f37908958..49b9f140c0 100644
--- a/src/core/lib/channel/connected_channel.cc
+++ b/src/core/lib/channel/connected_channel.cc
@@ -33,41 +33,41 @@
#define MAX_BUFFER_LENGTH 8192
typedef struct connected_channel_channel_data {
- grpc_transport *transport;
+ grpc_transport* transport;
} channel_data;
typedef struct {
grpc_closure closure;
- grpc_closure *original_closure;
- grpc_call_combiner *call_combiner;
- const char *reason;
+ grpc_closure* original_closure;
+ grpc_call_combiner* call_combiner;
+ const char* reason;
} callback_state;
typedef struct connected_channel_call_data {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
// Closures used for returning results on the call combiner.
callback_state on_complete[6]; // Max number of pending batches.
callback_state recv_initial_metadata_ready;
callback_state recv_message_ready;
} call_data;
-static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- callback_state *state = (callback_state *)arg;
+static void run_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ callback_state* state = (callback_state*)arg;
GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
state->original_closure, GRPC_ERROR_REF(error),
state->reason);
}
-static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
+static void run_cancel_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
run_in_call_combiner(exec_ctx, arg, error);
gpr_free(arg);
}
-static void intercept_callback(call_data *calld, callback_state *state,
- bool free_when_done, const char *reason,
- grpc_closure **original_closure) {
+static void intercept_callback(call_data* calld, callback_state* state,
+ bool free_when_done, const char* reason,
+ grpc_closure** original_closure) {
state->original_closure = *original_closure;
state->call_combiner = calld->call_combiner;
state->reason = reason;
@@ -77,8 +77,8 @@ static void intercept_callback(call_data *calld, callback_state *state,
state, grpc_schedule_on_exec_ctx);
}
-static callback_state *get_state_for_batch(
- call_data *calld, grpc_transport_stream_op_batch *batch) {
+static callback_state* get_state_for_batch(
+ call_data* calld, grpc_transport_stream_op_batch* batch) {
if (batch->send_initial_metadata) return &calld->on_complete[0];
if (batch->send_message) return &calld->on_complete[1];
if (batch->send_trailing_metadata) return &calld->on_complete[2];
@@ -91,25 +91,25 @@ static callback_state *get_state_for_batch(
/* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal
cache line requests */
-#define TRANSPORT_STREAM_FROM_CALL_DATA(calld) ((grpc_stream *)((calld) + 1))
+#define TRANSPORT_STREAM_FROM_CALL_DATA(calld) ((grpc_stream*)((calld) + 1))
#define CALL_DATA_FROM_TRANSPORT_STREAM(transport_stream) \
- (((call_data *)(transport_stream)) - 1)
+ (((call_data*)(transport_stream)) - 1)
/* Intercept a call operation and either push it directly up or translate it
into transport stream operations */
static void con_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (batch->recv_initial_metadata) {
- callback_state *state = &calld->recv_initial_metadata_ready;
+ callback_state* state = &calld->recv_initial_metadata_ready;
intercept_callback(
calld, state, false, "recv_initial_metadata_ready",
&batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
}
if (batch->recv_message) {
- callback_state *state = &calld->recv_message_ready;
+ callback_state* state = &calld->recv_message_ready;
intercept_callback(calld, state, false, "recv_message_ready",
&batch->payload->recv_message.recv_message_ready);
}
@@ -119,11 +119,11 @@ static void con_start_transport_stream_op_batch(
// calld->on_complete like we can for the other ops. However,
// cancellation isn't in the fast path, so we just allocate a new
// closure for each one.
- callback_state *state = (callback_state *)gpr_malloc(sizeof(*state));
+ callback_state* state = (callback_state*)gpr_malloc(sizeof(*state));
intercept_callback(calld, state, true, "on_complete (cancel_stream)",
&batch->on_complete);
} else {
- callback_state *state = get_state_for_batch(calld, batch);
+ callback_state* state = get_state_for_batch(calld, batch);
intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
}
grpc_transport_perform_stream_op(exec_ctx, chand->transport,
@@ -133,19 +133,19 @@ static void con_start_transport_stream_op_batch(
"passed batch to transport");
}
-static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void con_start_transport_op(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {
+ channel_data* chand = (channel_data*)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
@@ -155,49 +155,49 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
"transport stream initialization failed");
}
-static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *cd = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* cd = (channel_data*)elem->channel_data;
GPR_ASSERT(args->is_last);
cd->transport = NULL;
return GRPC_ERROR_NONE;
}
/* Destructor for channel_data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *cd = (channel_data *)elem->channel_data;
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* cd = (channel_data*)elem->channel_data;
if (cd->transport) {
grpc_transport_destroy(exec_ctx, cd->transport);
}
}
/* No-op. */
-static void con_get_channel_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *channel_info) {}
+static void con_get_channel_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {}
const grpc_channel_filter grpc_connected_filter = {
con_start_transport_stream_op_batch,
@@ -213,12 +213,12 @@ const grpc_channel_filter grpc_connected_filter = {
"connected",
};
-static void bind_transport(grpc_channel_stack *channel_stack,
- grpc_channel_element *elem, void *t) {
- channel_data *cd = (channel_data *)elem->channel_data;
+static void bind_transport(grpc_channel_stack* channel_stack,
+ grpc_channel_element* elem, void* t) {
+ channel_data* cd = (channel_data*)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
- cd->transport = (grpc_transport *)t;
+ cd->transport = (grpc_transport*)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -227,20 +227,20 @@ static void bind_transport(grpc_channel_stack *channel_stack,
the last call element, and the last call element MUST be the connected
channel. */
channel_stack->call_stack_size +=
- grpc_transport_stream_size((grpc_transport *)t);
+ grpc_transport_stream_size((grpc_transport*)t);
}
-bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg_must_be_null) {
+bool grpc_add_connected_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg_must_be_null) {
GPR_ASSERT(arg_must_be_null == NULL);
- grpc_transport *t = grpc_channel_stack_builder_get_transport(builder);
+ grpc_transport* t = grpc_channel_stack_builder_get_transport(builder);
GPR_ASSERT(t != NULL);
return grpc_channel_stack_builder_append_filter(
builder, &grpc_connected_filter, bind_transport, t);
}
-grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
- call_data *calld = (call_data *)elem->call_data;
+grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem) {
+ call_data* calld = (call_data*)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}
diff --git a/src/core/lib/channel/connected_channel.h b/src/core/lib/channel/connected_channel.h
index 4615727baa..cca19737dc 100644
--- a/src/core/lib/channel/connected_channel.h
+++ b/src/core/lib/channel/connected_channel.h
@@ -27,12 +27,12 @@ extern "C" {
extern const grpc_channel_filter grpc_connected_filter;
-bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg_must_be_null);
+bool grpc_add_connected_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg_must_be_null);
/* Debug helper to dig the transport stream out of a call element */
-grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem);
+grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem);
#ifdef __cplusplus
}
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 191bd63351..5daf48a9a9 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -42,8 +42,8 @@ typedef enum {
} grpc_context_index;
typedef struct {
- void *value;
- void (*destroy)(void *);
+ void* value;
+ void (*destroy)(void*);
} grpc_call_context_element;
#endif /* GRPC_CORE_LIB_CHANNEL_CONTEXT_H */
diff --git a/src/core/lib/channel/handshaker_factory.cc b/src/core/lib/channel/handshaker_factory.cc
index 4deb280c60..bf1188dbb9 100644
--- a/src/core/lib/channel/handshaker_factory.cc
+++ b/src/core/lib/channel/handshaker_factory.cc
@@ -21,8 +21,8 @@
#include <grpc/support/log.h>
void grpc_handshaker_factory_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory,
- const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) {
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
+ const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
if (handshaker_factory != NULL) {
GPR_ASSERT(handshaker_factory->vtable != NULL);
handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory,
@@ -31,7 +31,7 @@ void grpc_handshaker_factory_add_handshakers(
}
void grpc_handshaker_factory_destroy(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory) {
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory) {
if (handshaker_factory != NULL) {
GPR_ASSERT(handshaker_factory->vtable != NULL);
handshaker_factory->vtable->destroy(exec_ctx, handshaker_factory);
diff --git a/src/core/lib/channel/handshaker_factory.h b/src/core/lib/channel/handshaker_factory.h
index 59008adf05..63d9b5af72 100644
--- a/src/core/lib/channel/handshaker_factory.h
+++ b/src/core/lib/channel/handshaker_factory.h
@@ -33,24 +33,24 @@ extern "C" {
typedef struct grpc_handshaker_factory grpc_handshaker_factory;
typedef struct {
- void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
- grpc_handshaker_factory *handshaker_factory,
- const grpc_channel_args *args,
- grpc_handshake_manager *handshake_mgr);
- void (*destroy)(grpc_exec_ctx *exec_ctx,
- grpc_handshaker_factory *handshaker_factory);
+ void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker_factory* handshaker_factory,
+ const grpc_channel_args* args,
+ grpc_handshake_manager* handshake_mgr);
+ void (*destroy)(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker_factory* handshaker_factory);
} grpc_handshaker_factory_vtable;
struct grpc_handshaker_factory {
- const grpc_handshaker_factory_vtable *vtable;
+ const grpc_handshaker_factory_vtable* vtable;
};
void grpc_handshaker_factory_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory,
- const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr);
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
+ const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr);
void grpc_handshaker_factory_destroy(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory);
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory);
#ifdef __cplusplus
}
diff --git a/src/core/lib/compression/compression.cc b/src/core/lib/compression/compression.cc
index 1cfac23129..a0d5bdcc78 100644
--- a/src/core/lib/compression/compression.cc
+++ b/src/core/lib/compression/compression.cc
@@ -27,7 +27,7 @@
#include "src/core/lib/transport/static_metadata.h"
int grpc_compression_algorithm_parse(grpc_slice name,
- grpc_compression_algorithm *algorithm) {
+ grpc_compression_algorithm* algorithm) {
/* we use strncmp not only because it's safer (even though in this case it
* doesn't matter, given that we are comparing against string literals, but
* because this way we needn't have "name" nil-terminated (useful for slice
@@ -47,7 +47,7 @@ int grpc_compression_algorithm_parse(grpc_slice name,
}
int grpc_stream_compression_algorithm_parse(
- grpc_slice name, grpc_stream_compression_algorithm *algorithm) {
+ grpc_slice name, grpc_stream_compression_algorithm* algorithm) {
if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) {
*algorithm = GRPC_STREAM_COMPRESS_NONE;
return 1;
@@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse(
}
int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
- const char **name) {
+ const char** name) {
GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
switch (algorithm) {
@@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
}
int grpc_stream_compression_algorithm_name(
- grpc_stream_compression_algorithm algorithm, const char **name) {
+ grpc_stream_compression_algorithm algorithm, const char** name) {
GRPC_API_TRACE(
"grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2,
((int)algorithm, name));
@@ -168,7 +168,7 @@ grpc_mdelem grpc_stream_compression_encoding_mdelem(
return GRPC_MDNULL;
}
-void grpc_compression_options_init(grpc_compression_options *opts) {
+void grpc_compression_options_init(grpc_compression_options* opts) {
memset(opts, 0, sizeof(*opts));
/* all enabled by default */
opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
@@ -177,23 +177,23 @@ void grpc_compression_options_init(grpc_compression_options *opts) {
}
void grpc_compression_options_enable_algorithm(
- grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ grpc_compression_options* opts, grpc_compression_algorithm algorithm) {
GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm);
}
void grpc_compression_options_disable_algorithm(
- grpc_compression_options *opts, grpc_compression_algorithm algorithm) {
+ grpc_compression_options* opts, grpc_compression_algorithm algorithm) {
GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm);
}
int grpc_compression_options_is_algorithm_enabled(
- const grpc_compression_options *opts,
+ const grpc_compression_options* opts,
grpc_compression_algorithm algorithm) {
return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm);
}
int grpc_compression_options_is_stream_compression_algorithm_enabled(
- const grpc_compression_options *opts,
+ const grpc_compression_options* opts,
grpc_stream_compression_algorithm algorithm) {
return GPR_BITGET(opts->enabled_stream_compression_algorithms_bitset,
algorithm);
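
(Not part of this patch; a minimal usage sketch for the compression-options accessors reformatted above. `configure_compression` is a hypothetical helper, the rest is the public <grpc/compression.h> API.)

#include <grpc/compression.h>
#include <grpc/support/log.h>

// Start with every algorithm enabled, turn one off, and sanity-check another.
static void configure_compression(void) {
  grpc_compression_options opts;
  grpc_compression_options_init(&opts);  // all algorithms enabled by default
  grpc_compression_options_disable_algorithm(&opts, GRPC_COMPRESS_DEFLATE);
  GPR_ASSERT(grpc_compression_options_is_algorithm_enabled(&opts,
                                                           GRPC_COMPRESS_GZIP));
  GPR_ASSERT(!grpc_compression_options_is_algorithm_enabled(
      &opts, GRPC_COMPRESS_DEFLATE));
}
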
diff --git a/src/core/lib/compression/stream_compression.cc b/src/core/lib/compression/stream_compression.cc
index 7faeb0d34f..8a57b33230 100644
--- a/src/core/lib/compression/stream_compression.cc
+++ b/src/core/lib/compression/stream_compression.cc
@@ -24,23 +24,23 @@
extern "C" const grpc_stream_compression_vtable
grpc_stream_compression_identity_vtable;
-bool grpc_stream_compress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
+bool grpc_stream_compress(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size,
grpc_stream_compression_flush flush) {
return ctx->vtable->compress(ctx, in, out, output_size, max_output_size,
flush);
}
-bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
- bool *end_of_context) {
+bool grpc_stream_decompress(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size,
+ bool* end_of_context) {
return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size,
end_of_context);
}
-grpc_stream_compression_context *grpc_stream_compression_context_create(
+grpc_stream_compression_context* grpc_stream_compression_context_create(
grpc_stream_compression_method method) {
switch (method) {
case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS:
@@ -56,13 +56,13 @@ grpc_stream_compression_context *grpc_stream_compression_context_create(
}
void grpc_stream_compression_context_destroy(
- grpc_stream_compression_context *ctx) {
+ grpc_stream_compression_context* ctx) {
ctx->vtable->context_destroy(ctx);
}
int grpc_stream_compression_method_parse(
grpc_slice value, bool is_compress,
- grpc_stream_compression_method *method) {
+ grpc_stream_compression_method* method) {
if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) {
*method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
: GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS;
diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h
index 6ee3ac1966..b56c142543 100644
--- a/src/core/lib/compression/stream_compression.h
+++ b/src/core/lib/compression/stream_compression.h
@@ -34,7 +34,7 @@ typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
/* Stream compression/decompression context */
typedef struct grpc_stream_compression_context {
- const grpc_stream_compression_vtable *vtable;
+ const grpc_stream_compression_vtable* vtable;
} grpc_stream_compression_context;
typedef enum grpc_stream_compression_method {
@@ -53,16 +53,16 @@ typedef enum grpc_stream_compression_flush {
} grpc_stream_compression_flush;
struct grpc_stream_compression_vtable {
- bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
- grpc_slice_buffer *out, size_t *output_size,
+ bool (*compress)(grpc_stream_compression_context* ctx, grpc_slice_buffer* in,
+ grpc_slice_buffer* out, size_t* output_size,
size_t max_output_size, grpc_stream_compression_flush flush);
- bool (*decompress)(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
- bool *end_of_context);
- grpc_stream_compression_context *(*context_create)(
+ bool (*decompress)(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size,
+ bool* end_of_context);
+ grpc_stream_compression_context* (*context_create)(
grpc_stream_compression_method method);
- void (*context_destroy)(grpc_stream_compression_context *ctx);
+ void (*context_destroy)(grpc_stream_compression_context* ctx);
};
/**
@@ -78,9 +78,9 @@ struct grpc_stream_compression_vtable {
* previous compressed bytes. It allows corresponding decompression context to
* be dropped when reaching this boundary.
*/
-bool grpc_stream_compress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
+bool grpc_stream_compress(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size,
grpc_stream_compression_flush flush);
/**
@@ -90,30 +90,30 @@ bool grpc_stream_compress(grpc_stream_compression_context *ctx,
 * it is set to false. The total number of bytes emitted is output in \a
* output_size.
*/
-bool grpc_stream_decompress(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size,
- bool *end_of_context);
+bool grpc_stream_decompress(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size,
+ bool* end_of_context);
/**
* Creates a stream compression context. \a pending_bytes_buffer is the input
* buffer for compression/decompression operations. \a method specifies whether
* the context is for compression or decompression.
*/
-grpc_stream_compression_context *grpc_stream_compression_context_create(
+grpc_stream_compression_context* grpc_stream_compression_context_create(
grpc_stream_compression_method method);
/**
* Destroys a stream compression context.
*/
void grpc_stream_compression_context_destroy(
- grpc_stream_compression_context *ctx);
+ grpc_stream_compression_context* ctx);
/**
* Parse stream compression method based on algorithm name
*/
int grpc_stream_compression_method_parse(
- grpc_slice value, bool is_compress, grpc_stream_compression_method *method);
+ grpc_slice value, bool is_compress, grpc_stream_compression_method* method);
#ifdef __cplusplus
}
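
(Illustrative sketch, not part of the patch: one way the stream-compression entry points declared above fit together. `gzip_compress_buffer` is a hypothetical helper, and the flush constant is assumed to be the "finish" value of the grpc_stream_compression_flush enum defined in this header.)

#include <grpc/slice_buffer.h>

#include "src/core/lib/compression/stream_compression.h"

// Compress everything currently in 'input' into 'output', finishing the stream.
static bool gzip_compress_buffer(grpc_slice_buffer* input,
                                 grpc_slice_buffer* output) {
  grpc_stream_compression_context* ctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_GZIP_COMPRESS);
  size_t output_size = 0;
  // ~(size_t)0 as max_output_size: no practical cap for this sketch.
  bool ok = grpc_stream_compress(ctx, input, output, &output_size, ~(size_t)0,
                                 GRPC_STREAM_COMPRESSION_FLUSH_FINISH);
  grpc_stream_compression_context_destroy(ctx);
  return ok;
}
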
diff --git a/src/core/lib/compression/stream_compression_gzip.cc b/src/core/lib/compression/stream_compression_gzip.cc
index 087b018be5..53ec7ca223 100644
--- a/src/core/lib/compression/stream_compression_gzip.cc
+++ b/src/core/lib/compression/stream_compression_gzip.cc
@@ -29,13 +29,13 @@ typedef struct grpc_stream_compression_context_gzip {
grpc_stream_compression_context base;
z_stream zs;
- int (*flate)(z_stream *zs, int flush);
+ int (*flate)(z_stream* zs, int flush);
} grpc_stream_compression_context_gzip;
-static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
- grpc_slice_buffer *in, grpc_slice_buffer *out,
- size_t *output_size, size_t max_output_size, int flush,
- bool *end_of_context) {
+static bool gzip_flate(grpc_stream_compression_context_gzip* ctx,
+ grpc_slice_buffer* in, grpc_slice_buffer* out,
+ size_t* output_size, size_t max_output_size, int flush,
+ bool* end_of_context) {
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH);
/* Full flush is not allowed when inflating. */
GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
@@ -131,17 +131,17 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx,
return true;
}
-static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in,
- grpc_slice_buffer *out,
- size_t *output_size,
+static bool grpc_stream_compress_gzip(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in,
+ grpc_slice_buffer* out,
+ size_t* output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
return false;
}
- grpc_stream_compression_context_gzip *gzip_ctx =
- (grpc_stream_compression_context_gzip *)ctx;
+ grpc_stream_compression_context_gzip* gzip_ctx =
+ (grpc_stream_compression_context_gzip*)ctx;
GPR_ASSERT(gzip_ctx->flate == deflate);
int gzip_flush;
switch (flush) {
@@ -161,29 +161,29 @@ static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx,
NULL);
}
-static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in,
- grpc_slice_buffer *out,
- size_t *output_size,
+static bool grpc_stream_decompress_gzip(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in,
+ grpc_slice_buffer* out,
+ size_t* output_size,
size_t max_output_size,
- bool *end_of_context) {
+ bool* end_of_context) {
if (ctx == NULL) {
return false;
}
- grpc_stream_compression_context_gzip *gzip_ctx =
- (grpc_stream_compression_context_gzip *)ctx;
+ grpc_stream_compression_context_gzip* gzip_ctx =
+ (grpc_stream_compression_context_gzip*)ctx;
GPR_ASSERT(gzip_ctx->flate == inflate);
return gzip_flate(gzip_ctx, in, out, output_size, max_output_size,
Z_SYNC_FLUSH, end_of_context);
}
-static grpc_stream_compression_context *
+static grpc_stream_compression_context*
grpc_stream_compression_context_create_gzip(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS);
- grpc_stream_compression_context_gzip *gzip_ctx =
- (grpc_stream_compression_context_gzip *)gpr_zalloc(
+ grpc_stream_compression_context_gzip* gzip_ctx =
+ (grpc_stream_compression_context_gzip*)gpr_zalloc(
sizeof(grpc_stream_compression_context_gzip));
int r;
if (gzip_ctx == NULL) {
@@ -203,16 +203,16 @@ grpc_stream_compression_context_create_gzip(
}
gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable;
- return (grpc_stream_compression_context *)gzip_ctx;
+ return (grpc_stream_compression_context*)gzip_ctx;
}
static void grpc_stream_compression_context_destroy_gzip(
- grpc_stream_compression_context *ctx) {
+ grpc_stream_compression_context* ctx) {
if (ctx == NULL) {
return;
}
- grpc_stream_compression_context_gzip *gzip_ctx =
- (grpc_stream_compression_context_gzip *)ctx;
+ grpc_stream_compression_context_gzip* gzip_ctx =
+ (grpc_stream_compression_context_gzip*)ctx;
if (gzip_ctx->flate == inflate) {
inflateEnd(&gzip_ctx->zs);
} else {
diff --git a/src/core/lib/compression/stream_compression_identity.cc b/src/core/lib/compression/stream_compression_identity.cc
index 9b2e6062e1..5e8bfc09a6 100644
--- a/src/core/lib/compression/stream_compression_identity.cc
+++ b/src/core/lib/compression/stream_compression_identity.cc
@@ -29,9 +29,9 @@
static grpc_stream_compression_context identity_ctx = {
&grpc_stream_compression_identity_vtable};
-static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
- grpc_slice_buffer *out,
- size_t *output_size,
+static void grpc_stream_compression_pass_through(grpc_slice_buffer* in,
+ grpc_slice_buffer* out,
+ size_t* output_size,
size_t max_output_size) {
if (max_output_size >= in->length) {
if (output_size) {
@@ -46,10 +46,10 @@ static void grpc_stream_compression_pass_through(grpc_slice_buffer *in,
}
}
-static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
- grpc_slice_buffer *in,
- grpc_slice_buffer *out,
- size_t *output_size,
+static bool grpc_stream_compress_identity(grpc_stream_compression_context* ctx,
+ grpc_slice_buffer* in,
+ grpc_slice_buffer* out,
+ size_t* output_size,
size_t max_output_size,
grpc_stream_compression_flush flush) {
if (ctx == NULL) {
@@ -60,9 +60,9 @@ static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx,
}
static bool grpc_stream_decompress_identity(
- grpc_stream_compression_context *ctx, grpc_slice_buffer *in,
- grpc_slice_buffer *out, size_t *output_size, size_t max_output_size,
- bool *end_of_context) {
+ grpc_stream_compression_context* ctx, grpc_slice_buffer* in,
+ grpc_slice_buffer* out, size_t* output_size, size_t max_output_size,
+ bool* end_of_context) {
if (ctx == NULL) {
return false;
}
@@ -73,17 +73,17 @@ static bool grpc_stream_decompress_identity(
return true;
}
-static grpc_stream_compression_context *
+static grpc_stream_compression_context*
grpc_stream_compression_context_create_identity(
grpc_stream_compression_method method) {
GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS ||
method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS);
/* No context needed in this case. Use fake context instead. */
- return (grpc_stream_compression_context *)&identity_ctx;
+ return (grpc_stream_compression_context*)&identity_ctx;
}
static void grpc_stream_compression_context_destroy_identity(
- grpc_stream_compression_context *ctx) {
+ grpc_stream_compression_context* ctx) {
return;
}
diff --git a/src/core/lib/debug/stats.cc b/src/core/lib/debug/stats.cc
index 4096384dd9..b32cf19028 100644
--- a/src/core/lib/debug/stats.cc
+++ b/src/core/lib/debug/stats.cc
@@ -27,18 +27,18 @@
#include "src/core/lib/support/string.h"
-grpc_stats_data *grpc_stats_per_cpu_storage = NULL;
+grpc_stats_data* grpc_stats_per_cpu_storage = NULL;
static size_t g_num_cores;
void grpc_stats_init(void) {
g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
grpc_stats_per_cpu_storage =
- (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
+ (grpc_stats_data*)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
}
void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }
-void grpc_stats_collect(grpc_stats_data *output) {
+void grpc_stats_collect(grpc_stats_data* output) {
memset(output, 0, sizeof(*output));
for (size_t core = 0; core < g_num_cores; core++) {
for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
@@ -52,8 +52,8 @@ void grpc_stats_collect(grpc_stats_data *output) {
}
}
-void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
- grpc_stats_data *c) {
+void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,
+ grpc_stats_data* c) {
for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
c->counters[i] = b->counters[i] - a->counters[i];
}
@@ -62,13 +62,13 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
}
}
-int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
- const int *table, int table_size) {
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx* exec_ctx, int value,
+ const int* table, int table_size) {
GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
- const int *const start = table;
+ const int* const start = table;
while (table_size > 0) {
int step = table_size / 2;
- const int *it = table + step;
+ const int* it = table + step;
if (value >= *it) {
table = it + 1;
table_size -= step + 1;
@@ -79,7 +79,7 @@ int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
return (int)(table - start) - 1;
}
-size_t grpc_stats_histo_count(const grpc_stats_data *stats,
+size_t grpc_stats_histo_count(const grpc_stats_data* stats,
grpc_stats_histograms histogram) {
size_t sum = 0;
for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) {
@@ -88,8 +88,8 @@ size_t grpc_stats_histo_count(const grpc_stats_data *stats,
return sum;
}
-static double threshold_for_count_below(const gpr_atm *bucket_counts,
- const int *bucket_boundaries,
+static double threshold_for_count_below(const gpr_atm* bucket_counts,
+ const int* bucket_boundaries,
int num_buckets, double count_below) {
double count_so_far;
double lower_bound;
@@ -119,13 +119,13 @@ static double threshold_for_count_below(const gpr_atm *bucket_counts,
should lie */
lower_bound = bucket_boundaries[lower_idx];
upper_bound = bucket_boundaries[lower_idx + 1];
- return upper_bound -
- (upper_bound - lower_bound) * (count_so_far - count_below) /
- (double)bucket_counts[lower_idx];
+ return upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) /
+ (double)bucket_counts[lower_idx];
}
}
-double grpc_stats_histo_percentile(const grpc_stats_data *stats,
+double grpc_stats_histo_percentile(const grpc_stats_data* stats,
grpc_stats_histograms histogram,
double percentile) {
size_t count = grpc_stats_histo_count(stats, histogram);
@@ -136,9 +136,9 @@ double grpc_stats_histo_percentile(const grpc_stats_data *stats,
grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0);
}
-char *grpc_stats_data_as_json(const grpc_stats_data *data) {
+char* grpc_stats_data_as_json(const grpc_stats_data* data) {
gpr_strvec v;
- char *tmp;
+ char* tmp;
bool is_first = true;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("{"));
diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h
index fec1d651e6..1c19e72345 100644
--- a/src/core/lib/debug/stats.h
+++ b/src/core/lib/debug/stats.h
@@ -32,7 +32,7 @@ typedef struct grpc_stats_data {
gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS];
} grpc_stats_data;
-extern grpc_stats_data *grpc_stats_per_cpu_storage;
+extern grpc_stats_data* grpc_stats_per_cpu_storage;
#define GRPC_THREAD_STATS_DATA(exec_ctx) \
(&grpc_stats_per_cpu_storage[(exec_ctx)->starting_cpu])
@@ -49,17 +49,17 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage;
void grpc_stats_init(void);
void grpc_stats_shutdown(void);
-void grpc_stats_collect(grpc_stats_data *output);
+void grpc_stats_collect(grpc_stats_data* output);
// c = b-a
-void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
- grpc_stats_data *c);
-char *grpc_stats_data_as_json(const grpc_stats_data *data);
-int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value,
- const int *table, int table_size);
-double grpc_stats_histo_percentile(const grpc_stats_data *data,
+void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,
+ grpc_stats_data* c);
+char* grpc_stats_data_as_json(const grpc_stats_data* data);
+int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx* exec_ctx, int value,
+ const int* table, int table_size);
+double grpc_stats_histo_percentile(const grpc_stats_data* data,
grpc_stats_histograms histogram,
double percentile);
-size_t grpc_stats_histo_count(const grpc_stats_data *data,
+size_t grpc_stats_histo_count(const grpc_stats_data* data,
grpc_stats_histograms histogram);
#ifdef __cplusplus
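
(Reading aid, not introduced by this patch: a sketch of snapshotting and diffing the per-CPU stats using the functions declared above. `log_stats_delta` and `do_work` are hypothetical names.)

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/debug/stats.h"

// Snapshot the global stats around a unit of work and log the difference.
static void log_stats_delta(void (*do_work)(void)) {
  grpc_stats_data before, after, delta;
  grpc_stats_collect(&before);
  do_work();
  grpc_stats_collect(&after);
  grpc_stats_diff(&after, &before, &delta);  // delta = after - before
  char* json = grpc_stats_data_as_json(&delta);
  gpr_log(GPR_INFO, "stats delta: %s", json);
  gpr_log(GPR_INFO, "tcp write size p95: %f",
          grpc_stats_histo_percentile(
              &delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 95.0));
  gpr_free(json);
}
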
diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc
index 5d737c56cb..17e15f4cfb 100644
--- a/src/core/lib/debug/stats_data.cc
+++ b/src/core/lib/debug/stats_data.cc
@@ -22,7 +22,7 @@
#include <grpc/support/useful.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/exec_ctx.h"
-const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
+const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"client_calls_created",
"server_calls_created",
"cqs_created",
@@ -120,11 +120,13 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"cq_ev_queue_trylock_successes",
"cq_ev_queue_transient_pop_failures",
};
-const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
+const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
- "Number of completion queues created", "Number of client channels created",
- "Number of client subchannels created", "Number of server channels created",
+ "Number of completion queues created",
+ "Number of client channels created",
+ "Number of client subchannels created",
+ "Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"How many polling wakeups were performed by the process (only valid for "
@@ -154,7 +156,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
- "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+ "Number of settings frames sent",
+ "Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "
@@ -241,7 +244,7 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of times NULL was popped out of completion queue's event queue "
"even though the event queue was not empty",
};
-const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
+const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"call_initial_size",
"poll_events_returned",
"tcp_write_size",
@@ -256,7 +259,7 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"http2_send_flowctl_per_write",
"server_cqs_checked",
};
-const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
+const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Initial size of the grpc_call arena created at call start",
"How many events are called for each syscall_poll",
"Number of bytes offered to each syscall_write",
@@ -339,7 +342,7 @@ const uint8_t grpc_stats_table_7[102] = {
42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
-void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 262144);
if (value < 6) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
@@ -364,7 +367,7 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
}
-void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -390,7 +393,7 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 128));
}
-void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
@@ -415,7 +418,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 64));
}
-void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -440,7 +443,7 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
-void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
@@ -465,7 +468,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 64));
}
-void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
@@ -490,7 +493,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) {
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_4, 64));
}
-void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx* exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
@@ -517,7 +520,7 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
-void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx* exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 16777216);
if (value < 5) {
@@ -545,7 +548,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
(exec_ctx), value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(
- grpc_exec_ctx *exec_ctx, int value) {
+ grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -573,7 +576,7 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
64));
}
-void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx* exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
@@ -601,7 +604,7 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
(exec_ctx), value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
- grpc_exec_ctx *exec_ctx, int value) {
+ grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
@@ -629,7 +632,7 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
64));
}
-void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx* exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
@@ -656,7 +659,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_6, 64));
}
-void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx* exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
@@ -685,13 +688,13 @@ const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832};
-const int *const grpc_stats_histo_bucket_boundaries[13] = {
+const int* const grpc_stats_histo_bucket_boundaries[13] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
grpc_stats_table_8};
-void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = {
+void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx* exec_ctx, int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index 031942df5c..fbfcce83ba 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -127,8 +127,8 @@ typedef enum {
GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
-extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
-extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
+extern const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
+extern const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
@@ -145,8 +145,8 @@ typedef enum {
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
-extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
-extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
+extern const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
+extern const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
@@ -454,52 +454,52 @@ typedef enum {
(exec_ctx), GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES)
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
-void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_call_initial_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
-void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_poll_events_returned(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_tcp_write_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_tcp_read_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \
grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_initial_metadata_per_write(
- grpc_exec_ctx *exec_ctx, int x);
+ grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx* exec_ctx,
int x);
#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
(int)(value))
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
- grpc_exec_ctx *exec_ctx, int x);
+ grpc_exec_ctx* exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
+void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx* exec_ctx,
int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
-void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx* exec_ctx, int x);
extern const int grpc_stats_histo_buckets[13];
extern const int grpc_stats_histo_start[13];
-extern const int *const grpc_stats_histo_bucket_boundaries[13];
-extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx,
+extern const int* const grpc_stats_histo_bucket_boundaries[13];
+extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx* exec_ctx,
int x);
#ifdef __cplusplus
diff --git a/src/core/lib/debug/trace.cc b/src/core/lib/debug/trace.cc
index 21b0d8c3a6..b1ae1fa185 100644
--- a/src/core/lib/debug/trace.cc
+++ b/src/core/lib/debug/trace.cc
@@ -25,13 +25,13 @@
#include <grpc/support/log.h>
#include "src/core/lib/support/env.h"
-int grpc_tracer_set_enabled(const char *name, int enabled);
+int grpc_tracer_set_enabled(const char* name, int enabled);
typedef struct tracer {
- grpc_tracer_flag *flag;
- struct tracer *next;
+ grpc_tracer_flag* flag;
+ struct tracer* next;
} tracer;
-static tracer *tracers;
+static tracer* tracers;
#ifdef GRPC_THREADSAFE_TRACER
#define TRACER_SET(flag, on) gpr_atm_no_barrier_store(&(flag).value, (on))
@@ -39,31 +39,31 @@ static tracer *tracers;
#define TRACER_SET(flag, on) (flag).value = (on)
#endif
-void grpc_register_tracer(grpc_tracer_flag *flag) {
- tracer *t = (tracer *)gpr_malloc(sizeof(*t));
+void grpc_register_tracer(grpc_tracer_flag* flag) {
+ tracer* t = (tracer*)gpr_malloc(sizeof(*t));
t->flag = flag;
t->next = tracers;
TRACER_SET(*flag, false);
tracers = t;
}
-static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
+static void add(const char* beg, const char* end, char*** ss, size_t* ns) {
size_t n = *ns;
size_t np = n + 1;
- char *s;
+ char* s;
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = (char *)gpr_malloc(len + 1);
+ s = (char*)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char**)gpr_realloc(*ss, sizeof(char**) * np);
(*ss)[n] = s;
*ns = np;
}
-static void split(const char *s, char ***ss, size_t *ns) {
- const char *c = strchr(s, ',');
+static void split(const char* s, char*** ss, size_t* ns) {
+ const char* c = strchr(s, ',');
if (c == NULL) {
add(s, s + strlen(s), ss, ns);
} else {
@@ -72,8 +72,8 @@ static void split(const char *s, char ***ss, size_t *ns) {
}
}
-static void parse(const char *s) {
- char **strings = NULL;
+static void parse(const char* s) {
+ char** strings = NULL;
size_t nstrings = 0;
size_t i;
split(s, &strings, &nstrings);
@@ -94,14 +94,14 @@ static void parse(const char *s) {
static void list_tracers() {
gpr_log(GPR_DEBUG, "available tracers:");
- tracer *t;
+ tracer* t;
for (t = tracers; t; t = t->next) {
gpr_log(GPR_DEBUG, "\t%s", t->flag->name);
}
}
-void grpc_tracer_init(const char *env_var) {
- char *e = gpr_getenv(env_var);
+void grpc_tracer_init(const char* env_var) {
+ char* e = gpr_getenv(env_var);
if (e != NULL) {
parse(e);
gpr_free(e);
@@ -110,14 +110,14 @@ void grpc_tracer_init(const char *env_var) {
void grpc_tracer_shutdown(void) {
while (tracers) {
- tracer *t = tracers;
+ tracer* t = tracers;
tracers = t->next;
gpr_free(t);
}
}
-int grpc_tracer_set_enabled(const char *name, int enabled) {
- tracer *t;
+int grpc_tracer_set_enabled(const char* name, int enabled) {
+ tracer* t;
if (0 == strcmp(name, "all")) {
for (t = tracers; t; t = t->next) {
TRACER_SET(*t->flag, enabled);
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index 558ba942bb..7447d5d94a 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -39,7 +39,7 @@ typedef struct {
#else
bool value;
#endif
- const char *name;
+ const char* name;
} grpc_tracer_flag;
#ifdef GRPC_THREADSAFE_TRACER
@@ -52,8 +52,8 @@ typedef struct {
{ (on), (name) }
#endif
-void grpc_register_tracer(grpc_tracer_flag *flag);
-void grpc_tracer_init(const char *env_var_name);
+void grpc_register_tracer(grpc_tracer_flag* flag);
+void grpc_tracer_init(const char* env_var_name);
void grpc_tracer_shutdown(void);
#ifdef __cplusplus
diff --git a/src/core/lib/http/format_request.cc b/src/core/lib/http/format_request.cc
index 88fb0ab0b6..f3f3cbda7b 100644
--- a/src/core/lib/http/format_request.cc
+++ b/src/core/lib/http/format_request.cc
@@ -28,8 +28,8 @@
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
-static void fill_common_header(const grpc_httpcli_request *request,
- gpr_strvec *buf, bool connection_close) {
+static void fill_common_header(const grpc_httpcli_request* request,
+ gpr_strvec* buf, bool connection_close) {
size_t i;
gpr_strvec_add(buf, gpr_strdup(request->http.path));
gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n"));
@@ -51,9 +51,9 @@ static void fill_common_header(const grpc_httpcli_request *request,
}
grpc_slice grpc_httpcli_format_get_request(
- const grpc_httpcli_request *request) {
+ const grpc_httpcli_request* request) {
gpr_strvec out;
- char *flat;
+ char* flat;
size_t flat_len;
gpr_strvec_init(&out);
@@ -67,11 +67,11 @@ grpc_slice grpc_httpcli_format_get_request(
return grpc_slice_new(flat, flat_len, gpr_free);
}
-grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
- const char *body_bytes,
+grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request* request,
+ const char* body_bytes,
size_t body_size) {
gpr_strvec out;
- char *tmp;
+ char* tmp;
size_t out_len;
size_t i;
@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
- tmp = (char *)gpr_realloc(tmp, out_len + body_size);
+ tmp = (char*)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}
@@ -107,14 +107,14 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
}
grpc_slice grpc_httpcli_format_connect_request(
- const grpc_httpcli_request *request) {
+ const grpc_httpcli_request* request) {
gpr_strvec out;
gpr_strvec_init(&out);
gpr_strvec_add(&out, gpr_strdup("CONNECT "));
fill_common_header(request, &out, false);
gpr_strvec_add(&out, gpr_strdup("\r\n"));
size_t flat_len;
- char *flat = gpr_strvec_flatten(&out, &flat_len);
+ char* flat = gpr_strvec_flatten(&out, &flat_len);
gpr_strvec_destroy(&out);
return grpc_slice_new(flat, flat_len, gpr_free);
}
diff --git a/src/core/lib/http/format_request.h b/src/core/lib/http/format_request.h
index 2e77e8661a..32054805b4 100644
--- a/src/core/lib/http/format_request.h
+++ b/src/core/lib/http/format_request.h
@@ -26,12 +26,12 @@
extern "C" {
#endif
-grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request);
-grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
- const char *body_bytes,
+grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request* request);
+grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request* request,
+ const char* body_bytes,
size_t body_size);
grpc_slice grpc_httpcli_format_connect_request(
- const grpc_httpcli_request *request);
+ const grpc_httpcli_request* request);
#ifdef __cplusplus
}
diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc
index c96800b85c..493e6af95c 100644
--- a/src/core/lib/http/httpcli.cc
+++ b/src/core/lib/http/httpcli.cc
@@ -39,56 +39,56 @@
typedef struct {
grpc_slice request_text;
grpc_http_parser parser;
- grpc_resolved_addresses *addresses;
+ grpc_resolved_addresses* addresses;
size_t next_address;
- grpc_endpoint *ep;
- char *host;
- char *ssl_host_override;
+ grpc_endpoint* ep;
+ char* host;
+ char* ssl_host_override;
grpc_millis deadline;
int have_read_byte;
- const grpc_httpcli_handshaker *handshaker;
- grpc_closure *on_done;
- grpc_httpcli_context *context;
- grpc_polling_entity *pollent;
+ const grpc_httpcli_handshaker* handshaker;
+ grpc_closure* on_done;
+ grpc_httpcli_context* context;
+ grpc_polling_entity* pollent;
grpc_iomgr_object iomgr_obj;
grpc_slice_buffer incoming;
grpc_slice_buffer outgoing;
grpc_closure on_read;
grpc_closure done_write;
grpc_closure connected;
- grpc_error *overall_error;
- grpc_resource_quota *resource_quota;
+ grpc_error* overall_error;
+ grpc_resource_quota* resource_quota;
} internal_request;
static grpc_httpcli_get_override g_get_override = NULL;
static grpc_httpcli_post_override g_post_override = NULL;
-static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *endpoint, const char *host,
+static void plaintext_handshake(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* endpoint, const char* host,
grpc_millis deadline,
- void (*on_done)(grpc_exec_ctx *exec_ctx,
- void *arg,
- grpc_endpoint *endpoint)) {
+ void (*on_done)(grpc_exec_ctx* exec_ctx,
+ void* arg,
+ grpc_endpoint* endpoint)) {
on_done(exec_ctx, arg, endpoint);
}
const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http",
plaintext_handshake};
-void grpc_httpcli_context_init(grpc_httpcli_context *context) {
+void grpc_httpcli_context_init(grpc_httpcli_context* context) {
context->pollset_set = grpc_pollset_set_create();
}
-void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
- grpc_httpcli_context *context) {
+void grpc_httpcli_context_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_httpcli_context* context) {
grpc_pollset_set_destroy(exec_ctx, context->pollset_set);
}
-static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
- grpc_error *due_to_error);
+static void next_address(grpc_exec_ctx* exec_ctx, internal_request* req,
+ grpc_error* due_to_error);
-static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
- grpc_error *error) {
+static void finish(grpc_exec_ctx* exec_ctx, internal_request* req,
+ grpc_error* error) {
grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
req->context->pollset_set);
GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error);
@@ -110,13 +110,13 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
gpr_free(req);
}
-static void append_error(internal_request *req, grpc_error *error) {
+static void append_error(internal_request* req, grpc_error* error) {
if (req->overall_error == GRPC_ERROR_NONE) {
req->overall_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed HTTP/1 client request");
}
- grpc_resolved_address *addr = &req->addresses->addrs[req->next_address - 1];
- char *addr_text = grpc_sockaddr_to_uri(addr);
+ grpc_resolved_address* addr = &req->addresses->addrs[req->next_address - 1];
+ char* addr_text = grpc_sockaddr_to_uri(addr);
req->overall_error = grpc_error_add_child(
req->overall_error,
grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
@@ -124,19 +124,19 @@ static void append_error(internal_request *req, grpc_error *error) {
gpr_free(addr_text);
}
-static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
+static void do_read(grpc_exec_ctx* exec_ctx, internal_request* req) {
grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read);
}
-static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *error) {
- internal_request *req = (internal_request *)user_data;
+static void on_read(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* error) {
+ internal_request* req = (internal_request*)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
if (GRPC_SLICE_LENGTH(req->incoming.slices[i])) {
req->have_read_byte = 1;
- grpc_error *err =
+ grpc_error* err =
grpc_http_parser_parse(&req->parser, req->incoming.slices[i], NULL);
if (err != GRPC_ERROR_NONE) {
finish(exec_ctx, req, err);
@@ -154,12 +154,12 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
}
}
-static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
+static void on_written(grpc_exec_ctx* exec_ctx, internal_request* req) {
do_read(exec_ctx, req);
}
-static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = (internal_request *)arg;
+static void done_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ internal_request* req = (internal_request*)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -167,19 +167,20 @@ static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
}
-static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
+static void start_write(grpc_exec_ctx* exec_ctx, internal_request* req) {
grpc_slice_ref_internal(req->request_text);
grpc_slice_buffer_add(&req->outgoing, req->request_text);
grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write);
}
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *ep) {
- internal_request *req = (internal_request *)arg;
+static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* ep) {
+ internal_request* req = (internal_request*)arg;
if (!ep) {
- next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Unexplained handshake failure"));
+ next_address(
+ exec_ctx, req,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexplained handshake failure"));
return;
}
@@ -187,9 +188,9 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
start_write(exec_ctx, req);
}
-static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- internal_request *req = (internal_request *)arg;
+static void on_connected(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ internal_request* req = (internal_request*)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -201,9 +202,9 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
req->deadline, on_handshake_done);
}
-static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
- grpc_error *error) {
- grpc_resolved_address *addr;
+static void next_address(grpc_exec_ctx* exec_ctx, internal_request* req,
+ grpc_error* error) {
+ grpc_resolved_address* addr;
if (error != GRPC_ERROR_NONE) {
append_error(req, error);
}
@@ -217,7 +218,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
- (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
+ (char*)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
@@ -225,8 +226,8 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
req->deadline);
}
-static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- internal_request *req = (internal_request *)arg;
+static void on_resolved(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ internal_request* req = (internal_request*)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -235,16 +236,16 @@ static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
next_address(exec_ctx, req, GRPC_ERROR_NONE);
}
-static void internal_request_begin(grpc_exec_ctx *exec_ctx,
- grpc_httpcli_context *context,
- grpc_polling_entity *pollent,
- grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- grpc_millis deadline, grpc_closure *on_done,
- grpc_httpcli_response *response,
- const char *name, grpc_slice request_text) {
- internal_request *req =
- (internal_request *)gpr_malloc(sizeof(internal_request));
+static void internal_request_begin(grpc_exec_ctx* exec_ctx,
+ grpc_httpcli_context* context,
+ grpc_polling_entity* pollent,
+ grpc_resource_quota* resource_quota,
+ const grpc_httpcli_request* request,
+ grpc_millis deadline, grpc_closure* on_done,
+ grpc_httpcli_response* response,
+ const char* name, grpc_slice request_text) {
+ internal_request* req =
+ (internal_request*)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);
@@ -275,12 +276,12 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
&req->addresses);
}
-void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_polling_entity *pollent,
- grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request, grpc_millis deadline,
- grpc_closure *on_done, grpc_httpcli_response *response) {
- char *name;
+void grpc_httpcli_get(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+ grpc_polling_entity* pollent,
+ grpc_resource_quota* resource_quota,
+ const grpc_httpcli_request* request, grpc_millis deadline,
+ grpc_closure* on_done, grpc_httpcli_response* response) {
+ char* name;
if (g_get_override &&
g_get_override(exec_ctx, request, deadline, on_done, response)) {
return;
@@ -292,14 +293,14 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
gpr_free(name);
}
-void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_polling_entity *pollent,
- grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size,
- grpc_millis deadline, grpc_closure *on_done,
- grpc_httpcli_response *response) {
- char *name;
+void grpc_httpcli_post(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+ grpc_polling_entity* pollent,
+ grpc_resource_quota* resource_quota,
+ const grpc_httpcli_request* request,
+ const char* body_bytes, size_t body_size,
+ grpc_millis deadline, grpc_closure* on_done,
+ grpc_httpcli_response* response) {
+ char* name;
if (g_post_override &&
g_post_override(exec_ctx, request, body_bytes, body_size, deadline,
on_done, response)) {
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index 76b790fa8a..a3411341ad 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -40,15 +40,15 @@ extern "C" {
TODO(ctiller): allow caching and capturing multiple requests for the
same content and combining them */
typedef struct grpc_httpcli_context {
- grpc_pollset_set *pollset_set;
+ grpc_pollset_set* pollset_set;
} grpc_httpcli_context;
typedef struct {
- const char *default_port;
- void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint,
- const char *host, grpc_millis deadline,
- void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *endpoint));
+ const char* default_port;
+ void (*handshake)(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint,
+ const char* host, grpc_millis deadline,
+ void (*on_done)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* endpoint));
} grpc_httpcli_handshaker;
extern const grpc_httpcli_handshaker grpc_httpcli_plaintext;
@@ -57,23 +57,23 @@ extern const grpc_httpcli_handshaker grpc_httpcli_ssl;
/* A request */
typedef struct grpc_httpcli_request {
/* The host name to connect to */
- char *host;
+ char* host;
/* The host to verify in the SSL handshake (or NULL) */
- char *ssl_host_override;
+ char* ssl_host_override;
/* The main part of the request
The following headers are supplied automatically and MUST NOT be set here:
Host, Connection, User-Agent */
grpc_http_request http;
/* handshaker to use ssl for the request */
- const grpc_httpcli_handshaker *handshaker;
+ const grpc_httpcli_handshaker* handshaker;
} grpc_httpcli_request;
/* Expose the parser response type as a httpcli response too */
typedef struct grpc_http_response grpc_httpcli_response;
-void grpc_httpcli_context_init(grpc_httpcli_context *context);
-void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
- grpc_httpcli_context *context);
+void grpc_httpcli_context_init(grpc_httpcli_context* context);
+void grpc_httpcli_context_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_httpcli_context* context);
/* Asynchronously perform a HTTP GET.
'context' specifies the http context under which to do the get
@@ -84,12 +84,12 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx,
destroyed once the call returns
'deadline' contains a deadline for the request (or gpr_inf_future)
'on_response' is a callback to report results to */
-void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_polling_entity *pollent,
- grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request, grpc_millis deadline,
- grpc_closure *on_complete,
- grpc_httpcli_response *response);
+void grpc_httpcli_get(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+ grpc_polling_entity* pollent,
+ grpc_resource_quota* resource_quota,
+ const grpc_httpcli_request* request, grpc_millis deadline,
+ grpc_closure* on_complete,
+ grpc_httpcli_response* response);
/* Asynchronously perform a HTTP POST.
'context' specifies the http context under which to do the post
@@ -105,24 +105,24 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
lifetime of the request
'on_response' is a callback to report results to
Does not support ?var1=val1&var2=val2 in the path. */
-void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
- grpc_polling_entity *pollent,
- grpc_resource_quota *resource_quota,
- const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size,
- grpc_millis deadline, grpc_closure *on_complete,
- grpc_httpcli_response *response);
+void grpc_httpcli_post(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+ grpc_polling_entity* pollent,
+ grpc_resource_quota* resource_quota,
+ const grpc_httpcli_request* request,
+ const char* body_bytes, size_t body_size,
+ grpc_millis deadline, grpc_closure* on_complete,
+ grpc_httpcli_response* response);
/* override functions return 1 if they handled the request, 0 otherwise */
-typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx,
- const grpc_httpcli_request *request,
+typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx* exec_ctx,
+ const grpc_httpcli_request* request,
grpc_millis deadline,
- grpc_closure *on_complete,
- grpc_httpcli_response *response);
+ grpc_closure* on_complete,
+ grpc_httpcli_response* response);
typedef int (*grpc_httpcli_post_override)(
- grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request,
- const char *body_bytes, size_t body_size, grpc_millis deadline,
- grpc_closure *on_complete, grpc_httpcli_response *response);
+ grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
+ const char* body_bytes, size_t body_size, grpc_millis deadline,
+ grpc_closure* on_complete, grpc_httpcli_response* response);
void grpc_httpcli_set_override(grpc_httpcli_get_override get,
grpc_httpcli_post_override post);
diff --git a/src/core/lib/http/httpcli_security_connector.cc b/src/core/lib/http/httpcli_security_connector.cc
index d832dacb69..d029323eac 100644
--- a/src/core/lib/http/httpcli_security_connector.cc
+++ b/src/core/lib/http/httpcli_security_connector.cc
@@ -34,14 +34,14 @@
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *handshaker_factory;
- char *secure_peer_name;
+ tsi_ssl_client_handshaker_factory* handshaker_factory;
+ char* secure_peer_name;
} grpc_httpcli_ssl_channel_security_connector;
-static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
- grpc_httpcli_ssl_channel_security_connector *c =
- (grpc_httpcli_ssl_channel_security_connector *)sc;
+static void httpcli_ssl_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
+ grpc_httpcli_ssl_channel_security_connector* c =
+ (grpc_httpcli_ssl_channel_security_connector*)sc;
if (c->handshaker_factory != NULL) {
tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
c->handshaker_factory = NULL;
@@ -50,12 +50,12 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(sc);
}
-static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_handshake_manager *handshake_mgr) {
- grpc_httpcli_ssl_channel_security_connector *c =
- (grpc_httpcli_ssl_channel_security_connector *)sc;
- tsi_handshaker *handshaker = NULL;
+static void httpcli_ssl_add_handshakers(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr) {
+ grpc_httpcli_ssl_channel_security_connector* c =
+ (grpc_httpcli_ssl_channel_security_connector*)sc;
+ tsi_handshaker* handshaker = NULL;
if (c->handshaker_factory != NULL) {
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
c->handshaker_factory, c->secure_peer_name, &handshaker);
@@ -70,18 +70,18 @@ static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx,
exec_ctx, tsi_create_adapter_handshaker(handshaker), &sc->base));
}
-static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
- grpc_httpcli_ssl_channel_security_connector *c =
- (grpc_httpcli_ssl_channel_security_connector *)sc;
- grpc_error *error = GRPC_ERROR_NONE;
+static void httpcli_ssl_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
+ grpc_httpcli_ssl_channel_security_connector* c =
+ (grpc_httpcli_ssl_channel_security_connector*)sc;
+ grpc_error* error = GRPC_ERROR_NONE;
/* Check the peer name. */
if (c->secure_peer_name != NULL &&
!tsi_ssl_peer_matches_name(&peer, c->secure_peer_name)) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate",
c->secure_peer_name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
@@ -91,12 +91,12 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
tsi_peer_destruct(&peer);
}
-static int httpcli_ssl_cmp(grpc_security_connector *sc1,
- grpc_security_connector *sc2) {
- grpc_httpcli_ssl_channel_security_connector *c1 =
- (grpc_httpcli_ssl_channel_security_connector *)sc1;
- grpc_httpcli_ssl_channel_security_connector *c2 =
- (grpc_httpcli_ssl_channel_security_connector *)sc2;
+static int httpcli_ssl_cmp(grpc_security_connector* sc1,
+ grpc_security_connector* sc2) {
+ grpc_httpcli_ssl_channel_security_connector* c1 =
+ (grpc_httpcli_ssl_channel_security_connector*)sc1;
+ grpc_httpcli_ssl_channel_security_connector* c2 =
+ (grpc_httpcli_ssl_channel_security_connector*)sc2;
return strcmp(c1->secure_peer_name, c2->secure_peer_name);
}
@@ -104,10 +104,10 @@ static grpc_security_connector_vtable httpcli_ssl_vtable = {
httpcli_ssl_destroy, httpcli_ssl_check_peer, httpcli_ssl_cmp};
static grpc_security_status httpcli_ssl_channel_security_connector_create(
- grpc_exec_ctx *exec_ctx, const char *pem_root_certs,
- const char *secure_peer_name, grpc_channel_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, const char* pem_root_certs,
+ const char* secure_peer_name, grpc_channel_security_connector** sc) {
tsi_result result = TSI_OK;
- grpc_httpcli_ssl_channel_security_connector *c;
+ grpc_httpcli_ssl_channel_security_connector* c;
if (secure_peer_name != NULL && pem_root_certs == NULL) {
gpr_log(GPR_ERROR,
@@ -115,7 +115,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
return GRPC_SECURITY_ERROR;
}
- c = (grpc_httpcli_ssl_channel_security_connector *)gpr_zalloc(
+ c = (grpc_httpcli_ssl_channel_security_connector*)gpr_zalloc(
sizeof(grpc_httpcli_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
@@ -135,7 +135,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
// We don't actually need a channel credentials object in this case,
// but we set it to a non-NULL address so that we don't trigger
// assertions in grpc_channel_security_connector_cmp().
- c->base.channel_creds = (grpc_channel_credentials *)1;
+ c->base.channel_creds = (grpc_channel_credentials*)1;
c->base.add_handshakers = httpcli_ssl_add_handshakers;
*sc = &c->base;
return GRPC_SECURITY_OK;
@@ -144,17 +144,17 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create(
/* handshaker */
typedef struct {
- void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint);
- void *arg;
- grpc_handshake_manager *handshake_mgr;
+ void (*func)(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint);
+ void* arg;
+ grpc_handshake_manager* handshake_mgr;
} on_done_closure;
-static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
- on_done_closure *c = (on_done_closure *)args->user_data;
+static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
+ on_done_closure* c = (on_done_closure*)args->user_data;
if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg);
c->func(exec_ctx, c->arg, NULL);
@@ -168,13 +168,13 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
-static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *tcp, const char *host,
+static void ssl_handshake(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* tcp, const char* host,
grpc_millis deadline,
- void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *endpoint)) {
- on_done_closure *c = (on_done_closure *)gpr_malloc(sizeof(*c));
- const char *pem_root_certs = grpc_get_default_ssl_roots();
+ void (*on_done)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* endpoint)) {
+ on_done_closure* c = (on_done_closure*)gpr_malloc(sizeof(*c));
+ const char* pem_root_certs = grpc_get_default_ssl_roots();
if (pem_root_certs == NULL) {
gpr_log(GPR_ERROR, "Could not get default pem root certs.");
on_done(exec_ctx, arg, NULL);
@@ -183,7 +183,7 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg,
}
c->func = on_done;
c->arg = arg;
- grpc_channel_security_connector *sc = NULL;
+ grpc_channel_security_connector* sc = NULL;
GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK);
grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base);
diff --git a/src/core/lib/http/parser.cc b/src/core/lib/http/parser.cc
index 0950bd655e..99a4919401 100644
--- a/src/core/lib/http/parser.cc
+++ b/src/core/lib/http/parser.cc
@@ -27,17 +27,17 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
-static char *buf2str(void *buffer, size_t length) {
- char *out = (char *)gpr_malloc(length + 1);
+static char* buf2str(void* buffer, size_t length) {
+ char* out = (char*)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
}
-static grpc_error *handle_response_line(grpc_http_parser *parser) {
- uint8_t *beg = parser->cur_line;
- uint8_t *cur = beg;
- uint8_t *end = beg + parser->cur_line_length;
+static grpc_error* handle_response_line(grpc_http_parser* parser) {
+ uint8_t* beg = parser->cur_line;
+ uint8_t* cur = beg;
+ uint8_t* end = beg + parser->cur_line_length;
if (cur == end || *cur++ != 'H')
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'H'");
@@ -75,10 +75,10 @@ static grpc_error *handle_response_line(grpc_http_parser *parser) {
return GRPC_ERROR_NONE;
}
-static grpc_error *handle_request_line(grpc_http_parser *parser) {
- uint8_t *beg = parser->cur_line;
- uint8_t *cur = beg;
- uint8_t *end = beg + parser->cur_line_length;
+static grpc_error* handle_request_line(grpc_http_parser* parser) {
+ uint8_t* beg = parser->cur_line;
+ uint8_t* cur = beg;
+ uint8_t* end = beg + parser->cur_line_length;
uint8_t vers_major = 0;
uint8_t vers_minor = 0;
@@ -137,7 +137,7 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) {
return GRPC_ERROR_NONE;
}
-static grpc_error *handle_first_line(grpc_http_parser *parser) {
+static grpc_error* handle_first_line(grpc_http_parser* parser) {
switch (parser->type) {
case GRPC_HTTP_REQUEST:
return handle_request_line(parser);
@@ -148,14 +148,14 @@ static grpc_error *handle_first_line(grpc_http_parser *parser) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
-static grpc_error *add_header(grpc_http_parser *parser) {
- uint8_t *beg = parser->cur_line;
- uint8_t *cur = beg;
- uint8_t *end = beg + parser->cur_line_length;
- size_t *hdr_count = NULL;
- grpc_http_header **hdrs = NULL;
+static grpc_error* add_header(grpc_http_parser* parser) {
+ uint8_t* beg = parser->cur_line;
+ uint8_t* cur = beg;
+ uint8_t* end = beg + parser->cur_line_length;
+ size_t* hdr_count = NULL;
+ grpc_http_header** hdrs = NULL;
grpc_http_header hdr = {NULL, NULL};
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
GPR_ASSERT(cur != end);
@@ -197,7 +197,7 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
- *hdrs = (grpc_http_header *)gpr_realloc(
+ *hdrs = (grpc_http_header*)gpr_realloc(
*hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -210,9 +210,9 @@ done:
return error;
}
-static grpc_error *finish_line(grpc_http_parser *parser,
- bool *found_body_start) {
- grpc_error *err;
+static grpc_error* finish_line(grpc_http_parser* parser,
+ bool* found_body_start) {
+ grpc_error* err;
switch (parser->state) {
case GRPC_HTTP_FIRST_LINE:
err = handle_first_line(parser);
@@ -239,9 +239,9 @@ static grpc_error *finish_line(grpc_http_parser *parser,
return GRPC_ERROR_NONE;
}
-static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
- size_t *body_length = NULL;
- char **body = NULL;
+static grpc_error* addbyte_body(grpc_http_parser* parser, uint8_t byte) {
+ size_t* body_length = NULL;
+ char** body = NULL;
if (parser->type == GRPC_HTTP_RESPONSE) {
body_length = &parser->http.response->body_length;
@@ -256,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
- *body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
+ *body = (char*)gpr_realloc((void*)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;
@@ -264,7 +264,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
return GRPC_ERROR_NONE;
}
-static bool check_line(grpc_http_parser *parser) {
+static bool check_line(grpc_http_parser* parser) {
if (parser->cur_line_length >= 2 &&
parser->cur_line[parser->cur_line_length - 2] == '\r' &&
parser->cur_line[parser->cur_line_length - 1] == '\n') {
@@ -288,8 +288,8 @@ static bool check_line(grpc_http_parser *parser) {
return false;
}
-static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte,
- bool *found_body_start) {
+static grpc_error* addbyte(grpc_http_parser* parser, uint8_t byte,
+ bool* found_body_start) {
switch (parser->state) {
case GRPC_HTTP_FIRST_LINE:
case GRPC_HTTP_HEADERS:
@@ -312,8 +312,8 @@ static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte,
GPR_UNREACHABLE_CODE(return GRPC_ERROR_NONE);
}
-void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type,
- void *request_or_response) {
+void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type,
+ void* request_or_response) {
memset(parser, 0, sizeof(*parser));
parser->state = GRPC_HTTP_FIRST_LINE;
parser->type = type;
@@ -321,9 +321,9 @@ void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type,
parser->cur_line_end_length = 2;
}
-void grpc_http_parser_destroy(grpc_http_parser *parser) {}
+void grpc_http_parser_destroy(grpc_http_parser* parser) {}
-void grpc_http_request_destroy(grpc_http_request *request) {
+void grpc_http_request_destroy(grpc_http_request* request) {
size_t i;
gpr_free(request->body);
for (i = 0; i < request->hdr_count; i++) {
@@ -335,7 +335,7 @@ void grpc_http_request_destroy(grpc_http_request *request) {
gpr_free(request->path);
}
-void grpc_http_response_destroy(grpc_http_response *response) {
+void grpc_http_response_destroy(grpc_http_response* response) {
size_t i;
gpr_free(response->body);
for (i = 0; i < response->hdr_count; i++) {
@@ -345,11 +345,11 @@ void grpc_http_response_destroy(grpc_http_response *response) {
gpr_free(response->hdrs);
}
-grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
- size_t *start_of_body) {
+grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
+ size_t* start_of_body) {
for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) {
bool found_body_start = false;
- grpc_error *err =
+ grpc_error* err =
addbyte(parser, GRPC_SLICE_START_PTR(slice)[i], &found_body_start);
if (err != GRPC_ERROR_NONE) return err;
if (found_body_start && start_of_body != NULL) *start_of_body = i + 1;
@@ -357,7 +357,7 @@ grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_http_parser_eof(grpc_http_parser *parser) {
+grpc_error* grpc_http_parser_eof(grpc_http_parser* parser) {
if (parser->state != GRPC_HTTP_BODY) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Did not finish headers");
}
diff --git a/src/core/lib/http/parser.h b/src/core/lib/http/parser.h
index d2bda6ae0e..3d28481c4c 100644
--- a/src/core/lib/http/parser.h
+++ b/src/core/lib/http/parser.h
@@ -33,8 +33,8 @@ extern "C" {
/* A single header to be passed in a request */
typedef struct grpc_http_header {
- char *key;
- char *value;
+ char* key;
+ char* value;
} grpc_http_header;
typedef enum {
@@ -57,17 +57,17 @@ typedef enum {
/* A request */
typedef struct grpc_http_request {
/* Method of the request (e.g. GET, POST) */
- char *method;
+ char* method;
/* The path of the resource to fetch */
- char *path;
+ char* path;
/* HTTP version to use */
grpc_http_version version;
/* Headers attached to the request */
size_t hdr_count;
- grpc_http_header *hdrs;
+ grpc_http_header* hdrs;
/* Body: length and contents; contents are NOT null-terminated */
size_t body_length;
- char *body;
+ char* body;
} grpc_http_request;
/* A response */
@@ -76,10 +76,10 @@ typedef struct grpc_http_response {
int status;
/* Headers: count and key/values */
size_t hdr_count;
- grpc_http_header *hdrs;
+ grpc_http_header* hdrs;
/* Body: length and contents; contents are NOT null-terminated */
size_t body_length;
- char *body;
+ char* body;
} grpc_http_response;
typedef struct {
@@ -87,9 +87,9 @@ typedef struct {
grpc_http_type type;
union {
- grpc_http_response *response;
- grpc_http_request *request;
- void *request_or_response;
+ grpc_http_response* response;
+ grpc_http_request* request;
+ void* request_or_response;
} http;
size_t body_capacity;
size_t hdr_capacity;
@@ -99,17 +99,17 @@ typedef struct {
size_t cur_line_end_length;
} grpc_http_parser;
-void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type,
- void *request_or_response);
-void grpc_http_parser_destroy(grpc_http_parser *parser);
+void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type,
+ void* request_or_response);
+void grpc_http_parser_destroy(grpc_http_parser* parser);
/* Sets \a start_of_body to the offset in \a slice of the start of the body. */
-grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
- size_t *start_of_body);
-grpc_error *grpc_http_parser_eof(grpc_http_parser *parser);
+grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
+ size_t* start_of_body);
+grpc_error* grpc_http_parser_eof(grpc_http_parser* parser);
-void grpc_http_request_destroy(grpc_http_request *request);
-void grpc_http_response_destroy(grpc_http_response *response);
+void grpc_http_request_destroy(grpc_http_request* request);
+void grpc_http_response_destroy(grpc_http_response* response);
extern grpc_tracer_flag grpc_http1_trace;
diff --git a/src/core/lib/iomgr/closure.cc b/src/core/lib/iomgr/closure.cc
index 00edefc6ae..60e99d0e4e 100644
--- a/src/core/lib/iomgr/closure.cc
+++ b/src/core/lib/iomgr/closure.cc
@@ -29,14 +29,14 @@ grpc_tracer_flag grpc_trace_closure = GRPC_TRACER_INITIALIZER(false, "closure");
#endif
#ifndef NDEBUG
-grpc_closure *grpc_closure_init(const char *file, int line,
- grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg,
- grpc_closure_scheduler *scheduler) {
+grpc_closure* grpc_closure_init(const char* file, int line,
+ grpc_closure* closure, grpc_iomgr_cb_func cb,
+ void* cb_arg,
+ grpc_closure_scheduler* scheduler) {
#else
-grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg,
- grpc_closure_scheduler *scheduler) {
+grpc_closure* grpc_closure_init(grpc_closure* closure, grpc_iomgr_cb_func cb,
+ void* cb_arg,
+ grpc_closure_scheduler* scheduler) {
#endif
closure->cb = cb;
closure->cb_arg = cb_arg;
@@ -52,12 +52,12 @@ grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
return closure;
}
-void grpc_closure_list_init(grpc_closure_list *closure_list) {
+void grpc_closure_list_init(grpc_closure_list* closure_list) {
closure_list->head = closure_list->tail = NULL;
}
-bool grpc_closure_list_append(grpc_closure_list *closure_list,
- grpc_closure *closure, grpc_error *error) {
+bool grpc_closure_list_append(grpc_closure_list* closure_list,
+ grpc_closure* closure, grpc_error* error) {
if (closure == NULL) {
GRPC_ERROR_UNREF(error);
return false;
@@ -74,9 +74,9 @@ bool grpc_closure_list_append(grpc_closure_list *closure_list,
return was_empty;
}
-void grpc_closure_list_fail_all(grpc_closure_list *list,
- grpc_error *forced_failure) {
- for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) {
+void grpc_closure_list_fail_all(grpc_closure_list* list,
+ grpc_error* forced_failure) {
+ for (grpc_closure* c = list->head; c != NULL; c = c->next_data.next) {
if (c->error_data.error == GRPC_ERROR_NONE) {
c->error_data.error = GRPC_ERROR_REF(forced_failure);
}
@@ -88,7 +88,7 @@ bool grpc_closure_list_empty(grpc_closure_list closure_list) {
return closure_list.head == NULL;
}
-void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
+void grpc_closure_list_move(grpc_closure_list* src, grpc_closure_list* dst) {
if (src->head == NULL) {
return;
}
@@ -103,28 +103,28 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
typedef struct {
grpc_iomgr_cb_func cb;
- void *cb_arg;
+ void* cb_arg;
grpc_closure wrapper;
} wrapped_closure;
-static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- wrapped_closure *wc = (wrapped_closure *)arg;
+static void closure_wrapper(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ wrapped_closure* wc = (wrapped_closure*)arg;
grpc_iomgr_cb_func cb = wc->cb;
- void *cb_arg = wc->cb_arg;
+ void* cb_arg = wc->cb_arg;
gpr_free(wc);
cb(exec_ctx, cb_arg, error);
}
#ifndef NDEBUG
-grpc_closure *grpc_closure_create(const char *file, int line,
- grpc_iomgr_cb_func cb, void *cb_arg,
- grpc_closure_scheduler *scheduler) {
+grpc_closure* grpc_closure_create(const char* file, int line,
+ grpc_iomgr_cb_func cb, void* cb_arg,
+ grpc_closure_scheduler* scheduler) {
#else
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
- grpc_closure_scheduler *scheduler) {
+grpc_closure* grpc_closure_create(grpc_iomgr_cb_func cb, void* cb_arg,
+ grpc_closure_scheduler* scheduler) {
#endif
- wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
+ wrapped_closure* wc = (wrapped_closure*)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG
@@ -136,11 +136,11 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
}
#ifndef NDEBUG
-void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
- grpc_closure *c, grpc_error *error) {
+void grpc_closure_run(const char* file, int line, grpc_exec_ctx* exec_ctx,
+ grpc_closure* c, grpc_error* error) {
#else
-void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
- grpc_error *error) {
+void grpc_closure_run(grpc_exec_ctx* exec_ctx, grpc_closure* c,
+ grpc_error* error) {
#endif
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
@@ -158,11 +158,11 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
}
#ifndef NDEBUG
-void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
- grpc_closure *c, grpc_error *error) {
+void grpc_closure_sched(const char* file, int line, grpc_exec_ctx* exec_ctx,
+ grpc_closure* c, grpc_error* error) {
#else
-void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
- grpc_error *error) {
+void grpc_closure_sched(grpc_exec_ctx* exec_ctx, grpc_closure* c,
+ grpc_error* error) {
#endif
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
@@ -189,14 +189,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
}
#ifndef NDEBUG
-void grpc_closure_list_sched(const char *file, int line,
- grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+void grpc_closure_list_sched(const char* file, int line,
+ grpc_exec_ctx* exec_ctx, grpc_closure_list* list) {
#else
-void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+void grpc_closure_list_sched(grpc_exec_ctx* exec_ctx, grpc_closure_list* list) {
#endif
- grpc_closure *c = list->head;
+ grpc_closure* c = list->head;
while (c != NULL) {
- grpc_closure *next = c->next_data.next;
+ grpc_closure* next = c->next_data.next;
#ifndef NDEBUG
if (c->scheduled) {
gpr_log(GPR_ERROR,
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index cd32a4ba38..8b1188e2db 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -38,8 +38,8 @@ extern grpc_tracer_flag grpc_trace_closure;
#endif
typedef struct grpc_closure_list {
- grpc_closure *head;
- grpc_closure *tail;
+ grpc_closure* head;
+ grpc_closure* tail;
} grpc_closure_list;
/** gRPC Callback definition.
@@ -49,24 +49,24 @@ typedef struct grpc_closure_list {
* describing what went wrong.
* Error contract: it is not the cb's job to unref this error;
* the closure scheduler will do that after the cb returns */
-typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
typedef struct grpc_closure_scheduler grpc_closure_scheduler;
typedef struct grpc_closure_scheduler_vtable {
/* NOTE: for all these functions, closure->scheduler == the scheduler that was
used to find this vtable */
- void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
- void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
- const char *name;
+ void (*run)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
+ void (*sched)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
+ const char* name;
} grpc_closure_scheduler_vtable;
/** Abstract type that can schedule closures for execution */
struct grpc_closure_scheduler {
- const grpc_closure_scheduler_vtable *vtable;
+ const grpc_closure_scheduler_vtable* vtable;
};
/** A closure over a grpc_iomgr_cb_func. */
@@ -74,7 +74,7 @@ struct grpc_closure {
/** Once queued, next indicates the next queued closure; before then, scratch
* space */
union {
- grpc_closure *next;
+ grpc_closure* next;
gpr_mpscq_node atm_next;
uintptr_t scratch;
} next_data;
@@ -83,15 +83,15 @@ struct grpc_closure {
grpc_iomgr_cb_func cb;
/** Arguments to be passed to "cb". */
- void *cb_arg;
+ void* cb_arg;
/** Scheduler to schedule against: NULL to schedule against current execution
context */
- grpc_closure_scheduler *scheduler;
+ grpc_closure_scheduler* scheduler;
/** Once queued, the result of the closure. Before then: scratch space */
union {
- grpc_error *error;
+ grpc_error* error;
uintptr_t scratch;
} error_data;
@@ -100,39 +100,39 @@ struct grpc_closure {
#ifndef NDEBUG
bool scheduled;
bool run; // true = run, false = scheduled
- const char *file_created;
+ const char* file_created;
int line_created;
- const char *file_initiated;
+ const char* file_initiated;
int line_initiated;
#endif
};
/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
#ifndef NDEBUG
-grpc_closure *grpc_closure_init(const char *file, int line,
- grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg,
- grpc_closure_scheduler *scheduler);
+grpc_closure* grpc_closure_init(const char* file, int line,
+ grpc_closure* closure, grpc_iomgr_cb_func cb,
+ void* cb_arg,
+ grpc_closure_scheduler* scheduler);
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler)
#else
-grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
- void *cb_arg,
- grpc_closure_scheduler *scheduler);
+grpc_closure* grpc_closure_init(grpc_closure* closure, grpc_iomgr_cb_func cb,
+ void* cb_arg,
+ grpc_closure_scheduler* scheduler);
#define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \
grpc_closure_init(closure, cb, cb_arg, scheduler)
#endif
/* Create a heap allocated closure: try to avoid except for very rare events */
#ifndef NDEBUG
-grpc_closure *grpc_closure_create(const char *file, int line,
- grpc_iomgr_cb_func cb, void *cb_arg,
- grpc_closure_scheduler *scheduler);
+grpc_closure* grpc_closure_create(const char* file, int line,
+ grpc_iomgr_cb_func cb, void* cb_arg,
+ grpc_closure_scheduler* scheduler);
#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
grpc_closure_create(__FILE__, __LINE__, cb, cb_arg, scheduler)
#else
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
- grpc_closure_scheduler *scheduler);
+grpc_closure* grpc_closure_create(grpc_iomgr_cb_func cb, void* cb_arg,
+ grpc_closure_scheduler* scheduler);
#define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \
grpc_closure_create(cb, cb_arg, scheduler)
#endif
@@ -140,20 +140,20 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
-void grpc_closure_list_init(grpc_closure_list *list);
+void grpc_closure_list_init(grpc_closure_list* list);
/** add \a closure to the end of \a list
and set \a closure's result to \a error
Returns true if \a list becomes non-empty */
-bool grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure,
- grpc_error *error);
+bool grpc_closure_list_append(grpc_closure_list* list, grpc_closure* closure,
+ grpc_error* error);
/** force all success bits in \a list to false */
-void grpc_closure_list_fail_all(grpc_closure_list *list,
- grpc_error *forced_failure);
+void grpc_closure_list_fail_all(grpc_closure_list* list,
+ grpc_error* forced_failure);
/** append all closures from \a src to \a dst and empty \a src. */
-void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst);
+void grpc_closure_list_move(grpc_closure_list* src, grpc_closure_list* dst);
/** return whether \a list is empty. */
bool grpc_closure_list_empty(grpc_closure_list list);
@@ -162,26 +162,26 @@ bool grpc_closure_list_empty(grpc_closure_list list);
* Note that calling this at the end of a closure callback function itself is
* by definition safe. */
#ifndef NDEBUG
-void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error);
+void grpc_closure_run(const char* file, int line, grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error);
#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error)
#else
-void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
+void grpc_closure_run(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
grpc_closure_run(exec_ctx, closure, error)
#endif
/** Schedule a closure to be run. Does not need to be run from a safe point. */
#ifndef NDEBUG
-void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error);
+void grpc_closure_sched(const char* file, int line, grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error);
#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error)
#else
-void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
+void grpc_closure_sched(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
grpc_closure_sched(exec_ctx, closure, error)
#endif
@@ -189,14 +189,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
/** Schedule all closures in a list to be run. Does not need to be run from a
* safe point. */
#ifndef NDEBUG
-void grpc_closure_list_sched(const char *file, int line,
- grpc_exec_ctx *exec_ctx,
- grpc_closure_list *closure_list);
+void grpc_closure_list_sched(const char* file, int line,
+ grpc_exec_ctx* exec_ctx,
+ grpc_closure_list* closure_list);
#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list)
#else
-void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
- grpc_closure_list *closure_list);
+void grpc_closure_list_sched(grpc_exec_ctx* exec_ctx,
+ grpc_closure_list* closure_list);
#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
grpc_closure_list_sched(exec_ctx, closure_list)
#endif
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 53f4b7eaa7..ca9c00b935 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -43,7 +43,7 @@ grpc_tracer_flag grpc_combiner_trace =
#define STATE_ELEM_COUNT_LOW_BIT 2
struct grpc_combiner {
- grpc_combiner *next_combiner_on_this_exec_ctx;
+ grpc_combiner* next_combiner_on_this_exec_ctx;
grpc_closure_scheduler scheduler;
grpc_closure_scheduler finally_scheduler;
gpr_mpscq queue;
@@ -62,20 +62,20 @@ struct grpc_combiner {
gpr_refcount refs;
};
-static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error);
-static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error);
+static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error);
+static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error);
static const grpc_closure_scheduler_vtable scheduler = {
combiner_exec, combiner_exec, "combiner:immediately"};
static const grpc_closure_scheduler_vtable finally_scheduler = {
combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
-static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
+static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error);
-grpc_combiner *grpc_combiner_create(void) {
- grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
+grpc_combiner* grpc_combiner_create(void) {
+ grpc_combiner* lock = (grpc_combiner*)gpr_zalloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
@@ -88,14 +88,14 @@ grpc_combiner *grpc_combiner_create(void) {
return lock;
}
-static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+static void really_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue);
gpr_free(lock);
}
-static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+static void start_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
@@ -116,22 +116,22 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif
-void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) {
+void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
if (gpr_unref(&lock->refs)) {
start_destroy(exec_ctx, lock);
}
}
-grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) {
+grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
GRPC_COMBINER_DEBUG_SPAM(" REF", 1);
gpr_ref(&lock->refs);
return lock;
}
-static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock) {
+static void push_last_on_exec_ctx(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock) {
lock->next_combiner_on_this_exec_ctx = NULL;
if (exec_ctx->active_combiner == NULL) {
exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
@@ -141,8 +141,8 @@ static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
}
-static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock) {
+static void push_first_on_exec_ctx(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock) {
lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
exec_ctx->active_combiner = lock;
if (lock->next_combiner_on_this_exec_ctx == NULL) {
@@ -151,14 +151,14 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
}
#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
- ((grpc_combiner *)(((char *)((closure)->scheduler)) - \
- offsetof(grpc_combiner, scheduler_name)))
+ ((grpc_combiner*)(((char*)((closure)->scheduler)) - \
+ offsetof(grpc_combiner, scheduler_name)))
-static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
- grpc_error *error) {
+static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* cl,
+ grpc_error* error) {
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
GPR_TIMER_BEGIN("combiner.execute", 0);
- grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
+ grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
@@ -187,7 +187,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
GPR_TIMER_END("combiner.execute", 0);
}
-static void move_next(grpc_exec_ctx *exec_ctx) {
+static void move_next(grpc_exec_ctx* exec_ctx) {
exec_ctx->active_combiner =
exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
if (exec_ctx->active_combiner == NULL) {
@@ -195,21 +195,21 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
}
}
-static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_combiner *lock = (grpc_combiner *)arg;
+static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_combiner* lock = (grpc_combiner*)arg;
push_last_on_exec_ctx(exec_ctx, lock);
}
-static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+static void queue_offload(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
move_next(exec_ctx);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
-bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx) {
GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
- grpc_combiner *lock = exec_ctx->active_combiner;
+ grpc_combiner* lock = exec_ctx->active_combiner;
if (lock == NULL) {
GPR_TIMER_END("combiner.continue_exec_ctx", 0);
return false;
@@ -241,7 +241,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
// peek to see if something new has shown up, and execute that with
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
- gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
+ gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
if (n == NULL) {
@@ -253,8 +253,8 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
return true;
}
GPR_TIMER_BEGIN("combiner.exec1", 0);
- grpc_closure *cl = (grpc_closure *)n;
- grpc_error *cl_err = cl->error_data.error;
+ grpc_closure* cl = (grpc_closure*)n;
+ grpc_error* cl_err = cl->error_data.error;
#ifndef NDEBUG
cl->scheduled = false;
#endif
@@ -262,7 +262,7 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GRPC_ERROR_UNREF(cl_err);
GPR_TIMER_END("combiner.exec1", 0);
} else {
- grpc_closure *c = lock->final_list.head;
+ grpc_closure* c = lock->final_list.head;
GPR_ASSERT(c != NULL);
grpc_closure_list_init(&lock->final_list);
int loops = 0;
@@ -270,8 +270,8 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
GPR_TIMER_BEGIN("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+ grpc_closure* next = c->next_data.next;
+ grpc_error* error = c->error_data.error;
#ifndef NDEBUG
c->scheduled = false;
#endif
@@ -327,13 +327,13 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
return true;
}
-static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
- grpc_error *error);
+static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
+ grpc_error* error);
-static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_error *error) {
+static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_error* error) {
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
- grpc_combiner *lock =
+ grpc_combiner* lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
"C:%p grpc_combiner_execute_finally c=%p; ac=%p",
@@ -356,17 +356,17 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("combiner.execute_finally", 0);
}
-static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
- grpc_error *error) {
- combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
+static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
+ grpc_error* error) {
+ combiner_finally_exec(exec_ctx, (grpc_closure*)closure,
GRPC_ERROR_REF(error));
}
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
+grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
return &combiner->scheduler;
}
-grpc_closure_scheduler *grpc_combiner_finally_scheduler(
- grpc_combiner *combiner) {
+grpc_closure_scheduler* grpc_combiner_finally_scheduler(
+ grpc_combiner* combiner) {
return &combiner->finally_scheduler;
}
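
For context, a short sketch (not part of this patch) of the state word that the combiner code above manipulates: the low bit records that the combiner is still owned (STATE_UNORPHANED), and every queued closure adds STATE_ELEM_COUNT_LOW_BIT, so `state >> 1` counts pending items — which is what the `(gpr_atm_acq_load(&lock->state) >> 1) > 1` check relies on. The constant values below (1 and 2) are assumed from the elided top of combiner.cc.

/* Illustrative only; mirrors the encoding used by combiner_exec/start_destroy. */
#include <assert.h>
#include <stdint.h>

#define STATE_UNORPHANED 1         /* assumed value, defined earlier in combiner.cc */
#define STATE_ELEM_COUNT_LOW_BIT 2 /* assumed value, defined earlier in combiner.cc */

int main(void) {
  intptr_t state = STATE_UNORPHANED;  /* freshly created combiner, nothing queued */
  state += STATE_ELEM_COUNT_LOW_BIT;  /* combiner_exec: first closure queued */
  state += STATE_ELEM_COUNT_LOW_BIT;  /* combiner_exec: second closure queued */
  assert((state >> 1) == 2);          /* two pending items */
  state -= STATE_UNORPHANED;          /* start_destroy: drop the ownership bit */
  assert((state >> 1) == 2);          /* queued work still drains before destruction */
  return 0;
}
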
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index 10e5fb480d..f8a8b9df62 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -37,7 +37,7 @@ extern "C" {
// Initialize the lock, with an optional workqueue to shift load to when
// necessary
-grpc_combiner *grpc_combiner_create(void);
+grpc_combiner* grpc_combiner_create(void);
#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_ARGS \
@@ -55,15 +55,15 @@ grpc_combiner *grpc_combiner_create(void);
// Ref/unref the lock, for when we're sharing the lock ownership
// Prefer to use the macros above
-grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
-void grpc_combiner_unref(grpc_exec_ctx *exec_ctx,
- grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS);
+grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
+void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
+ grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
// Fetch a scheduler to schedule closures against
-grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock);
+grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* lock);
// Scheduler to execute \a action within the lock just prior to unlocking.
-grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock);
+grpc_closure_scheduler* grpc_combiner_finally_scheduler(grpc_combiner* lock);
-bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
+bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx);
extern grpc_tracer_flag grpc_combiner_trace;
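
A minimal usage sketch for the API above (not from this patch): create a combiner, schedule a closure onto its scheduler so it runs serialized with other combiner work, then drop the ref. GRPC_CLOSURE_INIT, GRPC_CLOSURE_SCHED and the GRPC_COMBINER_UNREF macro form are assumed from closure.h and the elided portion of this header; the work item itself is illustrative.

typedef struct {
  grpc_closure closure;
} work_item;

static void do_work(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  /* Runs under the combiner: no other closure scheduled on the same combiner
     executes concurrently with this one. */
  gpr_free(arg);
}

static void combiner_usage_sketch(grpc_exec_ctx* exec_ctx) {
  grpc_combiner* combiner = grpc_combiner_create();
  work_item* w = (work_item*)gpr_malloc(sizeof(*w));
  GRPC_CLOSURE_INIT(&w->closure, do_work, w, grpc_combiner_scheduler(combiner));
  GRPC_CLOSURE_SCHED(exec_ctx, &w->closure, GRPC_ERROR_NONE);
  /* Assumed macro form of the unref declared above; pending work keeps the
     combiner alive until the queue has drained. */
  GRPC_COMBINER_UNREF(exec_ctx, combiner, "combiner_usage_sketch");
}
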
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 92964e0f2d..1b0a9e725e 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -37,21 +37,21 @@ typedef struct grpc_endpoint grpc_endpoint;
typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
struct grpc_endpoint_vtable {
- void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb);
- void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb);
- void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *pollset);
- void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pollset);
- void (*delete_from_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pollset);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why);
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
- grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep);
- char *(*get_peer)(grpc_endpoint *ep);
- int (*get_fd)(grpc_endpoint *ep);
+ void (*read)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
+ void (*write)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
+ void (*add_to_pollset)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset);
+ void (*add_to_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset);
+ void (*delete_from_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, grpc_error* why);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
+ grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
+ char* (*get_peer)(grpc_endpoint* ep);
+ int (*get_fd)(grpc_endpoint* ep);
};
/* When data is available on the connection, calls the callback with slices.
@@ -59,14 +59,14 @@ struct grpc_endpoint_vtable {
indicates the endpoint is closed.
Valid slices may be placed into \a slices even when the callback is
invoked with error != GRPC_ERROR_NONE. */
-void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb);
+void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
-char *grpc_endpoint_get_peer(grpc_endpoint *ep);
+char* grpc_endpoint_get_peer(grpc_endpoint* ep);
/* Get the file descriptor used by \a ep. Return -1 if \a ep is not using an fd.
- */
-int grpc_endpoint_get_fd(grpc_endpoint *ep);
+ */
+int grpc_endpoint_get_fd(grpc_endpoint* ep);
/* Write slices out to the socket.
@@ -78,32 +78,32 @@ int grpc_endpoint_get_fd(grpc_endpoint *ep);
No guarantee is made to the content of slices after a write EXCEPT that
it is a valid slice buffer.
*/
-void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb);
+void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb);
/* Causes any pending and future read/write callbacks to run immediately with
success==0 */
-void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_error *why);
-void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep);
+void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why);
+void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
/* Add an endpoint to a pollset or pollset_set, so that when the pollset is
polled, events from this endpoint are considered */
-void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *pollset);
-void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *ep,
- grpc_pollset_set *pollset_set);
+void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset);
+void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set);
/* Delete an endpoint from a pollset_set */
-void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *ep,
- grpc_pollset_set *pollset_set);
+void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set);
-grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint);
+grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint);
struct grpc_endpoint {
- const grpc_endpoint_vtable *vtable;
+ const grpc_endpoint_vtable* vtable;
};
#ifdef __cplusplus
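
A hedged sketch of driving the callback-based read path above (not from this patch): arm grpc_endpoint_read() with a closure; when data or an error arrives, the closure runs with the result in the slice buffer. grpc_slice_buffer_init, GRPC_CLOSURE_INIT and grpc_schedule_on_exec_ctx are assumed from slice_buffer.h / closure.h, and `ep` is any concrete endpoint implementation.

typedef struct {
  grpc_endpoint* ep;
  grpc_slice_buffer incoming;
  grpc_closure on_read_done;
} endpoint_reader;

static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
  endpoint_reader* r = (endpoint_reader*)arg;
  if (error != GRPC_ERROR_NONE) {
    /* per the comment above, an error here indicates the endpoint is closed */
    grpc_endpoint_shutdown(exec_ctx, r->ep, GRPC_ERROR_REF(error));
    return;
  }
  /* consume r->incoming here, then re-arm the read */
  grpc_endpoint_read(exec_ctx, r->ep, &r->incoming, &r->on_read_done);
}

static void start_reading(grpc_exec_ctx* exec_ctx, endpoint_reader* r,
                          grpc_endpoint* ep) {
  r->ep = ep;
  grpc_slice_buffer_init(&r->incoming);
  GRPC_CLOSURE_INIT(&r->on_read_done, on_read_done, r, grpc_schedule_on_exec_ctx);
  grpc_endpoint_read(exec_ctx, r->ep, &r->incoming, &r->on_read_done);
}
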
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index ee91795749..219eea8550 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -26,12 +26,12 @@ extern "C" {
#endif
typedef struct {
- grpc_endpoint *client;
- grpc_endpoint *server;
+ grpc_endpoint* client;
+ grpc_endpoint* server;
} grpc_endpoint_pair;
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- grpc_channel_args *args);
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
+ grpc_channel_args* args);
#ifdef __cplusplus
}
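
A minimal sketch (not from this patch) of creating and tearing down a pair; the NULL channel args are illustrative, and GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish appear elsewhere in this patch.

static void endpoint_pair_sketch(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair("sketch", NULL);
  /* p.client and p.server are two connected endpoints: bytes written on one
     become readable on the other (see endpoint.h above). */
  grpc_endpoint_destroy(&exec_ctx, p.client);
  grpc_endpoint_destroy(&exec_ctx, p.server);
  grpc_exec_ctx_finish(&exec_ctx);
}
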
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc
index 3ade2148ba..f5f59f9917 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.cc
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
@@ -47,11 +47,11 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE);
}
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- grpc_channel_args *args) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
+ grpc_channel_args* args) {
int sv[2];
grpc_endpoint_pair p;
- char *final_name;
+ char* final_name;
create_sockets(sv);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
diff --git a/src/core/lib/iomgr/endpoint_pair_uv.cc b/src/core/lib/iomgr/endpoint_pair_uv.cc
index ff72fe0492..128a947d1b 100644
--- a/src/core/lib/iomgr/endpoint_pair_uv.cc
+++ b/src/core/lib/iomgr/endpoint_pair_uv.cc
@@ -26,8 +26,8 @@
#include "src/core/lib/iomgr/endpoint_pair.h"
-grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
- grpc_channel_args *args) {
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
+ grpc_channel_args* args) {
grpc_endpoint_pair endpoint_pair;
// TODO(mlumish): implement this properly under libuv
GPR_ASSERT(false &&
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.cc b/src/core/lib/iomgr/endpoint_pair_windows.cc
index 782fa2fd69..afa995a1c7 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.cc
+++ b/src/core/lib/iomgr/endpoint_pair_windows.cc
@@ -44,19 +44,19 @@ static void create_sockets(SOCKET sv[2]) {
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_family = AF_INET;
- GPR_ASSERT(bind(lst_sock, (struct sockaddr *)&addr, sizeof(addr)) !=
+ GPR_ASSERT(bind(lst_sock, (struct sockaddr*)&addr, sizeof(addr)) !=
SOCKET_ERROR);
GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR);
- GPR_ASSERT(getsockname(lst_sock, (struct sockaddr *)&addr, &addr_len) !=
+ GPR_ASSERT(getsockname(lst_sock, (struct sockaddr*)&addr, &addr_len) !=
SOCKET_ERROR);
cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
- GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
- NULL, NULL, NULL) == 0);
- svr_sock = accept(lst_sock, (struct sockaddr *)&addr, &addr_len);
+ GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL,
+ NULL, NULL) == 0);
+ svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);
closesocket(lst_sock);
@@ -68,7 +68,7 @@ static void create_sockets(SOCKET sv[2]) {
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
- const char *name, grpc_channel_args *channel_args) {
+ const char* name, grpc_channel_args* channel_args) {
SOCKET sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc
index 2ea6cf1301..123ff72851 100644
--- a/src/core/lib/iomgr/error.cc
+++ b/src/core/lib/iomgr/error.cc
@@ -42,7 +42,7 @@ grpc_tracer_flag grpc_trace_error_refcount =
GRPC_TRACER_INITIALIZER(false, "error_refcount");
#endif
-static const char *error_int_name(grpc_error_ints key) {
+static const char* error_int_name(grpc_error_ints key) {
switch (key) {
case GRPC_ERROR_INT_ERRNO:
return "errno";
@@ -80,7 +80,7 @@ static const char *error_int_name(grpc_error_ints key) {
GPR_UNREACHABLE_CODE(return "unknown");
}
-static const char *error_str_name(grpc_error_strs key) {
+static const char* error_str_name(grpc_error_strs key) {
switch (key) {
case GRPC_ERROR_STR_KEY:
return "key";
@@ -112,7 +112,7 @@ static const char *error_str_name(grpc_error_strs key) {
GPR_UNREACHABLE_CODE(return "unknown");
}
-static const char *error_time_name(grpc_error_times key) {
+static const char* error_time_name(grpc_error_times key) {
switch (key) {
case GRPC_ERROR_TIME_CREATED:
return "created";
@@ -122,13 +122,13 @@ static const char *error_time_name(grpc_error_times key) {
GPR_UNREACHABLE_CODE(return "unknown");
}
-bool grpc_error_is_special(grpc_error *err) {
+bool grpc_error_is_special(grpc_error* err) {
return err == GRPC_ERROR_NONE || err == GRPC_ERROR_OOM ||
err == GRPC_ERROR_CANCELLED;
}
#ifndef NDEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) {
+grpc_error* grpc_error_ref(grpc_error* err, const char* file, int line) {
if (grpc_error_is_special(err)) return err;
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
@@ -139,17 +139,17 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) {
return err;
}
#else
-grpc_error *grpc_error_ref(grpc_error *err) {
+grpc_error* grpc_error_ref(grpc_error* err) {
if (grpc_error_is_special(err)) return err;
gpr_ref(&err->atomics.refs);
return err;
}
#endif
-static void unref_errs(grpc_error *err) {
+static void unref_errs(grpc_error* err) {
uint8_t slot = err->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot);
+ grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
GRPC_ERROR_UNREF(lerr->err);
GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX
: lerr->next != UINT8_MAX);
@@ -163,25 +163,25 @@ static void unref_slice(grpc_slice slice) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void unref_strs(grpc_error *err) {
+static void unref_strs(grpc_error* err) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- unref_slice(*(grpc_slice *)(err->arena + slot));
+ unref_slice(*(grpc_slice*)(err->arena + slot));
}
}
}
-static void error_destroy(grpc_error *err) {
+static void error_destroy(grpc_error* err) {
GPR_ASSERT(!grpc_error_is_special(err));
unref_errs(err);
unref_strs(err);
- gpr_free((void *)gpr_atm_acq_load(&err->atomics.error_string));
+ gpr_free((void*)gpr_atm_acq_load(&err->atomics.error_string));
gpr_free(err);
}
#ifndef NDEBUG
-void grpc_error_unref(grpc_error *err, const char *file, int line) {
+void grpc_error_unref(grpc_error* err, const char* file, int line) {
if (grpc_error_is_special(err)) return;
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
@@ -193,7 +193,7 @@ void grpc_error_unref(grpc_error *err, const char *file, int line) {
}
}
#else
-void grpc_error_unref(grpc_error *err) {
+void grpc_error_unref(grpc_error* err) {
if (grpc_error_is_special(err)) return;
if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
@@ -201,7 +201,7 @@ void grpc_error_unref(grpc_error *err) {
}
#endif
-static uint8_t get_placement(grpc_error **err, size_t size) {
+static uint8_t get_placement(grpc_error** err, size_t size) {
GPR_ASSERT(*err);
uint8_t slots = (uint8_t)(size / sizeof(intptr_t));
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
@@ -211,9 +211,9 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
return UINT8_MAX;
}
#ifndef NDEBUG
- grpc_error *orig = *err;
+ grpc_error* orig = *err;
#endif
- *err = (grpc_error *)gpr_realloc(
+ *err = (grpc_error*)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -228,7 +228,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
return placement;
}
-static void internal_set_int(grpc_error **err, grpc_error_ints which,
+static void internal_set_int(grpc_error** err, grpc_error_ints which,
intptr_t value) {
uint8_t slot = (*err)->ints[which];
if (slot == UINT8_MAX) {
@@ -243,36 +243,36 @@ static void internal_set_int(grpc_error **err, grpc_error_ints which,
(*err)->arena[slot] = value;
}
-static void internal_set_str(grpc_error **err, grpc_error_strs which,
+static void internal_set_str(grpc_error** err, grpc_error_strs which,
grpc_slice value) {
uint8_t slot = (*err)->strs[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
if (slot == UINT8_MAX) {
- const char *str = grpc_slice_to_c_string(value);
+ const char* str = grpc_slice_to_c_string(value);
gpr_log(GPR_ERROR, "Error %p is full, dropping string {\"%s\":\"%s\"}",
*err, error_str_name(which), str);
- gpr_free((void *)str);
+ gpr_free((void*)str);
return;
}
} else {
- unref_slice(*(grpc_slice *)((*err)->arena + slot));
+ unref_slice(*(grpc_slice*)((*err)->arena + slot));
}
(*err)->strs[which] = slot;
memcpy((*err)->arena + slot, &value, sizeof(value));
}
-static char *fmt_time(gpr_timespec tm);
-static void internal_set_time(grpc_error **err, grpc_error_times which,
+static char* fmt_time(gpr_timespec tm);
+static void internal_set_time(grpc_error** err, grpc_error_times which,
gpr_timespec value) {
uint8_t slot = (*err)->times[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
if (slot == UINT8_MAX) {
- const char *time_str = fmt_time(value);
+ const char* time_str = fmt_time(value);
gpr_log(GPR_ERROR, "Error %p is full, dropping \"%s\":\"%s\"}", *err,
error_time_name(which), time_str);
- gpr_free((void *)time_str);
+ gpr_free((void*)time_str);
return;
}
}
@@ -280,7 +280,7 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
memcpy((*err)->arena + slot, &value, sizeof(value));
}
-static void internal_add_error(grpc_error **err, grpc_error *new_err) {
+static void internal_add_error(grpc_error** err, grpc_error* new_err) {
grpc_linked_error new_last = {new_err, UINT8_MAX};
uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
if (slot == UINT8_MAX) {
@@ -295,8 +295,8 @@ static void internal_add_error(grpc_error **err, grpc_error *new_err) {
(*err)->first_err = slot;
} else {
GPR_ASSERT((*err)->last_err != UINT8_MAX);
- grpc_linked_error *old_last =
- (grpc_linked_error *)((*err)->arena + (*err)->last_err);
+ grpc_linked_error* old_last =
+ (grpc_linked_error*)((*err)->arena + (*err)->last_err);
old_last->next = slot;
(*err)->last_err = slot;
}
@@ -316,14 +316,14 @@ static void internal_add_error(grpc_error **err, grpc_error *new_err) {
// It is very common to include an extra int and string in an error
#define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME)
-grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
- grpc_error **referencing,
+grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
+ grpc_error** referencing,
size_t num_referencing) {
GPR_TIMER_BEGIN("grpc_error_create", 0);
uint8_t initial_arena_capacity = (uint8_t)(
DEFAULT_ERROR_CAPACITY +
(uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
- grpc_error *err = (grpc_error *)gpr_malloc(
+ grpc_error* err = (grpc_error*)gpr_malloc(
sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL
return GRPC_ERROR_OOM;
@@ -364,27 +364,27 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
return err;
}
-static void ref_strs(grpc_error *err) {
+static void ref_strs(grpc_error* err) {
for (size_t i = 0; i < GRPC_ERROR_STR_MAX; ++i) {
uint8_t slot = err->strs[i];
if (slot != UINT8_MAX) {
- grpc_slice_ref_internal(*(grpc_slice *)(err->arena + slot));
+ grpc_slice_ref_internal(*(grpc_slice*)(err->arena + slot));
}
}
}
-static void ref_errs(grpc_error *err) {
+static void ref_errs(grpc_error* err) {
uint8_t slot = err->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot);
+ grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
GRPC_ERROR_REF(lerr->err);
slot = lerr->next;
}
}
-static grpc_error *copy_error_and_unref(grpc_error *in) {
+static grpc_error* copy_error_and_unref(grpc_error* in) {
GPR_TIMER_BEGIN("copy_error_and_unref", 0);
- grpc_error *out;
+ grpc_error* out;
if (grpc_error_is_special(in)) {
out = GRPC_ERROR_CREATE_FROM_STATIC_STRING("unknown");
if (in == GRPC_ERROR_NONE) {
@@ -408,8 +408,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
- out = (grpc_error *)gpr_malloc(sizeof(*in) +
- new_arena_capacity * sizeof(intptr_t));
+ out = (grpc_error*)gpr_malloc(sizeof(*in) +
+ new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -417,7 +417,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
#endif
// bulk memcpy of the rest of the struct.
size_t skip = sizeof(&out->atomics);
- memcpy((void *)((uintptr_t)out + skip), (void *)((uintptr_t)in + skip),
+ memcpy((void*)((uintptr_t)out + skip), (void*)((uintptr_t)in + skip),
sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip);
// manually set the atomics and the new capacity
gpr_atm_no_barrier_store(&out->atomics.error_string, 0);
@@ -431,19 +431,19 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
return out;
}
-grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
+grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which,
intptr_t value) {
GPR_TIMER_BEGIN("grpc_error_set_int", 0);
- grpc_error *new_err = copy_error_and_unref(src);
+ grpc_error* new_err = copy_error_and_unref(src);
internal_set_int(&new_err, which, value);
GPR_TIMER_END("grpc_error_set_int", 0);
return new_err;
}
typedef struct {
- grpc_error *error;
+ grpc_error* error;
grpc_status_code code;
- const char *msg;
+ const char* msg;
} special_error_status_map;
static special_error_status_map error_status_map[] = {
{GRPC_ERROR_NONE, GRPC_STATUS_OK, ""},
@@ -451,7 +451,7 @@ static special_error_status_map error_status_map[] = {
{GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
};
-bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
+bool grpc_error_get_int(grpc_error* err, grpc_error_ints which, intptr_t* p) {
GPR_TIMER_BEGIN("grpc_error_get_int", 0);
if (grpc_error_is_special(err)) {
if (which == GRPC_ERROR_INT_GRPC_STATUS) {
@@ -476,17 +476,17 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
return false;
}
-grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
+grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
- grpc_error *new_err = copy_error_and_unref(src);
+ grpc_error* new_err = copy_error_and_unref(src);
internal_set_str(&new_err, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
return new_err;
}
-bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
- grpc_slice *str) {
+bool grpc_error_get_str(grpc_error* err, grpc_error_strs which,
+ grpc_slice* str) {
if (grpc_error_is_special(err)) {
if (which == GRPC_ERROR_STR_GRPC_MESSAGE) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
@@ -500,53 +500,53 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
}
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- *str = *(grpc_slice *)(err->arena + slot);
+ *str = *(grpc_slice*)(err->arena + slot);
return true;
} else {
return false;
}
}
-grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
+grpc_error* grpc_error_add_child(grpc_error* src, grpc_error* child) {
GPR_TIMER_BEGIN("grpc_error_add_child", 0);
- grpc_error *new_err = copy_error_and_unref(src);
+ grpc_error* new_err = copy_error_and_unref(src);
internal_add_error(&new_err, child);
GPR_TIMER_END("grpc_error_add_child", 0);
return new_err;
}
-static const char *no_error_string = "\"No Error\"";
-static const char *oom_error_string = "\"Out of memory\"";
-static const char *cancelled_error_string = "\"Cancelled\"";
+static const char* no_error_string = "\"No Error\"";
+static const char* oom_error_string = "\"Out of memory\"";
+static const char* cancelled_error_string = "\"Cancelled\"";
typedef struct {
- char *key;
- char *value;
+ char* key;
+ char* value;
} kv_pair;
typedef struct {
- kv_pair *kvs;
+ kv_pair* kvs;
size_t num_kvs;
size_t cap_kvs;
} kv_pairs;
-static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
+static void append_chr(char c, char** s, size_t* sz, size_t* cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
- *s = (char *)gpr_realloc(*s, *cap);
+ *s = (char*)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
-static void append_str(const char *str, char **s, size_t *sz, size_t *cap) {
- for (const char *c = str; *c; c++) {
+static void append_str(const char* str, char** s, size_t* sz, size_t* cap) {
+ for (const char* c = str; *c; c++) {
append_chr(*c, s, sz, cap);
}
}
-static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
- size_t *cap) {
- static const char *hex = "0123456789abcdef";
+static void append_esc_str(const uint8_t* str, size_t len, char** s, size_t* sz,
+ size_t* cap) {
+ static const char* hex = "0123456789abcdef";
append_chr('"', s, sz, cap);
for (size_t i = 0; i < len; i++, str++) {
if (*str < 32 || *str >= 127) {
@@ -582,28 +582,28 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
append_chr('"', s, sz, cap);
}
-static void append_kv(kv_pairs *kvs, char *key, char *value) {
+static void append_kv(kv_pairs* kvs, char* key, char* value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
kvs->kvs =
- (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
+ (kv_pair*)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
kvs->num_kvs++;
}
-static char *key_int(grpc_error_ints which) {
+static char* key_int(grpc_error_ints which) {
return gpr_strdup(error_int_name(which));
}
-static char *fmt_int(intptr_t p) {
- char *s;
+static char* fmt_int(intptr_t p) {
+ char* s;
gpr_asprintf(&s, "%" PRIdPTR, p);
return s;
}
-static void collect_ints_kvs(grpc_error *err, kv_pairs *kvs) {
+static void collect_ints_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_INT_MAX; ++which) {
uint8_t slot = err->ints[which];
if (slot != UINT8_MAX) {
@@ -613,37 +613,37 @@ static void collect_ints_kvs(grpc_error *err, kv_pairs *kvs) {
}
}
-static char *key_str(grpc_error_strs which) {
+static char* key_str(grpc_error_strs which) {
return gpr_strdup(error_str_name(which));
}
-static char *fmt_str(grpc_slice slice) {
- char *s = NULL;
+static char* fmt_str(grpc_slice slice) {
+ char* s = NULL;
size_t sz = 0;
size_t cap = 0;
- append_esc_str((const uint8_t *)GRPC_SLICE_START_PTR(slice),
+ append_esc_str((const uint8_t*)GRPC_SLICE_START_PTR(slice),
GRPC_SLICE_LENGTH(slice), &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;
}
-static void collect_strs_kvs(grpc_error *err, kv_pairs *kvs) {
+static void collect_strs_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
append_kv(kvs, key_str((grpc_error_strs)which),
- fmt_str(*(grpc_slice *)(err->arena + slot)));
+ fmt_str(*(grpc_slice*)(err->arena + slot)));
}
}
}
-static char *key_time(grpc_error_times which) {
+static char* key_time(grpc_error_times which) {
return gpr_strdup(error_time_name(which));
}
-static char *fmt_time(gpr_timespec tm) {
- char *out;
- const char *pfx = "!!";
+static char* fmt_time(gpr_timespec tm) {
+ char* out;
+ const char* pfx = "!!";
switch (tm.clock_type) {
case GPR_CLOCK_MONOTONIC:
pfx = "@monotonic:";
@@ -662,24 +662,24 @@ static char *fmt_time(gpr_timespec tm) {
return out;
}
-static void collect_times_kvs(grpc_error *err, kv_pairs *kvs) {
+static void collect_times_kvs(grpc_error* err, kv_pairs* kvs) {
for (size_t which = 0; which < GRPC_ERROR_TIME_MAX; ++which) {
uint8_t slot = err->times[which];
if (slot != UINT8_MAX) {
append_kv(kvs, key_time((grpc_error_times)which),
- fmt_time(*(gpr_timespec *)(err->arena + slot)));
+ fmt_time(*(gpr_timespec*)(err->arena + slot)));
}
}
}
-static void add_errs(grpc_error *err, char **s, size_t *sz, size_t *cap) {
+static void add_errs(grpc_error* err, char** s, size_t* sz, size_t* cap) {
uint8_t slot = err->first_err;
bool first = true;
while (slot != UINT8_MAX) {
- grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot);
+ grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
if (!first) append_chr(',', s, sz, cap);
first = false;
- const char *e = grpc_error_string(lerr->err);
+ const char* e = grpc_error_string(lerr->err);
append_str(e, s, sz, cap);
GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX
: lerr->next != UINT8_MAX);
@@ -687,8 +687,8 @@ static void add_errs(grpc_error *err, char **s, size_t *sz, size_t *cap) {
}
}
-static char *errs_string(grpc_error *err) {
- char *s = NULL;
+static char* errs_string(grpc_error* err) {
+ char* s = NULL;
size_t sz = 0;
size_t cap = 0;
append_chr('[', &s, &sz, &cap);
@@ -698,22 +698,22 @@ static char *errs_string(grpc_error *err) {
return s;
}
-static int cmp_kvs(const void *a, const void *b) {
- const kv_pair *ka = (const kv_pair *)a;
- const kv_pair *kb = (const kv_pair *)b;
+static int cmp_kvs(const void* a, const void* b) {
+ const kv_pair* ka = (const kv_pair*)a;
+ const kv_pair* kb = (const kv_pair*)b;
return strcmp(ka->key, kb->key);
}
-static char *finish_kvs(kv_pairs *kvs) {
- char *s = NULL;
+static char* finish_kvs(kv_pairs* kvs) {
+ char* s = NULL;
size_t sz = 0;
size_t cap = 0;
append_chr('{', &s, &sz, &cap);
for (size_t i = 0; i < kvs->num_kvs; i++) {
if (i != 0) append_chr(',', &s, &sz, &cap);
- append_esc_str((const uint8_t *)kvs->kvs[i].key, strlen(kvs->kvs[i].key),
- &s, &sz, &cap);
+ append_esc_str((const uint8_t*)kvs->kvs[i].key, strlen(kvs->kvs[i].key), &s,
+ &sz, &cap);
gpr_free(kvs->kvs[i].key);
append_chr(':', &s, &sz, &cap);
append_str(kvs->kvs[i].value, &s, &sz, &cap);
@@ -726,16 +726,16 @@ static char *finish_kvs(kv_pairs *kvs) {
return s;
}
-const char *grpc_error_string(grpc_error *err) {
+const char* grpc_error_string(grpc_error* err) {
GPR_TIMER_BEGIN("grpc_error_string", 0);
if (err == GRPC_ERROR_NONE) return no_error_string;
if (err == GRPC_ERROR_OOM) return oom_error_string;
if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
- void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
+ void* p = (void*)gpr_atm_acq_load(&err->atomics.error_string);
if (p != NULL) {
GPR_TIMER_END("grpc_error_string", 0);
- return (const char *)p;
+ return (const char*)p;
}
kv_pairs kvs;
@@ -750,19 +750,19 @@ const char *grpc_error_string(grpc_error *err) {
qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs);
- char *out = finish_kvs(&kvs);
+ char* out = finish_kvs(&kvs);
if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) {
gpr_free(out);
- out = (char *)gpr_atm_no_barrier_load(&err->atomics.error_string);
+ out = (char*)gpr_atm_no_barrier_load(&err->atomics.error_string);
}
GPR_TIMER_END("grpc_error_string", 0);
return out;
}
-grpc_error *grpc_os_error(const char *file, int line, int err,
- const char *call_name) {
+grpc_error* grpc_os_error(const char* file, int line, int err,
+ const char* call_name) {
return grpc_error_set_str(
grpc_error_set_str(
grpc_error_set_int(
@@ -776,10 +776,10 @@ grpc_error *grpc_os_error(const char *file, int line, int err,
}
#ifdef GPR_WINDOWS
-grpc_error *grpc_wsa_error(const char *file, int line, int err,
- const char *call_name) {
- char *utf8_message = gpr_format_message(err);
- grpc_error *error = grpc_error_set_str(
+grpc_error* grpc_wsa_error(const char* file, int line, int err,
+ const char* call_name) {
+ char* utf8_message = gpr_format_message(err);
+ grpc_error* error = grpc_error_set_str(
grpc_error_set_str(
grpc_error_set_int(
grpc_error_create(file, line,
@@ -793,10 +793,10 @@ grpc_error *grpc_wsa_error(const char *file, int line, int err,
}
#endif
-bool grpc_log_if_error(const char *what, grpc_error *error, const char *file,
+bool grpc_log_if_error(const char* what, grpc_error* error, const char* file,
int line) {
if (error == GRPC_ERROR_NONE) return true;
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "%s: %s", what, msg);
GRPC_ERROR_UNREF(error);
return false;
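
A hedged read-side sketch of the API implemented above (not from this patch): pull the grpc status out of an error and render it. grpc_error_get_int, GRPC_ERROR_INT_GRPC_STATUS and grpc_error_string are taken from this file; GRPC_STATUS_UNKNOWN comes from grpc/status.h.

static void log_error_status(grpc_error* error) {
  intptr_t status = GRPC_STATUS_UNKNOWN;
  if (!grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &status)) {
    /* no status attached to this error; keep the UNKNOWN default.  The
       special errors (NONE/OOM/CANCELLED) are handled via error_status_map
       above, so they return true here. */
  }
  /* grpc_error_string caches its rendering inside the error (the CAS on
     atomics.error_string above), so the returned string is owned by the
     error and must not be freed by the caller. */
  gpr_log(GPR_INFO, "status=%" PRIdPTR " error=%s", status,
          grpc_error_string(error));
}
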
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index b36330a7ab..36ba440c8a 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -122,15 +122,15 @@ typedef enum {
/// They are always even so that other code (particularly combiner locks,
/// polling engines) can safely use the lower bit for themselves.
-#define GRPC_ERROR_NONE ((grpc_error *)NULL)
-#define GRPC_ERROR_OOM ((grpc_error *)2)
-#define GRPC_ERROR_CANCELLED ((grpc_error *)4)
+#define GRPC_ERROR_NONE ((grpc_error*)NULL)
+#define GRPC_ERROR_OOM ((grpc_error*)2)
+#define GRPC_ERROR_CANCELLED ((grpc_error*)4)
-const char *grpc_error_string(grpc_error *error);
+const char* grpc_error_string(grpc_error* error);
/// Create an error - but use GRPC_ERROR_CREATE instead
-grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
- grpc_error **referencing, size_t num_referencing);
+grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
+ grpc_error** referencing, size_t num_referencing);
/// Create an error (this is the preferred way of generating an error that is
/// not due to a system call - for system calls, use GRPC_OS_ERROR or
/// GRPC_WSA_ERROR as appropriate)
@@ -156,44 +156,44 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
errs, count)
#ifndef NDEBUG
-grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line);
-void grpc_error_unref(grpc_error *err, const char *file, int line);
+grpc_error* grpc_error_ref(grpc_error* err, const char* file, int line);
+void grpc_error_unref(grpc_error* err, const char* file, int line);
#define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__)
#define GRPC_ERROR_UNREF(err) grpc_error_unref(err, __FILE__, __LINE__)
#else
-grpc_error *grpc_error_ref(grpc_error *err);
-void grpc_error_unref(grpc_error *err);
+grpc_error* grpc_error_ref(grpc_error* err);
+void grpc_error_unref(grpc_error* err);
#define GRPC_ERROR_REF(err) grpc_error_ref(err)
#define GRPC_ERROR_UNREF(err) grpc_error_unref(err)
#endif
-grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
+grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which,
intptr_t value) GRPC_MUST_USE_RESULT;
-bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p);
-grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
+bool grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p);
+grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) GRPC_MUST_USE_RESULT;
/// Returns false if the specified string is not set.
/// Caller does NOT own the slice.
-bool grpc_error_get_str(grpc_error *error, grpc_error_strs which,
- grpc_slice *s);
+bool grpc_error_get_str(grpc_error* error, grpc_error_strs which,
+ grpc_slice* s);
/// Add a child error: an error that is believed to have contributed to this
/// error occurring. Allows root causing high level errors from lower level
/// errors that contributed to them.
-grpc_error *grpc_error_add_child(grpc_error *src,
- grpc_error *child) GRPC_MUST_USE_RESULT;
-grpc_error *grpc_os_error(const char *file, int line, int err,
- const char *call_name) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_error_add_child(grpc_error* src,
+ grpc_error* child) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_os_error(const char* file, int line, int err,
+ const char* call_name) GRPC_MUST_USE_RESULT;
/// create an error associated with errno!=0 (an 'operating system' error)
#define GRPC_OS_ERROR(err, call_name) \
grpc_os_error(__FILE__, __LINE__, err, call_name)
-grpc_error *grpc_wsa_error(const char *file, int line, int err,
- const char *call_name) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_wsa_error(const char* file, int line, int err,
+ const char* call_name) GRPC_MUST_USE_RESULT;
/// windows only: create an error associated with WSAGetLastError()!=0
#define GRPC_WSA_ERROR(err, call_name) \
grpc_wsa_error(__FILE__, __LINE__, err, call_name)
-bool grpc_log_if_error(const char *what, grpc_error *error, const char *file,
+bool grpc_log_if_error(const char* what, grpc_error* error, const char* file,
int line);
#define GRPC_LOG_IF_ERROR(what, error) \
grpc_log_if_error((what), (error), __FILE__, __LINE__)
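
A hedged write-side sketch for the declarations above (not from this patch). grpc_error_set_int / grpc_error_set_str consume the error they are given and return a possibly reallocated one, which is why they are marked GRPC_MUST_USE_RESULT: always continue with the return value. grpc_slice_from_static_string and ENOENT are assumed from slice.h / errno.h; the status code and message are illustrative.

static grpc_error* read_config_sketch(void) {
  /* pretend an open() just failed with ENOENT */
  grpc_error* error = GRPC_OS_ERROR(ENOENT, "open");
  error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
                             GRPC_STATUS_UNAVAILABLE);
  error = grpc_error_set_str(
      error, GRPC_ERROR_STR_GRPC_MESSAGE,
      grpc_slice_from_static_string("config file unavailable"));
  return error;
}

static void read_config_caller(void) {
  /* GRPC_LOG_IF_ERROR logs and unrefs a non-NONE error and returns false */
  if (!GRPC_LOG_IF_ERROR("read_config_sketch", read_config_sketch())) {
    /* handle the failure */
  }
}
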
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
index 8746d5d353..acf6e04e9c 100644
--- a/src/core/lib/iomgr/error_internal.h
+++ b/src/core/lib/iomgr/error_internal.h
@@ -31,7 +31,7 @@ extern "C" {
typedef struct grpc_linked_error grpc_linked_error;
struct grpc_linked_error {
- grpc_error *err;
+ grpc_error* err;
uint8_t next;
};
@@ -59,7 +59,7 @@ struct grpc_error {
intptr_t arena[0];
};
-bool grpc_error_is_special(grpc_error *err);
+bool grpc_error_is_special(grpc_error* err);
#ifdef __cplusplus
}
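
For context (not from this patch): child errors are stored inline in the arena as grpc_linked_error slots chained through the uint8_t `next` index, with UINT8_MAX as the terminator — the same walk performed by unref_errs/ref_errs/add_errs in error.cc above. A minimal traversal sketch:

static void visit_child_errors(grpc_error* err) {
  uint8_t slot = err->first_err;
  while (slot != UINT8_MAX) {
    grpc_linked_error* lerr = (grpc_linked_error*)(err->arena + slot);
    gpr_log(GPR_DEBUG, "child error: %s", grpc_error_string(lerr->err));
    slot = lerr->next;
  }
}
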
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 6126e2771c..504c659874 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -114,7 +114,7 @@ struct grpc_fd {
gpr_atm read_closure;
gpr_atm write_closure;
- struct grpc_fd *freelist_next;
+ struct grpc_fd* freelist_next;
/* The pollset that last noticed that the fd is readable. The actual type
* stored in this is (grpc_pollset *) */
@@ -132,7 +132,7 @@ static void fd_global_shutdown(void);
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
-static const char *kick_state_string(kick_state st) {
+static const char* kick_state_string(kick_state st) {
switch (st) {
case UNKICKED:
return "UNKICKED";
@@ -148,8 +148,8 @@ struct grpc_pollset_worker {
kick_state state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
- grpc_pollset_worker *next;
- grpc_pollset_worker *prev;
+ grpc_pollset_worker* next;
+ grpc_pollset_worker* prev;
gpr_cv cv;
grpc_closure_list schedule_on_end_work;
};
@@ -164,29 +164,29 @@ struct grpc_pollset_worker {
typedef struct pollset_neighborhood {
gpr_mu mu;
- grpc_pollset *active_root;
+ grpc_pollset* active_root;
char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;
struct grpc_pollset {
gpr_mu mu;
- pollset_neighborhood *neighborhood;
+ pollset_neighborhood* neighborhood;
bool reassigning_neighborhood;
- grpc_pollset_worker *root_worker;
+ grpc_pollset_worker* root_worker;
bool kicked_without_poller;
/* Set to true if the pollset is observed to have no workers available to
poll */
bool seen_inactive;
bool shutting_down; /* Is the pollset shutting down ? */
- grpc_closure *shutdown_closure; /* Called after shutdown is complete */
+ grpc_closure* shutdown_closure; /* Called after shutdown is complete */
/* Number of workers who are *about-to* attach themselves to the pollset
* worker list */
int begin_refs;
- grpc_pollset *next;
- grpc_pollset *prev;
+ grpc_pollset* next;
+ grpc_pollset* prev;
};
/*******************************************************************************
@@ -201,8 +201,8 @@ struct grpc_pollset_set {
* Common helpers
*/
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
+static bool append_error(grpc_error** composite, grpc_error* error,
+ const char* desc) {
if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
@@ -233,7 +233,7 @@ static bool append_error(grpc_error **composite, grpc_error *error,
* alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
* case occurs. */
-static grpc_fd *fd_freelist = NULL;
+static grpc_fd* fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
@@ -242,15 +242,15 @@ static void fd_global_shutdown(void) {
gpr_mu_lock(&fd_freelist_mu);
gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
+ grpc_fd* fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
gpr_free(fd);
}
gpr_mu_destroy(&fd_freelist_mu);
}
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
+static grpc_fd* fd_create(int fd, const char* name) {
+ grpc_fd* new_fd = NULL;
gpr_mu_lock(&fd_freelist_mu);
if (fd_freelist != NULL) {
@@ -260,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -270,7 +270,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
new_fd->freelist_next = NULL;
- char *fd_name;
+ char* fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
@@ -290,13 +290,13 @@ static grpc_fd *fd_create(int fd, const char *name) {
return new_fd;
}
-static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }
+static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
/* if 'releasing_fd' is true, it means that we are going to detach the internal
* fd from grpc_fd structure (i.e which means we should not be calling
* shutdown() syscall on that fd) */
-static void fd_shutdown_internal(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_error *why, bool releasing_fd) {
+static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_error* why, bool releasing_fd) {
if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
GRPC_ERROR_REF(why))) {
if (!releasing_fd) {
@@ -308,14 +308,14 @@ static void fd_shutdown_internal(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
fd_shutdown_internal(exec_ctx, fd, why, false);
}
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- grpc_error *error = GRPC_ERROR_NONE;
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
+ bool already_closed, const char* reason) {
+ grpc_error* error = GRPC_ERROR_NONE;
bool is_release_fd = (release_fd != NULL);
if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
@@ -344,34 +344,34 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_mu_unlock(&fd_freelist_mu);
}
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
+ return (grpc_pollset*)notifier;
}
-static bool fd_is_shutdown(grpc_fd *fd) {
+static bool fd_is_shutdown(grpc_fd* fd) {
return grpc_lfev_is_shutdown(&fd->read_closure);
}
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
@@ -385,11 +385,11 @@ GPR_TLS_DECL(g_current_thread_worker);
/* The designated poller */
static gpr_atm g_active_poller;
-static pollset_neighborhood *g_neighborhoods;
+static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;
/* Return true if first in list */
-static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
+static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
if (pollset->root_worker == NULL) {
pollset->root_worker = worker;
worker->next = worker->prev = worker;
@@ -406,8 +406,8 @@ static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
-static worker_remove_result worker_remove(grpc_pollset *pollset,
- grpc_pollset_worker *worker) {
+static worker_remove_result worker_remove(grpc_pollset* pollset,
+ grpc_pollset_worker* worker) {
if (worker == pollset->root_worker) {
if (worker == worker->next) {
pollset->root_worker = NULL;
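
For context (not from this patch): workers on a pollset form a circular doubly-linked list rooted at pollset->root_worker, which is what worker_insert/worker_remove above maintain. A sketch of the insertion logic, with the non-empty branch (elided from the hunk) written under the usual splice-before-root assumption:

static void worker_ring_insert_sketch(grpc_pollset* pollset,
                                      grpc_pollset_worker* worker) {
  if (pollset->root_worker == NULL) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;  /* single-element ring */
  } else {
    worker->next = pollset->root_worker;   /* splice in just before the root */
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
  }
}
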
@@ -429,12 +429,12 @@ static size_t choose_neighborhood(void) {
return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
-static grpc_error *pollset_global_init(void) {
+static grpc_error* pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
gpr_atm_no_barrier_store(&g_active_poller, 0);
global_wakeup_fd.read_fd = -1;
- grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
+ grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (err != GRPC_ERROR_NONE) return err;
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
@@ -444,8 +444,8 @@ static grpc_error *pollset_global_init(void) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
- g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
- sizeof(*g_neighborhoods) * g_num_neighborhoods);
+ g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
+ g_num_neighborhoods);
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_init(&g_neighborhoods[i].mu);
}
@@ -462,7 +462,7 @@ static void pollset_global_shutdown(void) {
gpr_free(g_neighborhoods);
}
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
@@ -476,10 +476,10 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->next = pollset->prev = NULL;
}
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
gpr_mu_lock(&pollset->mu);
if (!pollset->seen_inactive) {
- pollset_neighborhood *neighborhood = pollset->neighborhood;
+ pollset_neighborhood* neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
@@ -504,12 +504,12 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
+static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
GPR_TIMER_BEGIN("pollset_kick_all", 0);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (pollset->root_worker != NULL) {
- grpc_pollset_worker *worker = pollset->root_worker;
+ grpc_pollset_worker* worker = pollset->root_worker;
do {
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
switch (worker->state) {
@@ -540,8 +540,8 @@ static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
return error;
}
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
pollset->begin_refs == 0) {
GPR_TIMER_MARK("pollset_finish_shutdown", 0);
@@ -550,8 +550,8 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
}
}
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_TIMER_BEGIN("pollset_shutdown", 0);
GPR_ASSERT(pollset->shutdown_closure == NULL);
GPR_ASSERT(!pollset->shutting_down);
@@ -562,7 +562,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GPR_TIMER_END("pollset_shutdown", 0);
}
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
@@ -583,10 +583,10 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
called by g_active_poller thread. So there is no need for synchronization
when accessing fields in g_epoll_set */
-static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- static const char *err_desc = "process_events";
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
+ static const char* err_desc = "process_events";
+ grpc_error* error = GRPC_ERROR_NONE;
GPR_TIMER_BEGIN("process_epoll_events", 0);
long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
@@ -595,14 +595,14 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
(idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
idx++) {
long c = cursor++;
- struct epoll_event *ev = &g_epoll_set.events[c];
- void *data_ptr = ev->data.ptr;
+ struct epoll_event* ev = &g_epoll_set.events[c];
+ void* data_ptr = ev->data.ptr;
if (data_ptr == &global_wakeup_fd) {
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
err_desc);
} else {
- grpc_fd *fd = (grpc_fd *)(data_ptr);
+ grpc_fd* fd = (grpc_fd*)(data_ptr);
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
@@ -628,7 +628,7 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
(i.e the designated poller thread) will be calling this function. So there is
no need for any synchronization when accessing fields in g_epoll_set */
-static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
+static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
grpc_millis deadline) {
GPR_TIMER_BEGIN("do_epoll_wait", 0);
@@ -661,9 +661,9 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return GRPC_ERROR_NONE;
}
-static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl,
+static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
GPR_TIMER_BEGIN("begin_worker", 0);
if (worker_hdl != NULL) *worker_hdl = worker;
@@ -685,7 +685,7 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->reassigning_neighborhood = true;
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
}
- pollset_neighborhood *neighborhood = pollset->neighborhood;
+ pollset_neighborhood* neighborhood = pollset->neighborhood;
gpr_mu_unlock(&pollset->mu);
// pollset unlocked: state may change (even worker->kick_state)
retry_lock_neighborhood:
@@ -788,17 +788,17 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
static bool check_neighborhood_for_available_poller(
- grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
+ grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
- grpc_pollset *inspect = neighborhood->active_root;
+ grpc_pollset* inspect = neighborhood->active_root;
if (inspect == NULL) {
break;
}
gpr_mu_lock(&inspect->mu);
GPR_ASSERT(!inspect->seen_inactive);
- grpc_pollset_worker *inspect_worker = inspect->root_worker;
+ grpc_pollset_worker* inspect_worker = inspect->root_worker;
if (inspect_worker != NULL) {
do {
switch (inspect_worker->state) {
@@ -852,9 +852,9 @@ static bool check_neighborhood_for_available_poller(
return found_worker;
}
-static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl) {
+static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
+ grpc_pollset_worker** worker_hdl) {
GPR_TIMER_BEGIN("end_worker", 0);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
@@ -887,7 +887,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
bool found_worker = false;
bool scan_state[MAX_NEIGHBORHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
- pollset_neighborhood *neighborhood =
+ pollset_neighborhood* neighborhood =
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
if (gpr_mu_trylock(&neighborhood->mu)) {
@@ -901,7 +901,7 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
if (scan_state[i]) continue;
- pollset_neighborhood *neighborhood =
+ pollset_neighborhood* neighborhood =
&g_neighborhoods[(poller_neighborhood_idx + i) %
g_num_neighborhoods];
gpr_mu_lock(&neighborhood->mu);
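The two loops above in end_worker() are the same scan run twice: a first pass that only takes gpr_mu_trylock() and records in scan_state[] which neighborhoods it managed to inspect, then a second pass that blocks with gpr_mu_lock() only on the ones the trylock pass skipped. A minimal self-contained sketch of that two-pass idiom, using plain pthreads and made-up names rather than the gRPC types (the mutexes are assumed to be initialized by the caller):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define N_SHARDS 4

typedef struct {
  pthread_mutex_t mu;
  bool has_work; /* stand-in for "an available poller was found" */
} shard;

/* First pass: inspect whichever shards we can grab with trylock and remember
 * which ones we covered.  Second pass: block only on the shards we skipped. */
static bool scan_shards(shard* shards, size_t start) {
  bool found = false;
  bool scanned[N_SHARDS] = {false};
  for (size_t i = 0; !found && i < N_SHARDS; i++) {
    shard* s = &shards[(start + i) % N_SHARDS];
    if (pthread_mutex_trylock(&s->mu) == 0) {
      found = s->has_work;
      scanned[i] = true;
      pthread_mutex_unlock(&s->mu);
    }
  }
  for (size_t i = 0; !found && i < N_SHARDS; i++) {
    if (scanned[i]) continue; /* already covered by the trylock pass */
    shard* s = &shards[(start + i) % N_SHARDS];
    pthread_mutex_lock(&s->mu);
    found = s->has_work;
    pthread_mutex_unlock(&s->mu);
  }
  return found;
}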
@@ -934,12 +934,12 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
- grpc_pollset_worker **worker_hdl,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollset_work";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollset_work";
GPR_TIMER_BEGIN("pollset_work", 0);
if (ps->kicked_without_poller) {
ps->kicked_without_poller = false;
@@ -987,19 +987,19 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
return error;
}
-static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
- grpc_error *ret_err = GRPC_ERROR_NONE;
+ grpc_error* ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
gpr_strvec_init(&log);
- char *tmp;
- gpr_asprintf(
- &tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
- specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset),
- (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
+ char* tmp;
+ gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
+ specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
+ (void*)gpr_tls_get(&g_current_thread_worker),
+ pollset->root_worker);
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
@@ -1021,7 +1021,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
- grpc_pollset_worker *root_worker = pollset->root_worker;
+ grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == NULL) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
pollset->kicked_without_poller = true;
@@ -1030,7 +1030,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
goto done;
}
- grpc_pollset_worker *next_worker = root_worker->next;
+ grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
@@ -1048,7 +1048,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} else if (root_worker ==
next_worker && // only try and wake up a poller if
// there is no next worker
- root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
+ root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
@@ -1121,7 +1121,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
- (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
+ (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
@@ -1150,39 +1150,39 @@ done:
return ret_err;
}
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {}
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {}
/*******************************************************************************
* Pollset-set Definitions
*/
-static grpc_pollset_set *pollset_set_create(void) {
- return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
+static grpc_pollset_set* pollset_set_create(void) {
+ return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
}
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {}
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss) {}
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {}
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {}
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {}
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {}
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {}
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {}
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {}
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {}
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {}
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {}
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
/*******************************************************************************
* Event engine binding
@@ -1228,7 +1228,7 @@ static const grpc_event_engine_vtable vtable = {
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
* Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
* support is available */
-const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
+const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
return NULL;
}
@@ -1253,7 +1253,7 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
#include "src/core/lib/iomgr/ev_epoll1_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
-const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
+const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
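Nearly every hunk in this file is the same mechanical rewrite: the '*' of a pointer declaration moves from the declarator to the type. That is what clang-format produces with left pointer alignment (e.g. PointerAlignment: Left, the Google-style default); the project's actual .clang-format contents are not shown here, so the exact option is an assumption. A tiny illustration with throwaway types:

/* Both declarations denote the same type; only the '*' placement differs.
 * Left pointer alignment rewrites the first form into the second, which is
 * the change repeated throughout the hunks above. */
typedef struct error error;     /* illustrative opaque types, not gRPC's */
typedef struct context context;

error *lookup_right_aligned(context *ctx, const char *name);
error* lookup_left_aligned(context* ctx, const char* name);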
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.h b/src/core/lib/iomgr/ev_epoll1_linux.h
index b437032b36..3e66747f6c 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.h
+++ b/src/core/lib/iomgr/ev_epoll1_linux.h
@@ -28,7 +28,7 @@ extern "C" {
// a polling engine that utilizes a singleton epoll set and turnstile polling
-const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request);
+const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 0809d574a9..aafdd690c7 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -86,21 +86,21 @@ struct pollable {
grpc_wakeup_fd wakeup;
// only for type fd... one ref to the owner fd
- grpc_fd *owner_fd;
+ grpc_fd* owner_fd;
- grpc_pollset_set *pollset_set;
- pollable *next;
- pollable *prev;
+ grpc_pollset_set* pollset_set;
+ pollable* next;
+ pollable* prev;
gpr_mu mu;
- grpc_pollset_worker *root_worker;
+ grpc_pollset_worker* root_worker;
int event_cursor;
int event_count;
struct epoll_event events[MAX_EPOLL_EVENTS];
};
-static const char *pollable_type_string(pollable_type t) {
+static const char* pollable_type_string(pollable_type t) {
switch (t) {
case PO_MULTI:
return "pollset";
@@ -112,8 +112,8 @@ static const char *pollable_type_string(pollable_type t) {
return "<invalid>";
}
-static char *pollable_desc(pollable *p) {
- char *out;
+static char* pollable_desc(pollable* p) {
+ char* out;
gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type),
p->epfd, p->wakeup.read_fd);
return out;
@@ -121,17 +121,17 @@ static char *pollable_desc(pollable *p) {
/// Shared empty pollable - used by pollset to poll on until the first fd is
/// added
-static pollable *g_empty_pollable;
+static pollable* g_empty_pollable;
-static grpc_error *pollable_create(pollable_type type, pollable **p);
+static grpc_error* pollable_create(pollable_type type, pollable** p);
#ifdef NDEBUG
-static pollable *pollable_ref(pollable *p);
-static void pollable_unref(pollable *p);
+static pollable* pollable_ref(pollable* p);
+static void pollable_unref(pollable* p);
#define POLLABLE_REF(p, r) pollable_ref(p)
#define POLLABLE_UNREF(p, r) pollable_unref(p)
#else
-static pollable *pollable_ref(pollable *p, int line, const char *reason);
-static void pollable_unref(pollable *p, int line, const char *reason);
+static pollable* pollable_ref(pollable* p, int line, const char* reason);
+static void pollable_unref(pollable* p, int line, const char* reason);
#define POLLABLE_REF(p, r) pollable_ref((p), __LINE__, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), __LINE__, (r))
#endif
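The block above keeps two shapes for pollable_ref/pollable_unref: release (NDEBUG) builds take only the pollable, while debug builds also thread __LINE__ and a reason string through the POLLABLE_REF/POLLABLE_UNREF macros so refcount traces can say where and why a reference was taken. A small sketch of that debug-refcount idiom with hypothetical names, not the gRPC API:

#include <stdio.h>

typedef struct {
  int refs;
} object;

#ifdef NDEBUG
static void obj_ref(object* o) { o->refs++; }
static void obj_unref(object* o) { o->refs--; }
#define OBJ_REF(o, reason) obj_ref(o)
#define OBJ_UNREF(o, reason) obj_unref(o)
#else
/* Debug builds log the call site and a human-readable reason. */
static void obj_ref(object* o, int line, const char* reason) {
  fprintf(stderr, "ref %p -> %d (%s:%d)\n", (void*)o, o->refs + 1, reason, line);
  o->refs++;
}
static void obj_unref(object* o, int line, const char* reason) {
  fprintf(stderr, "unref %p -> %d (%s:%d)\n", (void*)o, o->refs - 1, reason, line);
  o->refs--;
}
#define OBJ_REF(o, reason) obj_ref((o), __LINE__, (reason))
#define OBJ_UNREF(o, reason) obj_unref((o), __LINE__, (reason))
#endif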
@@ -151,13 +151,13 @@ struct grpc_fd {
gpr_mu orphan_mu;
gpr_mu pollable_mu;
- pollable *pollable_obj;
+ pollable* pollable_obj;
gpr_atm read_closure;
gpr_atm write_closure;
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
+ struct grpc_fd* freelist_next;
+ grpc_closure* on_done_closure;
/* The pollset that last noticed that the fd is readable. The actual type
* stored in this is (grpc_pollset *) */
@@ -174,8 +174,8 @@ static void fd_global_shutdown(void);
*/
typedef struct {
- grpc_pollset_worker *next;
- grpc_pollset_worker *prev;
+ grpc_pollset_worker* next;
+ grpc_pollset_worker* prev;
} pwlink;
typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;
@@ -188,18 +188,18 @@ struct grpc_pollset_worker {
pid_t originator;
#endif
gpr_cv cv;
- grpc_pollset *pollset;
- pollable *pollable_obj;
+ grpc_pollset* pollset;
+ pollable* pollable_obj;
pwlink links[PWLINK_COUNT];
};
struct grpc_pollset {
gpr_mu mu;
- pollable *active_pollable;
+ pollable* active_pollable;
bool kicked_without_poller;
- grpc_closure *shutdown_closure;
- grpc_pollset_worker *root_worker;
+ grpc_closure* shutdown_closure;
+ grpc_pollset_worker* root_worker;
int containing_pollset_set_count;
};
@@ -210,23 +210,23 @@ struct grpc_pollset {
struct grpc_pollset_set {
gpr_refcount refs;
gpr_mu mu;
- grpc_pollset_set *parent;
+ grpc_pollset_set* parent;
size_t pollset_count;
size_t pollset_capacity;
- grpc_pollset **pollsets;
+ grpc_pollset** pollsets;
size_t fd_count;
size_t fd_capacity;
- grpc_fd **fds;
+ grpc_fd** fds;
};
/*******************************************************************************
* Common helpers
*/
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
+static bool append_error(grpc_error** composite, grpc_error* error,
+ const char* desc) {
if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
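append_error() above accumulates failures into one composite: a GRPC_ERROR_NONE input is a no-op, the first real failure creates the composite from err_desc, and later failures are attached to it. A self-contained sketch of the same accumulate-and-continue pattern, using a plain string as a stand-in for grpc_error:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* The "error" here is just a message string; NULL plays the role of
 * GRPC_ERROR_NONE.  Real grpc_error objects carry children and refcounts. */
static bool append_err(const char** composite, const char* error,
                       const char* desc) {
  if (error == NULL) return true;
  if (*composite == NULL) {
    *composite = desc; /* first failure names the overall operation */
  }
  fprintf(stderr, "%s: %s\n", desc, error);
  return false;
}

static const char* step_one(void) { return NULL; }              /* succeeds */
static const char* step_two(void) { return "epoll_ctl failed"; } /* fails */

int main(void) {
  const char* error = NULL;
  static const char* err_desc = "pollset_setup";
  append_err(&error, step_one(), err_desc);  /* no-op on success */
  append_err(&error, step_two(), err_desc);  /* recorded, loop would continue */
  return error == NULL ? 0 : 1;
}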
@@ -252,14 +252,14 @@ static bool append_error(grpc_error **composite, grpc_error *error,
* becomes a spurious read notification on a reused fd.
*/
-static grpc_fd *fd_freelist = NULL;
+static grpc_fd* fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
unref_by(ec, fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
@@ -270,13 +270,13 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
-static void ref_by(grpc_fd *fd, int n) {
+static void ref_by(grpc_fd* fd, int n) {
#endif
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
-static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_fd *fd = (grpc_fd *)arg;
+static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_fd* fd = (grpc_fd*)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
@@ -293,8 +293,8 @@ static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
#ifndef NDEBUG
-static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
- const char *reason, const char *file, int line) {
+static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
@@ -302,13 +302,14 @@ static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
#else
-static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
+static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
} else {
GPR_ASSERT(old > n);
}
@@ -320,15 +321,15 @@ static void fd_global_shutdown(void) {
gpr_mu_lock(&fd_freelist_mu);
gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
+ grpc_fd* fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
gpr_free(fd);
}
gpr_mu_destroy(&fd_freelist_mu);
}
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
+static grpc_fd* fd_create(int fd, const char* name) {
+ grpc_fd* new_fd = NULL;
gpr_mu_lock(&fd_freelist_mu);
if (fd_freelist != NULL) {
@@ -338,7 +339,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
}
gpr_mu_init(&new_fd->pollable_mu);
@@ -353,7 +354,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
new_fd->freelist_next = NULL;
new_fd->on_done_closure = NULL;
- char *fd_name;
+ char* fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
@@ -365,14 +366,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
return new_fd;
}
-static int fd_wrapped_fd(grpc_fd *fd) {
+static int fd_wrapped_fd(grpc_fd* fd) {
int ret_fd = fd->fd;
return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
+ bool already_closed, const char* reason) {
bool is_fd_closed = already_closed;
gpr_mu_lock(&fd->orphan_mu);
@@ -403,18 +404,18 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
}
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
+ return (grpc_pollset*)notifier;
}
-static bool fd_is_shutdown(grpc_fd *fd) {
+static bool fd_is_shutdown(grpc_fd* fd) {
return grpc_lfev_is_shutdown(&fd->read_closure);
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
@@ -423,13 +424,13 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
GRPC_ERROR_UNREF(why);
}
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
@@ -437,15 +438,15 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
* Pollable Definitions
*/
-static grpc_error *pollable_create(pollable_type type, pollable **p) {
+static grpc_error* pollable_create(pollable_type type, pollable** p) {
*p = NULL;
int epfd = epoll_create1(EPOLL_CLOEXEC);
if (epfd == -1) {
return GRPC_OS_ERROR(errno, "epoll_create1");
}
- *p = (pollable *)gpr_malloc(sizeof(**p));
- grpc_error *err = grpc_wakeup_fd_init(&(*p)->wakeup);
+ *p = (pollable*)gpr_malloc(sizeof(**p));
+ grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
if (err != GRPC_ERROR_NONE) {
close(epfd);
gpr_free(*p);
@@ -454,7 +455,7 @@ static grpc_error *pollable_create(pollable_type type, pollable **p) {
}
struct epoll_event ev;
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
- ev.data.ptr = (void *)(1 | (intptr_t) & (*p)->wakeup);
+ ev.data.ptr = (void*)(1 | (intptr_t) & (*p)->wakeup);
if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
close(epfd);
@@ -478,9 +479,9 @@ static grpc_error *pollable_create(pollable_type type, pollable **p) {
}
#ifdef NDEBUG
-static pollable *pollable_ref(pollable *p) {
+static pollable* pollable_ref(pollable* p) {
#else
-static pollable *pollable_ref(pollable *p, int line, const char *reason) {
+static pollable* pollable_ref(pollable* p, int line, const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
@@ -492,9 +493,9 @@ static pollable *pollable_ref(pollable *p, int line, const char *reason) {
}
#ifdef NDEBUG
-static void pollable_unref(pollable *p) {
+static void pollable_unref(pollable* p) {
#else
-static void pollable_unref(pollable *p, int line, const char *reason) {
+static void pollable_unref(pollable* p, int line, const char* reason) {
if (p == NULL) return;
if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
@@ -509,9 +510,9 @@ static void pollable_unref(pollable *p, int line, const char *reason) {
}
}
-static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollable_add_fd";
+static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollable_add_fd";
const int epfd = p->epfd;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
@@ -541,7 +542,7 @@ GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
/* Global state management */
-static grpc_error *pollset_global_init(void) {
+static grpc_error* pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
return pollable_create(PO_EMPTY, &g_empty_pollable);
@@ -554,8 +555,8 @@ static void pollset_global_shutdown(void) {
}
/* pollset->mu must be held while calling this function */
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
+static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
@@ -573,9 +574,9 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
/* pollset->mu must be held before calling this function,
* pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
* held */
-static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
- grpc_pollset_worker *specific_worker) {
- pollable *p = specific_worker->pollable_obj;
+static grpc_error* kick_one_worker(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_worker* specific_worker) {
+ pollable* p = specific_worker->pollable_obj;
grpc_core::mu_guard lock(&p->mu);
GPR_ASSERT(specific_worker != NULL);
if (specific_worker->kicked) {
@@ -599,7 +600,7 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
}
specific_worker->kicked = true;
- grpc_error *error = grpc_wakeup_fd_wakeup(&p->wakeup);
+ grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
return error;
}
if (specific_worker->initialized_cv) {
@@ -616,16 +617,15 @@ static grpc_error *kick_one_worker(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
- (void *)gpr_tls_get(&g_current_thread_pollset),
- (void *)gpr_tls_get(&g_current_thread_worker),
- pollset->root_worker);
+ (void*)gpr_tls_get(&g_current_thread_pollset),
+ (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
}
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
@@ -667,11 +667,11 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
-static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- grpc_error *error = GRPC_ERROR_NONE;
- const char *err_desc = "pollset_kick_all";
- grpc_pollset_worker *w = pollset->root_worker;
+static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ const char* err_desc = "pollset_kick_all";
+ grpc_pollset_worker* w = pollset->root_worker;
if (w != NULL) {
do {
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
@@ -682,13 +682,13 @@ static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
return error;
}
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
gpr_mu_init(&pollset->mu);
pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
*mu = &pollset->mu;
}
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
@@ -700,8 +700,8 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
return (int)delta;
}
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with
@@ -713,14 +713,14 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
-static grpc_error *fd_get_or_become_pollable(grpc_fd *fd, pollable **p) {
+static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
gpr_mu_lock(&fd->pollable_mu);
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "fd_get_or_become_pollable";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "fd_get_or_become_pollable";
if (fd->pollable_obj == NULL) {
if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
err_desc)) {
@@ -744,35 +744,35 @@ static grpc_error *fd_get_or_become_pollable(grpc_fd *fd, pollable **p) {
}
/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(pollset->shutdown_closure == NULL);
pollset->shutdown_closure = closure;
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
-static grpc_error *pollable_process_events(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- pollable *pollable_obj, bool drain) {
- static const char *err_desc = "pollset_process_events";
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* pollable_process_events(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
+ pollable* pollable_obj, bool drain) {
+ static const char* err_desc = "pollset_process_events";
+ grpc_error* error = GRPC_ERROR_NONE;
for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
pollable_obj->event_cursor != pollable_obj->event_count;
i++) {
int n = pollable_obj->event_cursor++;
- struct epoll_event *ev = &pollable_obj->events[n];
- void *data_ptr = ev->data.ptr;
+ struct epoll_event* ev = &pollable_obj->events[n];
+ void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
- (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
+ (grpc_wakeup_fd*)((~(intptr_t)1) & (intptr_t)data_ptr)),
err_desc);
} else {
- grpc_fd *fd = (grpc_fd *)data_ptr;
+ grpc_fd* fd = (grpc_fd*)data_ptr;
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0;
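The branch above relies on a low-bit tag: pollable_create() stores the wakeup fd in epoll_event.data.ptr with bit 0 set ((void*)(1 | (intptr_t)&(*p)->wakeup)), while plain grpc_fd pointers are stored untagged, so pollable_process_events() can tell the two apart by testing and masking that bit. A standalone sketch of the tagging idiom with made-up types:

#include <stdint.h>
#include <stdio.h>

typedef struct { int fd; } fd_obj;        /* stands in for grpc_fd */
typedef struct { int read_fd; } wake_obj; /* stands in for grpc_wakeup_fd */

/* Both structs are at least 2-byte aligned, so bit 0 of their addresses is
 * free to use as a type tag, exactly as the hunks above do with
 * epoll_event.data.ptr. */
static void* tag_wakeup(wake_obj* w) { return (void*)(1 | (intptr_t)w); }
static void* tag_fd(fd_obj* f) { return (void*)f; }

static void dispatch(void* data_ptr) {
  if (1 & (intptr_t)data_ptr) {
    wake_obj* w = (wake_obj*)(~(intptr_t)1 & (intptr_t)data_ptr);
    printf("wakeup fd %d\n", w->read_fd);
  } else {
    fd_obj* f = (fd_obj*)data_ptr;
    printf("regular fd %d\n", f->fd);
  }
}

int main(void) {
  fd_obj f = {42};
  wake_obj w = {7};
  dispatch(tag_fd(&f));
  dispatch(tag_wakeup(&w));
  return 0;
}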
@@ -795,17 +795,17 @@ static grpc_error *pollable_process_events(grpc_exec_ctx *exec_ctx,
}
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
POLLABLE_UNREF(pollset->active_pollable, "pollset");
pollset->active_pollable = NULL;
}
-static grpc_error *pollable_epoll(grpc_exec_ctx *exec_ctx, pollable *p,
+static grpc_error* pollable_epoll(grpc_exec_ctx* exec_ctx, pollable* p,
grpc_millis deadline) {
int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- char *desc = pollable_desc(p);
+ char* desc = pollable_desc(p);
gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_free(desc);
}
@@ -835,8 +835,8 @@ static grpc_error *pollable_epoll(grpc_exec_ctx *exec_ctx, pollable *p,
}
/* Return true if first in list */
-static bool worker_insert(grpc_pollset_worker **root_worker,
- grpc_pollset_worker *worker, pwlinks link) {
+static bool worker_insert(grpc_pollset_worker** root_worker,
+ grpc_pollset_worker* worker, pwlinks link) {
if (*root_worker == NULL) {
*root_worker = worker;
worker->links[link].next = worker->links[link].prev = worker;
@@ -853,8 +853,8 @@ static bool worker_insert(grpc_pollset_worker **root_worker,
/* returns the new root IFF the root changed */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;
-static worker_remove_result worker_remove(grpc_pollset_worker **root_worker,
- grpc_pollset_worker *worker,
+static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
+ grpc_pollset_worker* worker,
pwlinks link) {
if (worker == *root_worker) {
if (worker == worker->links[link].next) {
@@ -874,9 +874,9 @@ static worker_remove_result worker_remove(grpc_pollset_worker **root_worker,
}
/* Return true if this thread should poll */
-static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl,
+static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
bool do_poll = (pollset->shutdown_closure == nullptr);
if (worker_hdl != NULL) *worker_hdl = worker;
@@ -927,16 +927,16 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return do_poll;
}
-static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *worker,
- grpc_pollset_worker **worker_hdl) {
+static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* worker,
+ grpc_pollset_worker** worker_hdl) {
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(&worker->pollable_obj->mu);
switch (worker_remove(&worker->pollable_obj->root_worker, worker,
PWLINK_POLLABLE)) {
case WRR_NEW_ROOT: {
// wakeup new poller
- grpc_pollset_worker *new_root = worker->pollable_obj->root_worker;
+ grpc_pollset_worker* new_root = worker->pollable_obj->root_worker;
GPR_ASSERT(new_root->initialized_cv);
gpr_cv_signal(&new_root->cv);
break;
@@ -969,12 +969,12 @@ static long gettid(void) { return syscall(__NR_gettid); }
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
- grpc_pollset_worker *worker =
- (grpc_pollset_worker *)gpr_malloc(sizeof(*worker));
+ grpc_pollset_worker* worker =
+ (grpc_pollset_worker*)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
#else
grpc_pollset_worker worker;
@@ -984,13 +984,14 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
WORKER_PTR->originator = gettid();
#endif
if (GRPC_TRACER_ON(grpc_polling_trace)) {
- gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
- " deadline=%" PRIdPTR " kwp=%d pollable=%p",
+ gpr_log(GPR_DEBUG,
+ "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
+ " kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
}
- static const char *err_desc = "pollset_work";
- grpc_error *error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollset_work";
+ grpc_error* error = GRPC_ERROR_NONE;
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
} else {
@@ -999,9 +1000,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
if (WORKER_PTR->pollable_obj->event_cursor ==
WORKER_PTR->pollable_obj->event_count) {
- append_error(&error, pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj,
- deadline),
- err_desc);
+ append_error(
+ &error,
+ pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj, deadline),
+ err_desc);
}
append_error(&error,
pollable_process_events(exec_ctx, pollset,
@@ -1020,10 +1022,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return error;
}
-static grpc_error *pollset_transition_pollable_from_empty_to_fd_locked(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *fd) {
- static const char *err_desc = "pollset_transition_pollable_from_empty_to_fd";
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
+ grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* fd) {
+ static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
+ grpc_error* error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"PS:%p add fd %p (%d); transition pollable from empty to fd",
@@ -1036,10 +1038,10 @@ static grpc_error *pollset_transition_pollable_from_empty_to_fd_locked(
return error;
}
-static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *and_add_fd) {
- static const char *err_desc = "pollset_transition_pollable_from_fd_to_multi";
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
+ grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* and_add_fd) {
+ static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
+ grpc_error* error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_DEBUG,
@@ -1048,7 +1050,7 @@ static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
pollset->active_pollable->owner_fd);
}
append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
- grpc_fd *initial_fd = pollset->active_pollable->owner_fd;
+ grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
POLLABLE_UNREF(pollset->active_pollable, "pollset");
pollset->active_pollable = NULL;
if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
@@ -1065,10 +1067,10 @@ static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
}
/* expects pollsets locked, flag whether fd is locked or not */
-static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset, grpc_fd *fd) {
- grpc_error *error = GRPC_ERROR_NONE;
- pollable *po_at_start =
+static grpc_error* pollset_add_fd_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset, grpc_fd* fd) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ pollable* po_at_start =
POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
switch (pollset->active_pollable->type) {
case PO_EMPTY:
@@ -1102,11 +1104,11 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
return error;
}
-static grpc_error *pollset_as_multipollable_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- pollable **pollable_obj) {
- grpc_error *error = GRPC_ERROR_NONE;
- pollable *po_at_start =
+static grpc_error* pollset_as_multipollable_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
+ pollable** pollable_obj) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ pollable* po_at_start =
POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
switch (pollset->active_pollable->type) {
case PO_EMPTY:
@@ -1139,10 +1141,10 @@ static grpc_error *pollset_as_multipollable_locked(grpc_exec_ctx *exec_ctx,
return error;
}
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
gpr_mu_lock(&pollset->mu);
- grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd);
+ grpc_error* error = pollset_add_fd_locked(exec_ctx, pollset, fd);
gpr_mu_unlock(&pollset->mu);
GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}
@@ -1151,7 +1153,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* Pollset-set Definitions
*/
-static grpc_pollset_set *pss_lock_adam(grpc_pollset_set *pss) {
+static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
gpr_mu_lock(&pss->mu);
while (pss->parent != NULL) {
gpr_mu_unlock(&pss->mu);
@@ -1161,14 +1163,14 @@ static grpc_pollset_set *pss_lock_adam(grpc_pollset_set *pss) {
return pss;
}
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
+static grpc_pollset_set* pollset_set_create(void) {
+ grpc_pollset_set* pss = (grpc_pollset_set*)gpr_zalloc(sizeof(*pss));
gpr_mu_init(&pss->mu);
gpr_ref_init(&pss->refs, 1);
return pss;
}
-static void pollset_set_unref(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss) {
+static void pollset_set_unref(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss) {
if (pss == NULL) return;
if (!gpr_unref(&pss->refs)) return;
pollset_set_unref(exec_ctx, pss->parent);
@@ -1188,13 +1190,13 @@ static void pollset_set_unref(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss) {
gpr_free(pss);
}
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollset_set_add_fd";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollset_set_add_fd";
pss = pss_lock_adam(pss);
for (size_t i = 0; i < pss->pollset_count; i++) {
append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
@@ -1203,7 +1205,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
if (pss->fd_count == pss->fd_capacity) {
pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
pss->fds =
- (grpc_fd **)gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds));
+ (grpc_fd**)gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds));
}
REF_BY(fd, 2, "pollset_set");
pss->fds[pss->fd_count++] = fd;
@@ -1212,8 +1214,8 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
GRPC_LOG_IF_ERROR(err_desc, error);
}
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
}
@@ -1233,8 +1235,8 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
gpr_mu_unlock(&pss->mu);
}
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
}
@@ -1260,12 +1262,12 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
// add all fds to pollables, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
-static grpc_error *add_fds_to_pollsets(grpc_exec_ctx *exec_ctx, grpc_fd **fds,
- size_t fd_count, grpc_pollset **pollsets,
+static grpc_error* add_fds_to_pollsets(grpc_exec_ctx* exec_ctx, grpc_fd** fds,
+ size_t fd_count, grpc_pollset** pollsets,
size_t pollset_count,
- const char *err_desc, grpc_fd **out_fds,
- size_t *out_fd_count) {
- grpc_error *error = GRPC_ERROR_NONE;
+ const char* err_desc, grpc_fd** out_fds,
+ size_t* out_fd_count) {
+ grpc_error* error = GRPC_ERROR_NONE;
for (size_t i = 0; i < fd_count; i++) {
gpr_mu_lock(&fds[i]->orphan_mu);
if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
@@ -1284,14 +1286,14 @@ static grpc_error *add_fds_to_pollsets(grpc_exec_ctx *exec_ctx, grpc_fd **fds,
return error;
}
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
}
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollset_set_add_pollset";
- pollable *pollable_obj = NULL;
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollset_set_add_pollset";
+ pollable* pollable_obj = NULL;
gpr_mu_lock(&ps->mu);
if (!GRPC_LOG_IF_ERROR(err_desc, pollset_as_multipollable_locked(
exec_ctx, ps, &pollable_obj))) {
@@ -1310,7 +1312,7 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
err_desc);
if (pss->pollset_count == pss->pollset_capacity) {
pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
- pss->pollsets = (grpc_pollset **)gpr_realloc(
+ pss->pollsets = (grpc_pollset**)gpr_realloc(
pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets));
}
pss->pollsets[pss->pollset_count++] = ps;
@@ -1320,24 +1322,24 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
GRPC_LOG_IF_ERROR(err_desc, error);
}
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *a,
- grpc_pollset_set *b) {
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* a,
+ grpc_pollset_set* b) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
}
- grpc_error *error = GRPC_ERROR_NONE;
- static const char *err_desc = "pollset_set_add_fd";
+ grpc_error* error = GRPC_ERROR_NONE;
+ static const char* err_desc = "pollset_set_add_fd";
for (;;) {
if (a == b) {
// pollset ancestors are the same: nothing to do
return;
}
if (a > b) {
- GPR_SWAP(grpc_pollset_set *, a, b);
+ GPR_SWAP(grpc_pollset_set*, a, b);
}
- gpr_mu *a_mu = &a->mu;
- gpr_mu *b_mu = &b->mu;
+ gpr_mu* a_mu = &a->mu;
+ gpr_mu* b_mu = &b->mu;
gpr_mu_lock(a_mu);
gpr_mu_lock(b_mu);
if (a->parent != NULL) {
@@ -1355,7 +1357,7 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
const size_t a_size = a->fd_count + a->pollset_count;
const size_t b_size = b->fd_count + b->pollset_count;
if (b_size > a_size) {
- GPR_SWAP(grpc_pollset_set *, a, b);
+ GPR_SWAP(grpc_pollset_set*, a, b);
}
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
@@ -1364,22 +1366,24 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
b->parent = a;
if (a->fd_capacity < a->fd_count + b->fd_count) {
a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
- a->fds = (grpc_fd **)gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds));
+ a->fds = (grpc_fd**)gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds));
}
size_t initial_a_fd_count = a->fd_count;
a->fd_count = 0;
- append_error(&error, add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count,
- b->pollsets, b->pollset_count,
- "merge_a2b", a->fds, &a->fd_count),
- err_desc);
- append_error(&error, add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count,
- a->pollsets, a->pollset_count,
- "merge_b2a", a->fds, &a->fd_count),
- err_desc);
+ append_error(
+ &error,
+ add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
+ b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
+ err_desc);
+ append_error(
+ &error,
+ add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count, a->pollsets,
+ a->pollset_count, "merge_b2a", a->fds, &a->fd_count),
+ err_desc);
if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
a->pollset_capacity =
GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
- a->pollsets = (grpc_pollset **)gpr_realloc(
+ a->pollsets = (grpc_pollset**)gpr_realloc(
a->pollsets, a->pollset_capacity * sizeof(*a->pollsets));
}
if (b->pollset_count > 0) {
@@ -1396,9 +1400,9 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&b->mu);
}
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {}
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {}
/*******************************************************************************
* Event engine binding
@@ -1440,7 +1444,7 @@ static const grpc_event_engine_vtable vtable = {
shutdown_engine,
};
-const grpc_event_engine_vtable *grpc_init_epollex_linux(
+const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
if (!explicitly_requested) {
return NULL;
@@ -1474,7 +1478,7 @@ const grpc_event_engine_vtable *grpc_init_epollex_linux(
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
-const grpc_event_engine_vtable *grpc_init_epollex_linux(
+const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested) {
return NULL;
}
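One detail worth pulling out of pollset_set_add_pollset_set() above: it locks two pollset_sets at once, so before taking a->mu and then b->mu it orders the pair by address with GPR_SWAP, which keeps two threads merging the same pair in opposite orders from deadlocking (a later swap by size then folds the smaller set into the larger one). A stripped-down sketch of the address-ordered locking, with pthreads standing in for gpr_mu and the merge details elided:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  int size;
} set;

#define SWAP(type, a, b) do { type tmp = (a); (a) = (b); (b) = tmp; } while (0)

/* Always lock the lower-addressed set first so that two threads merging the
 * same pair in opposite orders cannot deadlock. */
static void merge(set* a, set* b) {
  if (a == b) return;          /* same set: nothing to do */
  if (a > b) SWAP(set*, a, b); /* canonical lock order: lower address first */
  pthread_mutex_lock(&a->mu);
  pthread_mutex_lock(&b->mu);
  a->size += b->size;          /* fold b into a (real merge moves fds/pollsets) */
  b->size = 0;
  pthread_mutex_unlock(&b->mu);
  pthread_mutex_unlock(&a->mu);
}

int main(void) {
  set x = {PTHREAD_MUTEX_INITIALIZER, 3};
  set y = {PTHREAD_MUTEX_INITIALIZER, 5};
  merge(&x, &y);
  printf("merged size=%d\n", x.size);
  return 0;
}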
diff --git a/src/core/lib/iomgr/ev_epollex_linux.h b/src/core/lib/iomgr/ev_epollex_linux.h
index 2849a23283..22b536c7d4 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.h
+++ b/src/core/lib/iomgr/ev_epollex_linux.h
@@ -26,7 +26,7 @@
extern "C" {
#endif
-const grpc_event_engine_vtable *grpc_init_epollex_linux(
+const grpc_event_engine_vtable* grpc_init_epollex_linux(
bool explicitly_requested);
#ifdef __cplusplus
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 035bdc4cb5..d5f3122abc 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -51,7 +51,7 @@
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
#define GRPC_POLLING_TRACE(...) \
if (GRPC_TRACER_ON(grpc_polling_trace)) { \
@@ -90,10 +90,10 @@ typedef struct poll_obj {
poll_obj_type obj_type;
#endif
gpr_mu mu;
- struct polling_island *pi;
+ struct polling_island* pi;
} poll_obj;
-const char *poll_obj_string(poll_obj_type po_type) {
+const char* poll_obj_string(poll_obj_type po_type) {
switch (po_type) {
case POLL_OBJ_FD:
return "fd";
@@ -106,11 +106,11 @@ const char *poll_obj_string(poll_obj_type po_type) {
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-/*******************************************************************************
- * Fd Declarations
- */
+ /*******************************************************************************
+ * Fd Declarations
+ */
-#define FD_FROM_PO(po) ((grpc_fd *)(po))
+#define FD_FROM_PO(po) ((grpc_fd*)(po))
struct grpc_fd {
poll_obj po;
@@ -130,8 +130,8 @@ struct grpc_fd {
gpr_atm read_closure;
gpr_atm write_closure;
- struct grpc_fd *freelist_next;
- grpc_closure *on_done_closure;
+ struct grpc_fd* freelist_next;
+ grpc_closure* on_done_closure;
/* The pollset that last noticed that the fd is readable. The actual type
* stored in this is (grpc_pollset *) */
@@ -142,14 +142,14 @@ struct grpc_fd {
/* Reference counting for fds */
#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line);
+static void fd_unref(grpc_fd* fd, const char* reason, const char* file,
int line);
#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
+static void fd_ref(grpc_fd* fd);
+static void fd_unref(grpc_fd* fd);
#define GRPC_FD_REF(fd, reason) fd_ref(fd)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
#endif
@@ -203,7 +203,7 @@ typedef struct polling_island {
/* The file descriptors in the epoll set */
size_t fd_cnt;
size_t fd_capacity;
- grpc_fd **fds;
+ grpc_fd** fds;
} polling_island;
/*******************************************************************************
@@ -215,8 +215,8 @@ struct grpc_pollset_worker {
/* Used to prevent a worker from getting kicked multiple times */
gpr_atm is_kicked;
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
+ struct grpc_pollset_worker* next;
+ struct grpc_pollset_worker* prev;
};
struct grpc_pollset {
@@ -227,7 +227,7 @@ struct grpc_pollset {
bool shutting_down; /* Is the pollset shutting down ? */
bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
- grpc_closure *shutdown_done; /* Called after shutdown is complete */
+ grpc_closure* shutdown_done; /* Called after shutdown is complete */
};
/*******************************************************************************
@@ -241,8 +241,8 @@ struct grpc_pollset_set {
* Common helpers
*/
-static bool append_error(grpc_error **composite, grpc_error *error,
- const char *desc) {
+static bool append_error(grpc_error** composite, grpc_error* error,
+ const char* desc) {
if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
@@ -266,10 +266,10 @@ static grpc_wakeup_fd polling_island_wakeup_fd;
/* The polling island being polled right now.
See comments in workqueue_maybe_wakeup for why this is tracked. */
-static __thread polling_island *g_current_thread_polling_island;
+static __thread polling_island* g_current_thread_polling_island;
/* Forward declaration */
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
+static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi);
#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -282,38 +282,40 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
-static void pi_add_ref(polling_island *pi);
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
+static void pi_add_ref(polling_island* pi);
+static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi);
#ifndef NDEBUG
-static void pi_add_ref_dbg(polling_island *pi, const char *reason,
- const char *file, int line) {
+static void pi_add_ref_dbg(polling_island* pi, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
+ gpr_log(GPR_DEBUG,
+ "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line);
}
pi_add_ref(pi);
}
-static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
- const char *reason, const char *file, int line) {
+static void pi_unref_dbg(grpc_exec_ctx* exec_ctx, polling_island* pi,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
- gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
- " (%s) - (%s, %d)",
+ gpr_log(GPR_DEBUG,
+ "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+ " (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);
}
pi_unref(exec_ctx, pi);
}
#endif
-static void pi_add_ref(polling_island *pi) {
+static void pi_add_ref(polling_island* pi) {
gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
}
-static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
+static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi) {
/* If ref count went to zero, delete the polling island.
Note that this deletion need not be done under a lock. Once the ref count goes
to zero, we are guaranteed that no one else holds a reference to the
@@ -323,7 +325,7 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
non-empty, we should remove a ref to the merged_to polling island
*/
if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
polling_island_delete(exec_ctx, pi);
if (next != NULL) {
PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
@@ -332,14 +334,14 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
}
/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
+static void polling_island_add_fds_locked(polling_island* pi, grpc_fd** fds,
size_t fd_count, bool add_fd_refs,
- grpc_error **error) {
+ grpc_error** error) {
int err;
size_t i;
struct epoll_event ev;
- char *err_msg;
- const char *err_desc = "polling_island_add_fds";
+ char* err_msg;
+ const char* err_desc = "polling_island_add_fds";
#ifdef GRPC_TSAN
/* See the definition of g_epoll_sync for more context */
@@ -367,7 +369,7 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
pi->fds =
- (grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
+ (grpc_fd**)gpr_realloc(pi->fds, sizeof(grpc_fd*) * pi->fd_capacity);
}
pi->fds[pi->fd_cnt++] = fds[i];
@@ -378,13 +380,13 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
}
/* The caller is expected to hold pi->mu before calling this */
-static void polling_island_add_wakeup_fd_locked(polling_island *pi,
- grpc_wakeup_fd *wakeup_fd,
- grpc_error **error) {
+static void polling_island_add_wakeup_fd_locked(polling_island* pi,
+ grpc_wakeup_fd* wakeup_fd,
+ grpc_error** error) {
struct epoll_event ev;
int err;
- char *err_msg;
- const char *err_desc = "polling_island_add_wakeup_fd";
+ char* err_msg;
+ const char* err_desc = "polling_island_add_wakeup_fd";
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
ev.data.ptr = wakeup_fd;
@@ -402,13 +404,13 @@ static void polling_island_add_wakeup_fd_locked(polling_island *pi,
}
/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_all_fds_locked(polling_island *pi,
+static void polling_island_remove_all_fds_locked(polling_island* pi,
bool remove_fd_refs,
- grpc_error **error) {
+ grpc_error** error) {
int err;
size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fds";
+ char* err_msg;
+ const char* err_desc = "polling_island_remove_fds";
for (i = 0; i < pi->fd_cnt; i++) {
err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
@@ -430,13 +432,13 @@ static void polling_island_remove_all_fds_locked(polling_island *pi,
}
/* The caller is expected to hold pi->mu lock before calling this function */
-static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
+static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,
bool is_fd_closed,
- grpc_error **error) {
+ grpc_error** error) {
int err;
size_t i;
- char *err_msg;
- const char *err_desc = "polling_island_remove_fd";
+ char* err_msg;
+ const char* err_desc = "polling_island_remove_fd";
/* If fd is already closed, then it would have been automatically removed
from the epoll set */
@@ -462,15 +464,15 @@ static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
}
/* Might return NULL in case of an error */
-static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
- grpc_fd *initial_fd,
- grpc_error **error) {
- polling_island *pi = NULL;
- const char *err_desc = "polling_island_create";
+static polling_island* polling_island_create(grpc_exec_ctx* exec_ctx,
+ grpc_fd* initial_fd,
+ grpc_error** error) {
+ polling_island* pi = NULL;
+ const char* err_desc = "polling_island_create";
*error = GRPC_ERROR_NONE;
- pi = (polling_island *)gpr_malloc(sizeof(*pi));
+ pi = (polling_island*)gpr_malloc(sizeof(*pi));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@@ -500,7 +502,7 @@ done:
return pi;
}
-static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
+static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi) {
GPR_ASSERT(pi->fd_cnt == 0);
if (pi->epoll_fd >= 0) {
@@ -514,11 +516,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
/* Attempts to get the last polling island in the linked list (linked by the
* 'merged_to' field). Since this does not lock the polling island, there are no
* guarantees that the island returned is the last island */
-static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
- polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+static polling_island* polling_island_maybe_get_latest(polling_island* pi) {
+ polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
while (next != NULL) {
pi = next;
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
}
return pi;
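
polling_island_maybe_get_latest simply chases the merged_to links with acquire loads and, as the comment notes, the node it returns may already be stale by the time the caller uses it. A standalone sketch of the same walk, using C11 atomics as a stand-in for gpr_atm_acq_load (the struct is reduced to the one field that matters):

#include <stdatomic.h>
#include <stddef.h>

typedef struct island {
  _Atomic(struct island*) merged_to;  /* stand-in for the gpr_atm link */
} island;

/* Walk the merged_to chain to the current tail. Without holding any lock this
   is only a hint: another merge may append a node right after we return. */
static island* maybe_get_latest(island* pi) {
  island* next = atomic_load_explicit(&pi->merged_to, memory_order_acquire);
  while (next != NULL) {
    pi = next;
    next = atomic_load_explicit(&pi->merged_to, memory_order_acquire);
  }
  return pi;
}
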
@@ -533,18 +535,18 @@ static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
... critical section ..
...
gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
-static polling_island *polling_island_lock(polling_island *pi) {
- polling_island *next = NULL;
+static polling_island* polling_island_lock(polling_island* pi) {
+ polling_island* next = NULL;
while (true) {
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
if (next == NULL) {
/* Looks like 'pi' is the last node in the linked list but unless we check
this by holding the pi->mu lock, we cannot be sure (i.e without the
pi->mu lock, we don't prevent island merges).
To be absolutely sure, check once more by holding the pi->mu lock */
gpr_mu_lock(&pi->mu);
- next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
+ next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
if (next == NULL) {
/* pi is in fact the last node and we have the pi->mu lock. we're done */
break;
@@ -582,11 +584,11 @@ static polling_island *polling_island_lock(polling_island *pi) {
// Release locks: Always call polling_island_unlock_pair() to release locks
polling_island_unlock_pair(p1, p2);
*/
-static void polling_island_lock_pair(polling_island **p, polling_island **q) {
- polling_island *pi_1 = *p;
- polling_island *pi_2 = *q;
- polling_island *next_1 = NULL;
- polling_island *next_2 = NULL;
+static void polling_island_lock_pair(polling_island** p, polling_island** q) {
+ polling_island* pi_1 = *p;
+ polling_island* pi_2 = *q;
+ polling_island* next_1 = NULL;
+ polling_island* next_2 = NULL;
/* The algorithm is simple:
- Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
@@ -603,16 +605,16 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) {
- If the polling islands are the last islands, we are done. If not,
release the locks and continue the process from the first step */
while (true) {
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
+ next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to);
while (next_1 != NULL) {
pi_1 = next_1;
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
+ next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to);
}
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+ next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to);
while (next_2 != NULL) {
pi_2 = next_2;
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+ next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to);
}
if (pi_1 == pi_2) {
@@ -628,8 +630,8 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) {
gpr_mu_lock(&pi_1->mu);
}
- next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
- next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
+ next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to);
+ next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to);
if (next_1 == NULL && next_2 == NULL) {
break;
}
@@ -642,7 +644,7 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) {
*q = pi_2;
}
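
polling_island_lock_pair locks the current tails of both chains and then re-reads merged_to while holding the locks; if either island was merged in the meantime it releases everything and retries from the top. Below is a compact sketch of that validate-under-lock loop with pthread mutexes and C11 atomics replacing gpr_mu/gpr_atm. The tie-break used when two distinct islands must be locked is not visible in this hunk, so the sketch orders the two locks by address purely to stay deadlock-free; treat that detail as an assumption.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct isl {
  pthread_mutex_t mu;              /* stand-in for gpr_mu */
  _Atomic(struct isl*) merged_to;  /* stand-in for the gpr_atm link */
} isl;

static isl* tail_of(isl* p) {
  isl* next = atomic_load_explicit(&p->merged_to, memory_order_acquire);
  while (next != NULL) {
    p = next;
    next = atomic_load_explicit(&p->merged_to, memory_order_acquire);
  }
  return p;
}

/* Lock the tails of both chains, then confirm they are still tails while the
   locks are held; otherwise release and try again. */
static void lock_pair(isl** p, isl** q) {
  for (;;) {
    isl* a = tail_of(*p);
    isl* b = tail_of(*q);
    if (a == b) {
      pthread_mutex_lock(&a->mu);
    } else {
      /* Assumption: lock distinct islands in address order to avoid deadlock. */
      isl* first = ((uintptr_t)a < (uintptr_t)b) ? a : b;
      isl* second = (first == a) ? b : a;
      pthread_mutex_lock(&first->mu);
      pthread_mutex_lock(&second->mu);
    }
    if (atomic_load_explicit(&a->merged_to, memory_order_acquire) == NULL &&
        atomic_load_explicit(&b->merged_to, memory_order_acquire) == NULL) {
      *p = a;
      *q = b;
      return;
    }
    pthread_mutex_unlock(&a->mu);             /* a merge raced us: retry */
    if (a != b) pthread_mutex_unlock(&b->mu);
  }
}
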
-static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
+static void polling_island_unlock_pair(polling_island* p, polling_island* q) {
if (p == q) {
gpr_mu_unlock(&p->mu);
} else {
@@ -651,16 +653,16 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
}
}
-static polling_island *polling_island_merge(polling_island *p,
- polling_island *q,
- grpc_error **error) {
+static polling_island* polling_island_merge(polling_island* p,
+ polling_island* q,
+ grpc_error** error) {
/* Get locks on both the polling islands */
polling_island_lock_pair(&p, &q);
if (p != q) {
/* Make sure that p points to the polling island with fewer fds than q */
if (p->fd_cnt > q->fd_cnt) {
- GPR_SWAP(polling_island *, p, q);
+ GPR_SWAP(polling_island*, p, q);
}
/* Merge p with q i.e move all the fds from p (The one with fewer fds) to q
@@ -685,8 +687,8 @@ static polling_island *polling_island_merge(polling_island *p,
return q;
}
-static grpc_error *polling_island_global_init() {
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* polling_island_global_init() {
+ grpc_error* error = GRPC_ERROR_NONE;
error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
if (error == GRPC_ERROR_NONE) {
@@ -722,13 +724,13 @@ static void polling_island_global_shutdown() {
* alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
* case occurs. */
-static grpc_fd *fd_freelist = NULL;
+static grpc_fd* fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
@@ -739,13 +741,13 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
+static void ref_by(grpc_fd* fd, int n) {
#endif
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
#ifndef NDEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
@@ -754,7 +756,7 @@ static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
#else
-static void unref_by(grpc_fd *fd, int n) {
+static void unref_by(grpc_fd* fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
@@ -775,18 +777,18 @@ static void unref_by(grpc_fd *fd, int n) {
/* Increment refcount by two to avoid changing the orphan bit */
#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_ref(grpc_fd* fd, const char* reason, const char* file,
int line) {
ref_by(fd, 2, reason, file, line);
}
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_unref(grpc_fd* fd, const char* reason, const char* file,
int line) {
unref_by(fd, 2, reason, file, line);
}
#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
+static void fd_ref(grpc_fd* fd) { ref_by(fd, 2); }
+static void fd_unref(grpc_fd* fd) { unref_by(fd, 2); }
#endif
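
fd_ref/fd_unref move the counter in steps of two because the low bit of fd->refst is reserved as the ownership flag; fd_is_orphaned later in this diff tests refst & 1. A standalone sketch of that packed counter with C11 atomics in place of gpr_atm (the exact create/orphan bookkeeping sits outside this hunk, so the helpers below only illustrate the bit-packing idea):

#include <stdatomic.h>
#include <stdbool.h>

/* Low bit: 1 while the fd is still owned, 0 once orphaned.
   Remaining bits: the reference count, hence the steps of 2. */
typedef struct { atomic_long refst; } fd_sketch;

static void fd_init(fd_sketch* fd) { atomic_store(&fd->refst, 1); } /* owned, no extra refs */
static void fd_take_ref(fd_sketch* fd) { atomic_fetch_add(&fd->refst, 2); } /* never touches bit 0 */
static bool fd_drop_ref(fd_sketch* fd) {
  /* True when the last reference goes away (counter reaches zero). */
  return atomic_fetch_sub(&fd->refst, 2) == 2;
}
static void fd_mark_orphaned(fd_sketch* fd) { atomic_fetch_sub(&fd->refst, 1); } /* clear bit 0 */
static bool fd_is_orphaned_sketch(fd_sketch* fd) {
  return (atomic_load(&fd->refst) & 1) == 0;
}
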
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
@@ -795,7 +797,7 @@ static void fd_global_shutdown(void) {
gpr_mu_lock(&fd_freelist_mu);
gpr_mu_unlock(&fd_freelist_mu);
while (fd_freelist != NULL) {
- grpc_fd *fd = fd_freelist;
+ grpc_fd* fd = fd_freelist;
fd_freelist = fd_freelist->freelist_next;
gpr_mu_destroy(&fd->po.mu);
gpr_free(fd);
@@ -803,8 +805,8 @@ static void fd_global_shutdown(void) {
gpr_mu_destroy(&fd_freelist_mu);
}
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *new_fd = NULL;
+static grpc_fd* fd_create(int fd, const char* name) {
+ grpc_fd* new_fd = NULL;
gpr_mu_lock(&fd_freelist_mu);
if (fd_freelist != NULL) {
@@ -814,7 +816,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
- new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
+ new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->po.mu);
}
@@ -839,14 +841,14 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&new_fd->po.mu);
- char *fd_name;
+ char* fd_name;
gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
gpr_free(fd_name);
return new_fd;
}
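
fd_create first tries to pop a grpc_fd off the mutex-protected freelist that fd_global_shutdown drains above, and only falls back to allocation when the list is empty. A minimal sketch of that freelist reuse, with pthread primitives and plain malloc standing in for the gpr_* wrappers and the iomgr registration omitted:

#include <pthread.h>
#include <stdlib.h>

typedef struct fd_node {
  int fd;
  struct fd_node* freelist_next;
} fd_node;

static fd_node* g_freelist = NULL;
static pthread_mutex_t g_freelist_mu = PTHREAD_MUTEX_INITIALIZER;

static fd_node* fd_create_sketch(int fd) {
  fd_node* new_fd = NULL;
  pthread_mutex_lock(&g_freelist_mu);
  if (g_freelist != NULL) {            /* reuse a previously released node */
    new_fd = g_freelist;
    g_freelist = g_freelist->freelist_next;
  }
  pthread_mutex_unlock(&g_freelist_mu);
  if (new_fd == NULL) {                /* freelist empty: allocate fresh */
    new_fd = (fd_node*)malloc(sizeof(*new_fd));
  }
  new_fd->fd = fd;
  return new_fd;
}

static void fd_release_sketch(fd_node* fd) {
  pthread_mutex_lock(&g_freelist_mu);
  fd->freelist_next = g_freelist;      /* push back for later reuse */
  g_freelist = fd;
  pthread_mutex_unlock(&g_freelist_mu);
}
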
-static int fd_wrapped_fd(grpc_fd *fd) {
+static int fd_wrapped_fd(grpc_fd* fd) {
int ret_fd = -1;
gpr_mu_lock(&fd->po.mu);
if (!fd->orphaned) {
@@ -857,11 +859,11 @@ static int fd_wrapped_fd(grpc_fd *fd) {
return ret_fd;
}
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *unref_pi = NULL;
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
+ bool already_closed, const char* reason) {
+ grpc_error* error = GRPC_ERROR_NONE;
+ polling_island* unref_pi = NULL;
gpr_mu_lock(&fd->po.mu);
fd->on_done_closure = on_done;
@@ -879,7 +881,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- Set fd->po.pi to NULL (but remove the ref on the polling island
before doing this.) */
if (fd->po.pi != NULL) {
- polling_island *pi_latest = polling_island_lock(fd->po.pi);
+ polling_island* pi_latest = polling_island_lock(fd->po.pi);
polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
gpr_mu_unlock(&pi_latest->mu);
@@ -909,24 +911,24 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
}
if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "fd_orphan: %s", msg);
}
GRPC_ERROR_UNREF(error);
}
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
- return (grpc_pollset *)notifier;
+ return (grpc_pollset*)notifier;
}
-static bool fd_is_shutdown(grpc_fd *fd) {
+static bool fd_is_shutdown(grpc_fd* fd) {
return grpc_lfev_is_shutdown(&fd->read_closure);
}
/* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
GRPC_ERROR_REF(why))) {
shutdown(fd->fd, SHUT_RDWR);
@@ -935,13 +937,13 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
GRPC_ERROR_UNREF(why);
}
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
@@ -962,7 +964,7 @@ static void sig_handler(int sig_num) {
static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
/* Global state management */
-static grpc_error *pollset_global_init(void) {
+static grpc_error* pollset_global_init(void) {
gpr_tls_init(&g_current_thread_pollset);
gpr_tls_init(&g_current_thread_worker);
poller_kick_init();
@@ -974,14 +976,14 @@ static void pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_worker);
}
-static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
- grpc_error *err = GRPC_ERROR_NONE;
+static grpc_error* pollset_worker_kick(grpc_pollset_worker* worker) {
+ grpc_error* err = GRPC_ERROR_NONE;
/* Kick the worker only if it was not already kicked */
if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
GRPC_POLLING_TRACE(
"pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
- (void *)worker, (long int)worker->pt_id);
+ (void*)worker, (long int)worker->pt_id);
int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
if (err_num != 0) {
err = GRPC_OS_ERROR(err_num, "pthread_kill");
@@ -992,18 +994,18 @@ static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
/* Return 1 if the pollset has active threads in pollset_work (pollset must
* be locked) */
-static int pollset_has_workers(grpc_pollset *p) {
+static int pollset_has_workers(grpc_pollset* p) {
return p->root_worker.next != &p->root_worker;
}
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void remove_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
}
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+static grpc_pollset_worker* pop_front_worker(grpc_pollset* p) {
if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
+ grpc_pollset_worker* w = p->root_worker.next;
remove_worker(p, w);
return w;
} else {
@@ -1011,26 +1013,26 @@ static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
}
}
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void push_back_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->next = &p->root_worker;
worker->prev = worker->next->prev;
worker->prev->next = worker->next->prev = worker;
}
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->prev = &p->root_worker;
worker->next = worker->prev->next;
worker->prev->next = worker->next->prev = worker;
}
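
The worker queue is an intrusive, circular doubly-linked list rooted at pollset->root_worker: an empty list is one where the root points at itself, and push_front/push_back/pop_front splice nodes in constant time without allocating. The same operations as a standalone sketch:

#include <stddef.h>

typedef struct worker {
  struct worker* next;
  struct worker* prev;
} worker;

/* The root is a sentinel: an empty list has root.next == root.prev == &root. */
static void list_init(worker* root) { root->next = root->prev = root; }
static int list_has_workers(worker* root) { return root->next != root; }

static void list_remove(worker* w) {
  w->prev->next = w->next;
  w->next->prev = w->prev;
}

static void list_push_back(worker* root, worker* w) {  /* insert just before root */
  w->next = root;
  w->prev = root->prev;
  w->prev->next = w->next->prev = w;
}

static void list_push_front(worker* root, worker* w) { /* insert just after root */
  w->prev = root;
  w->next = root->next;
  w->prev->next = w->next->prev = w;
}

static worker* list_pop_front(worker* root) {
  if (!list_has_workers(root)) return NULL;
  worker* w = root->next;
  list_remove(w);
  return w;
}
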
/* p->mu must be held before calling this function */
-static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+ grpc_pollset_worker* specific_worker) {
GPR_TIMER_BEGIN("pollset_kick", 0);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
- const char *err_desc = "Kick Failure";
- grpc_pollset_worker *worker = specific_worker;
+ const char* err_desc = "Kick Failure";
+ grpc_pollset_worker* worker = specific_worker;
if (worker != NULL) {
if (worker == GRPC_POLLSET_KICK_BROADCAST) {
if (pollset_has_workers(p)) {
@@ -1076,7 +1078,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
return error;
}
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
gpr_mu_init(&pollset->po.mu);
*mu = &pollset->po.mu;
pollset->po.pi = NULL;
@@ -1092,7 +1094,7 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->shutdown_done = NULL;
}
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis millis) {
if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
@@ -1104,8 +1106,8 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
return (int)delta;
}
-static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_pollset *notifier) {
+static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_pollset* notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with
@@ -1117,21 +1119,21 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
-static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
-static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
- grpc_pollset *ps,
- const char *reason) {
+static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* ps,
+ const char* reason) {
if (ps->po.pi != NULL) {
PI_UNREF(exec_ctx, ps->po.pi, reason);
}
ps->po.pi = NULL;
}
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
+static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
/* The pollset cannot have any workers if we are at this stage */
GPR_ASSERT(!pollset_has_workers(pollset));
@@ -1143,8 +1145,8 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
/* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_TIMER_BEGIN("pollset_shutdown", 0);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = true;
@@ -1165,23 +1167,23 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
* than destroying the mutexes, there is nothing special that needs to be done
* here */
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
gpr_mu_destroy(&pollset->po.mu);
}
#define GRPC_EPOLL_MAX_EVENTS 100
/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker *worker, int timeout_ms,
- sigset_t *sig_mask, grpc_error **error) {
+static void pollset_work_and_unlock(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
+ grpc_pollset_worker* worker, int timeout_ms,
+ sigset_t* sig_mask, grpc_error** error) {
struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
int epoll_fd = -1;
int ep_rv;
- polling_island *pi = NULL;
- char *err_msg;
- const char *err_desc = "pollset_work_and_unlock";
+ polling_island* pi = NULL;
+ char* err_msg;
+ const char* err_desc = "pollset_work_and_unlock";
GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);
/* We need to get the epoll_fd to wait on. The epoll_fd is inside the
@@ -1203,7 +1205,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
PI_ADD_REF(pollset->po.pi, "ps");
GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
- (void *)pollset, (void *)pollset->po.pi);
+ (void*)pollset, (void*)pollset->po.pi);
}
pi = polling_island_maybe_get_latest(pollset->po.pi);
@@ -1243,7 +1245,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
/* We were interrupted. Save an iteration by doing a zero timeout
epoll_wait to see if there are any other events of interest */
GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick",
- (void *)pollset, (void *)worker);
+ (void*)pollset, (void*)worker);
ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
}
}
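
When the blocking wait is interrupted by the kick signal, the code above issues one more epoll_wait with a zero timeout so that events which arrived together with the kick are still collected on this pass. A sketch of that retry shape; the blocking call with the signal mask is outside this hunk, so epoll_pwait is assumed here, and the kick bookkeeping is reduced to a single flag:

#define _GNU_SOURCE  /* for epoll_pwait */
#include <errno.h>
#include <signal.h>
#include <sys/epoll.h>

#define MAX_EVENTS 100

/* Wait for events; if the kick signal interrupted us, sweep once more with a
   zero timeout so events that raced with the kick are not lost.
   'was_kicked' stands in for the worker's is_kicked flag. */
static int wait_with_kick(int epoll_fd, struct epoll_event* evs, int timeout_ms,
                          const sigset_t* sig_mask,
                          volatile sig_atomic_t* was_kicked) {
  int n = epoll_pwait(epoll_fd, evs, MAX_EVENTS, timeout_ms, sig_mask);
  if (n < 0 && errno == EINTR && *was_kicked) {
    *was_kicked = 0;
    n = epoll_wait(epoll_fd, evs, MAX_EVENTS, 0);  /* pick up pending events */
  }
  return n;
}
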
@@ -1254,18 +1256,18 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
#endif /* defined(GRPC_TSAN) */
for (int i = 0; i < ep_rv; ++i) {
- void *data_ptr = ep_ev[i].data.ptr;
+ void* data_ptr = ep_ev[i].data.ptr;
if (data_ptr == &polling_island_wakeup_fd) {
GRPC_POLLING_TRACE(
"pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
"%d) got merged",
- (void *)pollset, (void *)worker, epoll_fd);
+ (void*)pollset, (void*)worker, epoll_fd);
/* This means that our polling island is merged with a different
island. We do not have to do anything here since the subsequent call
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
- grpc_fd *fd = (grpc_fd *)data_ptr;
+ grpc_fd* fd = (grpc_fd*)data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@@ -1297,11 +1299,11 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
The function pollset_work() may temporarily release the lock (pollset->po.mu)
during the course of its execution but it will always re-acquire the lock and
ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
GPR_TIMER_BEGIN("pollset_work", 0);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline);
sigset_t new_mask;
@@ -1400,8 +1402,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return error;
}
-static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
- poll_obj_type bag_type, poll_obj *item,
+static void add_poll_object(grpc_exec_ctx* exec_ctx, poll_obj* bag,
+ poll_obj_type bag_type, poll_obj* item,
poll_obj_type item_type) {
GPR_TIMER_BEGIN("add_poll_object", 0);
@@ -1410,8 +1412,8 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag,
GPR_ASSERT(bag->obj_type == bag_type);
#endif
- grpc_error *error = GRPC_ERROR_NONE;
- polling_island *pi_new = NULL;
+ grpc_error* error = GRPC_ERROR_NONE;
+ polling_island* pi_new = NULL;
gpr_mu_lock(&bag->mu);
gpr_mu_lock(&item->mu);
@@ -1462,8 +1464,8 @@ retry:
GRPC_POLLING_TRACE(
"add_poll_object: Raced creating new polling island. pi_new: %p "
"(fd: %d, %s: %p)",
- (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
- (void *)bag);
+ (void*)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type),
+ (void*)bag);
/* No need to lock 'pi_new' here since this is a new polling island
and no one has a reference to it yet */
polling_island_remove_all_fds_locked(pi_new, true, &error);
@@ -1481,13 +1483,12 @@ retry:
GRPC_POLLING_TRACE(
"add_poll_object: Created new polling island. pi_new: %p (%s: %p, "
"%s: %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
+ (void*)pi_new, poll_obj_string(item_type), (void*)item,
+ poll_obj_string(bag_type), (void*)bag);
} else {
GRPC_POLLING_TRACE(
"add_poll_object: Same polling island. pi: %p (%s, %s)",
- (void *)pi_new, poll_obj_string(item_type),
- poll_obj_string(bag_type));
+ (void*)pi_new, poll_obj_string(item_type), poll_obj_string(bag_type));
}
} else if (item->pi == NULL) {
/* GPR_ASSERT(bag->pi != NULL) */
@@ -1495,7 +1496,7 @@ retry:
pi_new = polling_island_lock(bag->pi);
if (item_type == POLL_OBJ_FD) {
- grpc_fd *fd = FD_FROM_PO(item);
+ grpc_fd* fd = FD_FROM_PO(item);
polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
}
@@ -1503,8 +1504,8 @@ retry:
GRPC_POLLING_TRACE(
"add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, "
"bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
+ (void*)pi_new, poll_obj_string(item_type), (void*)item,
+ poll_obj_string(bag_type), (void*)bag);
} else if (bag->pi == NULL) {
/* GPR_ASSERT(item->pi != NULL) */
/* Make pi_new to point to latest pi */
@@ -1513,15 +1514,15 @@ retry:
GRPC_POLLING_TRACE(
"add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, "
"bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
+ (void*)pi_new, poll_obj_string(item_type), (void*)item,
+ poll_obj_string(bag_type), (void*)bag);
} else {
pi_new = polling_island_merge(item->pi, bag->pi, &error);
GRPC_POLLING_TRACE(
"add_poll_obj: polling islands merged. pi_new: %p (item(%s): %p, "
"bag(%s): %p)",
- (void *)pi_new, poll_obj_string(item_type), (void *)item,
- poll_obj_string(bag_type), (void *)bag);
+ (void*)pi_new, poll_obj_string(item_type), (void*)item,
+ poll_obj_string(bag_type), (void*)bag);
}
/* At this point, pi_new is the polling island that both item->pi and bag->pi
@@ -1550,8 +1551,8 @@ retry:
GPR_TIMER_END("add_poll_object", 0);
}
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
POLL_OBJ_FD);
}
@@ -1560,8 +1561,8 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
* Pollset-set Definitions
*/
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss));
+static grpc_pollset_set* pollset_set_create(void) {
+ grpc_pollset_set* pss = (grpc_pollset_set*)gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifndef NDEBUG
@@ -1570,8 +1571,8 @@ static grpc_pollset_set *pollset_set_create(void) {
return pss;
}
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss) {
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss) {
gpr_mu_destroy(&pss->po.mu);
if (pss->po.pi != NULL) {
@@ -1581,45 +1582,45 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(pss);
}
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
POLL_OBJ_FD);
}
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
- grpc_fd *fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
+ grpc_fd* fd) {
/* Nothing to do */
}
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
POLL_OBJ_POLLSET);
}
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pss, grpc_pollset *ps) {
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pss, grpc_pollset* ps) {
/* Nothing to do */
}
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
POLL_OBJ_POLLSET_SET);
}
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
/* Nothing to do */
}
/* Test helper functions
* */
-void *grpc_fd_get_polling_island(grpc_fd *fd) {
- polling_island *pi;
+void* grpc_fd_get_polling_island(grpc_fd* fd) {
+ polling_island* pi;
gpr_mu_lock(&fd->po.mu);
pi = fd->po.pi;
@@ -1628,8 +1629,8 @@ void *grpc_fd_get_polling_island(grpc_fd *fd) {
return pi;
}
-void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
- polling_island *pi;
+void* grpc_pollset_get_polling_island(grpc_pollset* ps) {
+ polling_island* pi;
gpr_mu_lock(&ps->po.mu);
pi = ps->po.pi;
@@ -1638,9 +1639,9 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
return pi;
}
-bool grpc_are_polling_islands_equal(void *p, void *q) {
- polling_island *p1 = (polling_island *)p;
- polling_island *p2 = (polling_island *)q;
+bool grpc_are_polling_islands_equal(void* p, void* q) {
+ polling_island* p1 = (polling_island*)p;
+ polling_island* p2 = (polling_island*)q;
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */
@@ -1706,7 +1707,7 @@ static bool is_epoll_available() {
return true;
}
-const grpc_event_engine_vtable *grpc_init_epollsig_linux(
+const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
/* If use of signals is disabled, we cannot use epoll engine*/
if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
@@ -1748,7 +1749,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux(
#include "src/core/lib/iomgr/ev_epollsig_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
* NULL */
-const grpc_event_engine_vtable *grpc_init_epollsig_linux(
+const grpc_event_engine_vtable* grpc_init_epollsig_linux(
bool explicit_request) {
return NULL;
}
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.h b/src/core/lib/iomgr/ev_epollsig_linux.h
index c04ff27400..ca68595734 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.h
+++ b/src/core/lib/iomgr/ev_epollsig_linux.h
@@ -26,12 +26,12 @@
extern "C" {
#endif
-const grpc_event_engine_vtable *grpc_init_epollsig_linux(bool explicit_request);
+const grpc_event_engine_vtable* grpc_init_epollsig_linux(bool explicit_request);
#ifdef GRPC_LINUX_EPOLL
-void *grpc_fd_get_polling_island(grpc_fd *fd);
-void *grpc_pollset_get_polling_island(grpc_pollset *ps);
-bool grpc_are_polling_islands_equal(void *p, void *q);
+void* grpc_fd_get_polling_island(grpc_fd* fd);
+void* grpc_pollset_get_polling_island(grpc_pollset* ps);
+bool grpc_are_polling_islands_equal(void* p, void* q);
#endif /* defined(GRPC_LINUX_EPOLL) */
#ifdef __cplusplus
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 036a35690c..554a438e6a 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -45,17 +45,17 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/murmur_hash.h"
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
/*******************************************************************************
* FD declarations
*/
typedef struct grpc_fd_watcher {
- struct grpc_fd_watcher *next;
- struct grpc_fd_watcher *prev;
- grpc_pollset *pollset;
- grpc_pollset_worker *worker;
- grpc_fd *fd;
+ struct grpc_fd_watcher* next;
+ struct grpc_fd_watcher* prev;
+ grpc_pollset* pollset;
+ grpc_pollset_worker* worker;
+ grpc_fd* fd;
} grpc_fd_watcher;
struct grpc_fd {
@@ -71,7 +71,7 @@ struct grpc_fd {
int shutdown;
int closed;
int released;
- grpc_error *shutdown_error;
+ grpc_error* shutdown_error;
/* The watcher list.
@@ -96,18 +96,18 @@ struct grpc_fd {
the inactive pollers may be kicked out of their poll loops to take
that responsibility. */
grpc_fd_watcher inactive_watcher_root;
- grpc_fd_watcher *read_watcher;
- grpc_fd_watcher *write_watcher;
+ grpc_fd_watcher* read_watcher;
+ grpc_fd_watcher* write_watcher;
- grpc_closure *read_closure;
- grpc_closure *write_closure;
+ grpc_closure* read_closure;
+ grpc_closure* write_closure;
- grpc_closure *on_done_closure;
+ grpc_closure* on_done_closure;
grpc_iomgr_object iomgr_object;
/* The pollset that last noticed and notified that the fd is readable */
- grpc_pollset *read_notifier_pollset;
+ grpc_pollset* read_notifier_pollset;
};
/* Begin polling on an fd.
@@ -121,35 +121,35 @@ struct grpc_fd {
Polling strategies that do not need to alter their behavior depending on the
fd's current interest (such as epoll) do not need to call this function.
MUST NOT be called with a pollset lock taken */
-static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- grpc_pollset_worker *worker, uint32_t read_mask,
- uint32_t write_mask, grpc_fd_watcher *rec);
+static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
+ grpc_pollset_worker* worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher* rec);
/* Complete polling previously started with fd_begin_poll
MUST NOT be called with a pollset lock taken
if got_read or got_write are 1, also does the become_{readable,writable} as
appropriate. */
-static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
+static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* rec,
int got_read, int got_write,
- grpc_pollset *read_notifier_pollset);
+ grpc_pollset* read_notifier_pollset);
/* Return 1 if this fd is orphaned, 0 otherwise */
-static bool fd_is_orphaned(grpc_fd *fd);
+static bool fd_is_orphaned(grpc_fd* fd);
#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line);
+static void fd_unref(grpc_fd* fd, const char* reason, const char* file,
int line);
#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
#else
-static void fd_ref(grpc_fd *fd);
-static void fd_unref(grpc_fd *fd);
+static void fd_ref(grpc_fd* fd);
+static void fd_unref(grpc_fd* fd);
#define GRPC_FD_REF(fd, reason) fd_ref(fd)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
#endif
-#define CLOSURE_NOT_READY ((grpc_closure *)0)
-#define CLOSURE_READY ((grpc_closure *)1)
+#define CLOSURE_NOT_READY ((grpc_closure*)0)
+#define CLOSURE_READY ((grpc_closure*)1)
/*******************************************************************************
* pollset declarations
@@ -157,15 +157,15 @@ static void fd_unref(grpc_fd *fd);
typedef struct grpc_cached_wakeup_fd {
grpc_wakeup_fd fd;
- struct grpc_cached_wakeup_fd *next;
+ struct grpc_cached_wakeup_fd* next;
} grpc_cached_wakeup_fd;
struct grpc_pollset_worker {
- grpc_cached_wakeup_fd *wakeup_fd;
+ grpc_cached_wakeup_fd* wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
+ struct grpc_pollset_worker* next;
+ struct grpc_pollset_worker* prev;
};
struct grpc_pollset {
@@ -174,23 +174,23 @@ struct grpc_pollset {
int shutting_down;
int called_shutdown;
int kicked_without_pollers;
- grpc_closure *shutdown_done;
+ grpc_closure* shutdown_done;
grpc_closure_list idle_jobs;
int pollset_set_count;
/* all polled fds */
size_t fd_count;
size_t fd_capacity;
- grpc_fd **fds;
+ grpc_fd** fds;
/* Local cache of eventfds for workers */
- grpc_cached_wakeup_fd *local_wakeup_cache;
+ grpc_cached_wakeup_fd* local_wakeup_cache;
};
/* Add an fd to a pollset */
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd);
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
/* Convert a timespec to milliseconds:
- very small or negative poll times are clamped to zero to do a
@@ -199,7 +199,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- longer than a millisecond polls are rounded up to the next nearest
millisecond to avoid spinning
- infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis deadline);
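
The comment above lists the clamping rules, and the definition later in this file applies them to grpc_millis deadlines. A minimal sketch of those rules over int64_t milliseconds (the exec_ctx clock is replaced by an explicit now parameter, and the constant name is illustrative):

#include <stdint.h>

#define SKETCH_MILLIS_INF_FUTURE INT64_MAX

/* Convert an absolute deadline to a poll()/epoll_wait() timeout:
   - an infinite deadline becomes -1 (block forever)
   - a deadline at or before 'now' becomes 0 (poll once, do not block)
   - otherwise return the remaining time in milliseconds */
static int deadline_to_timeout_ms(int64_t deadline_ms, int64_t now_ms) {
  if (deadline_ms == SKETCH_MILLIS_INF_FUTURE) return -1;
  if (deadline_ms <= now_ms) return 0;
  int64_t delta = deadline_ms - now_ms;
  if (delta > INT32_MAX) return INT32_MAX;  /* keep it within the int API */
  return (int)delta;
}
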
/* Allow kick to wakeup the currently polling worker */
@@ -208,13 +208,13 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
-static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
+static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+ grpc_pollset_worker* specific_worker,
uint32_t flags) GRPC_MUST_USE_RESULT;
/* Return 1 if the pollset has active threads in pollset_work (pollset must
* be locked) */
-static bool pollset_has_workers(grpc_pollset *pollset);
+static bool pollset_has_workers(grpc_pollset* pollset);
/*******************************************************************************
* pollset_set definitions
@@ -225,15 +225,15 @@ struct grpc_pollset_set {
size_t pollset_count;
size_t pollset_capacity;
- grpc_pollset **pollsets;
+ grpc_pollset** pollsets;
size_t pollset_set_count;
size_t pollset_set_capacity;
- struct grpc_pollset_set **pollset_sets;
+ struct grpc_pollset_set** pollset_sets;
size_t fd_count;
size_t fd_capacity;
- grpc_fd **fds;
+ grpc_fd** fds;
};
/*******************************************************************************
@@ -246,9 +246,9 @@ struct grpc_pollset_set {
typedef struct poll_result {
gpr_refcount refcount;
- cv_node *watchers;
+ cv_node* watchers;
int watchcount;
- struct pollfd *fds;
+ struct pollfd* fds;
nfds_t nfds;
int retval;
int err;
@@ -258,11 +258,11 @@ typedef struct poll_result {
typedef struct poll_args {
gpr_cv trigger;
int trigger_set;
- struct pollfd *fds;
+ struct pollfd* fds;
nfds_t nfds;
- poll_result *result;
- struct poll_args *next;
- struct poll_args *prev;
+ poll_result* result;
+ struct poll_args* next;
+ struct poll_args* prev;
} poll_args;
// This is a 2-tiered cache, we maintain a hash table
@@ -270,8 +270,8 @@ typedef struct poll_args {
// of that call. We also maintain a freelist of inactive
// poll threads.
typedef struct poll_hash_table {
- poll_args *free_pollers;
- poll_args **active_pollers;
+ poll_args* free_pollers;
+ poll_args** active_pollers;
unsigned int size;
unsigned int count;
} poll_hash_table;
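
The struct above is the two tiers the comment describes: a hash table of in-flight poll() calls plus a freelist of idle poller threads, so a caller whose fd set matches an active entry can share that poller instead of starting another. The hashing, key comparison, and thread hand-off are not shown in this diff, so the sketch below fills them in with simple placeholders and should be read only as an illustration of the lookup shape:

#include <poll.h>
#include <stdlib.h>
#include <string.h>

/* Rough stand-in for poll_args: keyed by the pollfd array it watches. */
typedef struct pargs {
  struct pargs* next;   /* bucket / freelist link */
  struct pollfd* fds;
  nfds_t nfds;
} pargs;

typedef struct {
  pargs* free_pollers;    /* tier 2: idle pollers awaiting reuse */
  pargs** active_pollers; /* tier 1: buckets of in-flight poll() calls */
  unsigned size;          /* number of buckets (assumed > 0) */
} poller_table;

/* Placeholder hash over the fd set; any reasonable mix works for the sketch. */
static unsigned hash_args(const struct pollfd* fds, nfds_t nfds, unsigned size) {
  unsigned h = (unsigned)nfds;
  for (nfds_t i = 0; i < nfds; i++) h = h * 31u + (unsigned)fds[i].fd;
  return h % size;
}

static pargs* get_poller(poller_table* t, struct pollfd* fds, nfds_t nfds) {
  /* Tier 1: reuse a poller already blocked on an identical fd set. */
  unsigned b = hash_args(fds, nfds, t->size);
  for (pargs* p = t->active_pollers[b]; p != NULL; p = p->next) {
    if (p->nfds == nfds && memcmp(p->fds, fds, nfds * sizeof(*fds)) == 0) return p;
  }
  /* Tier 2: otherwise take an idle entry, or create a new one. */
  pargs* p = t->free_pollers;
  if (p != NULL) t->free_pollers = p->next;
  else p = (pargs*)calloc(1, sizeof(*p));
  p->fds = fds;
  p->nfds = nfds;
  p->next = t->active_pollers[b];
  t->active_pollers[b] = p;
  return p;
}
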
@@ -286,7 +286,7 @@ cv_fd_table g_cvfds;
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
-static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
@@ -297,13 +297,13 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
-static void ref_by(grpc_fd *fd, int n) {
+static void ref_by(grpc_fd* fd, int n) {
#endif
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
#ifndef NDEBUG
-static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
+static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
@@ -312,7 +312,7 @@ static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
#else
-static void unref_by(grpc_fd *fd, int n) {
+static void unref_by(grpc_fd* fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
@@ -325,8 +325,8 @@ static void unref_by(grpc_fd *fd, int n) {
}
}
-static grpc_fd *fd_create(int fd, const char *name) {
- grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r));
+static grpc_fd* fd_create(int fd, const char* name) {
+ grpc_fd* r = (grpc_fd*)gpr_malloc(sizeof(*r));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@@ -341,21 +341,21 @@ static grpc_fd *fd_create(int fd, const char *name) {
r->released = 0;
r->read_notifier_pollset = NULL;
- char *name2;
+ char* name2;
gpr_asprintf(&name2, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&r->iomgr_object, name2);
gpr_free(name2);
return r;
}
-static bool fd_is_orphaned(grpc_fd *fd) {
+static bool fd_is_orphaned(grpc_fd* fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
/* Return the read-notifier pollset */
-static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
- grpc_pollset *notifier = NULL;
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
+ grpc_pollset* notifier = NULL;
gpr_mu_lock(&fd->mu);
notifier = fd->read_notifier_pollset;
@@ -364,19 +364,19 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
return notifier;
}
-static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx,
- grpc_fd_watcher *watcher) {
+static grpc_error* pollset_kick_locked(grpc_exec_ctx* exec_ctx,
+ grpc_fd_watcher* watcher) {
gpr_mu_lock(&watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
- grpc_error *err =
+ grpc_error* err =
pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
return err;
}
-static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd) {
+static void maybe_wake_one_watcher_locked(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
@@ -386,8 +386,8 @@ static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx,
}
}
-static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
- grpc_fd_watcher *watcher;
+static void wake_all_watchers_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+ grpc_fd_watcher* watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
pollset_kick_locked(exec_ctx, watcher);
@@ -400,12 +400,12 @@ static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
}
}
-static int has_watchers(grpc_fd *fd) {
+static int has_watchers(grpc_fd* fd) {
return fd->read_watcher != NULL || fd->write_watcher != NULL ||
fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}
-static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
+static void close_fd_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
fd->closed = 1;
if (!fd->released) {
close(fd->fd);
@@ -413,7 +413,7 @@ static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
}
-static int fd_wrapped_fd(grpc_fd *fd) {
+static int fd_wrapped_fd(grpc_fd* fd) {
if (fd->released || fd->closed) {
return -1;
} else {
@@ -421,9 +421,9 @@ static int fd_wrapped_fd(grpc_fd *fd) {
}
}
-static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *on_done, int *release_fd,
- bool already_closed, const char *reason) {
+static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* on_done, int* release_fd,
+ bool already_closed, const char* reason) {
fd->on_done_closure = on_done;
fd->released = release_fd != NULL;
if (release_fd != NULL) {
@@ -445,22 +445,22 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
/* increment refcount by two to avoid changing the orphan bit */
#ifndef NDEBUG
-static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_ref(grpc_fd* fd, const char* reason, const char* file,
int line) {
ref_by(fd, 2, reason, file, line);
}
-static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
+static void fd_unref(grpc_fd* fd, const char* reason, const char* file,
int line) {
unref_by(fd, 2, reason, file, line);
}
#else
-static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
+static void fd_ref(grpc_fd* fd) { ref_by(fd, 2); }
-static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
+static void fd_unref(grpc_fd* fd) { unref_by(fd, 2); }
#endif
-static grpc_error *fd_shutdown_error(grpc_fd *fd) {
+static grpc_error* fd_shutdown_error(grpc_fd* fd) {
if (!fd->shutdown) {
return GRPC_ERROR_NONE;
} else {
@@ -469,8 +469,8 @@ static grpc_error *fd_shutdown_error(grpc_fd *fd) {
}
}
-static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st, grpc_closure *closure) {
+static void notify_on_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure** st, grpc_closure* closure) {
if (fd->shutdown) {
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
@@ -492,8 +492,8 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
/* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure **st) {
+static int set_ready_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure** st) {
if (*st == CLOSURE_READY) {
/* duplicate ready ==> ignore */
return 0;
@@ -510,11 +510,11 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
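
In this poll-based engine a closure slot holds one of three values: CLOSURE_NOT_READY ((grpc_closure*)0), CLOSURE_READY ((grpc_closure*)1), or a real closure pointer, and notify_on_locked / set_ready_locked above are the two halves of that hand-off performed under fd->mu. A standalone sketch of the same three-state slot, with the scheduler reduced to a direct callback invocation:

#include <stddef.h>

typedef struct closure { void (*cb)(void* arg); void* arg; } closure;

#define NOT_READY ((closure*)0)
#define READY     ((closure*)1)

/* Consumer side: ask to be notified. If the event already fired, run the
   callback immediately and reset the slot; otherwise park the closure.
   (In the real code this runs with fd->mu held.) */
static void notify_on(closure** st, closure* c) {
  if (*st == NOT_READY) {
    *st = c;            /* wait for set_ready() to pick it up */
  } else if (*st == READY) {
    *st = NOT_READY;
    c->cb(c->arg);      /* event already happened: fire now */
  } /* a second pending notification on one slot would be a caller bug */
}

/* Producer side: mark the event ready. Returns 1 when it consumed a parked
   closure, mirroring set_ready_locked's "state becomes not ready" result. */
static int set_ready(closure** st) {
  if (*st == READY) return 0;  /* duplicate ready: ignore */
  if (*st == NOT_READY) {
    *st = READY;
    return 0;
  }
  closure* c = *st;            /* a closure was waiting */
  *st = NOT_READY;
  c->cb(c->arg);
  return 1;
}
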
static void set_read_notifier_pollset_locked(
- grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) {
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_pollset* read_notifier_pollset) {
fd->read_notifier_pollset = read_notifier_pollset;
}
-static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
gpr_mu_lock(&fd->mu);
/* only shutdown once */
if (!fd->shutdown) {
@@ -530,32 +530,32 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
gpr_mu_unlock(&fd->mu);
}
-static bool fd_is_shutdown(grpc_fd *fd) {
+static bool fd_is_shutdown(grpc_fd* fd) {
gpr_mu_lock(&fd->mu);
bool r = fd->shutdown;
gpr_mu_unlock(&fd->mu);
return r;
}
-static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
gpr_mu_lock(&fd->mu);
notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
gpr_mu_unlock(&fd->mu);
}
-static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
gpr_mu_lock(&fd->mu);
notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
gpr_mu_unlock(&fd->mu);
}
-static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
- grpc_pollset_worker *worker, uint32_t read_mask,
- uint32_t write_mask, grpc_fd_watcher *watcher) {
+static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
+ grpc_pollset_worker* worker, uint32_t read_mask,
+ uint32_t write_mask, grpc_fd_watcher* watcher) {
uint32_t mask = 0;
- grpc_closure *cur;
+ grpc_closure* cur;
int requested;
/* keep track of pollers that have requested our events, in case they change
*/
@@ -602,12 +602,12 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
return mask;
}
-static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
+static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* watcher,
int got_read, int got_write,
- grpc_pollset *read_notifier_pollset) {
+ grpc_pollset* read_notifier_pollset) {
int was_polling = 0;
int kick = 0;
- grpc_fd *fd = watcher->fd;
+ grpc_fd* fd = watcher->fd;
if (fd == NULL) {
return;
@@ -667,26 +667,26 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
GPR_TLS_DECL(g_current_thread_poller);
GPR_TLS_DECL(g_current_thread_worker);
-static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void remove_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
}
-static bool pollset_has_workers(grpc_pollset *p) {
+static bool pollset_has_workers(grpc_pollset* p) {
return p->root_worker.next != &p->root_worker;
}
-static bool pollset_in_pollset_sets(grpc_pollset *p) {
+static bool pollset_in_pollset_sets(grpc_pollset* p) {
return p->pollset_set_count;
}
-static bool pollset_has_observers(grpc_pollset *p) {
+static bool pollset_has_observers(grpc_pollset* p) {
return pollset_has_workers(p) || pollset_in_pollset_sets(p);
}
-static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+static grpc_pollset_worker* pop_front_worker(grpc_pollset* p) {
if (pollset_has_workers(p)) {
- grpc_pollset_worker *w = p->root_worker.next;
+ grpc_pollset_worker* w = p->root_worker.next;
remove_worker(p, w);
return w;
} else {
@@ -694,19 +694,19 @@ static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
}
}
-static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void push_back_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->next = &p->root_worker;
worker->prev = worker->next->prev;
worker->prev->next = worker->next->prev = worker;
}
-static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
worker->prev = &p->root_worker;
worker->next = worker->prev->next;
worker->prev->next = worker->next->prev = worker;
}
-static void kick_append_error(grpc_error **composite, grpc_error *error) {
+static void kick_append_error(grpc_error** composite, grpc_error* error) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Kick Failure");
@@ -714,11 +714,11 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
- grpc_pollset_worker *specific_worker,
+static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+ grpc_pollset_worker* specific_worker,
uint32_t flags) {
GPR_TIMER_BEGIN("pollset_kick_ext", 0);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
/* pollset->mu already held */
@@ -785,14 +785,14 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
return error;
}
-static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
+static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+ grpc_pollset_worker* specific_worker) {
return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
}
/* global state management */
-static grpc_error *pollset_global_init(void) {
+static grpc_error* pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
gpr_tls_init(&g_current_thread_worker);
return GRPC_ERROR_NONE;
@@ -805,7 +805,7 @@ static void pollset_global_shutdown(void) {
/* main interface */
-static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
@@ -821,11 +821,11 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
pollset->pollset_set_count = 0;
}
-static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(!pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
while (pollset->local_wakeup_cache) {
- grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
+ grpc_cached_wakeup_fd* next = pollset->local_wakeup_cache->next;
grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
gpr_free(pollset->local_wakeup_cache);
pollset->local_wakeup_cache = next;
@@ -834,8 +834,8 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
-static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_fd *fd) {
+static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_fd* fd) {
gpr_mu_lock(&pollset->mu);
size_t i;
/* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
@@ -845,8 +845,8 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
- pollset->fds = (grpc_fd **)gpr_realloc(
- pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
+ pollset->fds = (grpc_fd**)gpr_realloc(
+ pollset->fds, sizeof(grpc_fd*) * pollset->fd_capacity);
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
@@ -855,7 +855,7 @@ exit:
gpr_mu_unlock(&pollset->mu);
}
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
size_t i;
for (i = 0; i < pollset->fd_count; i++) {
@@ -865,7 +865,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
}
-static void work_combine_error(grpc_error **composite, grpc_error *error) {
+static void work_combine_error(grpc_error** composite, grpc_error* error) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("pollset_work");
@@ -873,12 +873,12 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) {
*composite = grpc_error_add_child(*composite, error);
}
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
+static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
/* Avoid malloc for small number of elements. */
enum { inline_elements = 96 };
@@ -899,7 +899,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
worker.wakeup_fd =
- (grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd));
+ (grpc_cached_wakeup_fd*)gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@@ -941,8 +941,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
int r;
size_t i, fd_count;
nfds_t pfd_count;
- grpc_fd_watcher *watchers;
- struct pollfd *pfds;
+ grpc_fd_watcher* watchers;
+ struct pollfd* pfds;
timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
@@ -953,9 +953,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* Allocate one buffer to hold both pfds and watchers arrays */
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
- void *buf = gpr_malloc(pfd_size + watch_size);
- pfds = (struct pollfd *)buf;
- watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
+ void* buf = gpr_malloc(pfd_size + watch_size);
+ pfds = (struct pollfd*)buf;
+ watchers = (grpc_fd_watcher*)(void*)((char*)buf + pfd_size);
}
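
pollset_work sizes a single buffer for both the pollfd array and the watcher array (fd_count + 2 entries each) and carves it in two, so at most one allocation happens per poll cycle, and the inline buffer above avoids even that for small pollsets. A standalone sketch of the carve; the watcher type is a stand-in for grpc_fd_watcher, and the layout relies on pollfd's size keeping the second array adequately aligned, as the original does:

#include <poll.h>
#include <stdlib.h>

typedef struct { struct pollfd* pfd; void* fd; } watcher;  /* stand-in */

/* Carve one heap block into a pollfd array followed by a watcher array.
   Freeing *buf releases both at once. */
static void alloc_poll_buffers(size_t n, struct pollfd** pfds,
                               watcher** watchers, void** buf) {
  const size_t pfd_size = sizeof(**pfds) * n;
  const size_t watch_size = sizeof(**watchers) * n;
  *buf = malloc(pfd_size + watch_size);
  *pfds = (struct pollfd*)*buf;
  *watchers = (watcher*)(void*)((char*)*buf + pfd_size);
}
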
fd_count = 0;
@@ -979,7 +979,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_unlock(&pollset->mu);
for (i = 1; i < pfd_count; i++) {
- grpc_fd *fd = watchers[i].fd;
+ grpc_fd* fd = watchers[i].fd;
pfds[i].events = (short)fd_begin_poll(fd, pollset, &worker, POLLIN,
POLLOUT, &watchers[i]);
GRPC_FD_UNREF(fd, "multipoller_start");
@@ -1107,8 +1107,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return error;
}
-static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(!pollset->shutting_down);
pollset->shutting_down = 1;
pollset->shutdown_done = closure;
@@ -1122,7 +1122,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis deadline) {
if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
if (deadline == 0) return 0;
@@ -1136,22 +1136,22 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
* pollset_set_posix.c
*/
-static grpc_pollset_set *pollset_set_create(void) {
- grpc_pollset_set *pollset_set =
- (grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set));
+static grpc_pollset_set* pollset_set_create(void) {
+ grpc_pollset_set* pollset_set =
+ (grpc_pollset_set*)gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
-static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set) {
+static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
for (i = 0; i < pollset_set->pollset_count; i++) {
- grpc_pollset *pollset = pollset_set->pollsets[i];
+ grpc_pollset* pollset = pollset_set->pollsets[i];
gpr_mu_lock(&pollset->mu);
pollset->pollset_set_count--;
/* check shutdown */
@@ -1170,9 +1170,9 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(pollset_set);
}
-static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
+static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
size_t i, j;
gpr_mu_lock(&pollset->mu);
pollset->pollset_set_count++;
@@ -1181,7 +1181,7 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
- pollset_set->pollsets = (grpc_pollset **)gpr_realloc(
+ pollset_set->pollsets = (grpc_pollset**)gpr_realloc(
pollset_set->pollsets,
pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
}
@@ -1198,15 +1198,15 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
-static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
+static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->pollset_count; i++) {
if (pollset_set->pollsets[i] == pollset) {
pollset_set->pollset_count--;
- GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
+ GPR_SWAP(grpc_pollset*, pollset_set->pollsets[i],
pollset_set->pollsets[pollset_set->pollset_count]);
break;
}
@@ -1225,14 +1225,14 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
}
}
-static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
size_t i, j;
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
- bag->pollset_sets = (grpc_pollset_set **)gpr_realloc(
+ bag->pollset_sets = (grpc_pollset_set**)gpr_realloc(
bag->pollset_sets,
bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
@@ -1249,15 +1249,15 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&bag->mu);
}
-static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
size_t i;
gpr_mu_lock(&bag->mu);
for (i = 0; i < bag->pollset_set_count; i++) {
if (bag->pollset_sets[i] == item) {
bag->pollset_set_count--;
- GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
+ GPR_SWAP(grpc_pollset_set*, bag->pollset_sets[i],
bag->pollset_sets[bag->pollset_set_count]);
break;
}
@@ -1265,13 +1265,13 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&bag->mu);
}
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
+static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
- pollset_set->fds = (grpc_fd **)gpr_realloc(
+ pollset_set->fds = (grpc_fd**)gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
@@ -1285,14 +1285,14 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
-static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
+static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
size_t i;
gpr_mu_lock(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
if (pollset_set->fds[i] == fd) {
pollset_set->fd_count--;
- GPR_SWAP(grpc_fd *, pollset_set->fds[i],
+ GPR_SWAP(grpc_fd*, pollset_set->fds[i],
pollset_set->fds[pollset_set->fd_count]);
GRPC_FD_UNREF(fd, "pollset_set");
break;
@@ -1308,10 +1308,10 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
* Condition Variable polling extensions
*/
-static void run_poll(void *args);
-static void cache_poller_locked(poll_args *args);
+static void run_poll(void* args);
+static void cache_poller_locked(poll_args* args);
-static void cache_insert_locked(poll_args *args) {
+static void cache_insert_locked(poll_args* args) {
uint32_t key = gpr_murmur_hash3(args->fds, args->nfds * sizeof(struct pollfd),
0xDEADBEEF);
key = key % poll_cache.size;
@@ -1324,13 +1324,13 @@ static void cache_insert_locked(poll_args *args) {
poll_cache.count++;
}
-static void init_result(poll_args *pargs) {
- pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result));
+static void init_result(poll_args* pargs) {
+ pargs->result = (poll_result*)gpr_malloc(sizeof(poll_result));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = NULL;
pargs->result->watchcount = 0;
pargs->result->fds =
- (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
+ (struct pollfd*)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@@ -1340,11 +1340,11 @@ static void init_result(poll_args *pargs) {
// Creates a poll_args object for the given arguments to poll().
// This function may return a poll_args that is already in the cache.
-static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
+static poll_args* get_poller_locked(struct pollfd* fds, nfds_t count) {
uint32_t key =
gpr_murmur_hash3(fds, count * sizeof(struct pollfd), 0xDEADBEEF);
key = key % poll_cache.size;
- poll_args *curr = poll_cache.active_pollers[key];
+ poll_args* curr = poll_cache.active_pollers[key];
while (curr) {
if (curr->nfds == count &&
memcmp(curr->fds, fds, count * sizeof(struct pollfd)) == 0) {
@@ -1355,7 +1355,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
}
if (poll_cache.free_pollers) {
- poll_args *pargs = poll_cache.free_pollers;
+ poll_args* pargs = poll_cache.free_pollers;
poll_cache.free_pollers = pargs->next;
if (poll_cache.free_pollers) {
poll_cache.free_pollers->prev = NULL;
@@ -1369,7 +1369,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
- poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args));
+ poll_args* pargs = (poll_args*)gpr_malloc(sizeof(struct poll_args));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@@ -1386,7 +1386,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
-static void cache_delete_locked(poll_args *args) {
+static void cache_delete_locked(poll_args* args) {
if (!args->prev) {
uint32_t key = gpr_murmur_hash3(
args->fds, args->nfds * sizeof(struct pollfd), 0xDEADBEEF);
@@ -1411,19 +1411,19 @@ static void cache_delete_locked(poll_args *args) {
poll_cache.free_pollers = args;
}
-static void cache_poller_locked(poll_args *args) {
+static void cache_poller_locked(poll_args* args) {
if (poll_cache.count + 1 > poll_cache.size / 2) {
- poll_args **old_active_pollers = poll_cache.active_pollers;
+ poll_args** old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
poll_cache.active_pollers =
- (poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size);
+ (poll_args**)gpr_malloc(sizeof(void*) * poll_cache.size);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
for (unsigned int i = 0; i < poll_cache.size / 2; i++) {
- poll_args *curr = old_active_pollers[i];
- poll_args *next = NULL;
+ poll_args* curr = old_active_pollers[i];
+ poll_args* next = NULL;
while (curr) {
next = curr->next;
cache_insert_locked(curr);
@@ -1436,7 +1436,7 @@ static void cache_poller_locked(poll_args *args) {
cache_insert_locked(args);
}
-static void cache_destroy_locked(poll_args *args) {
+static void cache_destroy_locked(poll_args* args) {
if (args->next) {
args->next->prev = args->prev;
}
@@ -1450,7 +1450,7 @@ static void cache_destroy_locked(poll_args *args) {
gpr_free(args);
}
-static void decref_poll_result(poll_result *res) {
+static void decref_poll_result(poll_result* res) {
if (gpr_unref(&res->refcount)) {
GPR_ASSERT(!res->watchers);
gpr_free(res->fds);
@@ -1458,7 +1458,7 @@ static void decref_poll_result(poll_result *res) {
}
}
-void remove_cvn(cv_node **head, cv_node *target) {
+void remove_cvn(cv_node** head, cv_node* target) {
if (target->next) {
target->next->prev = target->prev;
}
@@ -1473,17 +1473,17 @@ void remove_cvn(cv_node **head, cv_node *target) {
gpr_timespec thread_grace;
// Poll in a background thread
-static void run_poll(void *args) {
- poll_args *pargs = (poll_args *)args;
+static void run_poll(void* args) {
+ poll_args* pargs = (poll_args*)args;
while (1) {
- poll_result *result = pargs->result;
+ poll_result* result = pargs->result;
int retval = g_cvfds.poll(result->fds, result->nfds, CV_POLL_PERIOD_MS);
gpr_mu_lock(&g_cvfds.mu);
if (retval != 0) {
result->completed = 1;
result->retval = retval;
result->err = errno;
- cv_node *watcher = result->watchers;
+ cv_node* watcher = result->watchers;
while (watcher) {
gpr_cv_signal(watcher->cv);
watcher = watcher->next;
@@ -1514,20 +1514,20 @@ static void run_poll(void *args) {
}
// This function overrides poll() to handle condition variable wakeup fds
-static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
+static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
unsigned int i;
int res, idx;
- cv_node *pollcv;
+ cv_node* pollcv;
int skip_poll = 0;
nfds_t nsockfds = 0;
- poll_result *result = NULL;
+ poll_result* result = NULL;
gpr_mu_lock(&g_cvfds.mu);
- pollcv = (cv_node *)gpr_malloc(sizeof(cv_node));
+ pollcv = (cv_node*)gpr_malloc(sizeof(cv_node));
pollcv->next = NULL;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
- cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node));
+ cv_node* fd_cvs = (cv_node*)gpr_malloc(nfds * sizeof(cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
@@ -1559,8 +1559,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
- struct pollfd *pollfds =
- (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds);
+ struct pollfd* pollfds =
+ (struct pollfd*)gpr_malloc(sizeof(struct pollfd) * nsockfds);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@@ -1570,7 +1570,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
idx++;
}
}
- poll_args *pargs = get_poller_locked(pollfds, nsockfds);
+ poll_args* pargs = get_poller_locked(pollfds, nsockfds);
result = pargs->result;
pollcv->next = result->watchers;
pollcv->prev = NULL;
@@ -1623,8 +1623,7 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
- g_cvfds.cvfds =
- (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+ g_cvfds.cvfds = (fd_node*)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1641,7 +1640,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = NULL;
- poll_cache.active_pollers = (poll_args **)gpr_malloc(sizeof(void *) * 32);
+ poll_cache.active_pollers = (poll_args**)gpr_malloc(sizeof(void*) * 32);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@@ -1711,7 +1710,7 @@ static const grpc_event_engine_vtable vtable = {
shutdown_engine,
};
-const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
+const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request) {
if (!grpc_has_wakeup_fd()) {
return NULL;
}
@@ -1721,7 +1720,7 @@ const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) {
return &vtable;
}
-const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request) {
+const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request) {
global_cv_fd_table_init();
grpc_enable_cv_wakeup_fds(1);
if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
diff --git a/src/core/lib/iomgr/ev_poll_posix.h b/src/core/lib/iomgr/ev_poll_posix.h
index 861257204b..626e95bc8f 100644
--- a/src/core/lib/iomgr/ev_poll_posix.h
+++ b/src/core/lib/iomgr/ev_poll_posix.h
@@ -25,8 +25,8 @@
extern "C" {
#endif
-const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request);
-const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request);
+const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request);
+const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 677ee675a6..f72f5088f0 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -50,14 +50,14 @@ grpc_poll_function_type grpc_poll_function = poll;
grpc_wakeup_fd grpc_global_wakeup_fd;
-static const grpc_event_engine_vtable *g_event_engine;
-static const char *g_poll_strategy_name = NULL;
+static const grpc_event_engine_vtable* g_event_engine;
+static const char* g_poll_strategy_name = NULL;
-typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)(
+typedef const grpc_event_engine_vtable* (*event_engine_factory_fn)(
bool explicit_request);
typedef struct {
- const char *name;
+ const char* name;
event_engine_factory_fn factory;
} event_engine_factory;
@@ -78,7 +78,7 @@ int dummy_poll(struct pollfd fds[], nfds_t nfds, int timeout) {
}
} // extern "C"
-const grpc_event_engine_vtable *init_non_polling(bool explicit_request) {
+const grpc_event_engine_vtable* init_non_polling(bool explicit_request) {
if (!explicit_request) {
return nullptr;
}
@@ -97,23 +97,23 @@ static const event_engine_factory g_factories[] = {
{"poll-cv", grpc_init_poll_cv_posix}, {"none", init_non_polling},
};
-static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
+static void add(const char* beg, const char* end, char*** ss, size_t* ns) {
size_t n = *ns;
size_t np = n + 1;
- char *s;
+ char* s;
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
- s = (char *)gpr_malloc(len + 1);
+ s = (char*)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
- *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
+ *ss = (char**)gpr_realloc(*ss, sizeof(char**) * np);
(*ss)[n] = s;
*ns = np;
}
-static void split(const char *s, char ***ss, size_t *ns) {
- const char *c = strchr(s, ',');
+static void split(const char* s, char*** ss, size_t* ns) {
+ const char* c = strchr(s, ',');
if (c == NULL) {
add(s, s + strlen(s), ss, ns);
} else {
@@ -122,11 +122,11 @@ static void split(const char *s, char ***ss, size_t *ns) {
}
}
-static bool is(const char *want, const char *have) {
+static bool is(const char* want, const char* have) {
return 0 == strcmp(want, "all") || 0 == strcmp(want, have);
}
-static void try_engine(const char *engine) {
+static void try_engine(const char* engine) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) {
if (is(engine, g_factories[i].name)) {
if ((g_event_engine = g_factories[i].factory(
@@ -141,26 +141,26 @@ static void try_engine(const char *engine) {
/* This should be used for testing purposes ONLY */
void grpc_set_event_engine_test_only(
- const grpc_event_engine_vtable *ev_engine) {
+ const grpc_event_engine_vtable* ev_engine) {
g_event_engine = ev_engine;
}
-const grpc_event_engine_vtable *grpc_get_event_engine_test_only() {
+const grpc_event_engine_vtable* grpc_get_event_engine_test_only() {
return g_event_engine;
}
/* Call this only after calling grpc_event_engine_init() */
-const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
+const char* grpc_get_poll_strategy_name() { return g_poll_strategy_name; }
void grpc_event_engine_init(void) {
grpc_register_tracer(&grpc_polling_trace);
- char *s = gpr_getenv("GRPC_POLL_STRATEGY");
+ char* s = gpr_getenv("GRPC_POLL_STRATEGY");
if (s == NULL) {
s = gpr_strdup("all");
}
- char **strings = NULL;
+ char** strings = NULL;
size_t nstrings = 0;
split(s, &strings, &nstrings);
@@ -185,109 +185,109 @@ void grpc_event_engine_shutdown(void) {
g_event_engine = NULL;
}
-grpc_fd *grpc_fd_create(int fd, const char *name) {
+grpc_fd* grpc_fd_create(int fd, const char* name) {
return g_event_engine->fd_create(fd, name);
}
-int grpc_fd_wrapped_fd(grpc_fd *fd) {
+int grpc_fd_wrapped_fd(grpc_fd* fd) {
return g_event_engine->fd_wrapped_fd(fd);
}
-void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
- int *release_fd, bool already_closed, const char *reason) {
+void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason) {
g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, already_closed,
reason);
}
-void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
+void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
g_event_engine->fd_shutdown(exec_ctx, fd, why);
}
-bool grpc_fd_is_shutdown(grpc_fd *fd) {
+bool grpc_fd_is_shutdown(grpc_fd* fd) {
return g_event_engine->fd_is_shutdown(fd);
}
-void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
g_event_engine->fd_notify_on_read(exec_ctx, fd, closure);
}
-void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure) {
+void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure) {
g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
}
size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
-void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
g_event_engine->pollset_init(pollset, mu);
}
-void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
}
-void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
g_event_engine->pollset_destroy(exec_ctx, pollset);
}
-grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
grpc_millis deadline) {
return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline);
}
-grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
}
-void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd) {
+void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd) {
g_event_engine->pollset_add_fd(exec_ctx, pollset, fd);
}
-grpc_pollset_set *grpc_pollset_set_create(void) {
+grpc_pollset_set* grpc_pollset_set_create(void) {
return g_event_engine->pollset_set_create();
}
-void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set) {
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set) {
g_event_engine->pollset_set_destroy(exec_ctx, pollset_set);
}
-void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
g_event_engine->pollset_set_add_pollset(exec_ctx, pollset_set, pollset);
}
-void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset) {
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset) {
g_event_engine->pollset_set_del_pollset(exec_ctx, pollset_set, pollset);
}
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
g_event_engine->pollset_set_add_pollset_set(exec_ctx, bag, item);
}
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item) {
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item) {
g_event_engine->pollset_set_del_pollset_set(exec_ctx, bag, item);
}
-void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
g_event_engine->pollset_set_add_fd(exec_ctx, pollset_set, fd);
}
-void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd) {
+void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd) {
g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
}
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index bc4456c2a2..d719b8f3c9 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -38,50 +38,50 @@ typedef struct grpc_fd grpc_fd;
typedef struct grpc_event_engine_vtable {
size_t pollset_size;
- grpc_fd *(*fd_create)(int fd, const char *name);
- int (*fd_wrapped_fd)(grpc_fd *fd);
- void (*fd_orphan)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
- int *release_fd, bool already_closed, const char *reason);
- void (*fd_shutdown)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why);
- void (*fd_notify_on_read)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure);
- void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure);
- bool (*fd_is_shutdown)(grpc_fd *fd);
- grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd);
-
- void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu);
- void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure);
- void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
- grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker,
+ grpc_fd* (*fd_create)(int fd, const char* name);
+ int (*fd_wrapped_fd)(grpc_fd* fd);
+ void (*fd_orphan)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason);
+ void (*fd_shutdown)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
+ void (*fd_notify_on_read)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
+ void (*fd_notify_on_write)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
+ bool (*fd_is_shutdown)(grpc_fd* fd);
+ grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd);
+
+ void (*pollset_init)(grpc_pollset* pollset, gpr_mu** mu);
+ void (*pollset_shutdown)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure);
+ void (*pollset_destroy)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
+ grpc_error* (*pollset_work)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
grpc_millis deadline);
- grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker);
- void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd);
-
- grpc_pollset_set *(*pollset_set_create)(void);
- void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set);
- void (*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
- void (*pollset_set_del_pollset)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
- void (*pollset_set_add_pollset_set)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item);
- void (*pollset_set_del_pollset_set)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item);
- void (*pollset_set_add_fd)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
- void (*pollset_set_del_fd)(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
+ grpc_error* (*pollset_kick)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker);
+ void (*pollset_add_fd)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
+
+ grpc_pollset_set* (*pollset_set_create)(void);
+ void (*pollset_set_destroy)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set);
+ void (*pollset_set_add_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
+ void (*pollset_set_del_pollset)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
+ void (*pollset_set_add_pollset_set)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item);
+ void (*pollset_set_del_pollset_set)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item);
+ void (*pollset_set_add_fd)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
+ void (*pollset_set_del_fd)(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
void (*shutdown_engine)(void);
} grpc_event_engine_vtable;
@@ -90,15 +90,15 @@ void grpc_event_engine_init(void);
void grpc_event_engine_shutdown(void);
/* Return the name of the poll strategy */
-const char *grpc_get_poll_strategy_name();
+const char* grpc_get_poll_strategy_name();
/* Create a wrapped file descriptor.
Requires fd is a non-blocking file descriptor.
This takes ownership of closing fd. */
-grpc_fd *grpc_fd_create(int fd, const char *name);
+grpc_fd* grpc_fd_create(int fd, const char* name);
/* Return the wrapped fd, or -1 if it has been released or closed. */
-int grpc_fd_wrapped_fd(grpc_fd *fd);
+int grpc_fd_wrapped_fd(grpc_fd* fd);
/* Releases fd to be asynchronously destroyed.
on_done is called when the underlying file descriptor is definitely close()d.
@@ -107,14 +107,14 @@ int grpc_fd_wrapped_fd(grpc_fd *fd);
Requires: *fd initialized; no outstanding notify_on_read or
notify_on_write.
MUST NOT be called with a pollset lock taken */
-void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
- int *release_fd, bool already_closed, const char *reason);
+void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
+ int* release_fd, bool already_closed, const char* reason);
/* Has grpc_fd_shutdown been called on an fd? */
-bool grpc_fd_is_shutdown(grpc_fd *fd);
+bool grpc_fd_is_shutdown(grpc_fd* fd);
/* Cause any current and future callbacks to fail. */
-void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why);
+void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
/* Register read interest, causing read_cb to be called once when fd becomes
readable, on deadline specified by deadline, or on shutdown triggered by
@@ -129,38 +129,38 @@ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why);
underlying platform. This means that users must drain fd in read_cb before
calling notify_on_read again. Users are also expected to handle spurious
   events, i.e. read_cb may be called when nothing is readable from fd */
-void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure);
+void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
/* Exactly the same semantics as above, except based on writable events. */
-void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- grpc_closure *closure);
+void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ grpc_closure* closure);
/* Return the read notifier pollset from the fd */
-grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
- grpc_fd *fd);
+grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_fd* fd);
/* pollset_posix functions */
/* Add an fd to a pollset */
-void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- struct grpc_fd *fd);
+void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ struct grpc_fd* fd);
/* pollset_set_posix functions */
-void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
-void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set, grpc_fd *fd);
+void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
+void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set, grpc_fd* fd);
/* override to allow tests to hook poll() usage */
-typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
+typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
/* WARNING: The following two functions should be used for testing purposes
* ONLY */
-void grpc_set_event_engine_test_only(const grpc_event_engine_vtable *);
-const grpc_event_engine_vtable *grpc_get_event_engine_test_only();
+void grpc_set_event_engine_test_only(const grpc_event_engine_vtable*);
+const grpc_event_engine_vtable* grpc_get_event_engine_test_only();
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 0394a00f3e..0a0ed8a055 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -25,7 +25,7 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/profiling/timers.h"
-bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx) {
if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
if (exec_ctx->check_ready_to_finish(exec_ctx,
exec_ctx->check_ready_to_finish_arg)) {
@@ -38,26 +38,26 @@ bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
}
}
-bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
+bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
return false;
}
-bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
+bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
return true;
}
-bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx) {
+bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx) {
return exec_ctx->active_combiner != NULL ||
!grpc_closure_list_empty(exec_ctx->closure_list);
}
-void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
+void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx) {
exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
grpc_exec_ctx_flush(exec_ctx);
}
-static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
+static void exec_ctx_run(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
#ifndef NDEBUG
closure->scheduled = false;
if (GRPC_TRACER_ON(grpc_trace_closure)) {
@@ -76,16 +76,16 @@ static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
GRPC_ERROR_UNREF(error);
}
-bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
+bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx) {
bool did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
for (;;) {
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
- grpc_closure *c = exec_ctx->closure_list.head;
+ grpc_closure* c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+ grpc_closure* next = c->next_data.next;
+ grpc_error* error = c->error_data.error;
did_something = true;
exec_ctx_run(exec_ctx, c, error);
c = next;
@@ -99,8 +99,8 @@ bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
return did_something;
}
-static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
+static void exec_ctx_sched(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
}
@@ -138,7 +138,7 @@ static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
return (gpr_atm)x;
}
-grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) {
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx) {
if (!exec_ctx->now_is_valid) {
exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
exec_ctx->now_is_valid = true;
@@ -146,7 +146,7 @@ grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx) {
return exec_ctx->now;
}
-void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx) {
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx) {
exec_ctx->now_is_valid = false;
}
@@ -179,4 +179,4 @@ grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
exec_ctx_run, exec_ctx_sched, "exec_ctx"};
static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
-grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
+grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 44b9be7aa9..bd27506152 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -68,13 +68,13 @@ typedef struct grpc_combiner grpc_combiner;
struct grpc_exec_ctx {
grpc_closure_list closure_list;
/** currently active combiner: updated only via combiner.c */
- grpc_combiner *active_combiner;
+ grpc_combiner* active_combiner;
/** last active combiner in the active combiner list */
- grpc_combiner *last_combiner;
+ grpc_combiner* last_combiner;
uintptr_t flags;
unsigned starting_cpu;
- void *check_ready_to_finish_arg;
- bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
+ void* check_ready_to_finish_arg;
+ bool (*check_ready_to_finish)(grpc_exec_ctx* exec_ctx, void* arg);
bool now_is_valid;
grpc_millis now;
@@ -93,33 +93,33 @@ struct grpc_exec_ctx {
#define GRPC_EXEC_CTX_INIT \
GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL)
-extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
+extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx;
-bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx);
+bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx);
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held.
* Returns true if work was performed, false otherwise. */
-bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
+bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx);
/** Finish any pending work for a grpc_exec_ctx. Must be called before
* the instance is destroyed, or work may be lost. */
-void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
+void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx);
/** Returns true if we'd like to leave this execution context as soon as
possible: useful for deciding whether to do something more or not depending
on outside context */
-bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx);
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx);
/** A finish check that is never ready to finish */
-bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
+bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
/** A finish check that is always ready to finish */
-bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
+bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_init(void);
void grpc_exec_ctx_global_shutdown(void);
-grpc_millis grpc_exec_ctx_now(grpc_exec_ctx *exec_ctx);
-void grpc_exec_ctx_invalidate_now(grpc_exec_ctx *exec_ctx);
+grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx);
+void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx);
gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index 92c3e70301..2786492494 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -44,7 +44,7 @@ typedef struct {
gpr_thd_id id;
} thread_state;
-static thread_state *g_thread_state;
+static thread_state* g_thread_state;
static size_t g_max_threads;
static gpr_atm g_cur_threads;
static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
@@ -54,15 +54,15 @@ GPR_TLS_DECL(g_this_thread_state);
static grpc_tracer_flag executor_trace =
GRPC_TRACER_INITIALIZER(false, "executor");
-static void executor_thread(void *arg);
+static void executor_thread(void* arg);
-static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
+static size_t run_closures(grpc_exec_ctx* exec_ctx, grpc_closure_list list) {
size_t n = 0;
- grpc_closure *c = list.head;
+ grpc_closure* c = list.head;
while (c != NULL) {
- grpc_closure *next = c->next_data.next;
- grpc_error *error = c->error_data.error;
+ grpc_closure* next = c->next_data.next;
+ grpc_error* error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
@@ -88,7 +88,7 @@ bool grpc_executor_is_threaded() {
return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
}
-void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
+void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool threading) {
gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
if (threading) {
if (cur_threads > 0) return;
@@ -96,7 +96,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
g_thread_state =
- (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
+ (thread_state*)gpr_zalloc(sizeof(thread_state) * g_max_threads);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
@@ -133,18 +133,18 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
}
-void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
+void grpc_executor_init(grpc_exec_ctx* exec_ctx) {
grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
-void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
+void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx) {
grpc_executor_set_threading(exec_ctx, false);
}
-static void executor_thread(void *arg) {
- thread_state *ts = (thread_state *)arg;
+static void executor_thread(void* arg) {
+ thread_state* ts = (thread_state*)arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_exec_ctx exec_ctx =
@@ -184,8 +184,8 @@ static void executor_thread(void *arg) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error, bool is_short) {
+static void executor_push(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error, bool is_short) {
bool retry_push;
if (is_short) {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
@@ -207,13 +207,13 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
return;
}
- thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
+ thread_state* ts = (thread_state*)gpr_tls_get(&g_this_thread_state);
if (ts == NULL) {
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
}
- thread_state *orig_ts = ts;
+ thread_state* orig_ts = ts;
bool try_new_thread;
for (;;) {
@@ -276,13 +276,13 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
} while (retry_push);
}
-static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
+static void executor_push_short(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
executor_push(exec_ctx, closure, error, true);
}
-static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_error *error) {
+static void executor_push_long(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_error* error) {
executor_push(exec_ctx, closure, error, false);
}
@@ -295,7 +295,7 @@ static const grpc_closure_scheduler_vtable executor_vtable_long = {
executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
-grpc_closure_scheduler *grpc_executor_scheduler(
+grpc_closure_scheduler* grpc_executor_scheduler(
grpc_executor_job_length length) {
return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
: &executor_scheduler_long;
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index ef5ac56c83..8418ace06e 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -35,19 +35,19 @@ typedef enum {
* This mechanism is meant to outsource work (grpc_closure instances) to a
* thread, for those cases where blocking isn't an option but there isn't a
* non-blocking solution available. */
-void grpc_executor_init(grpc_exec_ctx *exec_ctx);
+void grpc_executor_init(grpc_exec_ctx* exec_ctx);
-grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
+grpc_closure_scheduler* grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
-void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
+void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx);
/** Is the executor multi-threaded? */
bool grpc_executor_is_threaded();
/* enable/disable threading - must be called after grpc_executor_init and before
grpc_executor_shutdown */
-void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable);
+void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool enable);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/gethostname.h b/src/core/lib/iomgr/gethostname.h
index f335fea586..2e65b5ffbf 100644
--- a/src/core/lib/iomgr/gethostname.h
+++ b/src/core/lib/iomgr/gethostname.h
@@ -25,7 +25,7 @@ extern "C" {
// Returns the hostname of the local machine.
// Caller takes ownership of result.
-char *grpc_gethostname();
+char* grpc_gethostname();
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/gethostname_fallback.cc b/src/core/lib/iomgr/gethostname_fallback.cc
index e6f4c2f760..81e2c7aeec 100644
--- a/src/core/lib/iomgr/gethostname_fallback.cc
+++ b/src/core/lib/iomgr/gethostname_fallback.cc
@@ -23,6 +23,6 @@
#include <stddef.h>
-char *grpc_gethostname() { return NULL; }
+char* grpc_gethostname() { return NULL; }
#endif // GRPC_GETHOSTNAME_FALLBACK
diff --git a/src/core/lib/iomgr/gethostname_host_name_max.cc b/src/core/lib/iomgr/gethostname_host_name_max.cc
index cdaf097c3e..987ff1eac1 100644
--- a/src/core/lib/iomgr/gethostname_host_name_max.cc
+++ b/src/core/lib/iomgr/gethostname_host_name_max.cc
@@ -26,8 +26,8 @@
#include <grpc/support/alloc.h>
-char *grpc_gethostname() {
- char *hostname = (char *)gpr_malloc(HOST_NAME_MAX);
+char* grpc_gethostname() {
+ char* hostname = (char*)gpr_malloc(HOST_NAME_MAX);
if (gethostname(hostname, HOST_NAME_MAX) != 0) {
gpr_free(hostname);
return NULL;
diff --git a/src/core/lib/iomgr/gethostname_sysconf.cc b/src/core/lib/iomgr/gethostname_sysconf.cc
index 8441e0615e..e099fbd388 100644
--- a/src/core/lib/iomgr/gethostname_sysconf.cc
+++ b/src/core/lib/iomgr/gethostname_sysconf.cc
@@ -25,9 +25,9 @@
#include <grpc/support/alloc.h>
-char *grpc_gethostname() {
+char* grpc_gethostname() {
size_t host_name_max = (size_t)sysconf(_SC_HOST_NAME_MAX);
- char *hostname = (char *)gpr_malloc(host_name_max);
+ char* hostname = (char*)gpr_malloc(host_name_max);
if (gethostname(hostname, host_name_max) != 0) {
gpr_free(hostname);
return NULL;
diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc
index 78185cc084..6bbe5669c7 100644
--- a/src/core/lib/iomgr/iocp_windows.cc
+++ b/src/core/lib/iomgr/iocp_windows.cc
@@ -42,7 +42,7 @@ static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
-static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
+static DWORD deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
grpc_millis deadline) {
if (deadline == GRPC_MILLIS_INF_FUTURE) {
return INFINITE;
@@ -54,15 +54,15 @@ static DWORD deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
return static_cast<DWORD>(deadline - now);
}
-grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
grpc_millis deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
ULONG_PTR completion_key;
LPOVERLAPPED overlapped;
- grpc_winsocket *socket;
- grpc_winsocket_callback_info *info;
+ grpc_winsocket* socket;
+ grpc_winsocket_callback_info* info;
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
success =
GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
@@ -82,7 +82,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
abort();
}
- socket = (grpc_winsocket *)completion_key;
+ socket = (grpc_winsocket*)completion_key;
if (overlapped == &socket->write_info.overlapped) {
info = &socket->write_info;
} else if (overlapped == &socket->read_info.overlapped) {
@@ -134,13 +134,13 @@ void grpc_iocp_shutdown(void) {
GPR_ASSERT(CloseHandle(g_iocp));
}
-void grpc_iocp_add_socket(grpc_winsocket *socket) {
+void grpc_iocp_add_socket(grpc_winsocket* socket) {
HANDLE ret;
if (socket->added_to_iocp) return;
ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp,
(uintptr_t)socket, 0);
if (!ret) {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message);
gpr_free(utf8_message);
__debugbreak();
diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h
index 4efbc94645..ff9b31efe2 100644
--- a/src/core/lib/iomgr/iocp_windows.h
+++ b/src/core/lib/iomgr/iocp_windows.h
@@ -33,13 +33,13 @@ typedef enum {
GRPC_IOCP_WORK_KICK
} grpc_iocp_work_status;
-grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx,
+grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
grpc_millis deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_flush(void);
void grpc_iocp_shutdown(void);
-void grpc_iocp_add_socket(grpc_winsocket *);
+void grpc_iocp_add_socket(grpc_winsocket*);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index d6a5b4a76c..86ea08e901 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -45,7 +45,7 @@ static gpr_cv g_rcv;
static int g_shutdown;
static grpc_iomgr_object g_root_object;
-void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
+void grpc_iomgr_init(grpc_exec_ctx* exec_ctx) {
g_shutdown = 0;
gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv);
@@ -53,15 +53,15 @@ void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
grpc_executor_init(exec_ctx);
grpc_timer_list_init(exec_ctx);
g_root_object.next = g_root_object.prev = &g_root_object;
- g_root_object.name = (char *)"root";
+ g_root_object.name = (char*)"root";
grpc_network_status_init();
grpc_iomgr_platform_init();
}
-void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); }
+void grpc_iomgr_start(grpc_exec_ctx* exec_ctx) { grpc_timer_manager_init(); }
static size_t count_objects(void) {
- grpc_iomgr_object *obj;
+ grpc_iomgr_object* obj;
size_t n = 0;
for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
n++;
@@ -69,14 +69,14 @@ static size_t count_objects(void) {
return n;
}
-static void dump_objects(const char *kind) {
- grpc_iomgr_object *obj;
+static void dump_objects(const char* kind) {
+ grpc_iomgr_object* obj;
for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
gpr_log(GPR_DEBUG, "%s OBJECT: %s %p", kind, obj->name, obj);
}
}
-void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
+void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx) {
gpr_timespec shutdown_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
@@ -109,9 +109,10 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
- gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
- " iomgr objects before shutdown deadline: "
- "memory leaks are likely",
+ gpr_log(GPR_DEBUG,
+ "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
count_objects());
dump_objects("LEAKED");
abort();
@@ -121,9 +122,10 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
if (g_root_object.next != &g_root_object) {
- gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
- " iomgr objects before shutdown deadline: "
- "memory leaks are likely",
+ gpr_log(GPR_DEBUG,
+ "Failed to free %" PRIuPTR
+ " iomgr objects before shutdown deadline: "
+ "memory leaks are likely",
count_objects());
dump_objects("LEAKED");
}
@@ -148,7 +150,7 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
gpr_cv_destroy(&g_rcv);
}
-void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) {
+void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name) {
obj->name = gpr_strdup(name);
gpr_mu_lock(&g_mu);
obj->next = &g_root_object;
@@ -157,7 +159,7 @@ void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) {
gpr_mu_unlock(&g_mu);
}
-void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
+void grpc_iomgr_unregister_object(grpc_iomgr_object* obj) {
gpr_mu_lock(&g_mu);
obj->next->prev = obj->prev;
obj->prev->next = obj->next;
@@ -167,7 +169,7 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
}
bool grpc_iomgr_abort_on_leaks(void) {
- char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
+ char* env = gpr_getenv("GRPC_ABORT_ON_LEAKS");
bool should_we = gpr_is_true(env);
gpr_free(env);
return should_we;
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index 6c0a08b918..d1549c8c63 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -27,14 +27,14 @@ extern "C" {
#endif
/** Initializes the iomgr. */
-void grpc_iomgr_init(grpc_exec_ctx *exec_ctx);
+void grpc_iomgr_init(grpc_exec_ctx* exec_ctx);
/** Starts any background threads for iomgr. */
-void grpc_iomgr_start(grpc_exec_ctx *exec_ctx);
+void grpc_iomgr_start(grpc_exec_ctx* exec_ctx);
/** Signals the intention to shutdown the iomgr. Expects to be able to flush
* exec_ctx. */
-void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx);
+void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index 52db37c89a..b818c68da0 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -28,13 +28,13 @@ extern "C" {
#endif
typedef struct grpc_iomgr_object {
- char *name;
- struct grpc_iomgr_object *next;
- struct grpc_iomgr_object *prev;
+ char* name;
+ struct grpc_iomgr_object* next;
+ struct grpc_iomgr_object* prev;
} grpc_iomgr_object;
-void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name);
-void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
+void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name);
+void grpc_iomgr_unregister_object(grpc_iomgr_object* obj);
void grpc_iomgr_platform_init(void);
/** flush any globally queued work from iomgr */
diff --git a/src/core/lib/iomgr/load_file.cc b/src/core/lib/iomgr/load_file.cc
index 5cb4099ea4..97e448fb32 100644
--- a/src/core/lib/iomgr/load_file.cc
+++ b/src/core/lib/iomgr/load_file.cc
@@ -28,14 +28,14 @@
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/support/string.h"
-grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
- grpc_slice *output) {
- unsigned char *contents = NULL;
+grpc_error* grpc_load_file(const char* filename, int add_null_terminator,
+ grpc_slice* output) {
+ unsigned char* contents = NULL;
size_t contents_size = 0;
grpc_slice result = grpc_empty_slice();
- FILE *file;
+ FILE* file;
size_t bytes_read = 0;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
GRPC_SCHEDULING_START_BLOCKING_REGION;
file = fopen(filename, "rb");
@@ -47,8 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
- contents = (unsigned char *)gpr_malloc(contents_size +
- (add_null_terminator ? 1 : 0));
+ contents =
+ (unsigned char*)gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");
@@ -64,7 +64,7 @@ end:
*output = result;
if (file != NULL) fclose(file);
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out =
+ grpc_error* error_out =
grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to load file", &error, 1),
GRPC_ERROR_STR_FILENAME,
diff --git a/src/core/lib/iomgr/load_file.h b/src/core/lib/iomgr/load_file.h
index db1ffb3d63..5b367c189d 100644
--- a/src/core/lib/iomgr/load_file.h
+++ b/src/core/lib/iomgr/load_file.h
@@ -31,8 +31,8 @@ extern "C" {
/* Loads the content of a file into a slice. add_null_terminator will add
a NULL terminator if non-zero. */
-grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
- grpc_slice *slice);
+grpc_error* grpc_load_file(const char* filename, int add_null_terminator,
+ grpc_slice* slice);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index f967b22ba9..443a8375b2 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -60,31 +60,31 @@ extern grpc_tracer_flag grpc_polling_trace;
#define FD_SHUTDOWN_BIT ((gpr_atm)1)
-void grpc_lfev_init(gpr_atm *state) {
+void grpc_lfev_init(gpr_atm* state) {
gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY);
}
-void grpc_lfev_destroy(gpr_atm *state) {
+void grpc_lfev_destroy(gpr_atm* state) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (curr & FD_SHUTDOWN_BIT) {
- GRPC_ERROR_UNREF((grpc_error *)(curr & ~FD_SHUTDOWN_BIT));
+ GRPC_ERROR_UNREF((grpc_error*)(curr & ~FD_SHUTDOWN_BIT));
} else {
GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY);
}
}
-bool grpc_lfev_is_shutdown(gpr_atm *state) {
+bool grpc_lfev_is_shutdown(gpr_atm* state) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
return (curr & FD_SHUTDOWN_BIT) != 0;
}
-void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- grpc_closure *closure, const char *variable) {
+void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ grpc_closure* closure, const char* variable) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
- state, (void *)curr, closure);
+ state, (void*)curr, closure);
}
switch (curr) {
case CLOSURE_NOT_READY: {
@@ -124,7 +124,7 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
- grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
+ grpc_error* shutdown_err = (grpc_error*)(curr & ~FD_SHUTDOWN_BIT);
GRPC_CLOSURE_SCHED(exec_ctx, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@@ -142,15 +142,15 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
GPR_UNREACHABLE_CODE(return );
}
-bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- grpc_error *shutdown_err) {
+bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ grpc_error* shutdown_err) {
gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT;
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
- (void *)curr, grpc_error_string(shutdown_err));
+ (void*)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
case CLOSURE_READY:
@@ -177,7 +177,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
if (gpr_atm_full_cas(state, curr, new_state)) {
- GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr,
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
return true;
@@ -193,14 +193,14 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
GPR_UNREACHABLE_CODE(return false);
}
-void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- const char *variable) {
+void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ const char* variable) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
- (void *)curr);
+ (void*)curr);
}
switch (curr) {
@@ -228,7 +228,7 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) {
- GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
return;
}
/* else the state changed again (only possible by either a racing
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index 02229e569e..75526d6b9f 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -29,17 +29,17 @@
extern "C" {
#endif
-void grpc_lfev_init(gpr_atm *state);
-void grpc_lfev_destroy(gpr_atm *state);
-bool grpc_lfev_is_shutdown(gpr_atm *state);
+void grpc_lfev_init(gpr_atm* state);
+void grpc_lfev_destroy(gpr_atm* state);
+bool grpc_lfev_is_shutdown(gpr_atm* state);
-void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- grpc_closure *closure, const char *variable);
+void grpc_lfev_notify_on(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ grpc_closure* closure, const char* variable);
/* Returns true on first successful shutdown */
-bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- grpc_error *shutdown_err);
-void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
- const char *variable);
+bool grpc_lfev_set_shutdown(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ grpc_error* shutdown_err);
+void grpc_lfev_set_ready(grpc_exec_ctx* exec_ctx, gpr_atm* state,
+ const char* variable);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/network_status_tracker.cc b/src/core/lib/iomgr/network_status_tracker.cc
index 57a7faa9f1..73f8fbf9fb 100644
--- a/src/core/lib/iomgr/network_status_tracker.cc
+++ b/src/core/lib/iomgr/network_status_tracker.cc
@@ -27,8 +27,8 @@ void grpc_network_status_init(void) {
void grpc_destroy_network_status_monitor() {}
-void grpc_network_status_register_endpoint(grpc_endpoint *ep) { (void)ep; }
+void grpc_network_status_register_endpoint(grpc_endpoint* ep) { (void)ep; }
-void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { (void)ep; }
+void grpc_network_status_unregister_endpoint(grpc_endpoint* ep) { (void)ep; }
void grpc_network_status_shutdown_all_endpoints() {}
diff --git a/src/core/lib/iomgr/network_status_tracker.h b/src/core/lib/iomgr/network_status_tracker.h
index cba38d4530..3033e0a833 100644
--- a/src/core/lib/iomgr/network_status_tracker.h
+++ b/src/core/lib/iomgr/network_status_tracker.h
@@ -27,8 +27,8 @@ extern "C" {
void grpc_network_status_init(void);
void grpc_network_status_shutdown(void);
-void grpc_network_status_register_endpoint(grpc_endpoint *ep);
-void grpc_network_status_unregister_endpoint(grpc_endpoint *ep);
+void grpc_network_status_register_endpoint(grpc_endpoint* ep);
+void grpc_network_status_unregister_endpoint(grpc_endpoint* ep);
void grpc_network_status_shutdown_all_endpoints();
#ifdef __cplusplus
diff --git a/src/core/lib/iomgr/polling_entity.cc b/src/core/lib/iomgr/polling_entity.cc
index 8591a5518e..f0ef2cfe3d 100644
--- a/src/core/lib/iomgr/polling_entity.cc
+++ b/src/core/lib/iomgr/polling_entity.cc
@@ -22,7 +22,7 @@
#include "src/core/lib/iomgr/polling_entity.h"
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
- grpc_pollset_set *pollset_set) {
+ grpc_pollset_set* pollset_set) {
grpc_polling_entity pollent;
pollent.pollent.pollset_set = pollset_set;
pollent.tag = GRPC_POLLS_POLLSET_SET;
@@ -30,35 +30,35 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
}
grpc_polling_entity grpc_polling_entity_create_from_pollset(
- grpc_pollset *pollset) {
+ grpc_pollset* pollset) {
grpc_polling_entity pollent;
pollent.pollent.pollset = pollset;
pollent.tag = GRPC_POLLS_POLLSET;
return pollent;
}
-grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) {
+grpc_pollset* grpc_polling_entity_pollset(grpc_polling_entity* pollent) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
return pollent->pollent.pollset;
}
return NULL;
}
-grpc_pollset_set *grpc_polling_entity_pollset_set(
- grpc_polling_entity *pollent) {
+grpc_pollset_set* grpc_polling_entity_pollset_set(
+ grpc_polling_entity* pollent) {
if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
return pollent->pollent.pollset_set;
}
return NULL;
}
-bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) {
+bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent) {
return pollent->tag == GRPC_POLLS_NONE;
}
-void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_polling_entity *pollent,
- grpc_pollset_set *pss_dst) {
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
+ grpc_pollset_set* pss_dst) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
@@ -72,9 +72,9 @@ void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_polling_entity *pollent,
- grpc_pollset_set *pss_dst) {
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
+ grpc_pollset_set* pss_dst) {
if (pollent->tag == GRPC_POLLS_POLLSET) {
GPR_ASSERT(pollent->pollent.pollset != NULL);
grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 009f968fac..867e085153 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -38,36 +38,36 @@ typedef enum grpc_pollset_tag {
typedef struct grpc_polling_entity {
union {
- grpc_pollset *pollset;
- grpc_pollset_set *pollset_set;
+ grpc_pollset* pollset;
+ grpc_pollset_set* pollset_set;
} pollent;
grpc_pollset_tag tag;
} grpc_polling_entity;
grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
- grpc_pollset_set *pollset_set);
+ grpc_pollset_set* pollset_set);
grpc_polling_entity grpc_polling_entity_create_from_pollset(
- grpc_pollset *pollset);
+ grpc_pollset* pollset);
/** If \a pollent contains a pollset, return it. Otherwise, return NULL */
-grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent);
+grpc_pollset* grpc_polling_entity_pollset(grpc_polling_entity* pollent);
/** If \a pollent contains a pollset_set, return it. Otherwise, return NULL */
-grpc_pollset_set *grpc_polling_entity_pollset_set(grpc_polling_entity *pollent);
+grpc_pollset_set* grpc_polling_entity_pollset_set(grpc_polling_entity* pollent);
-bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent);
+bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent);
/** Add the pollset or pollset_set in \a pollent to the destination pollset_set
* \a * pss_dst */
-void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_polling_entity *pollent,
- grpc_pollset_set *pss_dst);
+void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
+ grpc_pollset_set* pss_dst);
/** Delete the pollset or pollset_set in \a pollent from the destination
* pollset_set \a * pss_dst */
-void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_polling_entity *pollent,
- grpc_pollset_set *pss_dst);
+void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_polling_entity* pollent,
+ grpc_pollset_set* pss_dst);
#ifdef __cplusplus
}
#endif
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 799fae154c..c99b930e8e 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -45,12 +45,12 @@ typedef struct grpc_pollset_worker grpc_pollset_worker;
size_t grpc_pollset_size(void);
/* Initialize a pollset: assumes *pollset contains all zeros */
-void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
+void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu);
/* Begin shutting down the pollset, and call closure when done.
* pollset's mutex must be held */
-void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure);
-void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure);
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
/* Do some work on a pollset.
May involve invoking asynchronous callbacks, or actually polling file
@@ -74,14 +74,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
May call grpc_closure_list_run on grpc_closure_list, without holding the
pollset
lock */
-grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
grpc_millis deadline) GRPC_MUST_USE_RESULT;
/* Break one polling thread out of polling work for this pollset.
If specific_worker is non-NULL, then kick that worker. */
-grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker)
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker)
GRPC_MUST_USE_RESULT;
#ifdef __cplusplus
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index 5455eda02f..0167a50a56 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -32,21 +32,21 @@ extern "C" {
typedef struct grpc_pollset_set grpc_pollset_set;
-grpc_pollset_set *grpc_pollset_set_create(void);
-void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set);
-void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
-void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *pollset_set,
- grpc_pollset *pollset);
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item);
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_pollset_set *bag,
- grpc_pollset_set *item);
+grpc_pollset_set* grpc_pollset_set_create(void);
+void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set);
+void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
+void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* pollset_set,
+ grpc_pollset* pollset);
+void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item);
+void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_pollset_set* bag,
+ grpc_pollset_set* item);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index b9901bf8ef..6b9c53c01c 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -55,17 +55,17 @@ gpr_mu grpc_polling_mu;
immediately in the next loop iteration.
Note: if a bug involving missing wakeups turns up in the future, try adding a
uv_async_t to kick the loop differently */
-uv_timer_t *dummy_uv_handle;
+uv_timer_t* dummy_uv_handle;
size_t grpc_pollset_size() { return sizeof(grpc_pollset); }
-void dummy_timer_cb(uv_timer_t *handle) {}
+void dummy_timer_cb(uv_timer_t* handle) {}
-void dummy_handle_close_cb(uv_handle_t *handle) { gpr_free(handle); }
+void dummy_handle_close_cb(uv_handle_t* handle) { gpr_free(handle); }
void grpc_pollset_global_init(void) {
gpr_mu_init(&grpc_polling_mu);
- dummy_uv_handle = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t));
+ dummy_uv_handle = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), dummy_uv_handle);
grpc_pollset_work_run_loop = 1;
}
@@ -73,22 +73,22 @@ void grpc_pollset_global_init(void) {
void grpc_pollset_global_shutdown(void) {
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_destroy(&grpc_polling_mu);
- uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb);
+ uv_close((uv_handle_t*)dummy_uv_handle, dummy_handle_close_cb);
}
-static void timer_run_cb(uv_timer_t *timer) {}
+static void timer_run_cb(uv_timer_t* timer) {}
-static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
+static void timer_close_cb(uv_handle_t* handle) { handle->data = (void*)1; }
-void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
GRPC_UV_ASSERT_SAME_THREAD();
*mu = &grpc_polling_mu;
uv_timer_init(uv_default_loop(), &pollset->timer);
pollset->shutting_down = 0;
}
-void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
GPR_ASSERT(!pollset->shutting_down);
GRPC_UV_ASSERT_SAME_THREAD();
pollset->shutting_down = 1;
@@ -102,11 +102,11 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
}
-void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
GRPC_UV_ASSERT_SAME_THREAD();
- uv_close((uv_handle_t *)&pollset->timer, timer_close_cb);
+ uv_close((uv_handle_t*)&pollset->timer, timer_close_cb);
// timer.data is a boolean indicating that the timer has finished closing
- pollset->timer.data = (void *)0;
+ pollset->timer.data = (void*)0;
if (grpc_pollset_work_run_loop) {
while (!pollset->timer.data) {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
@@ -114,8 +114,8 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
}
}
-grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
uint64_t timeout;
GRPC_UV_ASSERT_SAME_THREAD();
@@ -146,8 +146,8 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
return GRPC_ERROR_NONE;
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index 01aff02c36..5998b3f5bc 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -28,7 +28,7 @@
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_windows.h"
-#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
#ifndef NDEBUG
grpc_tracer_flag grpc_trace_fd_refcount =
@@ -36,7 +36,7 @@ grpc_tracer_flag grpc_trace_fd_refcount =
#endif
gpr_mu grpc_polling_mu;
-static grpc_pollset_worker *g_active_poller;
+static grpc_pollset_worker* g_active_poller;
static grpc_pollset_worker g_global_root_worker;
void grpc_pollset_global_init(void) {
@@ -49,22 +49,22 @@ void grpc_pollset_global_init(void) {
void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); }
-static void remove_worker(grpc_pollset_worker *worker,
+static void remove_worker(grpc_pollset_worker* worker,
grpc_pollset_worker_link_type type) {
worker->links[type].prev->links[type].next = worker->links[type].next;
worker->links[type].next->links[type].prev = worker->links[type].prev;
worker->links[type].next = worker->links[type].prev = worker;
}
-static int has_workers(grpc_pollset_worker *root,
+static int has_workers(grpc_pollset_worker* root,
grpc_pollset_worker_link_type type) {
return root->links[type].next != root;
}
-static grpc_pollset_worker *pop_front_worker(
- grpc_pollset_worker *root, grpc_pollset_worker_link_type type) {
+static grpc_pollset_worker* pop_front_worker(
+ grpc_pollset_worker* root, grpc_pollset_worker_link_type type) {
if (has_workers(root, type)) {
- grpc_pollset_worker *w = root->links[type].next;
+ grpc_pollset_worker* w = root->links[type].next;
remove_worker(w, type);
return w;
} else {
@@ -72,9 +72,9 @@ static grpc_pollset_worker *pop_front_worker(
}
}
-static void push_front_worker(grpc_pollset_worker *root,
+static void push_front_worker(grpc_pollset_worker* root,
grpc_pollset_worker_link_type type,
- grpc_pollset_worker *worker) {
+ grpc_pollset_worker* worker) {
worker->links[type].prev = root;
worker->links[type].next = worker->links[type].prev->links[type].next;
worker->links[type].prev->links[type].next =
@@ -88,15 +88,15 @@ size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
set of features for the sake of the rest of grpc. But grpc_pollset_work
won't actually do any polling, and will return as quickly as possible. */
-void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
+void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
*mu = &grpc_polling_mu;
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
&pollset->root_worker;
}
-void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure) {
+void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure) {
pollset->shutting_down = 1;
grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
if (!pollset->is_iocp_worker) {
@@ -106,10 +106,10 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
}
-void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {}
+void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {}
-grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker_hdl,
+grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker_hdl,
grpc_millis deadline) {
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
@@ -124,7 +124,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_cv_init(&worker.cv);
if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
if (g_active_poller == NULL) {
- grpc_pollset_worker *next_worker;
+ grpc_pollset_worker* next_worker;
/* become poller */
pollset->is_iocp_worker = 1;
g_active_poller = &worker;
@@ -184,8 +184,8 @@ done:
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p,
- grpc_pollset_worker *specific_worker) {
+grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+ grpc_pollset_worker* specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
for (specific_worker =
diff --git a/src/core/lib/iomgr/pollset_windows.h b/src/core/lib/iomgr/pollset_windows.h
index 2479b25286..aaeb5f585f 100644
--- a/src/core/lib/iomgr/pollset_windows.h
+++ b/src/core/lib/iomgr/pollset_windows.h
@@ -39,8 +39,8 @@ typedef enum {
} grpc_pollset_worker_link_type;
typedef struct grpc_pollset_worker_link {
- struct grpc_pollset_worker *next;
- struct grpc_pollset_worker *prev;
+ struct grpc_pollset_worker* next;
+ struct grpc_pollset_worker* prev;
} grpc_pollset_worker_link;
struct grpc_pollset;
@@ -49,7 +49,7 @@ typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker {
gpr_cv cv;
int kicked;
- struct grpc_pollset *pollset;
+ struct grpc_pollset* pollset;
grpc_pollset_worker_link links[GRPC_POLLSET_WORKER_LINK_TYPES];
} grpc_pollset_worker;
@@ -58,7 +58,7 @@ struct grpc_pollset {
int kicked_without_pollers;
int is_iocp_worker;
grpc_pollset_worker root_worker;
- grpc_closure *on_shutdown;
+ grpc_closure* on_shutdown;
};
void grpc_pollset_global_init(void);
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 5f0634299e..847e10f177 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -36,25 +36,25 @@ typedef struct {
typedef struct {
size_t naddrs;
- grpc_resolved_address *addrs;
+ grpc_resolved_address* addrs;
} grpc_resolved_addresses;
/* Asynchronously resolve addr. Use default_port if a port isn't designated
in addr, otherwise use the port in addr. */
/* TODO(ctiller): add a timeout here */
-extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addresses);
+extern void (*grpc_resolve_address)(grpc_exec_ctx* exec_ctx, const char* addr,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses);
/* Destroy resolved addresses */
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addresses);
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addresses);
/* Resolve addr in a blocking fashion. Returns NULL on failure. On success,
result must be freed with grpc_resolved_addresses_destroy. */
-extern grpc_error *(*grpc_blocking_resolve_address)(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses);
+extern grpc_error* (*grpc_blocking_resolve_address)(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index 1b783495df..e27c730204 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -39,16 +39,16 @@
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
-static grpc_error *blocking_resolve_address_impl(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) {
+static grpc_error* blocking_resolve_address_impl(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) {
struct addrinfo hints;
struct addrinfo *result = NULL, *resp;
- char *host;
- char *port;
+ char* host;
+ char* port;
int s;
size_t i;
- grpc_error *err;
+ grpc_error* err;
if (name[0] == 'u' && name[1] == 'n' && name[2] == 'i' && name[3] == 'x' &&
name[4] == ':' && name[5] != 0) {
@@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl(
if (s != 0) {
/* Retry if well-known service name is recognized */
- const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char* svc[][2] = {{"http", "80"}, {"https", "443"}};
for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(port, svc[i][0]) == 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
@@ -113,12 +113,12 @@ static grpc_error *blocking_resolve_address_impl(
/* Success path: set addrs non-NULL, fill it in */
*addresses =
- (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc(
sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
@@ -137,24 +137,24 @@ done:
return err;
}
-grpc_error *(*grpc_blocking_resolve_address)(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) = blocking_resolve_address_impl;
+grpc_error* (*grpc_blocking_resolve_address)(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
typedef struct {
- char *name;
- char *default_port;
- grpc_closure *on_done;
- grpc_resolved_addresses **addrs_out;
+ char* name;
+ char* default_port;
+ grpc_closure* on_done;
+ grpc_resolved_addresses** addrs_out;
grpc_closure request_closure;
- void *arg;
+ void* arg;
} request;
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
-static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
- grpc_error *error) {
- request *r = (request *)rp;
+static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
+ grpc_error* error) {
+ request* r = (request*)rp;
GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
@@ -163,19 +163,19 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
gpr_free(r);
}
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
if (addrs != NULL) {
gpr_free(addrs->addrs);
}
gpr_free(addrs);
}
-static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
- request *r = (request *)gpr_malloc(sizeof(request));
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
+ request* r = (request*)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
@@ -186,8 +186,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
}
void (*grpc_resolve_address)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = resolve_address_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = resolve_address_impl;
#endif
diff --git a/src/core/lib/iomgr/resolve_address_uv.cc b/src/core/lib/iomgr/resolve_address_uv.cc
index 4f7f234877..6d09fd1d02 100644
--- a/src/core/lib/iomgr/resolve_address_uv.cc
+++ b/src/core/lib/iomgr/resolve_address_uv.cc
@@ -38,23 +38,23 @@
#include <string.h>
typedef struct request {
- grpc_closure *on_done;
- grpc_resolved_addresses **addresses;
- struct addrinfo *hints;
- char *host;
- char *port;
+ grpc_closure* on_done;
+ grpc_resolved_addresses** addresses;
+ struct addrinfo* hints;
+ char* host;
+ char* port;
} request;
-static int retry_named_port_failure(int status, request *r,
+static int retry_named_port_failure(int status, request* r,
uv_getaddrinfo_cb getaddrinfo_cb) {
if (status != 0) {
// This loop is copied from resolve_address_posix.c
- const char *svc[][2] = {{"http", "80"}, {"https", "443"}};
+ const char* svc[][2] = {{"http", "80"}, {"https", "443"}};
for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) {
if (strcmp(r->port, svc[i][0]) == 0) {
int retry_status;
- uv_getaddrinfo_t *req =
- (uv_getaddrinfo_t *)gpr_malloc(sizeof(uv_getaddrinfo_t));
+ uv_getaddrinfo_t* req =
+ (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
r->port = gpr_strdup(svc[i][1]);
retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb,
@@ -73,12 +73,12 @@ static int retry_named_port_failure(int status, request *r,
return 1;
}
-static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
- grpc_resolved_addresses **addresses) {
- struct addrinfo *resp;
+static grpc_error* handle_addrinfo_result(int status, struct addrinfo* result,
+ grpc_resolved_addresses** addresses) {
+ struct addrinfo* resp;
size_t i;
if (status != 0) {
- grpc_error *error;
+ grpc_error* error;
*addresses = NULL;
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
error =
@@ -87,12 +87,12 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
return error;
}
(*addresses) =
- (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc(
sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
@@ -103,7 +103,7 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
{
for (i = 0; i < (*addresses)->naddrs; i++) {
- char *buf;
+ char* buf;
grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
gpr_free(buf);
}
@@ -111,13 +111,13 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
return GRPC_ERROR_NONE;
}
-static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
- struct addrinfo *res) {
- request *r = (request *)req->data;
+static void getaddrinfo_callback(uv_getaddrinfo_t* req, int status,
+ struct addrinfo* res) {
+ request* r = (request*)req->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *error;
+ grpc_error* error;
int retry_status;
- char *port = r->port;
+ char* port = r->port;
gpr_free(req);
retry_status = retry_named_port_failure(status, r, getaddrinfo_callback);
@@ -139,14 +139,14 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
uv_freeaddrinfo(res);
}
-static grpc_error *try_split_host_port(const char *name,
- const char *default_port, char **host,
- char **port) {
+static grpc_error* try_split_host_port(const char* name,
+ const char* default_port, char** host,
+ char** port) {
/* parse name, splitting it into host and port parts */
- grpc_error *error;
+ grpc_error* error;
gpr_split_host_port(name, host, port);
if (*host == NULL) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
@@ -155,7 +155,7 @@ static grpc_error *try_split_host_port(const char *name,
if (*port == NULL) {
// TODO(murgatroid99): add tests for this case
if (default_port == NULL) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
@@ -166,15 +166,15 @@ static grpc_error *try_split_host_port(const char *name,
return GRPC_ERROR_NONE;
}
-static grpc_error *blocking_resolve_address_impl(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) {
- char *host;
- char *port;
+static grpc_error* blocking_resolve_address_impl(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) {
+ char* host;
+ char* port;
struct addrinfo hints;
uv_getaddrinfo_t req;
int s;
- grpc_error *err;
+ grpc_error* err;
int retry_status;
request r;
@@ -213,28 +213,28 @@ done:
return err;
}
-grpc_error *(*grpc_blocking_resolve_address)(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) = blocking_resolve_address_impl;
+grpc_error* (*grpc_blocking_resolve_address)(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
if (addrs != NULL) {
gpr_free(addrs->addrs);
}
gpr_free(addrs);
}
-static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addrs) {
- uv_getaddrinfo_t *req = NULL;
- request *r = NULL;
- struct addrinfo *hints = NULL;
- char *host = NULL;
- char *port = NULL;
- grpc_error *err;
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) {
+ uv_getaddrinfo_t* req = NULL;
+ request* r = NULL;
+ struct addrinfo* hints = NULL;
+ char* host = NULL;
+ char* port = NULL;
+ grpc_error* err;
int s;
GRPC_UV_ASSERT_SAME_THREAD();
err = try_split_host_port(name, default_port, &host, &port);
@@ -244,16 +244,16 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
gpr_free(port);
return;
}
- r = (request *)gpr_malloc(sizeof(request));
+ r = (request*)gpr_malloc(sizeof(request));
r->on_done = on_done;
r->addresses = addrs;
r->host = host;
r->port = port;
- req = (uv_getaddrinfo_t *)gpr_malloc(sizeof(uv_getaddrinfo_t));
+ req = (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t));
req->data = r;
/* Call getaddrinfo */
- hints = (addrinfo *)gpr_malloc(sizeof(struct addrinfo));
+ hints = (addrinfo*)gpr_malloc(sizeof(struct addrinfo));
memset(hints, 0, sizeof(struct addrinfo));
hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */
hints->ai_socktype = SOCK_STREAM; /* stream socket */
@@ -278,8 +278,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
}
void (*grpc_resolve_address)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addrs) = resolve_address_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addrs) = resolve_address_impl;
#endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index 451f01a701..d9fc17a9f4 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -41,28 +41,28 @@
#include "src/core/lib/support/string.h"
typedef struct {
- char *name;
- char *default_port;
+ char* name;
+ char* default_port;
grpc_closure request_closure;
- grpc_closure *on_done;
- grpc_resolved_addresses **addresses;
+ grpc_closure* on_done;
+ grpc_resolved_addresses** addresses;
} request;
-static grpc_error *blocking_resolve_address_impl(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) {
+static grpc_error* blocking_resolve_address_impl(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) {
struct addrinfo hints;
struct addrinfo *result = NULL, *resp;
- char *host;
- char *port;
+ char* host;
+ char* port;
int s;
size_t i;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
/* parse name, splitting it into host and port parts */
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
@@ -70,7 +70,7 @@ static grpc_error *blocking_resolve_address_impl(
}
if (port == NULL) {
if (default_port == NULL) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
@@ -95,12 +95,12 @@ static grpc_error *blocking_resolve_address_impl(
/* Success path: set addrs non-NULL, fill it in */
(*addresses) =
- (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
- (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
+ (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc(
sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
@@ -111,7 +111,7 @@ static grpc_error *blocking_resolve_address_impl(
{
for (i = 0; i < (*addresses)->naddrs; i++) {
- char *buf;
+ char* buf;
grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0);
gpr_free(buf);
}
@@ -126,15 +126,15 @@ done:
return error;
}
-grpc_error *(*grpc_blocking_resolve_address)(
- const char *name, const char *default_port,
- grpc_resolved_addresses **addresses) = blocking_resolve_address_impl;
+grpc_error* (*grpc_blocking_resolve_address)(
+ const char* name, const char* default_port,
+ grpc_resolved_addresses** addresses) = blocking_resolve_address_impl;
/* Callback to be passed to grpc_executor to asynch-ify
* grpc_blocking_resolve_address */
-static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
- grpc_error *error) {
- request *r = (request *)rp;
+static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
+ grpc_error* error) {
+ request* r = (request*)rp;
if (error == GRPC_ERROR_NONE) {
error =
grpc_blocking_resolve_address(r->name, r->default_port, r->addresses);
@@ -147,19 +147,19 @@ static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
gpr_free(r);
}
-void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) {
+void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) {
if (addrs != NULL) {
gpr_free(addrs->addrs);
}
gpr_free(addrs);
}
-static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
- const char *default_port,
- grpc_pollset_set *interested_parties,
- grpc_closure *on_done,
- grpc_resolved_addresses **addresses) {
- request *r = (request *)gpr_malloc(sizeof(request));
+static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
+ const char* default_port,
+ grpc_pollset_set* interested_parties,
+ grpc_closure* on_done,
+ grpc_resolved_addresses** addresses) {
+ request* r = (request*)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
@@ -170,8 +170,8 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
}
void (*grpc_resolve_address)(
- grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
- grpc_pollset_set *interested_parties, grpc_closure *on_done,
- grpc_resolved_addresses **addresses) = resolve_address_impl;
+ grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+ grpc_pollset_set* interested_parties, grpc_closure* on_done,
+ grpc_resolved_addresses** addresses) = resolve_address_impl;
#endif
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index ecb5747da8..5077554a56 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -38,8 +38,8 @@ grpc_tracer_flag grpc_resource_quota_trace =
/* Internal linked list pointers for a resource user */
typedef struct {
- grpc_resource_user *next;
- grpc_resource_user *prev;
+ grpc_resource_user* next;
+ grpc_resource_user* prev;
} grpc_resource_user_link;
/* Resource users are kept in (potentially) several intrusive linked lists
@@ -60,7 +60,7 @@ typedef enum {
struct grpc_resource_user {
/* The quota this resource user consumes from */
- grpc_resource_quota *resource_quota;
+ grpc_resource_quota* resource_quota;
/* Closure to schedule an allocation under the resource quota combiner lock */
grpc_closure allocate_closure;
@@ -97,10 +97,10 @@ struct grpc_resource_user {
/* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
*/
- grpc_closure *reclaimers[2];
+ grpc_closure* reclaimers[2];
/* Reclaimers just posted: once we're in the combiner lock, we'll move them
to the array above */
- grpc_closure *new_reclaimers[2];
+ grpc_closure* new_reclaimers[2];
/* Trampoline closures to finish reclamation and re-enter the quota combiner
lock */
grpc_closure post_reclaimer_closure[2];
@@ -113,7 +113,7 @@ struct grpc_resource_user {
grpc_resource_user_link links[GRPC_RULIST_COUNT];
/* The name of this resource user, for debugging/tracing */
- char *name;
+ char* name;
};
struct grpc_resource_quota {
@@ -126,7 +126,7 @@ struct grpc_resource_quota {
/* Master combiner lock: all activity on a quota executes under this combiner
* (so no mutex is needed for this data structure) */
- grpc_combiner *combiner;
+ grpc_combiner* combiner;
/* Size of the resource quota */
int64_t size;
/* Amount of free memory in the resource quota */
@@ -146,26 +146,26 @@ struct grpc_resource_quota {
/* This is only really usable for debugging: it's always a stale pointer, but
a stale pointer that might just be fresh enough to guide us to where the
reclamation system is stuck */
- grpc_closure *debug_only_last_initiated_reclaimer;
- grpc_resource_user *debug_only_last_reclaimer_resource_user;
+ grpc_closure* debug_only_last_initiated_reclaimer;
+ grpc_resource_user* debug_only_last_reclaimer_resource_user;
/* Roots of all resource user lists */
- grpc_resource_user *roots[GRPC_RULIST_COUNT];
+ grpc_resource_user* roots[GRPC_RULIST_COUNT];
- char *name;
+ char* name;
};
-static void ru_unref_by(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, gpr_atm amount);
+static void ru_unref_by(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, gpr_atm amount);
/*******************************************************************************
* list management
*/
-static void rulist_add_head(grpc_resource_user *resource_user,
+static void rulist_add_head(grpc_resource_user* resource_user,
grpc_rulist list) {
- grpc_resource_quota *resource_quota = resource_user->resource_quota;
- grpc_resource_user **root = &resource_quota->roots[list];
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ grpc_resource_user** root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
@@ -179,10 +179,10 @@ static void rulist_add_head(grpc_resource_user *resource_user,
}
}
-static void rulist_add_tail(grpc_resource_user *resource_user,
+static void rulist_add_tail(grpc_resource_user* resource_user,
grpc_rulist list) {
- grpc_resource_quota *resource_quota = resource_user->resource_quota;
- grpc_resource_user **root = &resource_quota->roots[list];
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
+ grpc_resource_user** root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
@@ -195,15 +195,15 @@ static void rulist_add_tail(grpc_resource_user *resource_user,
}
}
-static bool rulist_empty(grpc_resource_quota *resource_quota,
+static bool rulist_empty(grpc_resource_quota* resource_quota,
grpc_rulist list) {
return resource_quota->roots[list] == NULL;
}
-static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
+static grpc_resource_user* rulist_pop_head(grpc_resource_quota* resource_quota,
grpc_rulist list) {
- grpc_resource_user **root = &resource_quota->roots[list];
- grpc_resource_user *resource_user = *root;
+ grpc_resource_user** root = &resource_quota->roots[list];
+ grpc_resource_user* resource_user = *root;
if (resource_user == NULL) {
return NULL;
}
@@ -220,9 +220,9 @@ static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
return resource_user;
}
-static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) {
+static void rulist_remove(grpc_resource_user* resource_user, grpc_rulist list) {
if (resource_user->links[list].next == NULL) return;
- grpc_resource_quota *resource_quota = resource_user->resource_quota;
+ grpc_resource_quota* resource_quota = resource_user->resource_quota;
if (resource_quota->roots[list] == resource_user) {
resource_quota->roots[list] = resource_user->links[list].next;
if (resource_quota->roots[list] == resource_user) {
@@ -240,15 +240,15 @@ static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) {
* resource quota state machine
*/
-static bool rq_alloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota);
+static bool rq_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota);
static bool rq_reclaim_from_per_user_free_pool(
- grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
-static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota, bool destructive);
+ grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota);
+static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota, bool destructive);
-static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
- grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
+static void rq_step(grpc_exec_ctx* exec_ctx, void* rq, grpc_error* error) {
+ grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
resource_quota->step_scheduled = false;
do {
if (rq_alloc(exec_ctx, resource_quota)) goto done;
@@ -262,8 +262,8 @@ done:
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
}
-static void rq_step_sched(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota) {
+static void rq_step_sched(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
if (resource_quota->step_scheduled) return;
resource_quota->step_scheduled = true;
grpc_resource_quota_ref_internal(resource_quota);
@@ -273,13 +273,12 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
/* update the atomically available resource estimate - use no barriers since
timeliness of delivery really doesn't matter much */
-static void rq_update_estimate(grpc_resource_quota *resource_quota) {
+static void rq_update_estimate(grpc_resource_quota* resource_quota) {
gpr_atm memory_usage_estimation = MEMORY_USAGE_ESTIMATION_MAX;
if (resource_quota->size != 0) {
memory_usage_estimation =
- GPR_CLAMP((gpr_atm)((1.0 -
- ((double)resource_quota->free_pool) /
- ((double)resource_quota->size)) *
+ GPR_CLAMP((gpr_atm)((1.0 - ((double)resource_quota->free_pool) /
+ ((double)resource_quota->size)) *
MEMORY_USAGE_ESTIMATION_MAX),
0, MEMORY_USAGE_ESTIMATION_MAX);
}
@@ -288,15 +287,16 @@ static void rq_update_estimate(grpc_resource_quota *resource_quota) {
}
/* returns true if all allocations are completed */
-static bool rq_alloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota) {
- grpc_resource_user *resource_user;
+static bool rq_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
+ grpc_resource_user* resource_user;
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
- gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
- " free_pool=%" PRId64,
+ gpr_log(GPR_DEBUG,
+ "RQ: check allocation for user %p shutdown=%" PRIdPTR
+ " free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
resource_user->free_pool);
}
@@ -320,8 +320,9 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
- gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
- " bytes; rq_free_pool -> %" PRId64,
+ gpr_log(GPR_DEBUG,
+ "RQ %s %s: grant alloc %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
@@ -346,8 +347,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
/* returns true if any memory could be reclaimed from buffers */
static bool rq_reclaim_from_per_user_free_pool(
- grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
- grpc_resource_user *resource_user;
+ grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota) {
+ grpc_resource_user* resource_user;
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
gpr_mu_lock(&resource_user->mu);
@@ -357,8 +358,9 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
- gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
- " bytes; rq_free_pool -> %" PRId64,
+ gpr_log(GPR_DEBUG,
+ "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
+ " bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
}
@@ -372,12 +374,12 @@ static bool rq_reclaim_from_per_user_free_pool(
}
/* returns true if reclamation is proceeding */
-static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota, bool destructive) {
+static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota, bool destructive) {
if (resource_quota->reclaiming) return true;
grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
: GRPC_RULIST_RECLAIMER_BENIGN;
- grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
+ grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == NULL) return false;
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
@@ -386,7 +388,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
}
resource_quota->reclaiming = true;
grpc_resource_quota_ref_internal(resource_quota);
- grpc_closure *c = resource_user->reclaimers[destructive];
+ grpc_closure* c = resource_user->reclaimers[destructive];
GPR_ASSERT(c);
resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
resource_quota->debug_only_last_initiated_reclaimer = c;
@@ -402,17 +404,17 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
typedef struct {
grpc_slice_refcount base;
gpr_refcount refs;
- grpc_resource_user *resource_user;
+ grpc_resource_user* resource_user;
size_t size;
} ru_slice_refcount;
-static void ru_slice_ref(void *p) {
- ru_slice_refcount *rc = (ru_slice_refcount *)p;
+static void ru_slice_ref(void* p) {
+ ru_slice_refcount* rc = (ru_slice_refcount*)p;
gpr_ref(&rc->refs);
}
-static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- ru_slice_refcount *rc = (ru_slice_refcount *)p;
+static void ru_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+ ru_slice_refcount* rc = (ru_slice_refcount*)p;
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
@@ -423,10 +425,10 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
ru_slice_ref, ru_slice_unref, grpc_slice_default_eq_impl,
grpc_slice_default_hash_impl};
-static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
+static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
size_t size) {
- ru_slice_refcount *rc =
- (ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size);
+ ru_slice_refcount* rc =
+ (ru_slice_refcount*)gpr_malloc(sizeof(ru_slice_refcount) + size);
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
@@ -434,7 +436,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
rc->size = size;
grpc_slice slice;
slice.refcount = &rc->base;
- slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
+ slice.data.refcounted.bytes = (uint8_t*)(rc + 1);
slice.data.refcounted.length = size;
return slice;
}
@@ -444,8 +446,8 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
* the combiner
*/
-static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+static void ru_allocate(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -453,9 +455,9 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
}
-static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
- grpc_error *error) {
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+static void ru_add_to_free_pool(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -465,10 +467,10 @@ static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
}
-static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user,
+static bool ru_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
bool destructive) {
- grpc_closure *closure = resource_user->new_reclaimers[destructive];
+ grpc_closure* closure = resource_user->new_reclaimers[destructive];
GPR_ASSERT(closure != NULL);
resource_user->new_reclaimers[destructive] = NULL;
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
@@ -480,9 +482,9 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
return true;
}
-static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
- grpc_error *error) {
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+static void ru_post_benign_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -495,9 +497,9 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
}
-static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
- grpc_error *error) {
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+static void ru_post_destructive_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
+ grpc_error* error) {
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@@ -512,11 +514,11 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
}
-static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
+static void ru_shutdown(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
}
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
@@ -530,8 +532,8 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
}
}
-static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
- grpc_resource_user *resource_user = (grpc_resource_user *)ru;
+static void ru_destroy(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
+ grpc_resource_user* resource_user = (grpc_resource_user*)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
@@ -550,10 +552,10 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
gpr_free(resource_user);
}
-static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_resource_user_slice_allocator *slice_allocator =
- (grpc_resource_user_slice_allocator *)arg;
+static void ru_allocated_slices(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_resource_user_slice_allocator* slice_allocator =
+ (grpc_resource_user_slice_allocator*)arg;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@@ -571,12 +573,12 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
typedef struct {
int64_t size;
- grpc_resource_quota *resource_quota;
+ grpc_resource_quota* resource_quota;
grpc_closure closure;
} rq_resize_args;
-static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
- rq_resize_args *a = (rq_resize_args *)args;
+static void rq_resize(grpc_exec_ctx* exec_ctx, void* args, grpc_error* error) {
+ rq_resize_args* a = (rq_resize_args*)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@@ -586,9 +588,9 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
gpr_free(a);
}
-static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
- grpc_error *error) {
- grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
+static void rq_reclamation_done(grpc_exec_ctx* exec_ctx, void* rq,
+ grpc_error* error) {
+ grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
resource_quota->reclaiming = false;
rq_step_sched(exec_ctx, resource_quota);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@@ -599,9 +601,9 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
*/
/* Public API */
-grpc_resource_quota *grpc_resource_quota_create(const char *name) {
- grpc_resource_quota *resource_quota =
- (grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota));
+grpc_resource_quota* grpc_resource_quota_create(const char* name) {
+ grpc_resource_quota* resource_quota =
+ (grpc_resource_quota*)gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@@ -627,8 +629,8 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
return resource_quota;
}
-void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota) {
+void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota");
gpr_free(resource_quota->name);
@@ -637,35 +639,35 @@ void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx,
}
/* Public API */
-void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
+void grpc_resource_quota_unref(grpc_resource_quota* resource_quota) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
grpc_exec_ctx_finish(&exec_ctx);
}
-grpc_resource_quota *grpc_resource_quota_ref_internal(
- grpc_resource_quota *resource_quota) {
+grpc_resource_quota* grpc_resource_quota_ref_internal(
+ grpc_resource_quota* resource_quota) {
gpr_ref(&resource_quota->refs);
return resource_quota;
}
/* Public API */
-void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
+void grpc_resource_quota_ref(grpc_resource_quota* resource_quota) {
grpc_resource_quota_ref_internal(resource_quota);
}
double grpc_resource_quota_get_memory_pressure(
- grpc_resource_quota *resource_quota) {
+ grpc_resource_quota* resource_quota) {
return ((double)(gpr_atm_no_barrier_load(
&resource_quota->memory_usage_estimation))) /
((double)MEMORY_USAGE_ESTIMATION_MAX);
}
/* Public API */
-void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
+void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a));
+ rq_resize_args* a = (rq_resize_args*)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
@@ -675,7 +677,7 @@ void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
grpc_exec_ctx_finish(&exec_ctx);
}
-size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota) {
+size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
return (size_t)gpr_atm_no_barrier_load(&resource_quota->last_size);
}
@@ -683,13 +685,13 @@ size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota) {
* grpc_resource_user channel args api
*/
-grpc_resource_quota *grpc_resource_quota_from_channel_args(
- const grpc_channel_args *channel_args) {
+grpc_resource_quota* grpc_resource_quota_from_channel_args(
+ const grpc_channel_args* channel_args) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
- (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
@@ -698,18 +700,18 @@ grpc_resource_quota *grpc_resource_quota_from_channel_args(
return grpc_resource_quota_create(NULL);
}
-static void *rq_copy(void *rq) {
- grpc_resource_quota_ref((grpc_resource_quota *)rq);
+static void* rq_copy(void* rq) {
+ grpc_resource_quota_ref((grpc_resource_quota*)rq);
return rq;
}
-static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) {
- grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq);
+static void rq_destroy(grpc_exec_ctx* exec_ctx, void* rq) {
+ grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota*)rq);
}
-static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+static int rq_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
-const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
+const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void) {
static const grpc_arg_pointer_vtable vtable = {rq_copy, rq_destroy, rq_cmp};
return &vtable;
}
@@ -718,10 +720,10 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
* grpc_resource_user api
*/
-grpc_resource_user *grpc_resource_user_create(
- grpc_resource_quota *resource_quota, const char *name) {
- grpc_resource_user *resource_user =
- (grpc_resource_user *)gpr_malloc(sizeof(*resource_user));
+grpc_resource_user* grpc_resource_user_create(
+ grpc_resource_quota* resource_quota, const char* name) {
+ grpc_resource_user* resource_user =
+ (grpc_resource_user*)gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,
@@ -762,18 +764,18 @@ grpc_resource_user *grpc_resource_user_create(
return resource_user;
}
-grpc_resource_quota *grpc_resource_user_quota(
- grpc_resource_user *resource_user) {
+grpc_resource_quota* grpc_resource_user_quota(
+ grpc_resource_user* resource_user) {
return resource_user->resource_quota;
}
-static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) {
+static void ru_ref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
}
-static void ru_unref_by(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, gpr_atm amount) {
+static void ru_unref_by(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
@@ -783,17 +785,17 @@ static void ru_unref_by(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_resource_user_ref(grpc_resource_user *resource_user) {
+void grpc_resource_user_ref(grpc_resource_user* resource_user) {
ru_ref_by(resource_user, 1);
}
-void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user) {
+void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
ru_unref_by(exec_ctx, resource_user, 1);
}
-void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user) {
+void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
GRPC_CLOSURE_SCHED(
exec_ctx,
@@ -804,9 +806,9 @@ void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, size_t size,
- grpc_closure *optional_on_done) {
+void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size,
+ grpc_closure* optional_on_done) {
gpr_mu_lock(&resource_user->mu);
ru_ref_by(resource_user, (gpr_atm)size);
resource_user->free_pool -= (int64_t)size;
@@ -831,8 +833,8 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&resource_user->mu);
}
-void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, size_t size) {
+void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += (int64_t)size;
@@ -852,10 +854,10 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
}
-void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user,
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
bool destructive,
- grpc_closure *closure) {
+ grpc_closure* closure) {
GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
resource_user->new_reclaimers[destructive] = closure;
GRPC_CLOSURE_SCHED(exec_ctx,
@@ -863,8 +865,8 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
-void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user) {
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name);
@@ -875,8 +877,8 @@ void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_slice_allocator_init(
- grpc_resource_user_slice_allocator *slice_allocator,
- grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
+ grpc_resource_user_slice_allocator* slice_allocator,
+ grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p) {
GRPC_CLOSURE_INIT(&slice_allocator->on_allocated, ru_allocated_slices,
slice_allocator, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p,
@@ -885,9 +887,9 @@ void grpc_resource_user_slice_allocator_init(
}
void grpc_resource_user_alloc_slices(
- grpc_exec_ctx *exec_ctx,
- grpc_resource_user_slice_allocator *slice_allocator, size_t length,
- size_t count, grpc_slice_buffer *dest) {
+ grpc_exec_ctx* exec_ctx,
+ grpc_resource_user_slice_allocator* slice_allocator, size_t length,
+ size_t count, grpc_slice_buffer* dest) {
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
@@ -895,8 +897,8 @@ void grpc_resource_user_alloc_slices(
count * length, &slice_allocator->on_allocated);
}
-grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user,
+grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
size_t size) {
grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL);
return ru_slice_create(resource_user, size);
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 1d4249b7e2..fcdf9c2de5 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -67,58 +67,58 @@ extern "C" {
extern grpc_tracer_flag grpc_resource_quota_trace;
-grpc_resource_quota *grpc_resource_quota_ref_internal(
- grpc_resource_quota *resource_quota);
-void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx,
- grpc_resource_quota *resource_quota);
-grpc_resource_quota *grpc_resource_quota_from_channel_args(
- const grpc_channel_args *channel_args);
+grpc_resource_quota* grpc_resource_quota_ref_internal(
+ grpc_resource_quota* resource_quota);
+void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_resource_quota* resource_quota);
+grpc_resource_quota* grpc_resource_quota_from_channel_args(
+ const grpc_channel_args* channel_args);
/* Return a number indicating current memory pressure:
0.0 ==> no memory usage
1.0 ==> maximum memory usage */
double grpc_resource_quota_get_memory_pressure(
- grpc_resource_quota *resource_quota);
+ grpc_resource_quota* resource_quota);
-size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota);
+size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota);
typedef struct grpc_resource_user grpc_resource_user;
-grpc_resource_user *grpc_resource_user_create(
- grpc_resource_quota *resource_quota, const char *name);
+grpc_resource_user* grpc_resource_user_create(
+ grpc_resource_quota* resource_quota, const char* name);
/* Returns a borrowed reference to the underlying resource quota for this
resource user. */
-grpc_resource_quota *grpc_resource_user_quota(
- grpc_resource_user *resource_user);
+grpc_resource_quota* grpc_resource_user_quota(
+ grpc_resource_user* resource_user);
-void grpc_resource_user_ref(grpc_resource_user *resource_user);
-void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user);
-void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user);
+void grpc_resource_user_ref(grpc_resource_user* resource_user);
+void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
+void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
/* Allocate from the resource user (and its quota).
If optional_on_done is NULL, then allocate immediately. This may push the
quota over-limit, at which point reclamation will kick in.
If optional_on_done is non-NULL, it will be scheduled when the allocation has
been granted by the quota. */
-void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, size_t size,
- grpc_closure *optional_on_done);
+void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size,
+ grpc_closure* optional_on_done);
/* Release memory back to the quota */
-void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user, size_t size);
+void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user, size_t size);
/* Post a memory reclaimer to the resource user. Only one benign and one
destructive reclaimer can be posted at once. When executed, the reclaimer
MUST call grpc_resource_user_finish_reclamation before it completes, to
return control to the resource quota. */
-void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user,
- bool destructive, grpc_closure *closure);
+void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
+ bool destructive, grpc_closure* closure);
/* Finish a reclamation step */
-void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user);
+void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user);
/* Helper to allocate slices from a resource user */
typedef struct grpc_resource_user_slice_allocator {
@@ -131,27 +131,27 @@ typedef struct grpc_resource_user_slice_allocator {
/* Number of slices to allocate on the current request */
size_t count;
/* Destination for slices to allocate on the current request */
- grpc_slice_buffer *dest;
+ grpc_slice_buffer* dest;
/* Parent resource user */
- grpc_resource_user *resource_user;
+ grpc_resource_user* resource_user;
} grpc_resource_user_slice_allocator;
/* Initialize a slice allocator.
When an allocation is completed, calls \a cb with arg \p. */
void grpc_resource_user_slice_allocator_init(
- grpc_resource_user_slice_allocator *slice_allocator,
- grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);
+ grpc_resource_user_slice_allocator* slice_allocator,
+ grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p);
/* Allocate \a count slices of length \a length into \a dest. Only one request
can be outstanding at a time. */
void grpc_resource_user_alloc_slices(
- grpc_exec_ctx *exec_ctx,
- grpc_resource_user_slice_allocator *slice_allocator, size_t length,
- size_t count, grpc_slice_buffer *dest);
+ grpc_exec_ctx* exec_ctx,
+ grpc_resource_user_slice_allocator* slice_allocator, size_t length,
+ size_t count, grpc_slice_buffer* dest);
/* Allocate one slice of length \a size synchronously. */
-grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user,
+grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user,
size_t size);
#ifdef __cplusplus
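
The resource_quota.h hunk above is the public surface of the quota machinery: create a named quota, resize it, and let channels pick it up through the GRPC_ARG_RESOURCE_QUOTA channel arg (that lookup is what grpc_resource_quota_from_channel_args does in the .cc hunk). A rough usage sketch follows; it is not part of this patch and relies only on the signatures shown above plus the public grpc_arg / grpc_channel_args types.

/* Usage sketch (illustrative, not part of this change): cap a quota at
   32 MiB and expose it as a channel arg so
   grpc_resource_quota_from_channel_args can find it. */
#include <grpc/grpc.h>

void example_resource_quota_arg(void) {
  grpc_resource_quota* rq = grpc_resource_quota_create("example_quota");
  grpc_resource_quota_resize(rq, 32 * 1024 * 1024);

  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = (char*)GRPC_ARG_RESOURCE_QUOTA;
  arg.value.pointer.p = rq;
  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {1, &arg};

  /* ... pass &args to channel or server creation; the arg vtable takes its
     own reference when the args are copied ... */
  (void)args;
  grpc_resource_quota_unref(rq); /* drop the local reference */
}
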
diff --git a/src/core/lib/iomgr/sockaddr_utils.cc b/src/core/lib/iomgr/sockaddr_utils.cc
index 8a2e6ed89b..2dbc5aa6e7 100644
--- a/src/core/lib/iomgr/sockaddr_utils.cc
+++ b/src/core/lib/iomgr/sockaddr_utils.cc
@@ -36,16 +36,16 @@
static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0xff, 0xff};
-int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *resolved_addr,
- grpc_resolved_address *resolved_addr4_out) {
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr,
+ grpc_resolved_address* resolved_addr4_out) {
GPR_ASSERT(resolved_addr != resolved_addr4_out);
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
- struct sockaddr_in *addr4_out =
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ struct sockaddr_in* addr4_out =
resolved_addr4_out == NULL
? NULL
- : (struct sockaddr_in *)resolved_addr4_out->addr;
+ : (struct sockaddr_in*)resolved_addr4_out->addr;
if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr;
+ const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix,
sizeof(kV4MappedPrefix)) == 0) {
if (resolved_addr4_out != NULL) {
@@ -63,14 +63,14 @@ int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *resolved_addr,
return 0;
}
-int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *resolved_addr,
- grpc_resolved_address *resolved_addr6_out) {
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* resolved_addr,
+ grpc_resolved_address* resolved_addr6_out) {
GPR_ASSERT(resolved_addr != resolved_addr6_out);
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
- struct sockaddr_in6 *addr6_out =
- (struct sockaddr_in6 *)resolved_addr6_out->addr;
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
+ struct sockaddr_in6* addr6_out =
+ (struct sockaddr_in6*)resolved_addr6_out->addr;
if (addr->sa_family == AF_INET) {
- const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
+ const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out));
addr6_out->sin6_family = AF_INET6;
memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12);
@@ -82,17 +82,17 @@ int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *resolved_addr,
return 0;
}
-int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr,
- int *port_out) {
- const struct sockaddr *addr;
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address* resolved_addr,
+ int* port_out) {
+ const struct sockaddr* addr;
grpc_resolved_address addr4_normalized;
if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) {
resolved_addr = &addr4_normalized;
}
- addr = (const struct sockaddr *)resolved_addr->addr;
+ addr = (const struct sockaddr*)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
/* Check for 0.0.0.0 */
- const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
+ const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
if (addr4->sin_addr.s_addr != 0) {
return 0;
}
@@ -100,7 +100,7 @@ int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr,
return 1;
} else if (addr->sa_family == AF_INET6) {
/* Check for :: */
- const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr;
+ const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
int i;
for (i = 0; i < 16; i++) {
if (addr6->sin6_addr.s6_addr[i] != 0) {
@@ -114,15 +114,15 @@ int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr,
}
}
-void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
- grpc_resolved_address *wild6_out) {
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out,
+ grpc_resolved_address* wild6_out) {
grpc_sockaddr_make_wildcard4(port, wild4_out);
grpc_sockaddr_make_wildcard6(port, wild6_out);
}
void grpc_sockaddr_make_wildcard4(int port,
- grpc_resolved_address *resolved_wild_out) {
- struct sockaddr_in *wild_out = (struct sockaddr_in *)resolved_wild_out->addr;
+ grpc_resolved_address* resolved_wild_out) {
+ struct sockaddr_in* wild_out = (struct sockaddr_in*)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin_family = AF_INET;
@@ -131,9 +131,8 @@ void grpc_sockaddr_make_wildcard4(int port,
}
void grpc_sockaddr_make_wildcard6(int port,
- grpc_resolved_address *resolved_wild_out) {
- struct sockaddr_in6 *wild_out =
- (struct sockaddr_in6 *)resolved_wild_out->addr;
+ grpc_resolved_address* resolved_wild_out) {
+ struct sockaddr_in6* wild_out = (struct sockaddr_in6*)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;
@@ -141,14 +140,14 @@ void grpc_sockaddr_make_wildcard6(int port,
resolved_wild_out->len = sizeof(struct sockaddr_in6);
}
-int grpc_sockaddr_to_string(char **out,
- const grpc_resolved_address *resolved_addr,
+int grpc_sockaddr_to_string(char** out,
+ const grpc_resolved_address* resolved_addr,
int normalize) {
- const struct sockaddr *addr;
+ const struct sockaddr* addr;
const int save_errno = errno;
grpc_resolved_address addr_normalized;
char ntop_buf[INET6_ADDRSTRLEN];
- const void *ip = NULL;
+ const void* ip = NULL;
int port;
uint32_t sin6_scope_id = 0;
int ret;
@@ -157,13 +156,13 @@ int grpc_sockaddr_to_string(char **out,
if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
resolved_addr = &addr_normalized;
}
- addr = (const struct sockaddr *)resolved_addr->addr;
+ addr = (const struct sockaddr*)resolved_addr->addr;
if (addr->sa_family == AF_INET) {
- const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr;
+ const struct sockaddr_in* addr4 = (const struct sockaddr_in*)addr;
ip = &addr4->sin_addr;
port = ntohs(addr4->sin_port);
} else if (addr->sa_family == AF_INET6) {
- const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr;
+ const struct sockaddr_in6* addr6 = (const struct sockaddr_in6*)addr;
ip = &addr6->sin6_addr;
port = ntohs(addr6->sin6_port);
sin6_scope_id = addr6->sin6_scope_id;
@@ -171,7 +170,7 @@ int grpc_sockaddr_to_string(char **out,
if (ip != NULL &&
grpc_inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) {
if (sin6_scope_id != 0) {
- char *host_with_scope;
+ char* host_with_scope;
      /* Enclose sin6_scope_id with the format defined in RFC 6874 section 2. */
gpr_asprintf(&host_with_scope, "%s%%25%" PRIu32, ntop_buf, sin6_scope_id);
ret = gpr_join_host_port(out, host_with_scope, port);
@@ -187,17 +186,17 @@ int grpc_sockaddr_to_string(char **out,
return ret;
}
-char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) {
+char* grpc_sockaddr_to_uri(const grpc_resolved_address* resolved_addr) {
grpc_resolved_address addr_normalized;
if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) {
resolved_addr = &addr_normalized;
}
- const char *scheme = grpc_sockaddr_get_uri_scheme(resolved_addr);
+ const char* scheme = grpc_sockaddr_get_uri_scheme(resolved_addr);
if (scheme == NULL || strcmp("unix", scheme) == 0) {
return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr);
}
- char *path = NULL;
- char *uri_str = NULL;
+ char* path = NULL;
+ char* uri_str = NULL;
if (grpc_sockaddr_to_string(&path, resolved_addr,
false /* suppress errors */) &&
scheme != NULL) {
@@ -207,9 +206,9 @@ char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) {
return uri_str != NULL ? uri_str : NULL;
}
-const char *grpc_sockaddr_get_uri_scheme(
- const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+const char* grpc_sockaddr_get_uri_scheme(
+ const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
return "ipv4";
@@ -221,18 +220,18 @@ const char *grpc_sockaddr_get_uri_scheme(
return NULL;
}
-int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
return addr->sa_family;
}
-int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
- return ntohs(((struct sockaddr_in *)addr)->sin_port);
+ return ntohs(((struct sockaddr_in*)addr)->sin_port);
case AF_INET6:
- return ntohs(((struct sockaddr_in6 *)addr)->sin6_port);
+ return ntohs(((struct sockaddr_in6*)addr)->sin6_port);
default:
if (grpc_is_unix_socket(resolved_addr)) {
return 1;
@@ -243,17 +242,17 @@ int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) {
}
}
-int grpc_sockaddr_set_port(const grpc_resolved_address *resolved_addr,
+int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr,
int port) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
switch (addr->sa_family) {
case AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in *)addr)->sin_port = htons((uint16_t)port);
+ ((struct sockaddr_in*)addr)->sin_port = htons((uint16_t)port);
return 1;
case AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
- ((struct sockaddr_in6 *)addr)->sin6_port = htons((uint16_t)port);
+ ((struct sockaddr_in6*)addr)->sin6_port = htons((uint16_t)port);
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port",
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index 1fd552febb..090470d49e 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -30,33 +30,33 @@ extern "C" {
If addr4_out is non-NULL, the inner IPv4 address will be copied here when
returning true. */
-int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *addr,
- grpc_resolved_address *addr4_out);
+int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* addr,
+ grpc_resolved_address* addr4_out);
/* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96
address to addr6_out and returns true. Otherwise returns false. */
-int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *addr,
- grpc_resolved_address *addr6_out);
+int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* addr,
+ grpc_resolved_address* addr6_out);
/* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to
*port_out (if not NULL) and returns true, otherwise returns false. */
-int grpc_sockaddr_is_wildcard(const grpc_resolved_address *addr, int *port_out);
+int grpc_sockaddr_is_wildcard(const grpc_resolved_address* addr, int* port_out);
/* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */
-void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out,
- grpc_resolved_address *wild6_out);
+void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out,
+ grpc_resolved_address* wild6_out);
/* Writes 0.0.0.0:port. */
-void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address *wild_out);
+void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address* wild_out);
/* Writes [::]:port. */
-void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address *wild_out);
+void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address* wild_out);
/* Return the IP port number of a sockaddr */
-int grpc_sockaddr_get_port(const grpc_resolved_address *addr);
+int grpc_sockaddr_get_port(const grpc_resolved_address* addr);
/* Set IP port number of a sockaddr */
-int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port);
+int grpc_sockaddr_set_port(const grpc_resolved_address* addr, int port);
/* Converts a sockaddr into a newly-allocated human-readable string.
@@ -70,16 +70,16 @@ int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port);
In the unlikely event of an error, returns -1 and sets *out to NULL.
The existing value of errno is always preserved. */
-int grpc_sockaddr_to_string(char **out, const grpc_resolved_address *addr,
+int grpc_sockaddr_to_string(char** out, const grpc_resolved_address* addr,
int normalize);
/* Returns the URI string corresponding to \a addr */
-char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr);
+char* grpc_sockaddr_to_uri(const grpc_resolved_address* addr);
/* Returns the URI scheme corresponding to \a addr */
-const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr);
+const char* grpc_sockaddr_get_uri_scheme(const grpc_resolved_address* addr);
-int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr);
+int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr);
#ifdef __cplusplus
}
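
The sockaddr_utils.h declarations above form a small toolkit over grpc_resolved_address. Below is a POSIX-flavoured sketch of typical use; it is not part of this patch and relies only on the helpers declared above plus the addr/len fields of grpc_resolved_address used throughout the .cc hunk.

/* Sketch: wrap 127.0.0.1 in a grpc_resolved_address, set a port, and print
   the result with the helpers above. */
#include <string.h>
#include <netinet/in.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"

void example_sockaddr_utils(void) {
  grpc_resolved_address resolved;
  memset(&resolved, 0, sizeof(resolved));
  struct sockaddr_in* sin = (struct sockaddr_in*)resolved.addr;
  sin->sin_family = AF_INET;
  sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  resolved.len = sizeof(struct sockaddr_in);
  grpc_sockaddr_set_port(&resolved, 50051);

  char* text = NULL;
  if (grpc_sockaddr_to_string(&text, &resolved, 1 /* normalize */) >= 0) {
    gpr_log(GPR_INFO, "address %s, port %d", text,
            grpc_sockaddr_get_port(&resolved));
  }
  gpr_free(text);
}
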
diff --git a/src/core/lib/iomgr/socket_factory_posix.cc b/src/core/lib/iomgr/socket_factory_posix.cc
index 8e907703ae..40bfecd4c2 100644
--- a/src/core/lib/iomgr/socket_factory_posix.cc
+++ b/src/core/lib/iomgr/socket_factory_posix.cc
@@ -27,28 +27,28 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-void grpc_socket_factory_init(grpc_socket_factory *factory,
- const grpc_socket_factory_vtable *vtable) {
+void grpc_socket_factory_init(grpc_socket_factory* factory,
+ const grpc_socket_factory_vtable* vtable) {
factory->vtable = vtable;
gpr_ref_init(&factory->refcount, 1);
}
-int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain,
+int grpc_socket_factory_socket(grpc_socket_factory* factory, int domain,
int type, int protocol) {
return factory->vtable->socket(factory, domain, type, protocol);
}
-int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd,
- const grpc_resolved_address *addr) {
+int grpc_socket_factory_bind(grpc_socket_factory* factory, int sockfd,
+ const grpc_resolved_address* addr) {
return factory->vtable->bind(factory, sockfd, addr);
}
-int grpc_socket_factory_compare(grpc_socket_factory *a,
- grpc_socket_factory *b) {
+int grpc_socket_factory_compare(grpc_socket_factory* a,
+ grpc_socket_factory* b) {
int c = GPR_ICMP(a, b);
if (c != 0) {
- grpc_socket_factory *sma = a;
- grpc_socket_factory *smb = b;
+ grpc_socket_factory* sma = a;
+ grpc_socket_factory* smb = b;
c = GPR_ICMP(sma->vtable, smb->vtable);
if (c == 0) {
c = sma->vtable->compare(sma, smb);
@@ -57,35 +57,35 @@ int grpc_socket_factory_compare(grpc_socket_factory *a,
return c;
}
-grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory) {
+grpc_socket_factory* grpc_socket_factory_ref(grpc_socket_factory* factory) {
gpr_ref(&factory->refcount);
return factory;
}
-void grpc_socket_factory_unref(grpc_socket_factory *factory) {
+void grpc_socket_factory_unref(grpc_socket_factory* factory) {
if (gpr_unref(&factory->refcount)) {
factory->vtable->destroy(factory);
}
}
-static void *socket_factory_arg_copy(void *p) {
- return grpc_socket_factory_ref((grpc_socket_factory *)p);
+static void* socket_factory_arg_copy(void* p) {
+ return grpc_socket_factory_ref((grpc_socket_factory*)p);
}
-static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_factory_unref((grpc_socket_factory *)p);
+static void socket_factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ grpc_socket_factory_unref((grpc_socket_factory*)p);
}
-static int socket_factory_cmp(void *a, void *b) {
- return grpc_socket_factory_compare((grpc_socket_factory *)a,
- (grpc_socket_factory *)b);
+static int socket_factory_cmp(void* a, void* b) {
+ return grpc_socket_factory_compare((grpc_socket_factory*)a,
+ (grpc_socket_factory*)b);
}
static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
-grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
- return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY,
+grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory* factory) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_FACTORY,
factory, &socket_factory_arg_vtable);
}
diff --git a/src/core/lib/iomgr/socket_factory_posix.h b/src/core/lib/iomgr/socket_factory_posix.h
index a46938b06e..e8257b07c4 100644
--- a/src/core/lib/iomgr/socket_factory_posix.h
+++ b/src/core/lib/iomgr/socket_factory_posix.h
@@ -30,43 +30,43 @@ extern "C" {
/** The virtual table of grpc_socket_factory */
typedef struct {
/** Replacement for socket(2) */
- int (*socket)(grpc_socket_factory *factory, int domain, int type,
+ int (*socket)(grpc_socket_factory* factory, int domain, int type,
int protocol);
/** Replacement for bind(2) */
- int (*bind)(grpc_socket_factory *factory, int sockfd,
- const grpc_resolved_address *addr);
+ int (*bind)(grpc_socket_factory* factory, int sockfd,
+ const grpc_resolved_address* addr);
/** Compare socket factory \a a and \a b */
- int (*compare)(grpc_socket_factory *a, grpc_socket_factory *b);
+ int (*compare)(grpc_socket_factory* a, grpc_socket_factory* b);
/** Destroys the socket factory instance */
- void (*destroy)(grpc_socket_factory *factory);
+ void (*destroy)(grpc_socket_factory* factory);
} grpc_socket_factory_vtable;
/** The Socket Factory interface allows changes on socket options */
struct grpc_socket_factory {
- const grpc_socket_factory_vtable *vtable;
+ const grpc_socket_factory_vtable* vtable;
gpr_refcount refcount;
};
/** called by concrete implementations to initialize the base struct */
-void grpc_socket_factory_init(grpc_socket_factory *factory,
- const grpc_socket_factory_vtable *vtable);
+void grpc_socket_factory_init(grpc_socket_factory* factory,
+ const grpc_socket_factory_vtable* vtable);
/** Wrap \a factory as a grpc_arg */
-grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory);
+grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory* factory);
/** Perform the equivalent of a socket(2) operation using \a factory */
-int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain,
+int grpc_socket_factory_socket(grpc_socket_factory* factory, int domain,
int type, int protocol);
/** Perform the equivalent of a bind(2) operation using \a factory */
-int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd,
- const grpc_resolved_address *addr);
+int grpc_socket_factory_bind(grpc_socket_factory* factory, int sockfd,
+ const grpc_resolved_address* addr);
/** Compare if \a a and \a b are the same factory or have same settings */
-int grpc_socket_factory_compare(grpc_socket_factory *a, grpc_socket_factory *b);
+int grpc_socket_factory_compare(grpc_socket_factory* a, grpc_socket_factory* b);
-grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory);
-void grpc_socket_factory_unref(grpc_socket_factory *factory);
+grpc_socket_factory* grpc_socket_factory_ref(grpc_socket_factory* factory);
+void grpc_socket_factory_unref(grpc_socket_factory* factory);
#ifdef __cplusplus
}
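
socket_factory_posix.h above defines a vtable-based hook so callers can substitute their own socket(2)/bind(2). The sketch below shows one possible concrete factory; it is not part of this patch, assumes a Linux/POSIX target that defines SO_REUSEPORT, and every name in it is illustrative. Only the vtable layout and init helper come from the header above.

/* Sketch of a concrete grpc_socket_factory whose socket() hook marks every
   new fd with SO_REUSEPORT before returning it. */
#include <sys/socket.h>
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/socket_factory_posix.h"

static int rp_socket(grpc_socket_factory* factory, int domain, int type,
                     int protocol) {
  int fd = socket(domain, type, protocol);
  if (fd >= 0) {
    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
  }
  return fd;
}

static int rp_bind(grpc_socket_factory* factory, int sockfd,
                   const grpc_resolved_address* addr) {
  return bind(sockfd, (const struct sockaddr*)addr->addr,
              (socklen_t)addr->len);
}

static int rp_compare(grpc_socket_factory* a, grpc_socket_factory* b) {
  return GPR_ICMP(a, b);
}

static void rp_destroy(grpc_socket_factory* factory) { gpr_free(factory); }

static const grpc_socket_factory_vtable rp_vtable = {rp_socket, rp_bind,
                                                     rp_compare, rp_destroy};

grpc_socket_factory* reuseport_factory_create(void) {
  grpc_socket_factory* f =
      (grpc_socket_factory*)gpr_malloc(sizeof(grpc_socket_factory));
  grpc_socket_factory_init(f, &rp_vtable);
  return f;
}
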
diff --git a/src/core/lib/iomgr/socket_mutator.cc b/src/core/lib/iomgr/socket_mutator.cc
index b0435d5a07..ff6c0c70d8 100644
--- a/src/core/lib/iomgr/socket_mutator.cc
+++ b/src/core/lib/iomgr/socket_mutator.cc
@@ -24,27 +24,27 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
-void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
- const grpc_socket_mutator_vtable *vtable) {
+void grpc_socket_mutator_init(grpc_socket_mutator* mutator,
+ const grpc_socket_mutator_vtable* vtable) {
mutator->vtable = vtable;
gpr_ref_init(&mutator->refcount, 1);
}
-grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator) {
+grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator) {
gpr_ref(&mutator->refcount);
return mutator;
}
-bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd) {
+bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator* mutator, int fd) {
return mutator->vtable->mutate_fd(fd, mutator);
}
-int grpc_socket_mutator_compare(grpc_socket_mutator *a,
- grpc_socket_mutator *b) {
+int grpc_socket_mutator_compare(grpc_socket_mutator* a,
+ grpc_socket_mutator* b) {
int c = GPR_ICMP(a, b);
if (c != 0) {
- grpc_socket_mutator *sma = a;
- grpc_socket_mutator *smb = b;
+ grpc_socket_mutator* sma = a;
+ grpc_socket_mutator* smb = b;
c = GPR_ICMP(sma->vtable, smb->vtable);
if (c == 0) {
c = sma->vtable->compare(sma, smb);
@@ -53,29 +53,29 @@ int grpc_socket_mutator_compare(grpc_socket_mutator *a,
return c;
}
-void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) {
+void grpc_socket_mutator_unref(grpc_socket_mutator* mutator) {
if (gpr_unref(&mutator->refcount)) {
mutator->vtable->destory(mutator);
}
}
-static void *socket_mutator_arg_copy(void *p) {
- return grpc_socket_mutator_ref((grpc_socket_mutator *)p);
+static void* socket_mutator_arg_copy(void* p) {
+ return grpc_socket_mutator_ref((grpc_socket_mutator*)p);
}
-static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_socket_mutator_unref((grpc_socket_mutator *)p);
+static void socket_mutator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ grpc_socket_mutator_unref((grpc_socket_mutator*)p);
}
-static int socket_mutator_cmp(void *a, void *b) {
- return grpc_socket_mutator_compare((grpc_socket_mutator *)a,
- (grpc_socket_mutator *)b);
+static int socket_mutator_cmp(void* a, void* b) {
+ return grpc_socket_mutator_compare((grpc_socket_mutator*)a,
+ (grpc_socket_mutator*)b);
}
static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
-grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) {
- return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR,
+grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_MUTATOR,
mutator, &socket_mutator_arg_vtable);
}
diff --git a/src/core/lib/iomgr/socket_mutator.h b/src/core/lib/iomgr/socket_mutator.h
index ba956e16f0..b4103f7e93 100644
--- a/src/core/lib/iomgr/socket_mutator.h
+++ b/src/core/lib/iomgr/socket_mutator.h
@@ -31,34 +31,34 @@ extern "C" {
/** The virtual table of grpc_socket_mutator */
typedef struct {
  /** Mutates the socket options of \a fd */
- bool (*mutate_fd)(int fd, grpc_socket_mutator *mutator);
+ bool (*mutate_fd)(int fd, grpc_socket_mutator* mutator);
/** Compare socket mutator \a a and \a b */
- int (*compare)(grpc_socket_mutator *a, grpc_socket_mutator *b);
+ int (*compare)(grpc_socket_mutator* a, grpc_socket_mutator* b);
/** Destroys the socket mutator instance */
- void (*destory)(grpc_socket_mutator *mutator);
+ void (*destory)(grpc_socket_mutator* mutator);
} grpc_socket_mutator_vtable;
/** The Socket Mutator interface allows changes on socket options */
struct grpc_socket_mutator {
- const grpc_socket_mutator_vtable *vtable;
+ const grpc_socket_mutator_vtable* vtable;
gpr_refcount refcount;
};
/** called by concrete implementations to initialize the base struct */
-void grpc_socket_mutator_init(grpc_socket_mutator *mutator,
- const grpc_socket_mutator_vtable *vtable);
+void grpc_socket_mutator_init(grpc_socket_mutator* mutator,
+ const grpc_socket_mutator_vtable* vtable);
/** Wrap \a mutator as a grpc_arg */
-grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator);
+grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator);
/** Perform the file descriptor mutation operation of \a mutator on \a fd */
-bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd);
+bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator* mutator, int fd);
/** Compare if \a a and \a b are the same mutator or have same settings */
-int grpc_socket_mutator_compare(grpc_socket_mutator *a, grpc_socket_mutator *b);
+int grpc_socket_mutator_compare(grpc_socket_mutator* a, grpc_socket_mutator* b);
-grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator);
-void grpc_socket_mutator_unref(grpc_socket_mutator *mutator);
+grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator);
+void grpc_socket_mutator_unref(grpc_socket_mutator* mutator);
#ifdef __cplusplus
}
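
The socket mutator interface above mirrors the factory: a small refcounted vtable wrapped as a channel arg. A minimal sketch of a concrete mutator follows; it is not part of this patch, assumes a POSIX socket layer, and keeps the vtable field order (mutate_fd, compare, destory) exactly as declared above.

/* Sketch of a grpc_socket_mutator that enables SO_KEEPALIVE on every fd
   handed to it. */
#include <stdbool.h>
#include <sys/socket.h>
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/socket_mutator.h"

static bool ka_mutate_fd(int fd, grpc_socket_mutator* mutator) {
  int on = 1;
  return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) == 0;
}

static int ka_compare(grpc_socket_mutator* a, grpc_socket_mutator* b) {
  return GPR_ICMP(a, b);
}

static void ka_destory(grpc_socket_mutator* mutator) { gpr_free(mutator); }

static const grpc_socket_mutator_vtable ka_vtable = {ka_mutate_fd, ka_compare,
                                                     ka_destory};

grpc_arg keepalive_mutator_arg(void) {
  grpc_socket_mutator* m =
      (grpc_socket_mutator*)gpr_malloc(sizeof(grpc_socket_mutator));
  grpc_socket_mutator_init(m, &ka_vtable);
  /* The returned arg can be appended to channel args; the arg's vtable
     refs/unrefs the mutator when the args are copied or destroyed. */
  return grpc_socket_mutator_to_arg(m);
}
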
diff --git a/src/core/lib/iomgr/socket_utils.h b/src/core/lib/iomgr/socket_utils.h
index d6c538ec6f..4816ab6be7 100644
--- a/src/core/lib/iomgr/socket_utils.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -26,7 +26,7 @@ extern "C" {
#endif
/* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
-const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size);
+const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.cc b/src/core/lib/iomgr/socket_utils_common_posix.cc
index b8e2a0cdfd..88b757a4ae 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -44,7 +44,7 @@
#include "src/core/lib/support/string.h"
/* set a socket to non blocking mode */
-grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) {
+grpc_error* grpc_set_socket_nonblocking(int fd, int non_blocking) {
int oldflags = fcntl(fd, F_GETFL, 0);
if (oldflags < 0) {
return GRPC_OS_ERROR(errno, "fcntl");
@@ -63,7 +63,7 @@ grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
+grpc_error* grpc_set_socket_no_sigpipe_if_possible(int fd) {
#ifdef GRPC_HAVE_SO_NOSIGPIPE
int val = 1;
int newval;
@@ -81,7 +81,7 @@ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
+grpc_error* grpc_set_socket_ip_pktinfo_if_possible(int fd) {
#ifdef GRPC_HAVE_IP_PKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip,
@@ -92,7 +92,7 @@ grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
+grpc_error* grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
#ifdef GRPC_HAVE_IPV6_RECVPKTINFO
int get_local_ip = 1;
if (0 != setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip,
@@ -103,14 +103,14 @@ grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) {
+grpc_error* grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) {
return 0 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffer_size_bytes,
sizeof(buffer_size_bytes))
? GRPC_ERROR_NONE
: GRPC_OS_ERROR(errno, "setsockopt(SO_SNDBUF)");
}
-grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) {
+grpc_error* grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) {
return 0 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buffer_size_bytes,
sizeof(buffer_size_bytes))
? GRPC_ERROR_NONE
@@ -118,7 +118,7 @@ grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) {
}
/* set a socket to close on exec */
-grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) {
+grpc_error* grpc_set_socket_cloexec(int fd, int close_on_exec) {
int oldflags = fcntl(fd, F_GETFD, 0);
if (oldflags < 0) {
return GRPC_OS_ERROR(errno, "fcntl");
@@ -138,7 +138,7 @@ grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) {
}
/* set a socket to reuse old addresses */
-grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
+grpc_error* grpc_set_socket_reuse_addr(int fd, int reuse) {
int val = (reuse != 0);
int newval;
socklen_t intlen = sizeof(newval);
@@ -156,7 +156,7 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
}
/* set a socket to reuse old addresses */
-grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
+grpc_error* grpc_set_socket_reuse_port(int fd, int reuse) {
#ifndef SO_REUSEPORT
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"SO_REUSEPORT unavailable on compiling system");
@@ -179,7 +179,7 @@ grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
}
/* disable nagle */
-grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
+grpc_error* grpc_set_socket_low_latency(int fd, int low_latency) {
int val = (low_latency != 0);
int newval;
socklen_t intlen = sizeof(newval);
@@ -196,7 +196,7 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
}
/* set a socket using a grpc_socket_mutator */
-grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator) {
+grpc_error* grpc_set_socket_with_mutator(int fd, grpc_socket_mutator* mutator) {
GPR_ASSERT(mutator);
if (!grpc_socket_mutator_mutate_fd(mutator, fd)) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_socket_mutator failed.");
@@ -217,7 +217,7 @@ static void probe_ipv6_once(void) {
memset(&addr, 0, sizeof(addr));
addr.sin6_family = AF_INET6;
addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */
- if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
+ if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0) {
g_ipv6_loopback_available = 1;
} else {
gpr_log(GPR_INFO,
@@ -249,35 +249,35 @@ static int set_socket_dualstack(int fd) {
}
}
-static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) {
+static grpc_error* error_for_fd(int fd, const grpc_resolved_address* addr) {
if (fd >= 0) return GRPC_ERROR_NONE;
- char *addr_str;
+ char* addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
- grpc_error *err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"),
+ grpc_error* err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"),
GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(addr_str));
gpr_free(addr_str);
return err;
}
-grpc_error *grpc_create_dualstack_socket(
- const grpc_resolved_address *resolved_addr, int type, int protocol,
- grpc_dualstack_mode *dsmode, int *newfd) {
+grpc_error* grpc_create_dualstack_socket(
+ const grpc_resolved_address* resolved_addr, int type, int protocol,
+ grpc_dualstack_mode* dsmode, int* newfd) {
return grpc_create_dualstack_socket_using_factory(NULL, resolved_addr, type,
protocol, dsmode, newfd);
}
-static int create_socket(grpc_socket_factory *factory, int domain, int type,
+static int create_socket(grpc_socket_factory* factory, int domain, int type,
int protocol) {
return (factory != NULL)
? grpc_socket_factory_socket(factory, domain, type, protocol)
: socket(domain, type, protocol);
}
-grpc_error *grpc_create_dualstack_socket_using_factory(
- grpc_socket_factory *factory, const grpc_resolved_address *resolved_addr,
- int type, int protocol, grpc_dualstack_mode *dsmode, int *newfd) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+grpc_error* grpc_create_dualstack_socket_using_factory(
+ grpc_socket_factory* factory, const grpc_resolved_address* resolved_addr,
+ int type, int protocol, grpc_dualstack_mode* dsmode, int* newfd) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
@@ -307,7 +307,7 @@ grpc_error *grpc_create_dualstack_socket_using_factory(
return error_for_fd(*newfd, resolved_addr);
}
-const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
GPR_ASSERT(size <= (socklen_t)-1);
return inet_ntop(af, src, dst, (socklen_t)size);
}
diff --git a/src/core/lib/iomgr/socket_utils_linux.cc b/src/core/lib/iomgr/socket_utils_linux.cc
index e7b094d216..12199c5c89 100644
--- a/src/core/lib/iomgr/socket_utils_linux.cc
+++ b/src/core/lib/iomgr/socket_utils_linux.cc
@@ -28,15 +28,15 @@
#include <sys/socket.h>
#include <sys/types.h>
-int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
int cloexec) {
int flags = 0;
GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
flags |= nonblock ? SOCK_NONBLOCK : 0;
flags |= cloexec ? SOCK_CLOEXEC : 0;
- return accept4(sockfd, (struct sockaddr *)resolved_addr->addr,
- (socklen_t *)&resolved_addr->len, flags);
+ return accept4(sockfd, (struct sockaddr*)resolved_addr->addr,
+ (socklen_t*)&resolved_addr->len, flags);
}
#endif
diff --git a/src/core/lib/iomgr/socket_utils_posix.cc b/src/core/lib/iomgr/socket_utils_posix.cc
index dfd1ffd1e3..c49cbb203b 100644
--- a/src/core/lib/iomgr/socket_utils_posix.cc
+++ b/src/core/lib/iomgr/socket_utils_posix.cc
@@ -29,13 +29,13 @@
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/sockaddr.h"
-int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
int cloexec) {
int fd, flags;
GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t));
GPR_ASSERT(resolved_addr->len <= (socklen_t)-1);
- fd = accept(sockfd, (struct sockaddr *)resolved_addr->addr,
- (socklen_t *)&resolved_addr->len);
+ fd = accept(sockfd, (struct sockaddr*)resolved_addr->addr,
+ (socklen_t*)&resolved_addr->len);
if (fd >= 0) {
if (nonblock) {
flags = fcntl(fd, F_GETFL, 0);
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 73809b68d3..7a9c8139e7 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -34,23 +34,23 @@ extern "C" {
#endif
/* a wrapper for accept or accept4 */
-int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock,
+int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
int cloexec);
/* set a socket to non blocking mode */
-grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking);
+grpc_error* grpc_set_socket_nonblocking(int fd, int non_blocking);
/* set a socket to close on exec */
-grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec);
+grpc_error* grpc_set_socket_cloexec(int fd, int close_on_exec);
/* set a socket to reuse old addresses */
-grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse);
+grpc_error* grpc_set_socket_reuse_addr(int fd, int reuse);
/* disable nagle */
-grpc_error *grpc_set_socket_low_latency(int fd, int low_latency);
+grpc_error* grpc_set_socket_low_latency(int fd, int low_latency);
/* set SO_REUSEPORT */
-grpc_error *grpc_set_socket_reuse_port(int fd, int reuse);
+grpc_error* grpc_set_socket_reuse_port(int fd, int reuse);
/* Returns true if this system can create AF_INET6 sockets bound to ::1.
The value is probed once, and cached for the life of the process.
@@ -64,24 +64,24 @@ int grpc_ipv6_loopback_available(void);
/* Tries to set SO_NOSIGPIPE if available on this platform.
If SO_NO_SIGPIPE is not available, returns 1. */
-grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd);
+grpc_error* grpc_set_socket_no_sigpipe_if_possible(int fd);
/* Tries to set IP_PKTINFO if available on this platform.
If IP_PKTINFO is not available, returns 1. */
-grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd);
+grpc_error* grpc_set_socket_ip_pktinfo_if_possible(int fd);
/* Tries to set IPV6_RECVPKTINFO if available on this platform.
If IPV6_RECVPKTINFO is not available, returns 1. */
-grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd);
+grpc_error* grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd);
/* Tries to set the socket's send buffer to given size. */
-grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes);
+grpc_error* grpc_set_socket_sndbuf(int fd, int buffer_size_bytes);
/* Tries to set the socket's receive buffer to given size. */
-grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes);
+grpc_error* grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes);
/* Tries to set the socket using a grpc_socket_mutator */
-grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator);
+grpc_error* grpc_set_socket_with_mutator(int fd, grpc_socket_mutator* mutator);
/* An enum to keep track of IPv4/IPv6 socket modes.
@@ -122,16 +122,16 @@ extern int grpc_forbid_dualstack_sockets_for_testing;
IPv4, so that bind() or connect() see the correct family.
Also, it's important to distinguish between DUALSTACK and IPV6 when
listening on the [::] wildcard address. */
-grpc_error *grpc_create_dualstack_socket(const grpc_resolved_address *addr,
+grpc_error* grpc_create_dualstack_socket(const grpc_resolved_address* addr,
int type, int protocol,
- grpc_dualstack_mode *dsmode,
- int *newfd);
+ grpc_dualstack_mode* dsmode,
+ int* newfd);
/* Same as grpc_create_dualstack_socket(), but use the given socket factory (if
non-null) to create the socket, rather than calling socket() directly. */
-grpc_error *grpc_create_dualstack_socket_using_factory(
- grpc_socket_factory *factory, const grpc_resolved_address *addr, int type,
- int protocol, grpc_dualstack_mode *dsmode, int *newfd);
+grpc_error* grpc_create_dualstack_socket_using_factory(
+ grpc_socket_factory* factory, const grpc_resolved_address* addr, int type,
+ int protocol, grpc_dualstack_mode* dsmode, int* newfd);
#ifdef __cplusplus
}
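
For reference, a minimal standalone sketch of the dualstack-probing idea that socket_utils_posix.h describes above: try an AF_INET6 socket with IPV6_V6ONLY cleared so it also accepts IPv4-mapped peers, and fall back to plain AF_INET otherwise. This is not gRPC's grpc_create_dualstack_socket(); create_dualstack_fd() and dsmode_t are hypothetical names used only for illustration.

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hypothetical sketch, not gRPC code: probe for dualstack socket support. */
typedef enum { DSMODE_NONE, DSMODE_IPV4, DSMODE_DUALSTACK } dsmode_t;

static int create_dualstack_fd(int type, int protocol, dsmode_t* dsmode) {
  int fd = socket(AF_INET6, type, protocol);
  if (fd >= 0) {
    int off = 0;
    /* Clearing IPV6_V6ONLY lets the IPv6 socket also serve IPv4 peers via
       ::ffff:a.b.c.d mapped addresses, i.e. a single dualstack socket. */
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off)) == 0) {
      *dsmode = DSMODE_DUALSTACK;
      return fd;
    }
    close(fd); /* no dualstack support on this system; fall back to IPv4 */
  }
  fd = socket(AF_INET, type, protocol);
  *dsmode = fd >= 0 ? DSMODE_IPV4 : DSMODE_NONE;
  return fd;
}
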
diff --git a/src/core/lib/iomgr/socket_utils_uv.cc b/src/core/lib/iomgr/socket_utils_uv.cc
index 0f7de4dfad..75316d8c24 100644
--- a/src/core/lib/iomgr/socket_utils_uv.cc
+++ b/src/core/lib/iomgr/socket_utils_uv.cc
@@ -26,7 +26,7 @@
#include <grpc/support/log.h>
-const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
uv_inet_ntop(af, src, dst, size);
return dst;
}
diff --git a/src/core/lib/iomgr/socket_utils_windows.cc b/src/core/lib/iomgr/socket_utils_windows.cc
index 6e85e4b61f..0482a1783d 100644
--- a/src/core/lib/iomgr/socket_utils_windows.cc
+++ b/src/core/lib/iomgr/socket_utils_windows.cc
@@ -25,9 +25,9 @@
#include <grpc/support/log.h>
-const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) {
+const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) {
/* Windows InetNtopA wants a mutable ip pointer */
- return InetNtopA(af, (void *)src, dst, size);
+ return InetNtopA(af, (void*)src, dst, size);
}
#endif /* GRPC_WINDOWS_SOCKETUTILS */
diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc
index 8c7f7cf683..aee80f4b4c 100644
--- a/src/core/lib/iomgr/socket_windows.cc
+++ b/src/core/lib/iomgr/socket_windows.cc
@@ -36,9 +36,9 @@
#include "src/core/lib/iomgr/pollset_windows.h"
#include "src/core/lib/iomgr/socket_windows.h"
-grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) {
- char *final_name;
- grpc_winsocket *r = (grpc_winsocket *)gpr_malloc(sizeof(grpc_winsocket));
+grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) {
+ char* final_name;
+ grpc_winsocket* r = (grpc_winsocket*)gpr_malloc(sizeof(grpc_winsocket));
memset(r, 0, sizeof(grpc_winsocket));
r->socket = socket;
gpr_mu_init(&r->state_mu);
@@ -53,7 +53,7 @@ grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) {
operations to abort them. We need to do that this way because of the
various callsites of that function, which happen to be in various
mutex hold states, and it'd be unsafe to call them directly. */
-void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
+void grpc_winsocket_shutdown(grpc_winsocket* winsocket) {
/* Grab the function pointer for DisconnectEx for that specific socket.
It may change depending on the interface. */
int status;
@@ -76,7 +76,7 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
if (status == 0) {
DisconnectEx(winsocket->socket, NULL, 0, 0);
} else {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_INFO, "Unable to retrieve DisconnectEx pointer : %s",
utf8_message);
gpr_free(utf8_message);
@@ -84,19 +84,19 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
closesocket(winsocket->socket);
}
-static void destroy(grpc_winsocket *winsocket) {
+static void destroy(grpc_winsocket* winsocket) {
grpc_iomgr_unregister_object(&winsocket->iomgr_object);
gpr_mu_destroy(&winsocket->state_mu);
gpr_free(winsocket);
}
-static bool check_destroyable(grpc_winsocket *winsocket) {
+static bool check_destroyable(grpc_winsocket* winsocket) {
return winsocket->destroy_called == true &&
winsocket->write_info.closure == NULL &&
winsocket->read_info.closure == NULL;
}
-void grpc_winsocket_destroy(grpc_winsocket *winsocket) {
+void grpc_winsocket_destroy(grpc_winsocket* winsocket) {
gpr_mu_lock(&winsocket->state_mu);
GPR_ASSERT(!winsocket->destroy_called);
winsocket->destroy_called = true;
@@ -109,9 +109,9 @@ void grpc_winsocket_destroy(grpc_winsocket *winsocket) {
-) The IOCP already completed in the background, and we need to call
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
-static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
- grpc_winsocket *socket, grpc_closure *closure,
- grpc_winsocket_callback_info *info) {
+static void socket_notify_on_iocp(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* socket, grpc_closure* closure,
+ grpc_winsocket_callback_info* info) {
GPR_ASSERT(info->closure == NULL);
gpr_mu_lock(&socket->state_mu);
if (info->has_pending_iocp) {
@@ -123,19 +123,19 @@ static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&socket->state_mu);
}
-void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
- grpc_winsocket *socket,
- grpc_closure *closure) {
+void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* socket,
+ grpc_closure* closure) {
socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
}
-void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
- grpc_closure *closure) {
+void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+ grpc_closure* closure) {
socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
}
-void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
- grpc_winsocket_callback_info *info) {
+void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+ grpc_winsocket_callback_info* info) {
GPR_ASSERT(!info->has_pending_iocp);
gpr_mu_lock(&socket->state_mu);
if (info->closure) {
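
For reference, a generic standalone sketch (not gRPC's code) of the rendezvous pattern that socket_notify_on_iocp()/grpc_socket_become_ready() implement above: whichever of "callback registered" and "completion arrived" happens second runs the callback. The real code schedules a grpc_closure on the exec_ctx rather than invoking inline; iocp_slot and the function names below are hypothetical.

#include <pthread.h>
#include <stddef.h>

typedef void (*callback_fn)(void* arg);

/* Hypothetical sketch, not gRPC code. */
typedef struct {
  pthread_mutex_t mu;
  int has_pending_completion; /* completion arrived before any callback */
  callback_fn cb;             /* callback queued before the completion */
  void* cb_arg;
} iocp_slot;

static void slot_notify_on_completion(iocp_slot* s, callback_fn cb, void* arg) {
  pthread_mutex_lock(&s->mu);
  if (s->has_pending_completion) {
    s->has_pending_completion = 0; /* completion already happened: run now */
    pthread_mutex_unlock(&s->mu);
    cb(arg);
    return;
  }
  s->cb = cb; /* completion not here yet: queue the callback for later */
  s->cb_arg = arg;
  pthread_mutex_unlock(&s->mu);
}

static void slot_completion_arrived(iocp_slot* s) {
  pthread_mutex_lock(&s->mu);
  if (s->cb != NULL) {
    callback_fn cb = s->cb; /* a callback is already waiting: run it */
    void* arg = s->cb_arg;
    s->cb = NULL;
    pthread_mutex_unlock(&s->mu);
    cb(arg);
    return;
  }
  s->has_pending_completion = 1; /* remember it for a later notify */
  pthread_mutex_unlock(&s->mu);
}
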
diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h
index 84fa071e89..6f6c7a7024 100644
--- a/src/core/lib/iomgr/socket_windows.h
+++ b/src/core/lib/iomgr/socket_windows.h
@@ -44,7 +44,7 @@ typedef struct grpc_winsocket_callback_info {
OVERLAPPED overlapped;
/* The callback information for the pending operation. May be empty if the
caller hasn't registered a callback yet. */
- grpc_closure *closure;
+ grpc_closure* closure;
/* A boolean to describe if the IO Completion Port got a notification for
that operation. This will happen if the operation completed before the
caller had time to register a callback. We could avoid that behavior
@@ -90,26 +90,26 @@ typedef struct grpc_winsocket {
/* Create a wrapped windows handle. This takes ownership of it, meaning that
it will be responsible for closing it. */
-grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name);
+grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name);
/* Initiate an asynchronous shutdown of the socket. Will call off any pending
operations to cancel them. */
-void grpc_winsocket_shutdown(grpc_winsocket *socket);
+void grpc_winsocket_shutdown(grpc_winsocket* socket);
/* Destroy a socket. Should only be called if there's no pending operation. */
-void grpc_winsocket_destroy(grpc_winsocket *socket);
+void grpc_winsocket_destroy(grpc_winsocket* socket);
-void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
- grpc_winsocket *winsocket,
- grpc_closure *closure);
+void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
+ grpc_closure* closure);
-void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx,
- grpc_winsocket *winsocket,
- grpc_closure *closure);
+void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
+ grpc_closure* closure);
-void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx,
- grpc_winsocket *winsocket,
- grpc_winsocket_callback_info *ci);
+void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx,
+ grpc_winsocket* winsocket,
+ grpc_winsocket_callback_info* ci);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index b2f365f2af..c18d8a9316 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -34,11 +34,11 @@ extern "C" {
NULL on failure).
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
-void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect,
- grpc_endpoint **endpoint,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* on_connect,
+ grpc_endpoint** endpoint,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline);
#ifdef __cplusplus
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 5611dd9062..e72d70c027 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -47,21 +47,21 @@ extern grpc_tracer_flag grpc_tcp_trace;
typedef struct {
gpr_mu mu;
- grpc_fd *fd;
+ grpc_fd* fd;
grpc_timer alarm;
grpc_closure on_alarm;
int refs;
grpc_closure write_closure;
- grpc_pollset_set *interested_parties;
- char *addr_str;
- grpc_endpoint **ep;
- grpc_closure *closure;
- grpc_channel_args *channel_args;
+ grpc_pollset_set* interested_parties;
+ char* addr_str;
+ grpc_endpoint** ep;
+ grpc_closure* closure;
+ grpc_channel_args* channel_args;
} async_connect;
-static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
- const grpc_channel_args *channel_args) {
- grpc_error *err = GRPC_ERROR_NONE;
+static grpc_error* prepare_socket(const grpc_resolved_address* addr, int fd,
+ const grpc_channel_args* channel_args) {
+ grpc_error* err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -79,8 +79,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
- grpc_socket_mutator *mutator =
- (grpc_socket_mutator *)channel_args->args[i].value.pointer.p;
+ grpc_socket_mutator* mutator =
+ (grpc_socket_mutator*)channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@@ -96,18 +96,19 @@ done:
return err;
}
-static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
+static void tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
int done;
- async_connect *ac = (async_connect *)acp;
+ async_connect* ac = (async_connect*)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str);
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
- grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "connect() timed out"));
+ grpc_fd_shutdown(
+ exec_ctx, ac->fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
@@ -119,26 +120,26 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
}
}
-grpc_endpoint *grpc_tcp_client_create_from_fd(
- grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
- const char *addr_str) {
+grpc_endpoint* grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
+ const char* addr_str) {
return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str);
}
-static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = (async_connect *)acp;
+static void on_writable(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+ async_connect* ac = (async_connect*)acp;
int so_error = 0;
socklen_t so_error_size;
int err;
int done;
- grpc_endpoint **ep = ac->ep;
- grpc_closure *closure = ac->closure;
- grpc_fd *fd;
+ grpc_endpoint** ep = ac->ep;
+ grpc_closure* closure = ac->closure;
+ grpc_fd* fd;
GRPC_ERROR_REF(error);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str, str);
}
@@ -216,11 +217,11 @@ finish:
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (error != GRPC_ERROR_NONE) {
- char *error_descr;
+ char* error_descr;
grpc_slice str;
bool ret = grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION, &str);
GPR_ASSERT(ret);
- char *desc = grpc_slice_to_c_string(str);
+ char* desc = grpc_slice_to_c_string(str);
gpr_asprintf(&error_descr, "Failed to connect to remote host: %s", desc);
error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION,
grpc_slice_from_copied_string(error_descr));
@@ -238,22 +239,22 @@ finish:
GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
}
-static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) {
int fd;
grpc_dualstack_mode dsmode;
int err;
- async_connect *ac;
+ async_connect* ac;
grpc_resolved_address addr6_v4mapped;
grpc_resolved_address addr4_copy;
- grpc_fd *fdobj;
- char *name;
- char *addr_str;
- grpc_error *error;
+ grpc_fd* fdobj;
+ char* name;
+ char* addr_str;
+ grpc_error* error;
*ep = NULL;
@@ -279,8 +280,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
do {
GPR_ASSERT(addr->len < ~(socklen_t)0);
- err =
- connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
+ err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
} while (err < 0 && errno == EINTR);
addr_str = grpc_sockaddr_to_uri(addr);
@@ -304,7 +304,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
- ac = (async_connect *)gpr_malloc(sizeof(async_connect));
+ ac = (async_connect*)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;
@@ -336,17 +336,17 @@ done:
// overridden by api_fuzzer.c
extern "C" {
void (*grpc_tcp_client_connect_impl)(
- grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
}
-void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_endpoint **ep,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
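
For reference, a self-contained sketch of the non-blocking connect-with-deadline idea used by tcp_client_posix.cc above: connect() on a non-blocking fd, retry on EINTR, wait for writability, then read SO_ERROR to learn how the asynchronous connect finished. gRPC drives the wait through its iomgr fd/timer machinery; the poll()-based helper below (connect_with_deadline) is only an illustrative stand-in.

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hypothetical sketch, not gRPC code: returns a connected fd or -1. */
static int connect_with_deadline(const struct sockaddr* addr, socklen_t len,
                                 int timeout_ms) {
  int fd = socket(addr->sa_family, SOCK_STREAM, 0);
  if (fd < 0) return -1;
  fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

  int err;
  do {
    err = connect(fd, addr, len); /* retry if interrupted by a signal */
  } while (err < 0 && errno == EINTR);

  if (err < 0) {
    if (errno != EINPROGRESS) {
      close(fd); /* immediate, hard failure */
      return -1;
    }
    struct pollfd pfd = {fd, POLLOUT, 0};
    int so_error = 0;
    socklen_t so_len = sizeof(so_error);
    /* The socket becomes writable once the asynchronous connect finishes;
       SO_ERROR then tells us whether it succeeded. */
    if (poll(&pfd, 1, timeout_ms) <= 0 ||
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &so_len) < 0 ||
        so_error != 0) {
      close(fd); /* timeout, poll error, or the connect itself failed */
      return -1;
    }
  }
  return fd;
}
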
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index 8740511804..13d917891e 100644
--- a/src/core/lib/iomgr/tcp_client_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -27,9 +27,9 @@
extern "C" {
#endif
-grpc_endpoint *grpc_tcp_client_create_from_fd(
- grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
- const char *addr_str);
+grpc_endpoint* grpc_tcp_client_create_from_fd(
+ grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
+ const char* addr_str);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
index f3e9366299..15345c8091 100644
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -38,29 +38,29 @@ typedef struct grpc_uv_tcp_connect {
uv_connect_t connect_req;
grpc_timer alarm;
grpc_closure on_alarm;
- uv_tcp_t *tcp_handle;
- grpc_closure *closure;
- grpc_endpoint **endpoint;
+ uv_tcp_t* tcp_handle;
+ grpc_closure* closure;
+ grpc_endpoint** endpoint;
int refs;
- char *addr_name;
- grpc_resource_quota *resource_quota;
+ char* addr_name;
+ grpc_resource_quota* resource_quota;
} grpc_uv_tcp_connect;
-static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
- grpc_uv_tcp_connect *connect) {
+static void uv_tcp_connect_cleanup(grpc_exec_ctx* exec_ctx,
+ grpc_uv_tcp_connect* connect) {
grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota);
gpr_free(connect->addr_name);
gpr_free(connect);
}
-static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+static void tcp_close_callback(uv_handle_t* handle) { gpr_free(handle); }
-static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
- grpc_error *error) {
+static void uv_tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp,
+ grpc_error* error) {
int done;
- grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)acp;
+ grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str);
}
@@ -68,7 +68,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
it was cancelled, then the handler that cancelled it also should close
the handle, if applicable */
- uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ uv_close((uv_handle_t*)connect->tcp_handle, tcp_close_callback);
}
done = (--connect->refs == 0);
if (done) {
@@ -76,12 +76,12 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
}
}
-static void uv_tc_on_connect(uv_connect_t *req, int status) {
- grpc_uv_tcp_connect *connect = (grpc_uv_tcp_connect *)req->data;
+static void uv_tc_on_connect(uv_connect_t* req, int status) {
+ grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)req->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
int done;
- grpc_closure *closure = connect->closure;
+ grpc_closure* closure = connect->closure;
grpc_timer_cancel(&exec_ctx, &connect->alarm);
if (status == 0) {
*connect->endpoint = grpc_tcp_create(
@@ -102,7 +102,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
error = grpc_error_set_str(
error, GRPC_ERROR_STR_OS_ERROR,
grpc_slice_from_static_string(uv_strerror(status)));
- uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
+ uv_close((uv_handle_t*)connect->tcp_handle, tcp_close_callback);
}
}
done = (--connect->refs == 0);
@@ -114,14 +114,14 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
- grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *resolved_addr,
+static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
+ grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* resolved_addr,
grpc_millis deadline) {
- grpc_uv_tcp_connect *connect;
- grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ grpc_uv_tcp_connect* connect;
+ grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
(void)channel_args;
(void)interested_parties;
@@ -132,15 +132,15 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
}
}
- connect = (grpc_uv_tcp_connect *)gpr_zalloc(sizeof(grpc_uv_tcp_connect));
+ connect = (grpc_uv_tcp_connect*)gpr_zalloc(sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
- connect->tcp_handle = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
+ connect->tcp_handle = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
@@ -154,8 +154,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
- (const struct sockaddr *)resolved_addr->addr,
- uv_tc_on_connect);
+ (const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);
@@ -164,17 +163,17 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// overridden by api_fuzzer.c
extern "C" {
void (*grpc_tcp_client_connect_impl)(
- grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
}
-void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_endpoint **ep,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 9adf7ee4e9..103e6b78de 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -40,21 +40,21 @@
#include "src/core/lib/iomgr/timer.h"
typedef struct {
- grpc_closure *on_done;
+ grpc_closure* on_done;
gpr_mu mu;
- grpc_winsocket *socket;
+ grpc_winsocket* socket;
grpc_timer alarm;
grpc_closure on_alarm;
- char *addr_name;
+ char* addr_name;
int refs;
grpc_closure on_connect;
- grpc_endpoint **endpoint;
- grpc_channel_args *channel_args;
+ grpc_endpoint** endpoint;
+ grpc_channel_args* channel_args;
} async_connect;
-static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
- async_connect *ac,
- grpc_winsocket *socket) {
+static void async_connect_unlock_and_cleanup(grpc_exec_ctx* exec_ctx,
+ async_connect* ac,
+ grpc_winsocket* socket) {
int done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (done) {
@@ -66,10 +66,10 @@ static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx,
if (socket != NULL) grpc_winsocket_destroy(socket);
}
-static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = (async_connect *)acp;
+static void on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+ async_connect* ac = (async_connect*)acp;
gpr_mu_lock(&ac->mu);
- grpc_winsocket *socket = ac->socket;
+ grpc_winsocket* socket = ac->socket;
ac->socket = NULL;
if (socket != NULL) {
grpc_winsocket_shutdown(socket);
@@ -77,16 +77,16 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
}
-static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
- async_connect *ac = (async_connect *)acp;
- grpc_endpoint **ep = ac->endpoint;
+static void on_connect(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+ async_connect* ac = (async_connect*)acp;
+ grpc_endpoint** ep = ac->endpoint;
GPR_ASSERT(*ep == NULL);
- grpc_closure *on_done = ac->on_done;
+ grpc_closure* on_done = ac->on_done;
GRPC_ERROR_REF(error);
gpr_mu_lock(&ac->mu);
- grpc_winsocket *socket = ac->socket;
+ grpc_winsocket* socket = ac->socket;
ac->socket = NULL;
gpr_mu_unlock(&ac->mu);
@@ -123,21 +123,21 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
static void tcp_client_connect_impl(
- grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint,
- grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr, grpc_millis deadline) {
+ grpc_exec_ctx* exec_ctx, grpc_closure* on_done, grpc_endpoint** endpoint,
+ grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr, grpc_millis deadline) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
int status;
grpc_resolved_address addr6_v4mapped;
grpc_resolved_address local_address;
- async_connect *ac;
- grpc_winsocket *socket = NULL;
+ async_connect* ac;
+ grpc_winsocket* socket = NULL;
LPFN_CONNECTEX ConnectEx;
GUID guid = WSAID_CONNECTEX;
DWORD ioctl_num_bytes;
- grpc_winsocket_callback_info *info;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_winsocket_callback_info* info;
+ grpc_error* error = GRPC_ERROR_NONE;
*endpoint = NULL;
@@ -172,8 +172,8 @@ static void tcp_client_connect_impl(
grpc_sockaddr_make_wildcard6(0, &local_address);
- status = bind(sock, (struct sockaddr *)&local_address.addr,
- (int)local_address.len);
+ status =
+ bind(sock, (struct sockaddr*)&local_address.addr, (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -181,8 +181,8 @@ static void tcp_client_connect_impl(
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
- success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len,
- NULL, 0, NULL, &info->overlapped);
+ success = ConnectEx(sock, (struct sockaddr*)&addr->addr, (int)addr->len, NULL,
+ 0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */
@@ -194,7 +194,7 @@ static void tcp_client_connect_impl(
}
}
- ac = (async_connect *)gpr_malloc(sizeof(async_connect));
+ ac = (async_connect*)gpr_malloc(sizeof(async_connect));
ac->on_done = on_done;
ac->socket = socket;
gpr_mu_init(&ac->mu);
@@ -211,8 +211,8 @@ static void tcp_client_connect_impl(
failure:
GPR_ASSERT(error != GRPC_ERROR_NONE);
- char *target_uri = grpc_sockaddr_to_uri(addr);
- grpc_error *final_error = grpc_error_set_str(
+ char* target_uri = grpc_sockaddr_to_uri(addr);
+ grpc_error* final_error = grpc_error_set_str(
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Failed to connect",
&error, 1),
GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(target_uri));
@@ -228,17 +228,17 @@ failure:
// overridden by api_fuzzer.c
extern "C" {
void (*grpc_tcp_client_connect_impl)(
- grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
- grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+ grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) = tcp_client_connect_impl;
}
-void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
- grpc_endpoint **ep,
- grpc_pollset_set *interested_parties,
- const grpc_channel_args *channel_args,
- const grpc_resolved_address *addr,
+void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
+ grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
grpc_millis deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index b7c1803ded..fbbb1762b7 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -65,7 +65,7 @@ grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp");
typedef struct {
grpc_endpoint base;
- grpc_fd *em_fd;
+ grpc_fd* em_fd;
int fd;
bool finished_edge;
double target_length;
@@ -79,48 +79,48 @@ typedef struct {
/* garbage after the last read */
grpc_slice_buffer last_read_buffer;
- grpc_slice_buffer *incoming_buffer;
- grpc_slice_buffer *outgoing_buffer;
+ grpc_slice_buffer* incoming_buffer;
+ grpc_slice_buffer* outgoing_buffer;
/** slice within outgoing_buffer to write next */
size_t outgoing_slice_idx;
/** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
size_t outgoing_byte_idx;
- grpc_closure *read_cb;
- grpc_closure *write_cb;
- grpc_closure *release_fd_cb;
- int *release_fd;
+ grpc_closure* read_cb;
+ grpc_closure* write_cb;
+ grpc_closure* release_fd_cb;
+ int* release_fd;
grpc_closure read_done_closure;
grpc_closure write_done_closure;
- char *peer_string;
+ char* peer_string;
- grpc_resource_user *resource_user;
+ grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
typedef struct backup_poller {
- gpr_mu *pollset_mu;
+ gpr_mu* pollset_mu;
grpc_closure run_poller;
} backup_poller;
-#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
+#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */
-static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error);
-static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error);
-static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
- void *arg /* grpc_tcp */,
- grpc_error *error);
-
-static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
- grpc_error *error_ignored) {
- backup_poller *p = (backup_poller *)bp;
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
+ void* arg /* grpc_tcp */,
+ grpc_error* error);
+
+static void done_poller(grpc_exec_ctx* exec_ctx, void* bp,
+ grpc_error* error_ignored) {
+ backup_poller* p = (backup_poller*)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
@@ -128,9 +128,9 @@ static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
gpr_free(p);
}
-static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
- grpc_error *error_ignored) {
- backup_poller *p = (backup_poller *)bp;
+static void run_poller(grpc_exec_ctx* exec_ctx, void* bp,
+ grpc_error* error_ignored) {
+ backup_poller* p = (backup_poller*)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
@@ -165,8 +165,8 @@ static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
}
}
-static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
+static void drop_uncovered(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+ backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
@@ -176,8 +176,8 @@ static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(old_count != 1);
}
-static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
- backup_poller *p;
+static void cover_self(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+ backup_poller* p;
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
@@ -186,7 +186,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
if (old_count == 0) {
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
- p = (backup_poller *)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
+ p = (backup_poller*)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
}
@@ -198,7 +198,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
GRPC_ERROR_NONE);
} else {
- while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
+ while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
// spin waiting for backup poller
}
}
@@ -211,7 +211,7 @@ static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
}
-static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void notify_on_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
}
@@ -220,7 +220,7 @@ static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
}
-static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void notify_on_write(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
}
@@ -231,20 +231,20 @@ static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
}
-static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
+static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
- drop_uncovered(exec_ctx, (grpc_tcp *)arg);
+ drop_uncovered(exec_ctx, (grpc_tcp*)arg);
tcp_handle_write(exec_ctx, arg, error);
}
-static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
+static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
-static void finish_estimate(grpc_tcp *tcp) {
+static void finish_estimate(grpc_tcp* tcp) {
/* If we read >80% of the target buffer in one read loop, increase the size
of the target buffer to either the amount read, or twice its previous
value */
@@ -258,8 +258,8 @@ static void finish_estimate(grpc_tcp *tcp) {
tcp->bytes_read_this_round = 0;
}
-static size_t get_target_read_size(grpc_tcp *tcp) {
- grpc_resource_quota *rq = grpc_resource_user_quota(tcp->resource_user);
+static size_t get_target_read_size(grpc_tcp* tcp) {
+ grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
double pressure = grpc_resource_quota_get_memory_pressure(rq);
double target =
tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
@@ -276,26 +276,26 @@ static size_t get_target_read_size(grpc_tcp *tcp) {
return sz;
}
-static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) {
+static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
return grpc_error_set_str(
grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_copied_string(tcp->peer_string));
}
-static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error);
-static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error);
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error);
-static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_error *why) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void tcp_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_fd_shutdown(exec_ctx, tcp->em_fd, why);
grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
-static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
false /* already_closed */, "tcp_unref_orphan");
grpc_slice_buffer_destroy_internal(exec_ctx, &tcp->last_read_buffer);
@@ -308,8 +308,8 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_UNREF(cl, tcp, reason) \
tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
- const char *reason, const char *file, int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -321,7 +321,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
}
}
-static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
@@ -334,34 +334,34 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
#else
#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
}
-static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
-static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+static void tcp_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer);
TCP_UNREF(exec_ctx, tcp, "destroy");
}
-static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
- grpc_error *error) {
- grpc_closure *cb = tcp->read_cb;
+static void call_read_cb(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ grpc_error* error) {
+ grpc_closure* cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
for (i = 0; i < tcp->incoming_buffer->count; i++) {
- char *dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
+ char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump);
@@ -374,7 +374,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
}
#define MAX_READ_IOVEC 4
-static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_do_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@@ -447,9 +447,9 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_TIMER_END("tcp_continue_read", 0);
}
-static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
- grpc_error *error) {
- grpc_tcp *tcp = (grpc_tcp *)tcpp;
+static void tcp_read_allocation_done(grpc_exec_ctx* exec_ctx, void* tcpp,
+ grpc_error* error) {
+ grpc_tcp* tcp = (grpc_tcp*)tcpp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
@@ -465,7 +465,7 @@ static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
}
}
-static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_continue_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
@@ -482,9 +482,9 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
}
}
-static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error) {
- grpc_tcp *tcp = (grpc_tcp *)arg;
+static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error) {
+ grpc_tcp* tcp = (grpc_tcp*)arg;
GPR_ASSERT(!tcp->finished_edge);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
@@ -501,9 +501,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
}
-static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *incoming_buffer, grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void tcp_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* incoming_buffer, grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->incoming_buffer = incoming_buffer;
@@ -520,8 +520,8 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* returns true if done, false if pending; if returning true, *error is set */
#define MAX_WRITE_IOVEC 1000
-static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
- grpc_error **error) {
+static bool tcp_flush(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ grpc_error** error) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
msg_iovlen_type iov_size;
@@ -610,10 +610,10 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
};
}
-static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
- grpc_error *error) {
- grpc_tcp *tcp = (grpc_tcp *)arg;
- grpc_closure *cb;
+static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
+ grpc_error* error) {
+ grpc_tcp* tcp = (grpc_tcp*)arg;
+ grpc_closure* cb;
if (error != GRPC_ERROR_NONE) {
cb = tcp->write_cb;
@@ -632,7 +632,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
cb = tcp->write_cb;
tcp->write_cb = NULL;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
@@ -641,16 +641,16 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
}
}
-static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *buf, grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_error *error = GRPC_ERROR_NONE;
+static void tcp_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* buf, grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_error* error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
for (i = 0; i < buf->count; i++) {
- char *data =
+ char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
@@ -683,7 +683,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str);
}
GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
@@ -692,37 +692,37 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
GPR_TIMER_END("tcp_write", 0);
}
-static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *pollset) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void tcp_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
}
-static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pollset_set) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void tcp_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
}
-static void tcp_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *ep,
- grpc_pollset_set *pollset_set) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void tcp_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
grpc_pollset_set_del_fd(exec_ctx, pollset_set, tcp->em_fd);
}
-static char *tcp_get_peer(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static char* tcp_get_peer(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return gpr_strdup(tcp->peer_string);
}
-static int tcp_get_fd(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static int tcp_get_fd(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return tcp->fd;
}
-static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return tcp->resource_user;
}
@@ -739,13 +739,13 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
#define MAX_CHUNK_SIZE 32 * 1024 * 1024
-grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
- const grpc_channel_args *channel_args,
- const char *peer_string) {
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* em_fd,
+ const grpc_channel_args* channel_args,
+ const char* peer_string) {
int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
int tcp_max_read_chunk_size = 4 * 1024 * 1024;
int tcp_min_read_chunk_size = 256;
- grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+ grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 ==
@@ -770,7 +770,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
}
}
@@ -781,7 +781,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
tcp_max_read_chunk_size);
- grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
+ grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
tcp->fd = grpc_fd_wrapped_fd(em_fd);
@@ -810,16 +810,16 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
return &tcp->base;
}
-int grpc_tcp_fd(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+int grpc_tcp_fd(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
GPR_ASSERT(ep->vtable == &vtable);
return grpc_fd_wrapped_fd(tcp->em_fd);
}
-void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- int *fd, grpc_closure *done) {
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ int* fd, grpc_closure* done) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_tcp* tcp = (grpc_tcp*)ep;
GPR_ASSERT(ep->vtable == &vtable);
tcp->release_fd = fd;
tcp->release_fd_cb = done;
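
For reference, a standalone sketch of the adaptive read-size estimate that tcp_posix.cc maintains above: per the comment in the diff, when a read loop fills more than 80% of the target buffer the target grows to the larger of the bytes actually read and twice its previous value, and the per-round counter then resets. How the real code shrinks and clamps the target is not shown in this hunk, so the names below (read_estimator, estimator_add, estimator_finish_round) are an approximation, not gRPC's exact policy.

#include <stddef.h>

/* Hypothetical sketch, not gRPC code. */
typedef struct {
  double target_length;         /* current target read size, in bytes */
  double bytes_read_this_round; /* bytes accumulated since the last update */
} read_estimator;

static void estimator_add(read_estimator* e, size_t bytes) {
  e->bytes_read_this_round += (double)bytes;
}

static void estimator_finish_round(read_estimator* e) {
  if (e->bytes_read_this_round > 0.8 * e->target_length) {
    /* Nearly filled the buffer: grow so the next read can take more data
       from the socket in a single pass. */
    double doubled = 2.0 * e->target_length;
    e->target_length = e->bytes_read_this_round > doubled
                           ? e->bytes_read_this_round
                           : doubled;
  }
  e->bytes_read_this_round = 0;
}
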
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index 47e78fa67e..ff1060b0ff 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -41,21 +41,21 @@ extern grpc_tracer_flag grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
- const grpc_channel_args *args,
- const char *peer_string);
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
+ const grpc_channel_args* args,
+ const char* peer_string);
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
Requires: ep must be a tcp endpoint.
*/
-int grpc_tcp_fd(grpc_endpoint *ep);
+int grpc_tcp_fd(grpc_endpoint* ep);
/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
* will be called when the endpoint is destroyed.
* Requires: ep must be a tcp endpoint and fd must not be NULL. */
-void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- int *fd, grpc_closure *done);
+void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ int* fd, grpc_closure* done);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index 8f9ce3819e..ef983199b8 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -35,7 +35,7 @@ typedef struct grpc_tcp_server grpc_tcp_server;
typedef struct grpc_tcp_server_acceptor {
/* grpc_tcp_server_cb functions share a ref on from_server that is valid
until the function returns. */
- grpc_tcp_server *from_server;
+ grpc_tcp_server* from_server;
/* Indices that may be passed to grpc_tcp_server_port_fd(). */
unsigned port_index;
unsigned fd_index;
@@ -43,23 +43,23 @@ typedef struct grpc_tcp_server_acceptor {
/* Called for newly connected TCP connections.
Takes ownership of acceptor. */
-typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_endpoint *ep,
- grpc_pollset *accepting_pollset,
- grpc_tcp_server_acceptor *acceptor);
+typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_endpoint* ep,
+ grpc_pollset* accepting_pollset,
+ grpc_tcp_server_acceptor* acceptor);
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
- grpc_closure *shutdown_complete,
- const grpc_channel_args *args,
- grpc_tcp_server **server);
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server);
/* Start listening to bound ports */
-void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
- grpc_pollset **pollsets, size_t pollset_count,
- grpc_tcp_server_cb on_accept_cb, void *cb_arg);
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
+ grpc_pollset** pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void* cb_arg);
/* Add a port to the server, returning the newly allocated port on success, or
-1 on failure.
@@ -70,37 +70,37 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports handle
all of the multiple socket port matching logic in one place */
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
- int *out_port);
+grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* out_port);
/* Number of fds at the given port_index, or 0 if port_index is out of
bounds. */
-unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, unsigned port_index);
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s, unsigned port_index);
/* Returns the file descriptor of the Mth (fd_index) listening socket of the Nth
(port_index) call to add_port() on this server, or -1 if the indices are out
of bounds. The file descriptor remains owned by the server, and will be
cleaned up when the ref count reaches zero. */
-int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
+int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
unsigned fd_index);
/* Ref s and return s. */
-grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s);
+grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s);
/* shutdown_starting is called when ref count has reached zero and the server is
about to be destroyed. The server will be deleted after it returns. Calling
grpc_tcp_server_ref() from it has no effect. */
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
- grpc_closure *shutdown_starting);
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting);
/* If the refcount drops to zero, enqueue calls on exec_ctx to
shutdown_listeners and delete s. */
-void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s);
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s);
/* Shutdown the fds of listeners. */
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
- grpc_tcp_server *s);
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 06612d639c..6fcbac03a2 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -68,13 +68,13 @@ static void init(void) {
#endif
}
-grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
- grpc_closure *shutdown_complete,
- const grpc_channel_args *args,
- grpc_tcp_server **server) {
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
gpr_once_init(&check_init, init);
- grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server));
+ grpc_tcp_server* s = (grpc_tcp_server*)gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
@@ -116,7 +116,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->shutdown);
gpr_mu_unlock(&s->mu);
@@ -127,7 +127,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_destroy(&s->mu);
while (s->head) {
- grpc_tcp_listener *sp = s->head;
+ grpc_tcp_listener* sp = s->head;
s->head = sp->next;
gpr_free(sp);
}
@@ -136,9 +136,9 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(s);
}
-static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
- grpc_error *error) {
- grpc_tcp_server *s = (grpc_tcp_server *)server;
+static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
+ grpc_error* error) {
+ grpc_tcp_server* s = (grpc_tcp_server*)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -153,14 +153,14 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
/* called when all listening endpoints have been shutdown, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->shutdown);
if (s->head) {
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
@@ -175,7 +175,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
-static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -183,10 +183,11 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* shutdown all fd's */
if (s->active_ports) {
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
- grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Server destroyed"));
+ grpc_fd_shutdown(
+ exec_ctx, sp->emfd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {
@@ -196,9 +197,9 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
/* event manager callback when reads are ready */
-static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
- grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
- grpc_pollset *read_notifier_pollset;
+static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* err) {
+ grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
+ grpc_pollset* read_notifier_pollset;
if (err != GRPC_ERROR_NONE) {
goto error;
}
@@ -211,8 +212,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
/* loop until accept4 returns EAGAIN, and then re-arm notification */
for (;;) {
grpc_resolved_address addr;
- char *addr_str;
- char *name;
+ char* addr_str;
+ char* name;
addr.len = sizeof(struct sockaddr_storage);
/* Note: If we ever decide to return this address to the user, remember to
strip off the ::ffff:0.0.0.0/96 prefix first. */
@@ -246,13 +247,13 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
}
- grpc_fd *fdobj = grpc_fd_create(fd, name);
+ grpc_fd* fdobj = grpc_fd_create(fd, name);
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor =
- (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor* acceptor =
+ (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@@ -279,18 +280,18 @@ error:
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
-static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
+static grpc_error* add_wildcard_addrs_to_server(grpc_tcp_server* s,
unsigned port_index,
int requested_port,
- int *out_port) {
+ int* out_port) {
grpc_resolved_address wild4;
grpc_resolved_address wild6;
unsigned fd_index = 0;
grpc_dualstack_mode dsmode;
- grpc_tcp_listener *sp = NULL;
- grpc_tcp_listener *sp2 = NULL;
- grpc_error *v6_err = GRPC_ERROR_NONE;
- grpc_error *v4_err = GRPC_ERROR_NONE;
+ grpc_tcp_listener* sp = NULL;
+ grpc_tcp_listener* sp2 = NULL;
+ grpc_error* v6_err = GRPC_ERROR_NONE;
+ grpc_error* v4_err = GRPC_ERROR_NONE;
*out_port = -1;
if (grpc_tcp_server_have_ifaddrs() && s->expand_wildcard_addrs) {
@@ -335,7 +336,7 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
}
return GRPC_ERROR_NONE;
} else {
- grpc_error *root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ grpc_error* root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to add any wildcard listeners");
GPR_ASSERT(v6_err != GRPC_ERROR_NONE && v4_err != GRPC_ERROR_NONE);
root_err = grpc_error_add_child(root_err, v6_err);
@@ -344,13 +345,13 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
}
}
-static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
- grpc_tcp_listener *sp = NULL;
- char *addr_str;
- char *name;
- grpc_error *err;
+static grpc_error* clone_port(grpc_tcp_listener* listener, unsigned count) {
+ grpc_tcp_listener* sp = NULL;
+ char* addr_str;
+ char* name;
+ grpc_error* err;
- for (grpc_tcp_listener *l = listener->next; l && l->is_sibling; l = l->next) {
+ for (grpc_tcp_listener* l = listener->next; l && l->is_sibling; l = l->next) {
l->fd_index += count;
}
@@ -366,7 +367,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
- sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original
@@ -392,16 +393,16 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
- int *out_port) {
- grpc_tcp_listener *sp;
+grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* out_port) {
+ grpc_tcp_listener* sp;
grpc_resolved_address sockname_temp;
grpc_resolved_address addr6_v4mapped;
int requested_port = grpc_sockaddr_get_port(addr);
unsigned port_index = 0;
grpc_dualstack_mode dsmode;
- grpc_error *err;
+ grpc_error* err;
*out_port = -1;
if (s->tail != NULL) {
port_index = s->tail->port_index + 1;
@@ -413,8 +414,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
if (requested_port == 0) {
for (sp = s->head; sp; sp = sp->next) {
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp.addr,
- (socklen_t *)&sockname_temp.len)) {
+ if (0 == getsockname(sp->fd, (struct sockaddr*)&sockname_temp.addr,
+ (socklen_t*)&sockname_temp.len)) {
int used_port = grpc_sockaddr_get_port(&sockname_temp);
if (used_port > 0) {
memcpy(&sockname_temp, addr, sizeof(grpc_resolved_address));
@@ -442,10 +443,10 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
/* Return listener at port_index or NULL. Should only be called with s->mu
locked. */
-static grpc_tcp_listener *get_port_index(grpc_tcp_server *s,
+static grpc_tcp_listener* get_port_index(grpc_tcp_server* s,
unsigned port_index) {
unsigned num_ports = 0;
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
if (!sp->is_sibling) {
if (++num_ports > port_index) {
@@ -456,11 +457,11 @@ static grpc_tcp_listener *get_port_index(grpc_tcp_server *s,
return NULL;
}
-unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
+unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s,
unsigned port_index) {
unsigned num_fds = 0;
gpr_mu_lock(&s->mu);
- grpc_tcp_listener *sp = get_port_index(s, port_index);
+ grpc_tcp_listener* sp = get_port_index(s, port_index);
for (; sp; sp = sp->sibling) {
++num_fds;
}
@@ -468,10 +469,10 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
return num_fds;
}
-int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
+int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index,
unsigned fd_index) {
gpr_mu_lock(&s->mu);
- grpc_tcp_listener *sp = get_port_index(s, port_index);
+ grpc_tcp_listener* sp = get_port_index(s, port_index);
for (; sp; sp = sp->sibling, --fd_index) {
if (fd_index == 0) {
gpr_mu_unlock(&s->mu);
@@ -482,12 +483,12 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
return -1;
}
-void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
- grpc_pollset **pollsets, size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
+ grpc_pollset** pollsets, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
- void *on_accept_cb_arg) {
+ void* on_accept_cb_arg) {
size_t i;
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
@@ -524,20 +525,20 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
gpr_mu_unlock(&s->mu);
}
-grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
gpr_ref_non_zero(&s->refs);
return s;
}
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
- grpc_closure *shutdown_starting) {
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
gpr_mu_lock(&s->mu);
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
GRPC_ERROR_NONE);
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
@@ -547,13 +548,13 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
- grpc_tcp_server *s) {
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
s->shutdown_listeners = true;
/* shutdown all fd's */
if (s->active_ports) {
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_fd_shutdown(exec_ctx, sp->emfd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"));
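Every hunk in this file is the same mechanical change: clang-format 5.0 rebinds the pointer declarator to the type (presumably PointerAlignment: Left or an equivalent setting in the project's .clang-format; the exact config is not shown in this diff). A minimal before/after illustration with arbitrary declarations, not taken from the tree:

/* before: '*' binds to the declared name */
grpc_error *err;
void start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, int *out_port);

/* after clang-format 5.0 with left pointer alignment */
grpc_error* err;
void start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s, int* out_port);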
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h
index 6746333960..608fba3346 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix.h
+++ b/src/core/lib/iomgr/tcp_server_utils_posix.h
@@ -31,22 +31,22 @@ extern "C" {
/* one listening port */
typedef struct grpc_tcp_listener {
int fd;
- grpc_fd *emfd;
- grpc_tcp_server *server;
+ grpc_fd* emfd;
+ grpc_tcp_server* server;
grpc_resolved_address addr;
int port;
unsigned port_index;
unsigned fd_index;
grpc_closure read_closure;
grpc_closure destroyed_closure;
- struct grpc_tcp_listener *next;
+ struct grpc_tcp_listener* next;
/* sibling is a linked list of all listeners for a given port. add_port and
clone_port place all new listeners in the same sibling list. A member of
the 'sibling' list is also a member of the 'next' list. The head of each
sibling list has is_sibling==0, and subsequent members of sibling lists
have is_sibling==1. is_sibling allows separate sibling lists to be
identified while iterating through 'next'. */
- struct grpc_tcp_listener *sibling;
+ struct grpc_tcp_listener* sibling;
int is_sibling;
} grpc_tcp_listener;
@@ -55,7 +55,7 @@ struct grpc_tcp_server {
gpr_refcount refs;
/* Called whenever accept() succeeds on a server port. */
grpc_tcp_server_cb on_accept_cb;
- void *on_accept_cb_arg;
+ void* on_accept_cb_arg;
gpr_mu mu;
@@ -74,18 +74,18 @@ struct grpc_tcp_server {
bool expand_wildcard_addrs;
/* linked list of server ports */
- grpc_tcp_listener *head;
- grpc_tcp_listener *tail;
+ grpc_tcp_listener* head;
+ grpc_tcp_listener* tail;
unsigned nports;
/* List of closures passed to shutdown_starting_add(). */
grpc_closure_list shutdown_starting;
/* shutdown callback */
- grpc_closure *shutdown_complete;
+ grpc_closure* shutdown_complete;
/* all pollsets interested in new connections */
- grpc_pollset **pollsets;
+ grpc_pollset** pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
@@ -93,31 +93,31 @@ struct grpc_tcp_server {
gpr_atm next_pollset_to_assign;
/* channel args for this server */
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
};
/* If successful, add a listener to \a s for \a addr, set \a dsmode for the
socket, and return the \a listener. */
-grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
+grpc_error* grpc_tcp_server_add_addr(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
unsigned port_index, unsigned fd_index,
- grpc_dualstack_mode *dsmode,
- grpc_tcp_listener **listener);
+ grpc_dualstack_mode* dsmode,
+ grpc_tcp_listener** listener);
/* Get all addresses assigned to network interfaces on the machine and create a
listener for each. requested_port is the port to use for every listener, or 0
to select one random port that will be used for every listener. Set *out_port
to the port selected. Return GRPC_ERROR_NONE only if all listeners were
added. */
-grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s,
unsigned port_index,
int requested_port,
- int *out_port);
+ int* out_port);
/* Prepare a recently-created socket for listening. */
-grpc_error *grpc_tcp_server_prepare_socket(int fd,
- const grpc_resolved_address *addr,
- bool so_reuseport, int *port);
+grpc_error* grpc_tcp_server_prepare_socket(int fd,
+ const grpc_resolved_address* addr,
+ bool so_reuseport, int* port);
/* Return true if the platform supports ifaddrs */
bool grpc_tcp_server_have_ifaddrs(void);
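The 'next'/'sibling' comment above describes a two-level list: every listener sits on the 'next' chain, and clones created for the same port (e.g. when SO_REUSEPORT expands a listener) hang off 'sibling', with only the first node of each group carrying is_sibling==0. A minimal sketch of walking that invariant, mirroring get_port_index in tcp_server_posix.cc; count_ports is an illustrative helper, not part of this patch:

/* Illustrative only: count distinct ports by skipping SO_REUSEPORT clones,
   which are marked is_sibling==1 and reached via the 'sibling' chain. */
static unsigned count_ports(grpc_tcp_server* s) {
  unsigned num_ports = 0;
  for (grpc_tcp_listener* sp = s->head; sp != NULL; sp = sp->next) {
    if (!sp->is_sibling) ++num_ports;
  }
  return num_ports;
}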
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
index a828bee074..51e6731729 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
@@ -46,14 +46,14 @@ static int s_max_accept_queue_size;
static void init_max_accept_queue_size(void) {
int n = SOMAXCONN;
char buf[64];
- FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r");
+ FILE* fp = fopen("/proc/sys/net/core/somaxconn", "r");
if (fp == NULL) {
/* 2.4 kernel. */
s_max_accept_queue_size = SOMAXCONN;
return;
}
if (fgets(buf, sizeof buf, fp)) {
- char *end;
+ char* end;
long i = strtol(buf, &end, 10);
if (i > 0 && i <= INT_MAX && end && *end == 0) {
n = (int)i;
@@ -75,16 +75,16 @@ static int get_max_accept_queue_size(void) {
return s_max_accept_queue_size;
}
-static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
- const grpc_resolved_address *addr,
+static grpc_error* add_socket_to_server(grpc_tcp_server* s, int fd,
+ const grpc_resolved_address* addr,
unsigned port_index, unsigned fd_index,
- grpc_tcp_listener **listener) {
- grpc_tcp_listener *sp = NULL;
+ grpc_tcp_listener** listener) {
+ grpc_tcp_listener* sp = NULL;
int port = -1;
- char *addr_str;
- char *name;
+ char* addr_str;
+ char* name;
- grpc_error *err =
+ grpc_error* err =
grpc_tcp_server_prepare_socket(fd, addr, s->so_reuseport, &port);
if (err == GRPC_ERROR_NONE) {
GPR_ASSERT(port > 0);
@@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -122,14 +122,14 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
/* If successful, add a listener to s for addr, set *dsmode for the socket, and
return the *listener. */
-grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
+grpc_error* grpc_tcp_server_add_addr(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
unsigned port_index, unsigned fd_index,
- grpc_dualstack_mode *dsmode,
- grpc_tcp_listener **listener) {
+ grpc_dualstack_mode* dsmode,
+ grpc_tcp_listener** listener) {
grpc_resolved_address addr4_copy;
int fd;
- grpc_error *err =
+ grpc_error* err =
grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd);
if (err != GRPC_ERROR_NONE) {
return err;
@@ -142,11 +142,11 @@ grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s,
}
/* Prepare a recently-created socket for listening. */
-grpc_error *grpc_tcp_server_prepare_socket(int fd,
- const grpc_resolved_address *addr,
- bool so_reuseport, int *port) {
+grpc_error* grpc_tcp_server_prepare_socket(int fd,
+ const grpc_resolved_address* addr,
+ bool so_reuseport, int* port) {
grpc_resolved_address sockname_temp;
- grpc_error *err = GRPC_ERROR_NONE;
+ grpc_error* err = GRPC_ERROR_NONE;
GPR_ASSERT(fd >= 0);
@@ -169,7 +169,7 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd,
if (err != GRPC_ERROR_NONE) goto error;
GPR_ASSERT(addr->len < ~(socklen_t)0);
- if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) {
+ if (bind(fd, (struct sockaddr*)addr->addr, (socklen_t)addr->len) < 0) {
err = GRPC_OS_ERROR(errno, "bind");
goto error;
}
@@ -181,8 +181,8 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd,
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
- (socklen_t *)&sockname_temp.len) < 0) {
+ if (getsockname(fd, (struct sockaddr*)sockname_temp.addr,
+ (socklen_t*)&sockname_temp.len) < 0) {
err = GRPC_OS_ERROR(errno, "getsockname");
goto error;
}
@@ -195,7 +195,7 @@ error:
if (fd >= 0) {
close(fd);
}
- grpc_error *ret =
+ grpc_error* ret =
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Unable to configure socket", &err, 1),
GRPC_ERROR_INT_FD, fd);
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
index a243b69f35..b7437dbf4d 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc
@@ -36,9 +36,9 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
/* Return the listener in s with address addr or NULL. */
-static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s,
- grpc_resolved_address *addr) {
- grpc_tcp_listener *l;
+static grpc_tcp_listener* find_listener_with_addr(grpc_tcp_server* s,
+ grpc_resolved_address* addr) {
+ grpc_tcp_listener* l;
gpr_mu_lock(&s->mu);
for (l = s->head; l != NULL; l = l->next) {
if (l->addr.len != addr->len) {
@@ -53,12 +53,12 @@ static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s,
}
/* Bind to "::" to get a port number not used by any address. */
-static grpc_error *get_unused_port(int *port) {
+static grpc_error* get_unused_port(int* port) {
grpc_resolved_address wild;
grpc_sockaddr_make_wildcard6(0, &wild);
grpc_dualstack_mode dsmode;
int fd;
- grpc_error *err =
+ grpc_error* err =
grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd);
if (err != GRPC_ERROR_NONE) {
return err;
@@ -66,12 +66,12 @@ static grpc_error *get_unused_port(int *port) {
if (dsmode == GRPC_DSMODE_IPV4) {
grpc_sockaddr_make_wildcard4(0, &wild);
}
- if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) {
+ if (bind(fd, (const struct sockaddr*)wild.addr, (socklen_t)wild.len) != 0) {
err = GRPC_OS_ERROR(errno, "bind");
close(fd);
return err;
}
- if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) !=
+ if (getsockname(fd, (struct sockaddr*)wild.addr, (socklen_t*)&wild.len) !=
0) {
err = GRPC_OS_ERROR(errno, "getsockname");
close(fd);
@@ -83,15 +83,15 @@ static grpc_error *get_unused_port(int *port) {
: GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s,
unsigned port_index,
int requested_port,
- int *out_port) {
- struct ifaddrs *ifa = NULL;
- struct ifaddrs *ifa_it;
+ int* out_port) {
+ struct ifaddrs* ifa = NULL;
+ struct ifaddrs* ifa_it;
unsigned fd_index = 0;
- grpc_tcp_listener *sp = NULL;
- grpc_error *err = GRPC_ERROR_NONE;
+ grpc_tcp_listener* sp = NULL;
+ grpc_error* err = GRPC_ERROR_NONE;
if (requested_port == 0) {
/* Note: There could be a race where some local addrs can listen on the
selected port and some can't. The sane way to handle this would be to
@@ -109,10 +109,10 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
}
for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) {
grpc_resolved_address addr;
- char *addr_str = NULL;
+ char* addr_str = NULL;
grpc_dualstack_mode dsmode;
- grpc_tcp_listener *new_sp = NULL;
- const char *ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : "<unknown>");
+ grpc_tcp_listener* new_sp = NULL;
+ const char* ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : "<unknown>");
if (ifa_it->ifa_addr == NULL) {
continue;
} else if (ifa_it->ifa_addr->sa_family == AF_INET) {
@@ -144,8 +144,8 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
}
if ((err = grpc_tcp_server_add_addr(s, &addr, port_index, fd_index, &dsmode,
&new_sp)) != GRPC_ERROR_NONE) {
- char *err_str = NULL;
- grpc_error *root_err;
+ char* err_str = NULL;
+ grpc_error* root_err;
if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) {
err_str = gpr_strdup("Failed to add listener");
}
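get_unused_port above uses the usual trick for picking a free port: bind a wildcard socket to port 0 and read back the kernel-assigned port with getsockname. A standalone sketch of the same idea in plain POSIX, without the gRPC socket wrappers (pick_unused_port is an illustrative name; error handling collapsed to returning -1):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Illustrative only: let the kernel choose a free TCP port on "::". */
static int pick_unused_port(void) {
  int fd = socket(AF_INET6, SOCK_STREAM, 0);
  if (fd < 0) return -1;
  struct sockaddr_in6 addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin6_family = AF_INET6;
  addr.sin6_addr = in6addr_any; /* wildcard "::" */
  addr.sin6_port = htons(0);    /* 0 == kernel picks the port */
  socklen_t len = sizeof(addr);
  if (bind(fd, (struct sockaddr*)&addr, len) != 0 ||
      getsockname(fd, (struct sockaddr*)&addr, &len) != 0) {
    close(fd);
    return -1;
  }
  close(fd);
  return ntohs(addr.sin6_port);
}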
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
index 34eab20d6a..2d72b95def 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc
@@ -22,10 +22,10 @@
#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
-grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s,
unsigned port_index,
int requested_port,
- int *out_port) {
+ int* out_port) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ifaddrs available");
}
diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc
index 348838c495..0eed4d428f 100644
--- a/src/core/lib/iomgr/tcp_server_uv.cc
+++ b/src/core/lib/iomgr/tcp_server_uv.cc
@@ -37,12 +37,12 @@
/* one listening port */
typedef struct grpc_tcp_listener grpc_tcp_listener;
struct grpc_tcp_listener {
- uv_tcp_t *handle;
- grpc_tcp_server *server;
+ uv_tcp_t* handle;
+ grpc_tcp_server* server;
unsigned port_index;
int port;
/* linked list */
- struct grpc_tcp_listener *next;
+ struct grpc_tcp_listener* next;
bool closed;
@@ -54,37 +54,37 @@ struct grpc_tcp_server {
/* Called whenever accept() succeeds on a server port. */
grpc_tcp_server_cb on_accept_cb;
- void *on_accept_cb_arg;
+ void* on_accept_cb_arg;
int open_ports;
/* linked list of server ports */
- grpc_tcp_listener *head;
- grpc_tcp_listener *tail;
+ grpc_tcp_listener* head;
+ grpc_tcp_listener* tail;
/* List of closures passed to shutdown_starting_add(). */
grpc_closure_list shutdown_starting;
/* shutdown callback */
- grpc_closure *shutdown_complete;
+ grpc_closure* shutdown_complete;
bool shutdown;
- grpc_resource_quota *resource_quota;
+ grpc_resource_quota* resource_quota;
};
-grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
- grpc_closure *shutdown_complete,
- const grpc_channel_args *args,
- grpc_tcp_server **server) {
- grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server));
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
+ grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
s->resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota *)args->args[i].value.pointer.p);
+ (grpc_resource_quota*)args->args[i].value.pointer.p);
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
@@ -107,26 +107,26 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
GRPC_UV_ASSERT_SAME_THREAD();
gpr_ref(&s->refs);
return s;
}
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
- grpc_closure *shutdown_starting) {
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
GRPC_ERROR_NONE);
}
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
GPR_ASSERT(s->shutdown);
if (s->shutdown_complete != NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
while (s->head) {
- grpc_tcp_listener *sp = s->head;
+ grpc_tcp_listener* sp = s->head;
s->head = sp->next;
sp->next = NULL;
gpr_free(sp->handle);
@@ -136,8 +136,8 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(s);
}
-static void handle_close_callback(uv_handle_t *handle) {
- grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data;
+static void handle_close_callback(uv_handle_t* handle) {
+ grpc_tcp_listener* sp = (grpc_tcp_listener*)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
sp->server->open_ports--;
if (sp->server->open_ports == 0 && sp->server->shutdown) {
@@ -146,16 +146,16 @@ static void handle_close_callback(uv_handle_t *handle) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void close_listener(grpc_tcp_listener *sp) {
+static void close_listener(grpc_tcp_listener* sp) {
if (!sp->closed) {
sp->closed = true;
- uv_close((uv_handle_t *)sp->handle, handle_close_callback);
+ uv_close((uv_handle_t*)sp->handle, handle_close_callback);
}
}
-static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
int immediately_done = 0;
- grpc_tcp_listener *sp;
+ grpc_tcp_listener* sp;
GPR_ASSERT(!s->shutdown);
s->shutdown = true;
@@ -172,7 +172,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
-void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
GRPC_UV_ASSERT_SAME_THREAD();
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
@@ -189,25 +189,25 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
-static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) {
- grpc_tcp_server_acceptor *acceptor =
- (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
- uv_tcp_t *client = NULL;
- grpc_endpoint *ep = NULL;
+static void finish_accept(grpc_exec_ctx* exec_ctx, grpc_tcp_listener* sp) {
+ grpc_tcp_server_acceptor* acceptor =
+ (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
+ uv_tcp_t* client = NULL;
+ grpc_endpoint* ep = NULL;
grpc_resolved_address peer_name;
- char *peer_name_string;
+ char* peer_name_string;
int err;
- uv_tcp_t *server = sp->handle;
+ uv_tcp_t* server = sp->handle;
- client = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
+ client = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
uv_tcp_init(uv_default_loop(), client);
// UV documentation says this is guaranteed to succeed
- uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
+ uv_accept((uv_stream_t*)server, (uv_stream_t*)client);
peer_name_string = NULL;
memset(&peer_name, 0, sizeof(grpc_resolved_address));
peer_name.len = sizeof(struct sockaddr_storage);
- err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
- (int *)&peer_name.len);
+ err = uv_tcp_getpeername(client, (struct sockaddr*)&peer_name.addr,
+ (int*)&peer_name.len);
if (err == 0) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
@@ -230,8 +230,8 @@ static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) {
gpr_free(peer_name_string);
}
-static void on_connect(uv_stream_t *server, int status) {
- grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
+static void on_connect(uv_stream_t* server, int status) {
+ grpc_tcp_listener* sp = (grpc_tcp_listener*)server->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (status < 0) {
@@ -260,18 +260,18 @@ static void on_connect(uv_stream_t *server, int status) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
- const grpc_resolved_address *addr,
+static grpc_error* add_socket_to_server(grpc_tcp_server* s, uv_tcp_t* handle,
+ const grpc_resolved_address* addr,
unsigned port_index,
- grpc_tcp_listener **listener) {
- grpc_tcp_listener *sp = NULL;
+ grpc_tcp_listener** listener) {
+ grpc_tcp_listener* sp = NULL;
int port = -1;
int status;
- grpc_error *error;
+ grpc_error* error;
grpc_resolved_address sockname_temp;
// The last argument to uv_tcp_bind is flags
- status = uv_tcp_bind(handle, (struct sockaddr *)addr->addr, 0);
+ status = uv_tcp_bind(handle, (struct sockaddr*)addr->addr, 0);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to bind to port");
error =
@@ -280,7 +280,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
return error;
}
- status = uv_listen((uv_stream_t *)handle, SOMAXCONN, on_connect);
+ status = uv_listen((uv_stream_t*)handle, SOMAXCONN, on_connect);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to listen to port");
error =
@@ -290,8 +290,8 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
}
sockname_temp.len = (int)sizeof(struct sockaddr_storage);
- status = uv_tcp_getsockname(handle, (struct sockaddr *)&sockname_temp.addr,
- (int *)&sockname_temp.len);
+ status = uv_tcp_getsockname(handle, (struct sockaddr*)&sockname_temp.addr,
+ (int*)&sockname_temp.len);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getsockname failed");
error =
@@ -304,7 +304,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
GPR_ASSERT(port >= 0);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = (grpc_tcp_listener *)gpr_zalloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener*)gpr_zalloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -325,19 +325,19 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
- int *port) {
+grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* port) {
// This function is mostly copied from tcp_server_windows.c
- grpc_tcp_listener *sp = NULL;
- uv_tcp_t *handle;
+ grpc_tcp_listener* sp = NULL;
+ uv_tcp_t* handle;
grpc_resolved_address addr6_v4mapped;
grpc_resolved_address wildcard;
- grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address* allocated_addr = NULL;
grpc_resolved_address sockname_temp;
unsigned port_index = 0;
int status;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
int family;
GRPC_UV_ASSERT_SAME_THREAD();
@@ -352,12 +352,12 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
for (sp = s->head; sp; sp = sp->next) {
sockname_temp.len = sizeof(struct sockaddr_storage);
if (0 == uv_tcp_getsockname(sp->handle,
- (struct sockaddr *)&sockname_temp.addr,
- (int *)&sockname_temp.len)) {
+ (struct sockaddr*)&sockname_temp.addr,
+ (int*)&sockname_temp.len)) {
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = (grpc_resolved_address *)gpr_malloc(
- sizeof(grpc_resolved_address));
+ allocated_addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
@@ -378,14 +378,14 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
addr = &wildcard;
}
- handle = (uv_tcp_t *)gpr_malloc(sizeof(uv_tcp_t));
+ handle = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t));
family = grpc_sockaddr_get_family(addr);
status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family);
#if defined(GPR_LINUX) && defined(SO_REUSEPORT)
if (family == AF_INET || family == AF_INET6) {
int fd;
- uv_fileno((uv_handle_t *)handle, &fd);
+ uv_fileno((uv_handle_t*)handle, &fd);
int enable = 1;
setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable));
}
@@ -404,9 +404,9 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
gpr_free(allocated_addr);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- char *port_string;
+ char* port_string;
grpc_sockaddr_to_string(&port_string, addr, 0);
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
if (port_string) {
gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_free(port_string);
@@ -416,7 +416,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
}
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
GRPC_ERROR_UNREF(error);
error = error_out;
@@ -428,10 +428,10 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
return error;
}
-void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
- grpc_pollset **pollsets, size_t pollset_count,
- grpc_tcp_server_cb on_accept_cb, void *cb_arg) {
- grpc_tcp_listener *sp;
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
+ grpc_pollset** pollsets, size_t pollset_count,
+ grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
+ grpc_tcp_listener* sp;
(void)pollsets;
(void)pollset_count;
GRPC_UV_ASSERT_SAME_THREAD();
@@ -450,7 +450,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
}
}
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
- grpc_tcp_server *s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {}
#endif /* GRPC_UV */
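The GPR_LINUX block above reaches under libuv with uv_fileno to set SO_REUSEPORT on the listening fd, matching what the POSIX server gets from grpc_tcp_server_prepare_socket. Reduced to the option itself (enable_reuseport is an illustrative helper, assuming a kernel that defines SO_REUSEPORT, i.e. Linux 3.9+):

#include <sys/socket.h>

/* Illustrative only: allow several listeners to bind the same address:port
   and have the kernel distribute incoming connections among them. */
static int enable_reuseport(int fd) {
#ifdef SO_REUSEPORT
  int enable = 1;
  return setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable));
#else
  (void)fd;
  return -1; /* option not available on this platform */
#endif
}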
diff --git a/src/core/lib/iomgr/tcp_server_windows.cc b/src/core/lib/iomgr/tcp_server_windows.cc
index f198aaaa5b..f538194895 100644
--- a/src/core/lib/iomgr/tcp_server_windows.cc
+++ b/src/core/lib/iomgr/tcp_server_windows.cc
@@ -52,11 +52,11 @@ struct grpc_tcp_listener {
/* This will hold the socket for the next accept. */
SOCKET new_socket;
/* The listener winsocket. */
- grpc_winsocket *socket;
+ grpc_winsocket* socket;
/* The actual TCP port number. */
int port;
unsigned port_index;
- grpc_tcp_server *server;
+ grpc_tcp_server* server;
/* The cached AcceptEx for that port. */
LPFN_ACCEPTEX AcceptEx;
int shutting_down;
@@ -64,7 +64,7 @@ struct grpc_tcp_listener {
/* closure for socket notification of accept being ready */
grpc_closure on_accept;
/* linked list */
- struct grpc_tcp_listener *next;
+ struct grpc_tcp_listener* next;
};
/* the overall server */
@@ -72,7 +72,7 @@ struct grpc_tcp_server {
gpr_refcount refs;
/* Called whenever accept() succeeds on a server port. */
grpc_tcp_server_cb on_accept_cb;
- void *on_accept_cb_arg;
+ void* on_accept_cb_arg;
gpr_mu mu;
@@ -80,25 +80,25 @@ struct grpc_tcp_server {
int active_ports;
/* linked list of server ports */
- grpc_tcp_listener *head;
- grpc_tcp_listener *tail;
+ grpc_tcp_listener* head;
+ grpc_tcp_listener* tail;
/* List of closures passed to shutdown_starting_add(). */
grpc_closure_list shutdown_starting;
/* shutdown callback */
- grpc_closure *shutdown_complete;
+ grpc_closure* shutdown_complete;
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
-grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
- grpc_closure *shutdown_complete,
- const grpc_channel_args *args,
- grpc_tcp_server **server) {
- grpc_tcp_server *s = (grpc_tcp_server *)gpr_malloc(sizeof(grpc_tcp_server));
+grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
+ grpc_closure* shutdown_complete,
+ const grpc_channel_args* args,
+ grpc_tcp_server** server) {
+ grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
s->channel_args = grpc_channel_args_copy(args);
gpr_ref_init(&s->refs, 1);
gpr_mu_init(&s->mu);
@@ -114,15 +114,15 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_tcp_server *s = (grpc_tcp_server *)arg;
+static void destroy_server(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_tcp_server* s = (grpc_tcp_server*)arg;
/* Now that the accepts have been aborted, we can destroy the sockets.
The IOCP won't get notified on these, so we can flag them as already
closed by the system. */
while (s->head) {
- grpc_tcp_listener *sp = s->head;
+ grpc_tcp_listener* sp = s->head;
s->head = sp->next;
sp->next = NULL;
grpc_winsocket_destroy(sp->socket);
@@ -132,32 +132,33 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(s);
}
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
- grpc_tcp_server *s) {
+static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {
if (s->shutdown_complete != NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
- GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s,
- grpc_schedule_on_exec_ctx),
- GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(
+ exec_ctx,
+ GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx),
+ GRPC_ERROR_NONE);
}
-grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
+grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) {
gpr_ref_non_zero(&s->refs);
return s;
}
-void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
- grpc_closure *shutdown_starting) {
+void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s,
+ grpc_closure* shutdown_starting) {
gpr_mu_lock(&s->mu);
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
GRPC_ERROR_NONE);
gpr_mu_unlock(&s->mu);
}
-static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
- grpc_tcp_listener *sp;
+static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+ grpc_tcp_listener* sp;
gpr_mu_lock(&s->mu);
/* First, shutdown all fd's. This will queue abortion calls for all
@@ -173,7 +174,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
if (gpr_unref(&s->refs)) {
grpc_tcp_server_shutdown_listeners(exec_ctx, s);
gpr_mu_lock(&s->mu);
@@ -184,11 +185,11 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
/* Prepare (bind) a recently-created socket for listening. */
-static grpc_error *prepare_socket(SOCKET sock,
- const grpc_resolved_address *addr,
- int *port) {
+static grpc_error* prepare_socket(SOCKET sock,
+ const grpc_resolved_address* addr,
+ int* port) {
grpc_resolved_address sockname_temp;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
int sockname_temp_len;
error = grpc_tcp_prepare_socket(sock);
@@ -196,7 +197,7 @@ static grpc_error *prepare_socket(SOCKET sock,
goto failure;
}
- if (bind(sock, (const struct sockaddr *)addr->addr, (int)addr->len) ==
+ if (bind(sock, (const struct sockaddr*)addr->addr, (int)addr->len) ==
SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -208,7 +209,7 @@ static grpc_error *prepare_socket(SOCKET sock,
}
sockname_temp_len = sizeof(struct sockaddr_storage);
- if (getsockname(sock, (struct sockaddr *)sockname_temp.addr,
+ if (getsockname(sock, (struct sockaddr*)sockname_temp.addr,
&sockname_temp_len) == SOCKET_ERROR) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname");
goto failure;
@@ -220,7 +221,7 @@ static grpc_error *prepare_socket(SOCKET sock,
failure:
GPR_ASSERT(error != GRPC_ERROR_NONE);
- char *tgtaddr = grpc_sockaddr_to_uri(addr);
+ char* tgtaddr = grpc_sockaddr_to_uri(addr);
grpc_error_set_int(
grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to prepare server socket", &error, 1),
@@ -233,8 +234,8 @@ failure:
return error;
}
-static void decrement_active_ports_and_notify_locked(grpc_exec_ctx *exec_ctx,
- grpc_tcp_listener *sp) {
+static void decrement_active_ports_and_notify_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_listener* sp) {
sp->shutting_down = 0;
GPR_ASSERT(sp->server->active_ports > 0);
if (0 == --sp->server->active_ports) {
@@ -244,13 +245,13 @@ static void decrement_active_ports_and_notify_locked(grpc_exec_ctx *exec_ctx,
/* In order to do an async accept, we need to create a socket first which
will be the one assigned to the new incoming connection. */
-static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx,
- grpc_tcp_listener *port) {
+static grpc_error* start_accept_locked(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_listener* port) {
SOCKET sock = INVALID_SOCKET;
BOOL success;
DWORD addrlen = sizeof(struct sockaddr_in6) + 16;
DWORD bytes_received = 0;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (port->shutting_down) {
return GRPC_ERROR_NONE;
@@ -295,14 +296,14 @@ failure:
}
/* Event manager callback when reads are ready. */
-static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
+static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
SOCKET sock = sp->new_socket;
- grpc_winsocket_callback_info *info = &sp->socket->read_info;
- grpc_endpoint *ep = NULL;
+ grpc_winsocket_callback_info* info = &sp->socket->read_info;
+ grpc_endpoint* ep = NULL;
grpc_resolved_address peer_name;
- char *peer_name_string;
- char *fd_name;
+ char* peer_name_string;
+ char* fd_name;
DWORD transfered_bytes;
DWORD flags;
BOOL wsa_success;
@@ -316,7 +317,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
this is necessary in the read/write case, it's useless for the accept
case. We only need to adjust the pending callback count */
if (error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Skipping on_accept due to error: %s", msg);
gpr_mu_unlock(&sp->server->mu);
@@ -330,7 +331,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
&transfered_bytes, FALSE, &flags);
if (!wsa_success) {
if (!sp->shutting_down) {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
gpr_free(utf8_message);
}
@@ -339,20 +340,19 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
if (!sp->shutting_down) {
peer_name_string = NULL;
err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
- (char *)&sp->socket->socket, sizeof(sp->socket->socket));
+ (char*)&sp->socket->socket, sizeof(sp->socket->socket));
if (err) {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
gpr_free(utf8_message);
}
int peer_name_len = (int)peer_name.len;
- err =
- getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
+ err = getpeername(sock, (struct sockaddr*)peer_name.addr, &peer_name_len);
peer_name.len = (size_t)peer_name_len;
if (!err) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);
gpr_free(utf8_message);
}
@@ -370,8 +370,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
managed to accept a connection, and created an endpoint. */
if (ep) {
// Create acceptor.
- grpc_tcp_server_acceptor *acceptor =
- (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
+ grpc_tcp_server_acceptor* acceptor =
+ (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
@@ -390,17 +390,17 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_unlock(&sp->server->mu);
}
-static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
- const grpc_resolved_address *addr,
+static grpc_error* add_socket_to_server(grpc_tcp_server* s, SOCKET sock,
+ const grpc_resolved_address* addr,
unsigned port_index,
- grpc_tcp_listener **listener) {
- grpc_tcp_listener *sp = NULL;
+ grpc_tcp_listener** listener) {
+ grpc_tcp_listener* sp = NULL;
int port = -1;
int status;
GUID guid = WSAID_ACCEPTEX;
DWORD ioctl_num_bytes;
LPFN_ACCEPTEX AcceptEx;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
/* We need to grab the AcceptEx pointer for that port, as it may be
interface-dependent. We'll cache it to avoid doing that again. */
@@ -409,7 +409,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
&AcceptEx, sizeof(AcceptEx), &ioctl_num_bytes, NULL, NULL);
if (status != 0) {
- char *utf8_message = gpr_format_message(WSAGetLastError());
+ char* utf8_message = gpr_format_message(WSAGetLastError());
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message);
closesocket(sock);
@@ -424,7 +424,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
GPR_ASSERT(port >= 0);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
+ sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -448,17 +448,17 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
- int *port) {
- grpc_tcp_listener *sp = NULL;
+grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s,
+ const grpc_resolved_address* addr,
+ int* port) {
+ grpc_tcp_listener* sp = NULL;
SOCKET sock;
grpc_resolved_address addr6_v4mapped;
grpc_resolved_address wildcard;
- grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address* allocated_addr = NULL;
grpc_resolved_address sockname_temp;
unsigned port_index = 0;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (s->tail != NULL) {
port_index = s->tail->port_index + 1;
@@ -470,13 +470,13 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
for (sp = s->head; sp; sp = sp->next) {
int sockname_temp_len = sizeof(struct sockaddr_storage);
if (0 == getsockname(sp->socket->socket,
- (struct sockaddr *)sockname_temp.addr,
+ (struct sockaddr*)sockname_temp.addr,
&sockname_temp_len)) {
sockname_temp.len = (size_t)sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
- allocated_addr = (grpc_resolved_address *)gpr_malloc(
- sizeof(grpc_resolved_address));
+ allocated_addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
@@ -510,7 +510,7 @@ done:
gpr_free(allocated_addr);
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
GRPC_ERROR_UNREF(error);
error = error_out;
@@ -522,11 +522,11 @@ done:
return error;
}
-void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
- grpc_pollset **pollset, size_t pollset_count,
+void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
+ grpc_pollset** pollset, size_t pollset_count,
grpc_tcp_server_cb on_accept_cb,
- void *on_accept_cb_arg) {
- grpc_tcp_listener *sp;
+ void* on_accept_cb_arg) {
+ grpc_tcp_listener* sp;
GPR_ASSERT(on_accept_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->on_accept_cb);
@@ -541,7 +541,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
gpr_mu_unlock(&s->mu);
}
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
- grpc_tcp_server *s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
+ grpc_tcp_server* s) {}
#endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 99b9f1ea2d..ac9ca4ea11 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -47,25 +47,25 @@ typedef struct {
uv_write_t write_req;
uv_shutdown_t shutdown_req;
- uv_tcp_t *handle;
+ uv_tcp_t* handle;
- grpc_closure *read_cb;
- grpc_closure *write_cb;
+ grpc_closure* read_cb;
+ grpc_closure* write_cb;
grpc_slice read_slice;
- grpc_slice_buffer *read_slices;
- grpc_slice_buffer *write_slices;
- uv_buf_t *write_buffers;
+ grpc_slice_buffer* read_slices;
+ grpc_slice_buffer* write_slices;
+ uv_buf_t* write_buffers;
- grpc_resource_user *resource_user;
+ grpc_resource_user* resource_user;
bool shutting_down;
- char *peer_string;
- grpc_pollset *pollset;
+ char* peer_string;
+ grpc_pollset* pollset;
} grpc_tcp;
-static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp->handle);
@@ -77,8 +77,8 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
- const char *reason, const char *file, int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -90,7 +90,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
}
}
-static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
@@ -103,45 +103,45 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
#else
#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
}
-static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
-static void uv_close_callback(uv_handle_t *handle) {
+static void uv_close_callback(uv_handle_t* handle) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = (grpc_tcp *)handle->data;
+ grpc_tcp* tcp = (grpc_tcp*)handle->data;
TCP_UNREF(&exec_ctx, tcp, "destroy");
grpc_exec_ctx_finish(&exec_ctx);
}
-static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx,
- grpc_resource_user *resource_user) {
+static grpc_slice alloc_read_slice(grpc_exec_ctx* exec_ctx,
+ grpc_resource_user* resource_user) {
return grpc_resource_user_slice_malloc(exec_ctx, resource_user,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
}
-static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
- uv_buf_t *buf) {
+static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size,
+ uv_buf_t* buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = (grpc_tcp *)handle->data;
+ grpc_tcp* tcp = (grpc_tcp*)handle->data;
(void)suggested_size;
- buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
+ buf->base = (char*)GRPC_SLICE_START_PTR(tcp->read_slice);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
grpc_exec_ctx_finish(&exec_ctx);
}
-static void read_callback(uv_stream_t *stream, ssize_t nread,
- const uv_buf_t *buf) {
+static void read_callback(uv_stream_t* stream, ssize_t nread,
+ const uv_buf_t* buf) {
grpc_slice sub;
- grpc_error *error;
+ grpc_error* error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_tcp *tcp = (grpc_tcp *)stream->data;
- grpc_closure *cb = tcp->read_cb;
+ grpc_tcp* tcp = (grpc_tcp*)stream->data;
+ grpc_closure* cb = tcp->read_cb;
if (nread == 0) {
// Nothing happened. Wait for the next callback
return;
@@ -160,11 +160,11 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
for (i = 0; i < tcp->read_slices->count; i++) {
- char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
+ char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
dump);
@@ -179,11 +179,11 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
grpc_exec_ctx_finish(&exec_ctx);
}
-static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *read_slices, grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void uv_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* read_slices, grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
int status;
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
@@ -192,7 +192,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
// TODO(murgatroid99): figure out what the return value here means
status =
- uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
+ uv_read_start((uv_stream_t*)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
error =
@@ -201,16 +201,16 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
}
}
-static void write_callback(uv_write_t *req, int status) {
- grpc_tcp *tcp = (grpc_tcp *)req->data;
- grpc_error *error;
+static void write_callback(uv_write_t* req, int status) {
+ grpc_tcp* tcp = (grpc_tcp*)req->data;
+ grpc_error* error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_closure *cb = tcp->write_cb;
+ grpc_closure* cb = tcp->write_cb;
tcp->write_cb = NULL;
TCP_UNREF(&exec_ctx, tcp, "write");
if (status == 0) {
@@ -219,7 +219,7 @@ static void write_callback(uv_write_t *req, int status) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(error);
+ const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
@@ -229,22 +229,22 @@ static void write_callback(uv_write_t *req, int status) {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *write_slices,
- grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- uv_buf_t *buffers;
+static void uv_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* write_slices,
+ grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
+ uv_buf_t* buffers;
unsigned int buffer_count;
unsigned int i;
- grpc_slice *slice;
- uv_write_t *write_req;
+ grpc_slice* slice;
+ uv_write_t* write_req;
GRPC_UV_ASSERT_SAME_THREAD();
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t j;
for (j = 0; j < write_slices->count; j++) {
- char *data = grpc_dump_slice(write_slices->slices[j],
+ char* data = grpc_dump_slice(write_slices->slices[j],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
@@ -252,8 +252,9 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
- GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "TCP socket is shutting down"));
+ GRPC_CLOSURE_SCHED(
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP socket is shutting down"));
return;
}
@@ -269,12 +270,12 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
- buffers = (uv_buf_t *)gpr_malloc(sizeof(uv_buf_t) * buffer_count);
+ buffers = (uv_buf_t*)gpr_malloc(sizeof(uv_buf_t) * buffer_count);
grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
slice = &tcp->write_slices->slices[i];
- buffers[i].base = (char *)GRPC_SLICE_START_PTR(*slice);
+ buffers[i].base = (char*)GRPC_SLICE_START_PTR(*slice);
buffers[i].len = GRPC_SLICE_LENGTH(*slice);
}
tcp->write_buffers = buffers;
@@ -282,72 +283,72 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
write_req->data = tcp;
TCP_REF(tcp, "write");
// TODO(murgatroid99): figure out what the return value here means
- uv_write(write_req, (uv_stream_t *)tcp->handle, buffers, buffer_count,
+ uv_write(write_req, (uv_stream_t*)tcp->handle, buffers, buffer_count,
write_callback);
}
-static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *pollset) {
+static void uv_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* pollset) {
// No-op. We're ignoring pollsets currently
(void)exec_ctx;
(void)ep;
(void)pollset;
- grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_tcp* tcp = (grpc_tcp*)ep;
tcp->pollset = pollset;
}
-static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pollset) {
+static void uv_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {
// No-op. We're ignoring pollsets currently
(void)exec_ctx;
(void)ep;
(void)pollset;
}
-static void uv_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *ep,
- grpc_pollset_set *pollset) {
+static void uv_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pollset) {
// No-op. We're ignoring pollsets currently
(void)exec_ctx;
(void)ep;
(void)pollset;
}
-static void shutdown_callback(uv_shutdown_t *req, int status) {}
+static void shutdown_callback(uv_shutdown_t* req, int status) {}
-static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_error *why) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void uv_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
if (!tcp->shutting_down) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
- const char *str = grpc_error_string(why);
+ const char* str = grpc_error_string(why);
gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str);
}
tcp->shutting_down = true;
- uv_shutdown_t *req = &tcp->shutdown_req;
- uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
+ uv_shutdown_t* req = &tcp->shutdown_req;
+ uv_shutdown(req, (uv_stream_t*)tcp->handle, shutdown_callback);
grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
GRPC_ERROR_UNREF(why);
}
-static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+static void uv_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp *tcp = (grpc_tcp *)ep;
- uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
+ grpc_tcp* tcp = (grpc_tcp*)ep;
+ uv_close((uv_handle_t*)tcp->handle, uv_close_callback);
}
-static char *uv_get_peer(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static char* uv_get_peer(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return gpr_strdup(tcp->peer_string);
}
-static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static grpc_resource_user* uv_get_resource_user(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return tcp->resource_user;
}
-static int uv_get_fd(grpc_endpoint *ep) { return -1; }
+static int uv_get_fd(grpc_endpoint* ep) { return -1; }
static grpc_endpoint_vtable vtable = {uv_endpoint_read,
uv_endpoint_write,
@@ -360,10 +361,10 @@ static grpc_endpoint_vtable vtable = {uv_endpoint_read,
uv_get_peer,
uv_get_fd};
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
- grpc_resource_quota *resource_quota,
- char *peer_string) {
- grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
+grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
+ grpc_resource_quota* resource_quota,
+ char* peer_string) {
+ grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
@@ -386,7 +387,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_network_status_register_endpoint(&tcp->base);
#ifndef GRPC_UV_TCP_HOLD_LOOP
- uv_unref((uv_handle_t *)handle);
+ uv_unref((uv_handle_t*)handle);
#endif
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index 3399535b42..8a4914935a 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -42,9 +42,9 @@ extern grpc_tracer_flag grpc_tcp_trace;
extern "C" {
#endif
-grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
- grpc_resource_quota *resource_quota,
- char *peer_string);
+grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
+ grpc_resource_quota* resource_quota,
+ char* peer_string);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 6efcff84b8..04922b4037 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -51,7 +51,7 @@
grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp");
-static grpc_error *set_non_block(SOCKET sock) {
+static grpc_error* set_non_block(SOCKET sock) {
int status;
uint32_t param = 1;
DWORD ret;
@@ -62,18 +62,18 @@ static grpc_error *set_non_block(SOCKET sock) {
: GRPC_WSA_ERROR(WSAGetLastError(), "WSAIoctl(GRPC_FIONBIO)");
}
-static grpc_error *set_dualstack(SOCKET sock) {
+static grpc_error* set_dualstack(SOCKET sock) {
int status;
unsigned long param = 0;
- status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&param,
+ status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&param,
sizeof(param));
return status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
}
-grpc_error *grpc_tcp_prepare_socket(SOCKET sock) {
- grpc_error *err;
+grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
+ grpc_error* err;
err = set_non_block(sock);
if (err != GRPC_ERROR_NONE) return err;
err = set_dualstack(sock);
@@ -85,31 +85,31 @@ typedef struct grpc_tcp {
/* This is our C++ class derivation emulation. */
grpc_endpoint base;
/* The one socket this endpoint is using. */
- grpc_winsocket *socket;
+ grpc_winsocket* socket;
/* Refcounting how many operations are in progress. */
gpr_refcount refcount;
grpc_closure on_read;
grpc_closure on_write;
- grpc_closure *read_cb;
- grpc_closure *write_cb;
+ grpc_closure* read_cb;
+ grpc_closure* write_cb;
grpc_slice read_slice;
- grpc_slice_buffer *write_slices;
- grpc_slice_buffer *read_slices;
+ grpc_slice_buffer* write_slices;
+ grpc_slice_buffer* read_slices;
- grpc_resource_user *resource_user;
+ grpc_resource_user* resource_user;
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
gpr_mu mu;
int shutting_down;
- grpc_error *shutdown_error;
+ grpc_error* shutdown_error;
- char *peer_string;
+ char* peer_string;
} grpc_tcp;
-static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
@@ -122,8 +122,8 @@ static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
#define TCP_UNREF(exec_ctx, tcp, reason) \
tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
- const char *reason, const char *file, int line) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -135,7 +135,7 @@ static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
}
}
-static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
@@ -148,28 +148,28 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
#else
#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
if (gpr_unref(&tcp->refcount)) {
tcp_free(exec_ctx, tcp);
}
}
-static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
- grpc_tcp *tcp = (grpc_tcp *)tcpp;
- grpc_closure *cb = tcp->read_cb;
- grpc_winsocket *socket = tcp->socket;
+static void on_read(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
+ grpc_tcp* tcp = (grpc_tcp*)tcpp;
+ grpc_closure* cb = tcp->read_cb;
+ grpc_winsocket* socket = tcp->socket;
grpc_slice sub;
- grpc_winsocket_callback_info *info = &socket->read_info;
+ grpc_winsocket_callback_info* info = &socket->read_info;
GRPC_ERROR_REF(error);
if (error == GRPC_ERROR_NONE) {
if (info->wsa_error != 0 && !tcp->shutting_down) {
- char *utf8_message = gpr_format_message(info->wsa_error);
+ char* utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
gpr_free(utf8_message);
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
@@ -192,11 +192,11 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
-static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *read_slices, grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_winsocket *handle = tcp->socket;
- grpc_winsocket_callback_info *info = &handle->read_info;
+static void win_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* read_slices, grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_winsocket* handle = tcp->socket;
+ grpc_winsocket_callback_info* info = &handle->read_info;
int status;
DWORD bytes_read = 0;
DWORD flags = 0;
@@ -218,7 +218,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
buffer.len = (ULONG)GRPC_SLICE_LENGTH(
tcp->read_slice); // we know slice size fits in 32bit.
- buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
+ buffer.buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slice);
TCP_REF(tcp, "read");
@@ -253,11 +253,11 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
/* Asynchronous callback from the IOCP, or the background thread. */
-static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
- grpc_tcp *tcp = (grpc_tcp *)tcpp;
- grpc_winsocket *handle = tcp->socket;
- grpc_winsocket_callback_info *info = &handle->write_info;
- grpc_closure *cb;
+static void on_write(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
+ grpc_tcp* tcp = (grpc_tcp*)tcpp;
+ grpc_winsocket* handle = tcp->socket;
+ grpc_winsocket_callback_info* info = &handle->write_info;
+ grpc_closure* cb;
GRPC_ERROR_REF(error);
@@ -279,17 +279,17 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
}
/* Initiates a write. */
-static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_slice_buffer *slices, grpc_closure *cb) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
- grpc_winsocket *socket = tcp->socket;
- grpc_winsocket_callback_info *info = &socket->write_info;
+static void win_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
+ grpc_winsocket* socket = tcp->socket;
+ grpc_winsocket_callback_info* info = &socket->write_info;
unsigned i;
DWORD bytes_sent;
int status;
WSABUF local_buffers[16];
- WSABUF *allocated = NULL;
- WSABUF *buffers = local_buffers;
+ WSABUF* allocated = NULL;
+ WSABUF* buffers = local_buffers;
size_t len;
if (tcp->shutting_down) {
@@ -304,7 +304,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_slices = slices;
GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
- buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
+ buffers = (WSABUF*)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
@@ -312,7 +312,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
len = GRPC_SLICE_LENGTH(tcp->write_slices->slices[i]);
GPR_ASSERT(len <= ULONG_MAX);
buffers[i].len = (ULONG)len;
- buffers[i].buf = (char *)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]);
+ buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
@@ -324,7 +324,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
connection that has its send queue filled up. But if we don't, then we can
avoid doing an async write operation at all. */
if (info->wsa_error != WSAEWOULDBLOCK) {
- grpc_error *error = status == 0
+ grpc_error* error = status == 0
? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(info->wsa_error, "WSASend");
GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
@@ -355,25 +355,25 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
}
-static void win_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset *ps) {
- grpc_tcp *tcp;
+static void win_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset* ps) {
+ grpc_tcp* tcp;
(void)ps;
- tcp = (grpc_tcp *)ep;
+ tcp = (grpc_tcp*)ep;
grpc_iocp_add_socket(tcp->socket);
}
-static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_pollset_set *pss) {
- grpc_tcp *tcp;
+static void win_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_pollset_set* pss) {
+ grpc_tcp* tcp;
(void)pss;
- tcp = (grpc_tcp *)ep;
+ tcp = (grpc_tcp*)ep;
grpc_iocp_add_socket(tcp->socket);
}
-static void win_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *ep,
- grpc_pollset_set *pss) {}
+static void win_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* ep,
+ grpc_pollset_set* pss) {}
/* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
for the potential read and write operations. It is up to the caller to
@@ -381,9 +381,9 @@ static void win_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
we're not going to protect against these. However the IO Completion Port
callback will happen from another thread, so we need to protect against
concurrent access of the data structure in that regard. */
-static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
- grpc_error *why) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static void win_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+ grpc_error* why) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
gpr_mu_lock(&tcp->mu);
/* At that point, what may happen is that we're already inside the IOCP
callback. See the comments in on_read and on_write. */
@@ -398,23 +398,23 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
}
-static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
+static void win_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
grpc_network_status_unregister_endpoint(ep);
- grpc_tcp *tcp = (grpc_tcp *)ep;
+ grpc_tcp* tcp = (grpc_tcp*)ep;
TCP_UNREF(exec_ctx, tcp, "destroy");
}
-static char *win_get_peer(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static char* win_get_peer(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return gpr_strdup(tcp->peer_string);
}
-static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) {
- grpc_tcp *tcp = (grpc_tcp *)ep;
+static grpc_resource_user* win_get_resource_user(grpc_endpoint* ep) {
+ grpc_tcp* tcp = (grpc_tcp*)ep;
return tcp->resource_user;
}
-static int win_get_fd(grpc_endpoint *ep) { return -1; }
+static int win_get_fd(grpc_endpoint* ep) { return -1; }
static grpc_endpoint_vtable vtable = {win_read,
win_write,
@@ -427,20 +427,20 @@ static grpc_endpoint_vtable vtable = {win_read,
win_get_peer,
win_get_fd};
-grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
- grpc_channel_args *channel_args,
- const char *peer_string) {
- grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+ grpc_channel_args* channel_args,
+ const char* peer_string) {
+ grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
- (grpc_resource_quota *)channel_args->args[i].value.pointer.p);
+ (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
}
}
- grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
+ grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
tcp->socket = socket;
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index f3697f707c..aea5bab47d 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -39,11 +39,11 @@ extern "C" {
/* Create a tcp endpoint given a winsock handle.
* Takes ownership of the handle.
*/
-grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
- grpc_channel_args *channel_args,
- const char *peer_string);
+grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+ grpc_channel_args* channel_args,
+ const char* peer_string);
-grpc_error *grpc_tcp_prepare_socket(SOCKET sock);
+grpc_error* grpc_tcp_prepare_socket(SOCKET sock);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index 419e834cf1..cd8334eceb 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -44,12 +44,12 @@ typedef struct grpc_timer grpc_timer;
application code should check the error to determine how it was invoked. The
application callback is also responsible for maintaining information about
when to free up any user-level state. */
-void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- grpc_millis deadline, grpc_closure *closure);
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure);
/* Initialize *timer without setting it. This can later be passed through
the regular init or cancel */
-void grpc_timer_init_unset(grpc_timer *timer);
+void grpc_timer_init_unset(grpc_timer* timer);
/* Note that there is no timer destroy function. This is because the
timer is a one-time occurrence with a guarantee that the callback will
@@ -77,7 +77,7 @@ void grpc_timer_init_unset(grpc_timer *timer);
matches this aim.
Requires: cancel() must happen after init() on a given timer */
-void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer);
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer);
/* iomgr internal api for dealing with timers */
@@ -94,10 +94,10 @@ typedef enum {
*next is never guaranteed to be updated on any given execution; however,
with high probability at least one thread in the system will see an update
at any time slice. */
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- grpc_millis *next);
-void grpc_timer_list_init(grpc_exec_ctx *exec_ctx);
-void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx);
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next);
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx);
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx);
/* Consume a kick issued by grpc_kick_poller */
void grpc_timer_consume_kick(void);
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index b8e895de6f..2333f180d4 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -81,7 +81,7 @@ static timer_shard g_shards[NUM_SHARDS];
/* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e.
* the deadline of the next timer in each shard).
* Access to this is protected by g_shared_mutables.mu */
-static timer_shard *g_shard_queue[NUM_SHARDS];
+static timer_shard* g_shard_queue[NUM_SHARDS];
#ifndef NDEBUG
@@ -90,7 +90,7 @@ static timer_shard *g_shard_queue[NUM_SHARDS];
#define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
-static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
+static grpc_timer* g_timer_ht[NUM_HASH_BUCKETS] = {NULL};
static void init_timer_ht() {
for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
@@ -98,11 +98,11 @@ static void init_timer_ht() {
}
}
-static bool is_in_ht(grpc_timer *t) {
+static bool is_in_ht(grpc_timer* t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
- grpc_timer *p = g_timer_ht[i];
+ grpc_timer* p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
@@ -111,18 +111,18 @@ static bool is_in_ht(grpc_timer *t) {
return (p == t);
}
-static void add_to_ht(grpc_timer *t) {
+static void add_to_ht(grpc_timer* t) {
GPR_ASSERT(!t->hash_table_next);
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
gpr_mu_lock(&g_hash_mu[i]);
- grpc_timer *p = g_timer_ht[i];
+ grpc_timer* p = g_timer_ht[i];
while (p != NULL && p != t) {
p = p->hash_table_next;
}
if (p == t) {
- grpc_closure *c = t->closure;
+ grpc_closure* c = t->closure;
gpr_log(GPR_ERROR,
"** Duplicate timer (%p) being added. Closure: (%p), created at: "
"(%s:%d), scheduled at: (%s:%d) **",
@@ -137,7 +137,7 @@ static void add_to_ht(grpc_timer *t) {
gpr_mu_unlock(&g_hash_mu[i]);
}
-static void remove_from_ht(grpc_timer *t) {
+static void remove_from_ht(grpc_timer* t) {
size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
bool removed = false;
@@ -146,7 +146,7 @@ static void remove_from_ht(grpc_timer *t) {
g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
removed = true;
} else if (g_timer_ht[i] != NULL) {
- grpc_timer *p = g_timer_ht[i];
+ grpc_timer* p = g_timer_ht[i];
while (p->hash_table_next != NULL && p->hash_table_next != t) {
p = p->hash_table_next;
}
@@ -159,7 +159,7 @@ static void remove_from_ht(grpc_timer *t) {
gpr_mu_unlock(&g_hash_mu[i]);
if (!removed) {
- grpc_closure *c = t->closure;
+ grpc_closure* c = t->closure;
gpr_log(GPR_ERROR,
"** Removing timer (%p) that is not added to hash table. Closure "
"(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
@@ -175,9 +175,9 @@ static void remove_from_ht(grpc_timer *t) {
 * be pending. A timer is added to hash table only if it is added to the
* timer shard.
* Therefore, if timer->pending is false, it cannot be in hash table */
-static void validate_non_pending_timer(grpc_timer *t) {
+static void validate_non_pending_timer(grpc_timer* t) {
if (!t->pending && is_in_ht(t)) {
- grpc_closure *c = t->closure;
+ grpc_closure* c = t->closure;
gpr_log(GPR_ERROR,
"** gpr_timer_cancel() called on a non-pending timer (%p) which "
"is in the hash table. Closure: (%p), created at: (%s:%d), "
@@ -227,18 +227,18 @@ static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
return a + b;
}
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
+static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
gpr_atm now,
- gpr_atm *next,
- grpc_error *error);
+ gpr_atm* next,
+ grpc_error* error);
-static gpr_atm compute_min_deadline(timer_shard *shard) {
+static gpr_atm compute_min_deadline(timer_shard* shard) {
return grpc_timer_heap_is_empty(&shard->heap)
? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
}
-void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
uint32_t i;
g_shared_mutables.initialized = true;
@@ -251,7 +251,7 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
grpc_register_tracer(&grpc_timer_check_trace);
for (i = 0; i < NUM_SHARDS; i++) {
- timer_shard *shard = &g_shards[i];
+ timer_shard* shard = &g_shards[i];
gpr_mu_init(&shard->mu);
grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
0.5);
@@ -266,13 +266,13 @@ void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {
INIT_TIMER_HASH_TABLE();
}
-void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {
int i;
run_some_expired_timers(
exec_ctx, GPR_ATM_MAX, NULL,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < NUM_SHARDS; i++) {
- timer_shard *shard = &g_shards[i];
+ timer_shard* shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
grpc_timer_heap_destroy(&shard->heap);
}
@@ -282,19 +282,19 @@ void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
}
/* returns true if the first element in the list */
-static void list_join(grpc_timer *head, grpc_timer *timer) {
+static void list_join(grpc_timer* head, grpc_timer* timer) {
timer->next = head;
timer->prev = head->prev;
timer->next->prev = timer->prev->next = timer;
}
-static void list_remove(grpc_timer *timer) {
+static void list_remove(grpc_timer* timer) {
timer->next->prev = timer->prev;
timer->prev->next = timer->next;
}
static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
- timer_shard *temp;
+ timer_shard* temp;
temp = g_shard_queue[first_shard_queue_index];
g_shard_queue[first_shard_queue_index] =
g_shard_queue[first_shard_queue_index + 1];
@@ -305,7 +305,7 @@ static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
first_shard_queue_index + 1;
}
-static void note_deadline_change(timer_shard *shard) {
+static void note_deadline_change(timer_shard* shard) {
while (shard->shard_queue_index > 0 &&
shard->min_deadline <
g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
@@ -318,12 +318,12 @@ static void note_deadline_change(timer_shard *shard) {
}
}
-void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
+void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
-void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- grpc_millis deadline, grpc_closure *closure) {
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure) {
int is_first_timer = 0;
- timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+ timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
timer->closure = closure;
timer->deadline = deadline;
@@ -368,8 +368,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
list_join(&shard->list, timer);
}
if (GRPC_TRACER_ON(grpc_timer_trace)) {
- gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
- " => is_first_timer=%s",
+ gpr_log(GPR_DEBUG,
+ " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
+ " => is_first_timer=%s",
(int)(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
}
@@ -410,13 +411,13 @@ void grpc_timer_consume_kick(void) {
gpr_tls_set(&g_last_seen_min_timer, 0);
}
-void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
if (!g_shared_mutables.initialized) {
/* must have already been cancelled, also the shard mutex is invalid */
return;
}
- timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
+ timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
gpr_mu_lock(&shard->mu);
if (GRPC_TRACER_ON(grpc_timer_trace)) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
@@ -444,7 +445,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
   'queue_deadline_cap') into shard->heap.
   Returns 'true' if shard->heap has at least ONE element
REQUIRES: shard->mu locked */
-static int refill_heap(timer_shard *shard, gpr_atm now) {
+static int refill_heap(timer_shard* shard, gpr_atm now) {
/* Compute the new queue window width and bound by the limits: */
double computed_deadline_delta =
grpc_time_averaged_stats_update_average(&shard->stats) *
@@ -481,8 +482,8 @@ static int refill_heap(timer_shard *shard, gpr_atm now) {
/* This pops the next non-cancelled timer with deadline <= now from the
queue, or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
-static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
- grpc_timer *timer;
+static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
+ grpc_timer* timer;
for (;;) {
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
@@ -512,11 +513,11 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
- gpr_atm now, gpr_atm *new_min_deadline,
- grpc_error *error) {
+static size_t pop_timers(grpc_exec_ctx* exec_ctx, timer_shard* shard,
+ gpr_atm now, gpr_atm* new_min_deadline,
+ grpc_error* error) {
size_t n = 0;
- grpc_timer *timer;
+ grpc_timer* timer;
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
@@ -532,10 +533,10 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
return n;
}
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
+static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
gpr_atm now,
- gpr_atm *next,
- grpc_error *error) {
+ gpr_atm* next,
+ grpc_error* error) {
grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
@@ -600,8 +601,8 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
return result;
}
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- grpc_millis *next) {
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next) {
// prelude
grpc_millis now = grpc_exec_ctx_now(exec_ctx);
@@ -620,21 +621,22 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
return GRPC_TIMERS_CHECKED_AND_EMPTY;
}
- grpc_error *shutdown_error =
+ grpc_error* shutdown_error =
now != GRPC_MILLIS_INF_FUTURE
? GRPC_ERROR_NONE
: GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
// tracing
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
- char *next_str;
+ char* next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
- gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR
- " next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
+ gpr_log(GPR_DEBUG,
+ "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
+ " glob_min=%" PRIdPTR,
now, next_str, gpr_tls_get(&g_last_seen_min_timer),
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
gpr_free(next_str);
@@ -644,7 +646,7 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
run_some_expired_timers(exec_ctx, now, next, shutdown_error);
// tracing
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
- char *next_str;
+ char* next_str;
if (next == NULL) {
next_str = gpr_strdup("NULL");
} else {
diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h
index f0597f6ea0..190381e904 100644
--- a/src/core/lib/iomgr/timer_generic.h
+++ b/src/core/lib/iomgr/timer_generic.h
@@ -26,11 +26,11 @@ struct grpc_timer {
gpr_atm deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
bool pending;
- struct grpc_timer *next;
- struct grpc_timer *prev;
- grpc_closure *closure;
+ struct grpc_timer* next;
+ struct grpc_timer* prev;
+ grpc_closure* closure;
#ifndef NDEBUG
- struct grpc_timer *hash_table_next;
+ struct grpc_timer* hash_table_next;
#endif
};
diff --git a/src/core/lib/iomgr/timer_heap.cc b/src/core/lib/iomgr/timer_heap.cc
index 2648d5da5d..b350452c63 100644
--- a/src/core/lib/iomgr/timer_heap.cc
+++ b/src/core/lib/iomgr/timer_heap.cc
@@ -32,7 +32,7 @@
position. This functor is called each time immediately after modifying a
value in the underlying container, with the offset of the modified element as
its argument. */
-static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) {
+static void adjust_upwards(grpc_timer** first, uint32_t i, grpc_timer* t) {
while (i > 0) {
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
if (first[parent]->deadline <= t->deadline) break;
@@ -47,17 +47,16 @@ static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) {
/* Adjusts a heap so as to move a hole at position i farther away from the root,
until a suitable position is found for element t. Then, copies t into that
position. */
-static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
- grpc_timer *t) {
+static void adjust_downwards(grpc_timer** first, uint32_t i, uint32_t length,
+ grpc_timer* t) {
for (;;) {
uint32_t left_child = 1u + 2u * i;
if (left_child >= length) break;
uint32_t right_child = left_child + 1;
- uint32_t next_i =
- right_child < length &&
- first[left_child]->deadline > first[right_child]->deadline
- ? right_child
- : left_child;
+ uint32_t next_i = right_child < length && first[left_child]->deadline >
+ first[right_child]->deadline
+ ? right_child
+ : left_child;
if (t->deadline <= first[next_i]->deadline) break;
first[i] = first[next_i];
first[i]->heap_index = i;
@@ -70,16 +69,16 @@ static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
#define SHRINK_MIN_ELEMS 8
#define SHRINK_FULLNESS_FACTOR 2
-static void maybe_shrink(grpc_timer_heap *heap) {
+static void maybe_shrink(grpc_timer_heap* heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
- heap->timers = (grpc_timer **)gpr_realloc(
- heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer**)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer*));
}
}
-static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) {
+static void note_changed_priority(grpc_timer_heap* heap, grpc_timer* timer) {
uint32_t i = timer->heap_index;
uint32_t parent = (uint32_t)(((int)i - 1) / 2);
if (heap->timers[parent]->deadline > timer->deadline) {
@@ -89,18 +88,18 @@ static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) {
}
}
-void grpc_timer_heap_init(grpc_timer_heap *heap) {
+void grpc_timer_heap_init(grpc_timer_heap* heap) {
memset(heap, 0, sizeof(*heap));
}
-void grpc_timer_heap_destroy(grpc_timer_heap *heap) { gpr_free(heap->timers); }
+void grpc_timer_heap_destroy(grpc_timer_heap* heap) { gpr_free(heap->timers); }
-int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
+int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
- heap->timers = (grpc_timer **)gpr_realloc(
- heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
+ heap->timers = (grpc_timer**)gpr_realloc(
+ heap->timers, heap->timer_capacity * sizeof(grpc_timer*));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
@@ -108,7 +107,7 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
return timer->heap_index == 0;
}
-void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) {
+void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer) {
uint32_t i = timer->heap_index;
if (i == heap->timer_count - 1) {
heap->timer_count--;
@@ -122,15 +121,15 @@ void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) {
note_changed_priority(heap, heap->timers[i]);
}
-int grpc_timer_heap_is_empty(grpc_timer_heap *heap) {
+int grpc_timer_heap_is_empty(grpc_timer_heap* heap) {
return heap->timer_count == 0;
}
-grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap) {
+grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap) {
return heap->timers[0];
}
-void grpc_timer_heap_pop(grpc_timer_heap *heap) {
+void grpc_timer_heap_pop(grpc_timer_heap* heap) {
grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap));
}
diff --git a/src/core/lib/iomgr/timer_heap.h b/src/core/lib/iomgr/timer_heap.h
index 228d038ab3..ae56e5a73e 100644
--- a/src/core/lib/iomgr/timer_heap.h
+++ b/src/core/lib/iomgr/timer_heap.h
@@ -26,22 +26,22 @@ extern "C" {
#endif
typedef struct {
- grpc_timer **timers;
+ grpc_timer** timers;
uint32_t timer_count;
uint32_t timer_capacity;
} grpc_timer_heap;
/* return 1 if the new timer is the first timer in the heap */
-int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer);
+int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer);
-void grpc_timer_heap_init(grpc_timer_heap *heap);
-void grpc_timer_heap_destroy(grpc_timer_heap *heap);
+void grpc_timer_heap_init(grpc_timer_heap* heap);
+void grpc_timer_heap_destroy(grpc_timer_heap* heap);
-void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer);
-grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap);
-void grpc_timer_heap_pop(grpc_timer_heap *heap);
+void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer);
+grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap);
+void grpc_timer_heap_pop(grpc_timer_heap* heap);
-int grpc_timer_heap_is_empty(grpc_timer_heap *heap);
+int grpc_timer_heap_is_empty(grpc_timer_heap* heap);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 1248f82189..16e9e7e707 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -30,7 +30,7 @@
typedef struct completed_thread {
gpr_thd_id t;
- struct completed_thread *next;
+ struct completed_thread* next;
} completed_thread;
extern "C" grpc_tracer_flag grpc_timer_check_trace;
@@ -48,7 +48,7 @@ static int g_thread_count;
// number of threads sitting around waiting
static int g_waiter_count;
// linked list of threads that have completed (and need joining)
-static completed_thread *g_completed_threads;
+static completed_thread* g_completed_threads;
// was the manager kicked by the timer system
static bool g_kicked;
// is there a thread waiting until the next timer should fire?
@@ -59,16 +59,16 @@ static grpc_millis g_timed_waiter_deadline;
// generation counter to track which thread is waiting for the next timer
static uint64_t g_timed_waiter_generation;
-static void timer_thread(void *completed_thread_ptr);
+static void timer_thread(void* completed_thread_ptr);
static void gc_completed_threads(void) {
if (g_completed_threads != NULL) {
- completed_thread *to_gc = g_completed_threads;
+ completed_thread* to_gc = g_completed_threads;
g_completed_threads = NULL;
gpr_mu_unlock(&g_mu);
while (to_gc != NULL) {
gpr_thd_join(to_gc->t);
- completed_thread *next = to_gc->next;
+ completed_thread* next = to_gc->next;
gpr_free(to_gc);
to_gc = next;
}
@@ -86,7 +86,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
- completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
+ completed_thread* ct = (completed_thread*)gpr_malloc(sizeof(*ct));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct
@@ -104,7 +104,7 @@ void grpc_timer_manager_tick() {
grpc_exec_ctx_finish(&exec_ctx);
}
-static void run_some_timers(grpc_exec_ctx *exec_ctx) {
+static void run_some_timers(grpc_exec_ctx* exec_ctx) {
// if there's something to execute...
gpr_mu_lock(&g_mu);
// remove a waiter from the pool, and start another thread if necessary
@@ -138,7 +138,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) {
// wait until 'next' (or forever if there is already a timed waiter in the pool)
// returns true if the thread should continue executing (false if it should
// shutdown)
-static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
+static bool wait_until(grpc_exec_ctx* exec_ctx, grpc_millis next) {
gpr_mu_lock(&g_mu);
// if we're not threaded anymore, leave
if (!g_threaded) {
@@ -221,7 +221,7 @@ static bool wait_until(grpc_exec_ctx *exec_ctx, grpc_millis next) {
return true;
}
-static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
+static void timer_main_loop(grpc_exec_ctx* exec_ctx) {
for (;;) {
grpc_millis next = GRPC_MILLIS_INF_FUTURE;
grpc_exec_ctx_invalidate_now(exec_ctx);
@@ -254,7 +254,7 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) {
}
}
-static void timer_thread_cleanup(completed_thread *ct) {
+static void timer_thread_cleanup(completed_thread* ct) {
gpr_mu_lock(&g_mu);
  // terminate the thread: drop the waiter count, thread count, and let whoever
// stopped the threading stuff know that we're done
@@ -271,14 +271,14 @@ static void timer_thread_cleanup(completed_thread *ct) {
}
}
-static void timer_thread(void *completed_thread_ptr) {
+static void timer_thread(void* completed_thread_ptr) {
  // this thread's exec_ctx: we try to run things through to completion here
// since it's easy to spin up new threads
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
timer_main_loop(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- timer_thread_cleanup((completed_thread *)completed_thread_ptr);
+ timer_thread_cleanup((completed_thread*)completed_thread_ptr);
}
static void start_threads(void) {
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index ccbbe357ae..df40e54ae6 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -35,16 +35,16 @@ grpc_tracer_flag grpc_timer_check_trace =
GRPC_TRACER_INITIALIZER(false, "timer_check");
}
-static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); }
+static void timer_close_callback(uv_handle_t* handle) { gpr_free(handle); }
-static void stop_uv_timer(uv_timer_t *handle) {
+static void stop_uv_timer(uv_timer_t* handle) {
uv_timer_stop(handle);
- uv_unref((uv_handle_t *)handle);
- uv_close((uv_handle_t *)handle, timer_close_callback);
+ uv_unref((uv_handle_t*)handle);
+ uv_close((uv_handle_t*)handle, timer_close_callback);
}
-void run_expired_timer(uv_timer_t *handle) {
- grpc_timer *timer = (grpc_timer *)handle->data;
+void run_expired_timer(uv_timer_t* handle) {
+ grpc_timer* timer = (grpc_timer*)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(timer->pending);
@@ -54,10 +54,10 @@ void run_expired_timer(uv_timer_t *handle) {
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
- grpc_millis deadline, grpc_closure *closure) {
+void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
+ grpc_millis deadline, grpc_closure* closure) {
uint64_t timeout;
- uv_timer_t *uv_timer;
+ uv_timer_t* uv_timer;
GRPC_UV_ASSERT_SAME_THREAD();
timer->closure = closure;
if (deadline <= grpc_exec_ctx_now(exec_ctx)) {
@@ -67,7 +67,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
timer->pending = 1;
timeout = (uint64_t)(deadline - grpc_exec_ctx_now(exec_ctx));
- uv_timer = (uv_timer_t *)gpr_malloc(sizeof(uv_timer_t));
+ uv_timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
uv_timer_init(uv_default_loop(), uv_timer);
uv_timer->data = timer;
timer->uv_timer = uv_timer;
@@ -75,27 +75,27 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
/* We assume that gRPC timers are only used alongside other active gRPC
objects, and that there will therefore always be something else keeping
the uv loop alive whenever there is a timer */
- uv_unref((uv_handle_t *)uv_timer);
+ uv_unref((uv_handle_t*)uv_timer);
}
-void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; }
+void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = 0; }
-void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
+void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
GRPC_UV_ASSERT_SAME_THREAD();
if (timer->pending) {
timer->pending = 0;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
- stop_uv_timer((uv_timer_t *)timer->uv_timer);
+ stop_uv_timer((uv_timer_t*)timer->uv_timer);
}
}
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
- grpc_millis *next) {
+grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
+ grpc_millis* next) {
return GRPC_TIMERS_NOT_CHECKED;
}
-void grpc_timer_list_init(grpc_exec_ctx *exec_ctx) {}
-void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {}
+void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {}
+void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {}
void grpc_timer_consume_kick(void) {}
diff --git a/src/core/lib/iomgr/timer_uv.h b/src/core/lib/iomgr/timer_uv.h
index 8a4c17c844..214aaa600a 100644
--- a/src/core/lib/iomgr/timer_uv.h
+++ b/src/core/lib/iomgr/timer_uv.h
@@ -22,10 +22,10 @@
#include "src/core/lib/iomgr/exec_ctx.h"
struct grpc_timer {
- grpc_closure *closure;
+ grpc_closure* closure;
/* This is actually a uv_timer_t*, but we want to keep platform-specific
types out of headers */
- void *uv_timer;
+ void* uv_timer;
int pending;
};
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 00b2e68bb5..8ce8b961ff 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -59,8 +59,8 @@
typedef struct grpc_udp_listener grpc_udp_listener;
struct grpc_udp_listener {
int fd;
- grpc_fd *emfd;
- grpc_udp_server *server;
+ grpc_fd* emfd;
+ grpc_udp_server* server;
grpc_resolved_address addr;
grpc_closure read_closure;
grpc_closure write_closure;
@@ -74,12 +74,12 @@ struct grpc_udp_listener {
  // True if orphan_cb is triggered.
bool orphan_notified;
- struct grpc_udp_listener *next;
+ struct grpc_udp_listener* next;
};
struct shutdown_fd_args {
- grpc_fd *fd;
- gpr_mu *server_mu;
+ grpc_fd* fd;
+ gpr_mu* server_mu;
};
/* the overall server */
@@ -87,7 +87,7 @@ struct grpc_udp_server {
gpr_mu mu;
/* factory to use for creating and binding sockets, or NULL */
- grpc_socket_factory *socket_factory;
+ grpc_socket_factory* socket_factory;
/* active port count: how many ports are actually still listening */
size_t active_ports;
@@ -98,34 +98,34 @@ struct grpc_udp_server {
int shutdown;
/* linked list of server ports */
- grpc_udp_listener *head;
- grpc_udp_listener *tail;
+ grpc_udp_listener* head;
+ grpc_udp_listener* tail;
unsigned nports;
/* shutdown callback */
- grpc_closure *shutdown_complete;
+ grpc_closure* shutdown_complete;
/* all pollsets interested in new connections */
- grpc_pollset **pollsets;
+ grpc_pollset** pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
/* opaque object to pass to callbacks */
- void *user_data;
+ void* user_data;
};
-static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
+static grpc_socket_factory* get_socket_factory(const grpc_channel_args* args) {
if (args) {
- const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
+ const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
if (arg) {
GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
- return (grpc_socket_factory *)arg->value.pointer.p;
+ return (grpc_socket_factory*)arg->value.pointer.p;
}
}
return NULL;
}
-grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
- grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
+grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) {
+ grpc_udp_server* s = (grpc_udp_server*)gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@@ -141,20 +141,20 @@ grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
return s;
}
-static void shutdown_fd(grpc_exec_ctx *exec_ctx, void *args,
- grpc_error *error) {
- struct shutdown_fd_args *shutdown_args = (struct shutdown_fd_args *)args;
+static void shutdown_fd(grpc_exec_ctx* exec_ctx, void* args,
+ grpc_error* error) {
+ struct shutdown_fd_args* shutdown_args = (struct shutdown_fd_args*)args;
gpr_mu_lock(shutdown_args->server_mu);
grpc_fd_shutdown(exec_ctx, shutdown_args->fd, GRPC_ERROR_REF(error));
gpr_mu_unlock(shutdown_args->server_mu);
gpr_free(shutdown_args);
}
-static void dummy_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+static void dummy_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
// No-op.
}
-static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
+static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
if (s->shutdown_complete != NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
@@ -162,7 +162,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
gpr_mu_destroy(&s->mu);
while (s->head) {
- grpc_udp_listener *sp = s->head;
+ grpc_udp_listener* sp = s->head;
s->head = sp->next;
gpr_free(sp);
}
@@ -174,9 +174,9 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
gpr_free(s);
}
-static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
- grpc_error *error) {
- grpc_udp_server *s = (grpc_udp_server *)server;
+static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
+ grpc_error* error) {
+ grpc_udp_server* s = (grpc_udp_server*)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@@ -190,14 +190,14 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
/* called when all listening endpoints have been shut down, so no further
events will be received on them - at this point it's safe to destroy
things */
-static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
+static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
GPR_ASSERT(s->shutdown);
if (s->head) {
- grpc_udp_listener *sp;
+ grpc_udp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_unlink_if_unix_domain_socket(&sp->addr);
@@ -223,9 +223,9 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
}
}
-void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
- grpc_closure *on_done) {
- grpc_udp_listener *sp;
+void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
+ grpc_closure* on_done) {
+ grpc_udp_listener* sp;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
@@ -237,8 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
- struct shutdown_fd_args *args =
- (struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
+ struct shutdown_fd_args* args =
+ (struct shutdown_fd_args*)gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@@ -254,19 +254,18 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
}
}
-static int bind_socket(grpc_socket_factory *socket_factory, int sockfd,
- const grpc_resolved_address *addr) {
+static int bind_socket(grpc_socket_factory* socket_factory, int sockfd,
+ const grpc_resolved_address* addr) {
return (socket_factory != NULL)
? grpc_socket_factory_bind(socket_factory, sockfd, addr)
- : bind(sockfd, (struct sockaddr *)addr->addr,
- (socklen_t)addr->len);
+ : bind(sockfd, (struct sockaddr*)addr->addr, (socklen_t)addr->len);
}
/* Prepare a recently-created socket for listening. */
-static int prepare_socket(grpc_socket_factory *socket_factory, int fd,
- const grpc_resolved_address *addr) {
+static int prepare_socket(grpc_socket_factory* socket_factory, int fd,
+ const grpc_resolved_address* addr) {
grpc_resolved_address sockname_temp;
- struct sockaddr *addr_ptr = (struct sockaddr *)addr->addr;
+ struct sockaddr* addr_ptr = (struct sockaddr*)addr->addr;
/* Set send/receive socket buffers to 1 MB */
int buffer_size_bytes = 1024 * 1024;
@@ -295,7 +294,7 @@ static int prepare_socket(grpc_socket_factory *socket_factory, int fd,
GPR_ASSERT(addr->len < ~(socklen_t)0);
if (bind_socket(socket_factory, fd, addr) < 0) {
- char *addr_str;
+ char* addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
gpr_free(addr_str);
@@ -304,8 +303,8 @@ static int prepare_socket(grpc_socket_factory *socket_factory, int fd,
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
- (socklen_t *)&sockname_temp.len) < 0) {
+ if (getsockname(fd, (struct sockaddr*)sockname_temp.addr,
+ (socklen_t*)&sockname_temp.len) < 0) {
goto error;
}
@@ -331,8 +330,8 @@ error:
}
/* event manager callback when reads are ready */
-static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = (grpc_udp_listener *)arg;
+static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_udp_listener* sp = (grpc_udp_listener*)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@@ -354,8 +353,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_unlock(&sp->server->mu);
}
-static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_udp_listener *sp = (grpc_udp_listener *)arg;
+static void on_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_udp_listener* sp = (grpc_udp_listener*)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
@@ -377,15 +376,15 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_unlock(&sp->server->mu);
}
-static int add_socket_to_server(grpc_udp_server *s, int fd,
- const grpc_resolved_address *addr,
+static int add_socket_to_server(grpc_udp_server* s, int fd,
+ const grpc_resolved_address* addr,
grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb,
grpc_udp_server_orphan_cb orphan_cb) {
- grpc_udp_listener *sp;
+ grpc_udp_listener* sp;
int port;
- char *addr_str;
- char *name;
+ char* addr_str;
+ char* name;
port = prepare_socket(s->socket_factory, fd, addr);
if (port >= 0) {
@@ -394,7 +393,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
- sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
+ sp = (grpc_udp_listener*)gpr_malloc(sizeof(grpc_udp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@@ -418,12 +417,12 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
return port;
}
-int grpc_udp_server_add_port(grpc_udp_server *s,
- const grpc_resolved_address *addr,
+int grpc_udp_server_add_port(grpc_udp_server* s,
+ const grpc_resolved_address* addr,
grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb,
grpc_udp_server_orphan_cb orphan_cb) {
- grpc_udp_listener *sp;
+ grpc_udp_listener* sp;
int allocated_port1 = -1;
int allocated_port2 = -1;
int fd;
@@ -432,7 +431,7 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
grpc_resolved_address wild4;
grpc_resolved_address wild6;
grpc_resolved_address addr4_copy;
- grpc_resolved_address *allocated_addr = NULL;
+ grpc_resolved_address* allocated_addr = NULL;
grpc_resolved_address sockname_temp;
int port;
@@ -441,12 +440,12 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
if (grpc_sockaddr_get_port(addr) == 0) {
for (sp = s->head; sp; sp = sp->next) {
sockname_temp.len = sizeof(struct sockaddr_storage);
- if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr,
- (socklen_t *)&sockname_temp.len)) {
+ if (0 == getsockname(sp->fd, (struct sockaddr*)sockname_temp.addr,
+ (socklen_t*)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
- allocated_addr = (grpc_resolved_address *)gpr_malloc(
- sizeof(grpc_resolved_address));
+ allocated_addr =
+ (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
@@ -500,8 +499,8 @@ done:
return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
}
-int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
- grpc_udp_listener *sp;
+int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index) {
+ grpc_udp_listener* sp;
if (port_index >= s->nports) {
return -1;
}
@@ -512,12 +511,12 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
return sp->fd;
}
-void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
- grpc_pollset **pollsets, size_t pollset_count,
- void *user_data) {
+void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
+ grpc_pollset** pollsets, size_t pollset_count,
+ void* user_data) {
size_t i;
gpr_mu_lock(&s->mu);
- grpc_udp_listener *sp;
+ grpc_udp_listener* sp;
GPR_ASSERT(s->active_ports == 0);
s->pollsets = pollsets;
s->user_data = user_data;
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index e887cb1bcf..bca0f049fb 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -35,28 +35,28 @@ struct grpc_server;
typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
-typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
- void *user_data);
+typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
+ void* user_data);
/* Called when the socket is writeable. */
-typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
- void *user_data);
+typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
+ void* user_data);
/* Called when the grpc_fd is about to be orphaned (and the FD closed). */
-typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx,
- grpc_fd *emfd,
- grpc_closure *shutdown_fd_callback,
- void *user_data);
+typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx* exec_ctx,
+ grpc_fd* emfd,
+ grpc_closure* shutdown_fd_callback,
+ void* user_data);
/* Create a server, initially not bound to any ports */
-grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args);
+grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args);
/* Start listening to bound ports. user_data is passed to callbacks. */
-void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
- grpc_pollset **pollsets, size_t pollset_count,
- void *user_data);
+void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* udp_server,
+ grpc_pollset** pollsets, size_t pollset_count,
+ void* user_data);
-int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
+int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index);
/* Add a port to the server, returning port number on success, or negative
on failure.
@@ -68,14 +68,14 @@ int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index);
/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
-int grpc_udp_server_add_port(grpc_udp_server *s,
- const grpc_resolved_address *addr,
+int grpc_udp_server_add_port(grpc_udp_server* s,
+ const grpc_resolved_address* addr,
grpc_udp_server_read_cb read_cb,
grpc_udp_server_write_cb write_cb,
grpc_udp_server_orphan_cb orphan_cb);
-void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server,
- grpc_closure *on_done);
+void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* server,
+ grpc_closure* on_done);
#ifdef __cplusplus
}
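For orientation, a minimal sketch of how the callbacks declared in this header plug into the server lifecycle (illustrative only, not part of this commit; the resolved address, pollsets, exec_ctx and on_done closure are assumed to come from the caller):

#include "src/core/lib/iomgr/udp_server.h"

static void example_read_cb(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
                            void* user_data) {
  /* read the pending datagram(s) from the fd wrapped by emfd */
}

static void example_write_cb(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
                             void* user_data) {
  /* the socket is writable again; flush anything that was queued */
}

static void example_orphan_cb(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
                              grpc_closure* shutdown_fd_callback,
                              void* user_data) {
  /* release per-socket state, then hand shutdown_fd_callback back to iomgr */
}

static void example_udp_server(grpc_exec_ctx* exec_ctx,
                               const grpc_channel_args* args,
                               const grpc_resolved_address* addr,
                               grpc_pollset** pollsets, size_t pollset_count,
                               grpc_closure* on_done) {
  grpc_udp_server* s = grpc_udp_server_create(args);
  int port = grpc_udp_server_add_port(s, addr, example_read_cb,
                                      example_write_cb, example_orphan_cb);
  if (port < 0) {
    /* binding the requested address failed */
  }
  grpc_udp_server_start(exec_ctx, s, pollsets, pollset_count,
                        NULL /* user_data passed back to the callbacks */);
  /* ... later, during shutdown ... */
  grpc_udp_server_destroy(exec_ctx, s, on_done);
}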
diff --git a/src/core/lib/iomgr/unix_sockets_posix.cc b/src/core/lib/iomgr/unix_sockets_posix.cc
index 35f898f13a..5d3689c38f 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.cc
+++ b/src/core/lib/iomgr/unix_sockets_posix.cc
@@ -36,12 +36,12 @@ void grpc_create_socketpair_if_unix(int sv[2]) {
GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
}
-grpc_error *grpc_resolve_unix_domain_address(const char *name,
- grpc_resolved_addresses **addrs) {
- struct sockaddr_un *un;
- if (strlen(name) > GPR_ARRAY_SIZE(((struct sockaddr_un *)0)->sun_path) - 1) {
- char *err_msg;
- grpc_error *err;
+grpc_error* grpc_resolve_unix_domain_address(const char* name,
+ grpc_resolved_addresses** addrs) {
+ struct sockaddr_un* un;
+ if (strlen(name) > GPR_ARRAY_SIZE(((struct sockaddr_un*)0)->sun_path) - 1) {
+ char* err_msg;
+ grpc_error* err;
gpr_asprintf(&err_msg,
"Path name should not have more than %" PRIuPTR " characters.",
GPR_ARRAY_SIZE(un->sun_path) - 1);
@@ -50,29 +50,29 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
return err;
}
*addrs =
- (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
+ (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addrs)->naddrs = 1;
(*addrs)->addrs =
- (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
- un = (struct sockaddr_un *)(*addrs)->addrs->addr;
+ (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
+ un = (struct sockaddr_un*)(*addrs)->addrs->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, name);
(*addrs)->addrs->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
return GRPC_ERROR_NONE;
}
-int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
return addr->sa_family == AF_UNIX;
}
void grpc_unlink_if_unix_domain_socket(
- const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+ const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return;
}
- struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr;
+ struct sockaddr_un* un = (struct sockaddr_un*)resolved_addr->addr;
struct stat st;
if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
@@ -80,15 +80,15 @@ void grpc_unlink_if_unix_domain_socket(
}
}
-char *grpc_sockaddr_to_uri_unix_if_possible(
- const grpc_resolved_address *resolved_addr) {
- const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
+char* grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address* resolved_addr) {
+ const struct sockaddr* addr = (const struct sockaddr*)resolved_addr->addr;
if (addr->sa_family != AF_UNIX) {
return NULL;
}
- char *result;
- gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un *)addr)->sun_path);
+ char* result;
+ gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un*)addr)->sun_path);
return result;
}
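A small usage sketch of the three helpers above (illustrative; grpc_resolved_addresses_destroy is assumed to be available from the resolve_address API):

#include <grpc/support/alloc.h>
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"

static void unix_address_roundtrip(void) {
  grpc_resolved_addresses* addrs = NULL;
  grpc_error* err = grpc_resolve_unix_domain_address("/tmp/grpc.sock", &addrs);
  if (err != GRPC_ERROR_NONE) {
    GRPC_ERROR_UNREF(err);
    return;
  }
  GPR_ASSERT(addrs->naddrs == 1);
  GPR_ASSERT(grpc_is_unix_socket(&addrs->addrs[0]));
  char* uri = grpc_sockaddr_to_uri_unix_if_possible(&addrs->addrs[0]);
  /* uri now reads "unix:/tmp/grpc.sock" */
  gpr_free(uri);
  grpc_resolved_addresses_destroy(addrs);
}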
diff --git a/src/core/lib/iomgr/unix_sockets_posix.h b/src/core/lib/iomgr/unix_sockets_posix.h
index 3e7f9c7d1e..be3c33d9c2 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.h
+++ b/src/core/lib/iomgr/unix_sockets_posix.h
@@ -31,16 +31,16 @@ extern "C" {
void grpc_create_socketpair_if_unix(int sv[2]);
-grpc_error *grpc_resolve_unix_domain_address(
- const char *name, grpc_resolved_addresses **addresses);
+grpc_error* grpc_resolve_unix_domain_address(
+ const char* name, grpc_resolved_addresses** addresses);
-int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr);
+int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr);
void grpc_unlink_if_unix_domain_socket(
- const grpc_resolved_address *resolved_addr);
+ const grpc_resolved_address* resolved_addr);
-char *grpc_sockaddr_to_uri_unix_if_possible(
- const grpc_resolved_address *resolved_addr);
+char* grpc_sockaddr_to_uri_unix_if_possible(
+ const grpc_resolved_address* resolved_addr);
#ifdef __cplusplus
}
diff --git a/src/core/lib/iomgr/unix_sockets_posix_noop.cc b/src/core/lib/iomgr/unix_sockets_posix_noop.cc
index e46b1c003d..fbd9602e1b 100644
--- a/src/core/lib/iomgr/unix_sockets_posix_noop.cc
+++ b/src/core/lib/iomgr/unix_sockets_posix_noop.cc
@@ -29,18 +29,18 @@ void grpc_create_socketpair_if_unix(int sv[2]) {
GPR_ASSERT(0);
}
-grpc_error *grpc_resolve_unix_domain_address(
- const char *name, grpc_resolved_addresses **addresses) {
+grpc_error* grpc_resolve_unix_domain_address(
+ const char* name, grpc_resolved_addresses** addresses) {
*addresses = NULL;
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Unix domain sockets are not supported on Windows");
}
-int grpc_is_unix_socket(const grpc_resolved_address *addr) { return false; }
+int grpc_is_unix_socket(const grpc_resolved_address* addr) { return false; }
-void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address *addr) {}
+void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address* addr) {}
-char *grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address *addr) {
+char* grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address* addr) {
return NULL;
}
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.cc b/src/core/lib/iomgr/wakeup_fd_posix.cc
index 9af96d314b..dcad61b097 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.cc
+++ b/src/core/lib/iomgr/wakeup_fd_posix.cc
@@ -25,7 +25,7 @@
#include "src/core/lib/iomgr/wakeup_fd_pipe.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
-static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
+static const grpc_wakeup_fd_vtable* wakeup_fd_vtable = NULL;
int grpc_allow_specialized_wakeup_fd = 1;
int grpc_allow_pipe_wakeup_fd = 1;
@@ -53,28 +53,28 @@ int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; }
void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; }
-grpc_error *grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
+grpc_error* grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.init(fd_info);
}
return wakeup_fd_vtable->init(fd_info);
}
-grpc_error *grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
+grpc_error* grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.consume(fd_info);
}
return wakeup_fd_vtable->consume(fd_info);
}
-grpc_error *grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
+grpc_error* grpc_wakeup_fd_wakeup(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
return grpc_cv_wakeup_fd_vtable.wakeup(fd_info);
}
return wakeup_fd_vtable->wakeup(fd_info);
}
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info) {
if (cv_wakeup_fds_enabled) {
grpc_cv_wakeup_fd_vtable.destroy(fd_info);
} else {
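The wakeup-fd lifecycle driven by these four entry points, as a hypothetical sketch (assumes grpc_wakeup_fd_global_init() has already run, which normally happens during iomgr initialization):

#include "src/core/lib/iomgr/wakeup_fd_posix.h"

static void wakeup_roundtrip(void) {
  grpc_wakeup_fd fd;
  GPR_ASSERT(grpc_wakeup_fd_init(&fd) == GRPC_ERROR_NONE);
  /* another thread kicks the poller ... */
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&fd) == GRPC_ERROR_NONE);
  /* ... and the poller acknowledges the wakeup before polling again */
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&fd) == GRPC_ERROR_NONE);
  grpc_wakeup_fd_destroy(&fd);
}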
diff --git a/src/core/lib/json/json_reader.cc b/src/core/lib/json/json_reader.cc
index 094a35176c..30039419b1 100644
--- a/src/core/lib/json/json_reader.cc
+++ b/src/core/lib/json/json_reader.cc
@@ -24,60 +24,60 @@
#include "src/core/lib/json/json_reader.h"
-static void json_reader_string_clear(grpc_json_reader *reader) {
+static void json_reader_string_clear(grpc_json_reader* reader) {
reader->vtable->string_clear(reader->userdata);
}
-static void json_reader_string_add_char(grpc_json_reader *reader, uint32_t c) {
+static void json_reader_string_add_char(grpc_json_reader* reader, uint32_t c) {
reader->vtable->string_add_char(reader->userdata, c);
}
-static void json_reader_string_add_utf32(grpc_json_reader *reader,
+static void json_reader_string_add_utf32(grpc_json_reader* reader,
uint32_t utf32) {
reader->vtable->string_add_utf32(reader->userdata, utf32);
}
-static uint32_t grpc_json_reader_read_char(grpc_json_reader *reader) {
+static uint32_t grpc_json_reader_read_char(grpc_json_reader* reader) {
return reader->vtable->read_char(reader->userdata);
}
-static void json_reader_container_begins(grpc_json_reader *reader,
+static void json_reader_container_begins(grpc_json_reader* reader,
grpc_json_type type) {
reader->vtable->container_begins(reader->userdata, type);
}
static grpc_json_type grpc_json_reader_container_ends(
- grpc_json_reader *reader) {
+ grpc_json_reader* reader) {
return reader->vtable->container_ends(reader->userdata);
}
-static void json_reader_set_key(grpc_json_reader *reader) {
+static void json_reader_set_key(grpc_json_reader* reader) {
reader->vtable->set_key(reader->userdata);
}
-static void json_reader_set_string(grpc_json_reader *reader) {
+static void json_reader_set_string(grpc_json_reader* reader) {
reader->vtable->set_string(reader->userdata);
}
-static int json_reader_set_number(grpc_json_reader *reader) {
+static int json_reader_set_number(grpc_json_reader* reader) {
return reader->vtable->set_number(reader->userdata);
}
-static void json_reader_set_true(grpc_json_reader *reader) {
+static void json_reader_set_true(grpc_json_reader* reader) {
reader->vtable->set_true(reader->userdata);
}
-static void json_reader_set_false(grpc_json_reader *reader) {
+static void json_reader_set_false(grpc_json_reader* reader) {
reader->vtable->set_false(reader->userdata);
}
-static void json_reader_set_null(grpc_json_reader *reader) {
+static void json_reader_set_null(grpc_json_reader* reader) {
reader->vtable->set_null(reader->userdata);
}
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader *reader,
- grpc_json_reader_vtable *vtable, void *userdata) {
+void grpc_json_reader_init(grpc_json_reader* reader,
+ grpc_json_reader_vtable* vtable, void* userdata) {
memset(reader, 0, sizeof(*reader));
reader->vtable = vtable;
reader->userdata = userdata;
@@ -85,13 +85,13 @@ void grpc_json_reader_init(grpc_json_reader *reader,
reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
}
-int grpc_json_reader_is_complete(grpc_json_reader *reader) {
+int grpc_json_reader_is_complete(grpc_json_reader* reader) {
return ((reader->depth == 0) &&
((reader->state == GRPC_JSON_STATE_END) ||
(reader->state == GRPC_JSON_STATE_VALUE_END)));
}
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
uint32_t c, success;
/* This state-machine is a strict implementation of ECMA-404 */
@@ -177,8 +177,8 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) {
if (!success) return GRPC_JSON_PARSE_ERROR;
json_reader_string_clear(reader);
reader->state = GRPC_JSON_STATE_VALUE_END;
- /* The missing break here is intentional. */
- /* fallthrough */
+ /* The missing break here is intentional. */
+ /* fallthrough */
case GRPC_JSON_STATE_VALUE_END:
case GRPC_JSON_STATE_OBJECT_KEY_BEGIN:
diff --git a/src/core/lib/json/json_reader.h b/src/core/lib/json/json_reader.h
index 7f14a9a9c8..2636d2b1d9 100644
--- a/src/core/lib/json/json_reader.h
+++ b/src/core/lib/json/json_reader.h
@@ -71,27 +71,27 @@ struct grpc_json_reader;
typedef struct grpc_json_reader_vtable {
/* Clears your internal string scratchpad. */
- void (*string_clear)(void *userdata);
+ void (*string_clear)(void* userdata);
/* Adds a char to the string scratchpad. */
- void (*string_add_char)(void *userdata, uint32_t c);
+ void (*string_add_char)(void* userdata, uint32_t c);
/* Adds a utf32 char to the string scratchpad. */
- void (*string_add_utf32)(void *userdata, uint32_t c);
+ void (*string_add_utf32)(void* userdata, uint32_t c);
/* Reads a character from your input. May be utf-8, 16 or 32. */
- uint32_t (*read_char)(void *userdata);
+ uint32_t (*read_char)(void* userdata);
/* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */
- void (*container_begins)(void *userdata, grpc_json_type type);
+ void (*container_begins)(void* userdata, grpc_json_type type);
/* Ends the current container. Must return the type of its parent. */
- grpc_json_type (*container_ends)(void *userdata);
+ grpc_json_type (*container_ends)(void* userdata);
/* Your internal string scratchpad is an object's key. */
- void (*set_key)(void *userdata);
+ void (*set_key)(void* userdata);
/* Your internal string scratchpad is a string value. */
- void (*set_string)(void *userdata);
+ void (*set_string)(void* userdata);
/* Your internal string scratchpad is a numerical value. Return 1 if valid. */
- int (*set_number)(void *userdata);
+ int (*set_number)(void* userdata);
/* Sets the values true, false or null. */
- void (*set_true)(void *userdata);
- void (*set_false)(void *userdata);
- void (*set_null)(void *userdata);
+ void (*set_true)(void* userdata);
+ void (*set_false)(void* userdata);
+ void (*set_null)(void* userdata);
} grpc_json_reader_vtable;
typedef struct grpc_json_reader {
@@ -99,8 +99,8 @@ typedef struct grpc_json_reader {
* The definition is public so you can put it on your stack.
*/
- void *userdata;
- grpc_json_reader_vtable *vtable;
+ void* userdata;
+ grpc_json_reader_vtable* vtable;
int depth;
int in_object;
int in_array;
@@ -133,18 +133,18 @@ typedef enum {
* . GRPC_JSON_INTERNAL_ERROR if the parser somehow ended into an invalid
* internal state.
*/
-grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader);
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader);
/* Call this function to initialize the reader structure. */
-void grpc_json_reader_init(grpc_json_reader *reader,
- grpc_json_reader_vtable *vtable, void *userdata);
+void grpc_json_reader_init(grpc_json_reader* reader,
+ grpc_json_reader_vtable* vtable, void* userdata);
/* You may call this from the read_char callback if you don't know where is the
* end of your input stream, and you'd like the json reader to hint you that it
* has completed reading its input, so you can return an EOF to it. Note that
* there might still be trailing whitespaces after that point.
*/
-int grpc_json_reader_is_complete(grpc_json_reader *reader);
+int grpc_json_reader_is_complete(grpc_json_reader* reader);
#ifdef __cplusplus
}
diff --git a/src/core/lib/json/json_string.cc b/src/core/lib/json/json_string.cc
index 3178d2d2b4..8674d72b7e 100644
--- a/src/core/lib/json/json_string.cc
+++ b/src/core/lib/json/json_string.cc
@@ -38,13 +38,13 @@
* input size, and never expands it.
*/
typedef struct {
- grpc_json *top;
- grpc_json *current_container;
- grpc_json *current_value;
- uint8_t *input;
- uint8_t *key;
- uint8_t *string;
- uint8_t *string_ptr;
+ grpc_json* top;
+ grpc_json* current_container;
+ grpc_json* current_value;
+ uint8_t* input;
+ uint8_t* key;
+ uint8_t* string;
+ uint8_t* string_ptr;
size_t remaining_input;
} json_reader_userdata;
@@ -52,7 +52,7 @@ typedef struct {
* The point is that we allocate that string in chunks of 256 bytes.
*/
typedef struct {
- char *output;
+ char* output;
size_t free_space;
size_t string_len;
size_t allocated;
@@ -62,35 +62,35 @@ typedef struct {
* and will enlarge it if necessary. We're only allocating chunks of 256
* bytes at a time (or multiples thereof).
*/
-static void json_writer_output_check(void *userdata, size_t needed) {
- json_writer_userdata *state = (json_writer_userdata *)userdata;
+static void json_writer_output_check(void* userdata, size_t needed) {
+ json_writer_userdata* state = (json_writer_userdata*)userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xffU;
- state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
+ state->output = (char*)gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
}
/* These are needed by the writer's implementation. */
-static void json_writer_output_char(void *userdata, char c) {
- json_writer_userdata *state = (json_writer_userdata *)userdata;
+static void json_writer_output_char(void* userdata, char c) {
+ json_writer_userdata* state = (json_writer_userdata*)userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
}
-static void json_writer_output_string_with_len(void *userdata, const char *str,
+static void json_writer_output_string_with_len(void* userdata, const char* str,
size_t len) {
- json_writer_userdata *state = (json_writer_userdata *)userdata;
+ json_writer_userdata* state = (json_writer_userdata*)userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
state->free_space -= len;
}
-static void json_writer_output_string(void *userdata, const char *str) {
+static void json_writer_output_string(void* userdata, const char* str) {
size_t len = strlen(str);
json_writer_output_string_with_len(userdata, str, len);
}
@@ -98,8 +98,8 @@ static void json_writer_output_string(void *userdata, const char *str) {
/* The reader asks us to clear our scratchpad. In our case, we'll simply mark
* the end of the current string, and advance our output pointer.
*/
-static void json_reader_string_clear(void *userdata) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
+static void json_reader_string_clear(void* userdata) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@@ -107,8 +107,8 @@ static void json_reader_string_clear(void *userdata) {
state->string = state->string_ptr;
}
-static void json_reader_string_add_char(void *userdata, uint32_t c) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
+static void json_reader_string_add_char(void* userdata, uint32_t c) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (uint8_t)c;
@@ -117,7 +117,7 @@ static void json_reader_string_add_char(void *userdata, uint32_t c) {
/* We are converting a UTF-32 character into UTF-8 here,
* as described by RFC3629.
*/
-static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
+static void json_reader_string_add_utf32(void* userdata, uint32_t c) {
if (c <= 0x7f) {
json_reader_string_add_char(userdata, c);
} else if (c <= 0x7ff) {
@@ -147,9 +147,9 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
/* We consider that the input may be a zero-terminated string. So we
* can end up hitting eof before the end of the alleged string length.
*/
-static uint32_t json_reader_read_char(void *userdata) {
+static uint32_t json_reader_read_char(void* userdata) {
uint32_t r;
- json_reader_userdata *state = (json_reader_userdata *)userdata;
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@@ -167,9 +167,9 @@ static uint32_t json_reader_read_char(void *userdata) {
/* Helper function to create a new grpc_json object and link it into
* our tree-in-progress inside our opaque structure.
*/
-static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
- grpc_json *json = grpc_json_create(type);
+static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
+ grpc_json* json = grpc_json_create(type);
json->parent = state->current_container;
json->prev = state->current_value;
@@ -183,7 +183,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
json->parent->child = json;
}
if (json->parent->type == GRPC_JSON_OBJECT) {
- json->key = (char *)state->key;
+ json->key = (char*)state->key;
}
}
if (!state->top) {
@@ -193,9 +193,9 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
return json;
}
-static void json_reader_container_begins(void *userdata, grpc_json_type type) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
- grpc_json *container;
+static void json_reader_container_begins(void* userdata, grpc_json_type type) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
+ grpc_json* container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@@ -213,9 +213,9 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
* Also note that if we're at the top of the tree, and the last container
* ends, we have to return GRPC_JSON_TOP_LEVEL.
*/
-static grpc_json_type json_reader_container_ends(void *userdata) {
+static grpc_json_type json_reader_container_ends(void* userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
- json_reader_userdata *state = (json_reader_userdata *)userdata;
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
GPR_ASSERT(state->current_container);
@@ -235,36 +235,36 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
* Note that in the set_number case, we're not going to try interpreting it.
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
-static void json_reader_set_key(void *userdata) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
+static void json_reader_set_key(void* userdata) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
state->key = state->string;
}
-static void json_reader_set_string(void *userdata) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
- grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
- json->value = (char *)state->string;
+static void json_reader_set_string(void* userdata) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
+ grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING);
+ json->value = (char*)state->string;
}
-static int json_reader_set_number(void *userdata) {
- json_reader_userdata *state = (json_reader_userdata *)userdata;
- grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
- json->value = (char *)state->string;
+static int json_reader_set_number(void* userdata) {
+ json_reader_userdata* state = (json_reader_userdata*)userdata;
+ grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
+ json->value = (char*)state->string;
return 1;
}
/* The object types true, false and null are self-sufficient, and don't need
* any more information beside their type.
*/
-static void json_reader_set_true(void *userdata) {
+static void json_reader_set_true(void* userdata) {
json_create_and_link(userdata, GRPC_JSON_TRUE);
}
-static void json_reader_set_false(void *userdata) {
+static void json_reader_set_false(void* userdata) {
json_create_and_link(userdata, GRPC_JSON_FALSE);
}
-static void json_reader_set_null(void *userdata) {
+static void json_reader_set_null(void* userdata) {
json_create_and_link(userdata, GRPC_JSON_NULL);
}
@@ -277,17 +277,17 @@ static grpc_json_reader_vtable reader_vtable = {
json_reader_set_false, json_reader_set_null};
/* And finally, let's define our public API. */
-grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) {
+grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
grpc_json_reader reader;
json_reader_userdata state;
- grpc_json *json = NULL;
+ grpc_json* json = NULL;
grpc_json_reader_status status;
if (!input) return NULL;
state.top = state.current_container = state.current_value = NULL;
state.string = state.key = NULL;
- state.string_ptr = state.input = (uint8_t *)input;
+ state.string_ptr = state.input = (uint8_t*)input;
state.remaining_input = size;
grpc_json_reader_init(&reader, &reader_vtable, &state);
@@ -304,11 +304,11 @@ grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) {
#define UNBOUND_JSON_STRING_LENGTH 0x7fffffff
-grpc_json *grpc_json_parse_string(char *input) {
+grpc_json* grpc_json_parse_string(char* input) {
return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH);
}
-static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json,
+static void json_dump_recursive(grpc_json_writer* writer, grpc_json* json,
int in_object) {
while (json) {
if (in_object) grpc_json_writer_object_key(writer, json->key);
@@ -348,7 +348,7 @@ static grpc_json_writer_vtable writer_vtable = {
json_writer_output_char, json_writer_output_string,
json_writer_output_string_with_len};
-char *grpc_json_dump_to_string(grpc_json *json, int indent) {
+char* grpc_json_dump_to_string(grpc_json* json, int indent) {
grpc_json_writer writer;
json_writer_userdata state;
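The public entry points defined in this file tie the reader and writer together; a minimal sketch of a parse/dump round trip (grpc_json_destroy from json.h is assumed; note the parser mutates the input buffer in place, so it must be writable):

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/json/json.h"

static void json_roundtrip(void) {
  char* input = gpr_strdup("{\"a\": [1, 2, 3]}"); /* writable copy */
  grpc_json* json = grpc_json_parse_string(input);
  if (json != NULL) {
    char* condensed = grpc_json_dump_to_string(json, 0 /* indent */);
    /* condensed now holds {"a":[1,2,3]} */
    gpr_free(condensed);
    grpc_json_destroy(json);
  }
  gpr_free(input);
}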
diff --git a/src/core/lib/json/json_writer.cc b/src/core/lib/json/json_writer.cc
index eab1bff7a6..0b9c7c30ea 100644
--- a/src/core/lib/json/json_writer.cc
+++ b/src/core/lib/json/json_writer.cc
@@ -22,22 +22,22 @@
#include "src/core/lib/json/json_writer.h"
-static void json_writer_output_char(grpc_json_writer *writer, char c) {
+static void json_writer_output_char(grpc_json_writer* writer, char c) {
writer->vtable->output_char(writer->userdata, c);
}
-static void json_writer_output_string(grpc_json_writer *writer,
- const char *str) {
+static void json_writer_output_string(grpc_json_writer* writer,
+ const char* str) {
writer->vtable->output_string(writer->userdata, str);
}
-static void json_writer_output_string_with_len(grpc_json_writer *writer,
- const char *str, size_t len) {
+static void json_writer_output_string_with_len(grpc_json_writer* writer,
+ const char* str, size_t len) {
writer->vtable->output_string_with_len(writer->userdata, str, len);
}
-void grpc_json_writer_init(grpc_json_writer *writer, int indent,
- grpc_json_writer_vtable *vtable, void *userdata) {
+void grpc_json_writer_init(grpc_json_writer* writer, int indent,
+ grpc_json_writer_vtable* vtable, void* userdata) {
memset(writer, 0, sizeof(*writer));
writer->container_empty = 1;
writer->indent = indent;
@@ -45,7 +45,7 @@ void grpc_json_writer_init(grpc_json_writer *writer, int indent,
writer->userdata = userdata;
}
-static void json_writer_output_indent(grpc_json_writer *writer) {
+static void json_writer_output_indent(grpc_json_writer* writer) {
static const char spacesstr[] =
" "
" "
@@ -73,7 +73,7 @@ static void json_writer_output_indent(grpc_json_writer *writer) {
writer, spacesstr + sizeof(spacesstr) - 1 - spaces, spaces);
}
-static void json_writer_value_end(grpc_json_writer *writer) {
+static void json_writer_value_end(grpc_json_writer* writer) {
if (writer->container_empty) {
writer->container_empty = 0;
if ((writer->indent == 0) || (writer->depth == 0)) return;
@@ -85,7 +85,7 @@ static void json_writer_value_end(grpc_json_writer *writer) {
}
}
-static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) {
+static void json_writer_escape_utf16(grpc_json_writer* writer, uint16_t utf16) {
static const char hex[] = "0123456789abcdef";
json_writer_output_string_with_len(writer, "\\u", 2);
@@ -95,8 +95,8 @@ static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) {
json_writer_output_char(writer, hex[(utf16)&0x0f]);
}
-static void json_writer_escape_string(grpc_json_writer *writer,
- const char *string) {
+static void json_writer_escape_string(grpc_json_writer* writer,
+ const char* string) {
json_writer_output_char(writer, '"');
for (;;) {
@@ -190,7 +190,7 @@ static void json_writer_escape_string(grpc_json_writer *writer,
json_writer_output_char(writer, '"');
}
-void grpc_json_writer_container_begins(grpc_json_writer *writer,
+void grpc_json_writer_container_begins(grpc_json_writer* writer,
grpc_json_type type) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
@@ -200,7 +200,7 @@ void grpc_json_writer_container_begins(grpc_json_writer *writer,
writer->depth++;
}
-void grpc_json_writer_container_ends(grpc_json_writer *writer,
+void grpc_json_writer_container_ends(grpc_json_writer* writer,
grpc_json_type type) {
if (writer->indent && !writer->container_empty)
json_writer_output_char(writer, '\n');
@@ -211,7 +211,7 @@ void grpc_json_writer_container_ends(grpc_json_writer *writer,
writer->got_key = 0;
}
-void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) {
+void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
@@ -219,23 +219,23 @@ void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) {
writer->got_key = 1;
}
-void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string) {
+void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string(writer, string);
writer->got_key = 0;
}
-void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
- const char *string, size_t len) {
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
+ const char* string, size_t len) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_output_string_with_len(writer, string, len);
writer->got_key = 0;
}
-void grpc_json_writer_value_string(grpc_json_writer *writer,
- const char *string) {
+void grpc_json_writer_value_string(grpc_json_writer* writer,
+ const char* string) {
if (!writer->got_key) json_writer_value_end(writer);
json_writer_output_indent(writer);
json_writer_escape_string(writer, string);
diff --git a/src/core/lib/json/json_writer.h b/src/core/lib/json/json_writer.h
index 132d1f24e8..93eeb2031b 100644
--- a/src/core/lib/json/json_writer.h
+++ b/src/core/lib/json/json_writer.h
@@ -41,17 +41,17 @@ extern "C" {
typedef struct grpc_json_writer_vtable {
/* Adds a character to the output stream. */
- void (*output_char)(void *userdata, char);
+ void (*output_char)(void* userdata, char);
/* Adds a zero-terminated string to the output stream. */
- void (*output_string)(void *userdata, const char *str);
+ void (*output_string)(void* userdata, const char* str);
/* Adds a fixed-length string to the output stream. */
- void (*output_string_with_len)(void *userdata, const char *str, size_t len);
+ void (*output_string_with_len)(void* userdata, const char* str, size_t len);
} grpc_json_writer_vtable;
typedef struct grpc_json_writer {
- void *userdata;
- grpc_json_writer_vtable *vtable;
+ void* userdata;
+ grpc_json_writer_vtable* vtable;
int indent;
int depth;
int container_empty;
@@ -63,25 +63,25 @@ typedef struct grpc_json_writer {
* use indent=0, then the output will not have any newlines either, thus
* emitting a condensed json output.
*/
-void grpc_json_writer_init(grpc_json_writer *writer, int indent,
- grpc_json_writer_vtable *vtable, void *userdata);
+void grpc_json_writer_init(grpc_json_writer* writer, int indent,
+ grpc_json_writer_vtable* vtable, void* userdata);
/* Signals the beginning of a container. */
-void grpc_json_writer_container_begins(grpc_json_writer *writer,
+void grpc_json_writer_container_begins(grpc_json_writer* writer,
grpc_json_type type);
/* Signals the end of a container. */
-void grpc_json_writer_container_ends(grpc_json_writer *writer,
+void grpc_json_writer_container_ends(grpc_json_writer* writer,
grpc_json_type type);
/* Writes down an object key for the next value. */
-void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string);
+void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string);
/* Sets a raw value. Useful for numbers. */
-void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string);
+void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string);
/* Sets a raw value with its length. Useful for values like true or false. */
-void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer,
- const char *string, size_t len);
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer,
+ const char* string, size_t len);
/* Sets a string value. It'll be escaped, and utf-8 validated. */
-void grpc_json_writer_value_string(grpc_json_writer *writer,
- const char *string);
+void grpc_json_writer_value_string(grpc_json_writer* writer,
+ const char* string);
#ifdef __cplusplus
}
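A minimal sketch of a vtable that streams the writer's output to a FILE*, using only the declarations above (illustrative, not part of this commit):

#include <stdio.h>
#include "src/core/lib/json/json_writer.h"

static void out_char(void* userdata, char c) { fputc(c, (FILE*)userdata); }
static void out_string(void* userdata, const char* str) {
  fputs(str, (FILE*)userdata);
}
static void out_string_with_len(void* userdata, const char* str, size_t len) {
  fwrite(str, 1, len, (FILE*)userdata);
}

static grpc_json_writer_vtable stdio_vtable = {out_char, out_string,
                                               out_string_with_len};

static void write_condensed_object(FILE* out) {
  grpc_json_writer writer;
  grpc_json_writer_init(&writer, 0 /* indent=0: condensed output */,
                        &stdio_vtable, out);
  grpc_json_writer_container_begins(&writer, GRPC_JSON_OBJECT);
  grpc_json_writer_object_key(&writer, "name");
  grpc_json_writer_value_string(&writer, "grpc");
  grpc_json_writer_object_key(&writer, "ok");
  grpc_json_writer_value_raw_with_len(&writer, "true", 4);
  grpc_json_writer_container_ends(&writer, GRPC_JSON_OBJECT);
  /* emits {"name":"grpc","ok":true} */
}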
diff --git a/src/core/lib/profiling/basic_timers.cc b/src/core/lib/profiling/basic_timers.cc
index 0ae7d7f600..3ec6280e6b 100644
--- a/src/core/lib/profiling/basic_timers.cc
+++ b/src/core/lib/profiling/basic_timers.cc
@@ -36,8 +36,8 @@ typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
typedef struct gpr_timer_entry {
gpr_timespec tm;
- const char *tagstr;
- const char *file;
+ const char* tagstr;
+ const char* file;
short line;
char type;
uint8_t important;
@@ -48,21 +48,21 @@ typedef struct gpr_timer_entry {
typedef struct gpr_timer_log {
size_t num_entries;
- struct gpr_timer_log *next;
- struct gpr_timer_log *prev;
+ struct gpr_timer_log* next;
+ struct gpr_timer_log* prev;
gpr_timer_entry log[MAX_COUNT];
} gpr_timer_log;
typedef struct gpr_timer_log_list {
- gpr_timer_log *head;
+ gpr_timer_log* head;
/* valid iff head!=NULL */
- gpr_timer_log *tail;
+ gpr_timer_log* tail;
} gpr_timer_log_list;
-static __thread gpr_timer_log *g_thread_log;
+static __thread gpr_timer_log* g_thread_log;
static gpr_once g_once_init = GPR_ONCE_INIT;
-static FILE *output_file;
-static const char *output_filename_or_null = NULL;
+static FILE* output_file;
+static const char* output_filename_or_null = NULL;
static pthread_mutex_t g_mu;
static pthread_cond_t g_cv;
static gpr_timer_log_list g_in_progress_logs;
@@ -73,7 +73,7 @@ static __thread int g_thread_id;
static int g_next_thread_id;
static int g_writing_enabled = 1;
-static const char *output_filename() {
+static const char* output_filename() {
if (output_filename_or_null == NULL) {
output_filename_or_null = gpr_getenv("LATENCY_TRACE");
if (output_filename_or_null == NULL ||
@@ -84,7 +84,7 @@ static const char *output_filename() {
return output_filename_or_null;
}
-static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
+static int timer_log_push_back(gpr_timer_log_list* list, gpr_timer_log* log) {
if (list->head == NULL) {
list->head = list->tail = log;
log->next = log->prev = NULL;
@@ -98,8 +98,8 @@ static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) {
}
}
-static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) {
- gpr_timer_log *out = list->head;
+static gpr_timer_log* timer_log_pop_front(gpr_timer_log_list* list) {
+ gpr_timer_log* out = list->head;
if (out != NULL) {
list->head = out->next;
if (list->head != NULL) {
@@ -111,7 +111,7 @@ static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) {
return out;
}
-static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) {
+static void timer_log_remove(gpr_timer_log_list* list, gpr_timer_log* log) {
if (log->prev == NULL) {
list->head = log->next;
if (list->head != NULL) {
@@ -130,13 +130,13 @@ static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) {
}
}
-static void write_log(gpr_timer_log *log) {
+static void write_log(gpr_timer_log* log) {
size_t i;
if (output_file == NULL) {
output_file = fopen(output_filename(), "w");
}
for (i = 0; i < log->num_entries; i++) {
- gpr_timer_entry *entry = &(log->log[i]);
+ gpr_timer_entry* entry = &(log->log[i]);
if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) {
entry->tm = gpr_time_0(entry->tm.clock_type);
}
@@ -149,8 +149,8 @@ static void write_log(gpr_timer_log *log) {
}
}
-static void writing_thread(void *unused) {
- gpr_timer_log *log;
+static void writing_thread(void* unused) {
+ gpr_timer_log* log;
pthread_mutex_lock(&g_mu);
for (;;) {
while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) {
@@ -169,8 +169,8 @@ static void writing_thread(void *unused) {
}
}
-static void flush_logs(gpr_timer_log_list *list) {
- gpr_timer_log *log;
+static void flush_logs(gpr_timer_log_list* list) {
+ gpr_timer_log* log;
while ((log = timer_log_pop_front(list)) != NULL) {
write_log(log);
free(log);
@@ -196,7 +196,7 @@ static void finish_writing(void) {
}
}
-void gpr_timers_set_log_filename(const char *filename) {
+void gpr_timers_set_log_filename(const char* filename) {
output_filename_or_null = filename;
}
@@ -209,7 +209,7 @@ static void init_output() {
static void rotate_log() {
/* Using malloc here, as this code could end up being called by gpr_malloc */
- gpr_timer_log *log = static_cast<gpr_timer_log *>(malloc(sizeof(*log)));
+ gpr_timer_log* log = static_cast<gpr_timer_log*>(malloc(sizeof(*log)));
gpr_once_init(&g_once_init, init_output);
log->num_entries = 0;
pthread_mutex_lock(&g_mu);
@@ -226,9 +226,9 @@ static void rotate_log() {
g_thread_log = log;
}
-static void gpr_timers_log_add(const char *tagstr, marker_type type,
- int important, const char *file, int line) {
- gpr_timer_entry *entry;
+static void gpr_timers_log_add(const char* tagstr, marker_type type,
+ int important, const char* file, int line) {
+ gpr_timer_entry* entry;
if (!g_writing_enabled) {
return;
@@ -250,17 +250,17 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type,
}
/* Latency profiler API implementation. */
-void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+void gpr_timer_add_mark(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, MARK, important, file, line);
}
-void gpr_timer_begin(const char *tagstr, int important, const char *file,
+void gpr_timer_begin(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, BEGIN, important, file, line);
}
-void gpr_timer_end(const char *tagstr, int important, const char *file,
+void gpr_timer_end(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, END, important, file, line);
}
@@ -277,7 +277,7 @@ void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
-void gpr_timers_set_log_filename(const char *filename) {}
+void gpr_timers_set_log_filename(const char* filename) {}
void gpr_timer_set_enabled(int enabled) {}
#endif /* GRPC_BASIC_PROFILER */
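For reference, the C-level profiler API implemented above is used by bracketing the region of interest; a sketch, assuming a build with GRPC_BASIC_PROFILER defined so these symbols are implemented (entries land in the file named via gpr_timers_set_log_filename or the LATENCY_TRACE environment variable):

#include "src/core/lib/profiling/timers.h"

static void profiled_work(void) {
  gpr_timer_begin("profiled_work", 0 /* important */, __FILE__, __LINE__);
  /* ... the code being measured ... */
  gpr_timer_add_mark("profiled_work.checkpoint", 0, __FILE__, __LINE__);
  gpr_timer_end("profiled_work", 0, __FILE__, __LINE__);
}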
diff --git a/src/core/lib/profiling/stap_timers.cc b/src/core/lib/profiling/stap_timers.cc
index c86d74f058..5ee1c4317c 100644
--- a/src/core/lib/profiling/stap_timers.cc
+++ b/src/core/lib/profiling/stap_timers.cc
@@ -27,22 +27,22 @@
#include "src/core/lib/profiling/stap_probes.h"
/* Latency profiler API implementation. */
-void gpr_timer_add_mark(int tag, const char *tagstr, void *id, const char *file,
+void gpr_timer_add_mark(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_ADD_MARK(tag);
}
-void gpr_timer_add_important_mark(int tag, const char *tagstr, void *id,
- const char *file, int line) {
+void gpr_timer_add_important_mark(int tag, const char* tagstr, void* id,
+ const char* file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
-void gpr_timer_begin(int tag, const char *tagstr, void *id, const char *file,
+void gpr_timer_begin(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
-void gpr_timer_end(int tag, const char *tagstr, void *id, const char *file,
+void gpr_timer_end(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_TIMING_NS_END(tag);
}
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index 7f02b4bf84..8b6c254c21 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -26,14 +26,14 @@ extern "C" {
void gpr_timers_global_init(void);
void gpr_timers_global_destroy(void);
-void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
+void gpr_timer_add_mark(const char* tagstr, int important, const char* file,
int line);
-void gpr_timer_begin(const char *tagstr, int important, const char *file,
+void gpr_timer_begin(const char* tagstr, int important, const char* file,
int line);
-void gpr_timer_end(const char *tagstr, int important, const char *file,
+void gpr_timer_end(const char* tagstr, int important, const char* file,
int line);
-void gpr_timers_set_log_filename(const char *filename);
+void gpr_timers_set_log_filename(const char* filename);
void gpr_timer_set_enabled(int enabled);
@@ -92,14 +92,14 @@ void gpr_timer_set_enabled(int enabled);
namespace grpc {
class ProfileScope {
public:
- ProfileScope(const char *desc, bool important, const char *file, int line)
+ ProfileScope(const char* desc, bool important, const char* file, int line)
: desc_(desc) {
gpr_timer_begin(desc_, important ? 1 : 0, file, line);
}
~ProfileScope() { gpr_timer_end(desc_, 0, "n/a", 0); }
private:
- const char *const desc_;
+ const char* const desc_;
};
} // namespace grpc
diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc
index 31d800b9b4..b2b90e86e0 100644
--- a/src/core/lib/security/context/security_context.cc
+++ b/src/core/lib/security/context/security_context.cc
@@ -36,17 +36,17 @@ grpc_tracer_flag grpc_trace_auth_context_refcount =
/* --- grpc_call --- */
-grpc_call_error grpc_call_set_credentials(grpc_call *call,
- grpc_call_credentials *creds) {
+grpc_call_error grpc_call_set_credentials(grpc_call* call,
+ grpc_call_credentials* creds) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_client_security_context *ctx = NULL;
+ grpc_client_security_context* ctx = NULL;
GRPC_API_TRACE("grpc_call_set_credentials(call=%p, creds=%p)", 2,
(call, creds));
if (!grpc_call_is_client(call)) {
gpr_log(GPR_ERROR, "Method is client-side only.");
return GRPC_CALL_ERROR_NOT_ON_SERVER;
}
- ctx = (grpc_client_security_context *)grpc_call_context_get(
+ ctx = (grpc_client_security_context*)grpc_call_context_get(
call, GRPC_CONTEXT_SECURITY);
if (ctx == NULL) {
ctx = grpc_client_security_context_create();
@@ -61,34 +61,34 @@ grpc_call_error grpc_call_set_credentials(grpc_call *call,
return GRPC_CALL_OK;
}
-grpc_auth_context *grpc_call_auth_context(grpc_call *call) {
- void *sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY);
+grpc_auth_context* grpc_call_auth_context(grpc_call* call) {
+ void* sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY);
GRPC_API_TRACE("grpc_call_auth_context(call=%p)", 1, (call));
if (sec_ctx == NULL) return NULL;
return grpc_call_is_client(call)
? GRPC_AUTH_CONTEXT_REF(
- ((grpc_client_security_context *)sec_ctx)->auth_context,
+ ((grpc_client_security_context*)sec_ctx)->auth_context,
"grpc_call_auth_context client")
: GRPC_AUTH_CONTEXT_REF(
- ((grpc_server_security_context *)sec_ctx)->auth_context,
+ ((grpc_server_security_context*)sec_ctx)->auth_context,
"grpc_call_auth_context server");
}
-void grpc_auth_context_release(grpc_auth_context *context) {
+void grpc_auth_context_release(grpc_auth_context* context) {
GRPC_API_TRACE("grpc_auth_context_release(context=%p)", 1, (context));
GRPC_AUTH_CONTEXT_UNREF(context, "grpc_auth_context_unref");
}
/* --- grpc_client_security_context --- */
-grpc_client_security_context *grpc_client_security_context_create(void) {
- return (grpc_client_security_context *)gpr_zalloc(
+grpc_client_security_context* grpc_client_security_context_create(void) {
+ return (grpc_client_security_context*)gpr_zalloc(
sizeof(grpc_client_security_context));
}
-void grpc_client_security_context_destroy(void *ctx) {
+void grpc_client_security_context_destroy(void* ctx) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_client_security_context *c = (grpc_client_security_context *)ctx;
+ grpc_client_security_context* c = (grpc_client_security_context*)ctx;
grpc_call_credentials_unref(&exec_ctx, c->creds);
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
if (c->extension.instance != NULL && c->extension.destroy != NULL) {
@@ -100,13 +100,13 @@ void grpc_client_security_context_destroy(void *ctx) {
/* --- grpc_server_security_context --- */
-grpc_server_security_context *grpc_server_security_context_create(void) {
- return (grpc_server_security_context *)gpr_zalloc(
+grpc_server_security_context* grpc_server_security_context_create(void) {
+ return (grpc_server_security_context*)gpr_zalloc(
sizeof(grpc_server_security_context));
}
-void grpc_server_security_context_destroy(void *ctx) {
- grpc_server_security_context *c = (grpc_server_security_context *)ctx;
+void grpc_server_security_context_destroy(void* ctx) {
+ grpc_server_security_context* c = (grpc_server_security_context*)ctx;
GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
if (c->extension.instance != NULL && c->extension.destroy != NULL) {
c->extension.destroy(c->extension.instance);
@@ -118,9 +118,9 @@ void grpc_server_security_context_destroy(void *ctx) {
static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL};
-grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
- grpc_auth_context *ctx =
- (grpc_auth_context *)gpr_zalloc(sizeof(grpc_auth_context));
+grpc_auth_context* grpc_auth_context_create(grpc_auth_context* chained) {
+ grpc_auth_context* ctx =
+ (grpc_auth_context*)gpr_zalloc(sizeof(grpc_auth_context));
gpr_ref_init(&ctx->refcount, 1);
if (chained != NULL) {
ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained");
@@ -131,9 +131,9 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) {
}
#ifndef NDEBUG
-grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx,
- const char *file, int line,
- const char *reason) {
+grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* ctx,
+ const char* file, int line,
+ const char* reason) {
if (ctx == NULL) return NULL;
if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
@@ -142,7 +142,7 @@ grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx,
val + 1, reason);
}
#else
-grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
+grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* ctx) {
if (ctx == NULL) return NULL;
#endif
gpr_ref(&ctx->refcount);
@@ -150,8 +150,8 @@ grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
}
#ifndef NDEBUG
-void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line,
- const char *reason) {
+void grpc_auth_context_unref(grpc_auth_context* ctx, const char* file, int line,
+ const char* reason) {
if (ctx == NULL) return;
if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count);
@@ -160,7 +160,7 @@ void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line,
val - 1, reason);
}
#else
-void grpc_auth_context_unref(grpc_auth_context *ctx) {
+void grpc_auth_context_unref(grpc_auth_context* ctx) {
if (ctx == NULL) return;
#endif
if (gpr_unref(&ctx->refcount)) {
@@ -176,18 +176,18 @@ void grpc_auth_context_unref(grpc_auth_context *ctx) {
}
}
-const char *grpc_auth_context_peer_identity_property_name(
- const grpc_auth_context *ctx) {
+const char* grpc_auth_context_peer_identity_property_name(
+ const grpc_auth_context* ctx) {
GRPC_API_TRACE("grpc_auth_context_peer_identity_property_name(ctx=%p)", 1,
(ctx));
return ctx->peer_identity_property_name;
}
-int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
- const char *name) {
+int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context* ctx,
+ const char* name) {
grpc_auth_property_iterator it =
grpc_auth_context_find_properties_by_name(ctx, name);
- const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it);
+ const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it);
GRPC_API_TRACE(
"grpc_auth_context_set_peer_identity_property_name(ctx=%p, name=%s)", 2,
(ctx, name));
@@ -200,13 +200,13 @@ int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
return 1;
}
-int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx) {
+int grpc_auth_context_peer_is_authenticated(const grpc_auth_context* ctx) {
GRPC_API_TRACE("grpc_auth_context_peer_is_authenticated(ctx=%p)", 1, (ctx));
return ctx->peer_identity_property_name == NULL ? 0 : 1;
}
grpc_auth_property_iterator grpc_auth_context_property_iterator(
- const grpc_auth_context *ctx) {
+ const grpc_auth_context* ctx) {
grpc_auth_property_iterator it = empty_iterator;
GRPC_API_TRACE("grpc_auth_context_property_iterator(ctx=%p)", 1, (ctx));
if (ctx == NULL) return it;
@@ -214,8 +214,8 @@ grpc_auth_property_iterator grpc_auth_context_property_iterator(
return it;
}
-const grpc_auth_property *grpc_auth_property_iterator_next(
- grpc_auth_property_iterator *it) {
+const grpc_auth_property* grpc_auth_property_iterator_next(
+ grpc_auth_property_iterator* it) {
GRPC_API_TRACE("grpc_auth_property_iterator_next(it=%p)", 1, (it));
if (it == NULL || it->ctx == NULL) return NULL;
while (it->index == it->ctx->properties.count) {
@@ -227,7 +227,7 @@ const grpc_auth_property *grpc_auth_property_iterator_next(
return &it->ctx->properties.array[it->index++];
} else {
while (it->index < it->ctx->properties.count) {
- const grpc_auth_property *prop = &it->ctx->properties.array[it->index++];
+ const grpc_auth_property* prop = &it->ctx->properties.array[it->index++];
GPR_ASSERT(prop->name != NULL);
if (strcmp(it->name, prop->name) == 0) {
return prop;
@@ -239,7 +239,7 @@ const grpc_auth_property *grpc_auth_property_iterator_next(
}
grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
- const grpc_auth_context *ctx, const char *name) {
+ const grpc_auth_context* ctx, const char* name) {
grpc_auth_property_iterator it = empty_iterator;
GRPC_API_TRACE("grpc_auth_context_find_properties_by_name(ctx=%p, name=%s)",
2, (ctx, name));
@@ -250,44 +250,45 @@ grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
}
grpc_auth_property_iterator grpc_auth_context_peer_identity(
- const grpc_auth_context *ctx) {
+ const grpc_auth_context* ctx) {
GRPC_API_TRACE("grpc_auth_context_peer_identity(ctx=%p)", 1, (ctx));
if (ctx == NULL) return empty_iterator;
return grpc_auth_context_find_properties_by_name(
ctx, ctx->peer_identity_property_name);
}
-static void ensure_auth_context_capacity(grpc_auth_context *ctx) {
+static void ensure_auth_context_capacity(grpc_auth_context* ctx) {
if (ctx->properties.count == ctx->properties.capacity) {
ctx->properties.capacity =
GPR_MAX(ctx->properties.capacity + 8, ctx->properties.capacity * 2);
- ctx->properties.array = (grpc_auth_property *)gpr_realloc(
+ ctx->properties.array = (grpc_auth_property*)gpr_realloc(
ctx->properties.array,
ctx->properties.capacity * sizeof(grpc_auth_property));
}
}
-void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
- const char *value, size_t value_length) {
- grpc_auth_property *prop;
+void grpc_auth_context_add_property(grpc_auth_context* ctx, const char* name,
+ const char* value, size_t value_length) {
+ grpc_auth_property* prop;
GRPC_API_TRACE(
"grpc_auth_context_add_property(ctx=%p, name=%s, value=%*.*s, "
"value_length=%lu)",
- 6, (ctx, name, (int)value_length, (int)value_length, value,
- (unsigned long)value_length));
+ 6,
+ (ctx, name, (int)value_length, (int)value_length, value,
+ (unsigned long)value_length));
ensure_auth_context_capacity(ctx);
prop = &ctx->properties.array[ctx->properties.count++];
prop->name = gpr_strdup(name);
- prop->value = (char *)gpr_malloc(value_length + 1);
+ prop->value = (char*)gpr_malloc(value_length + 1);
memcpy(prop->value, value, value_length);
prop->value[value_length] = '\0';
prop->value_length = value_length;
}
-void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
- const char *name,
- const char *value) {
- grpc_auth_property *prop;
+void grpc_auth_context_add_cstring_property(grpc_auth_context* ctx,
+ const char* name,
+ const char* value) {
+ grpc_auth_property* prop;
GRPC_API_TRACE(
"grpc_auth_context_add_cstring_property(ctx=%p, name=%s, value=%s)", 3,
(ctx, name, value));
@@ -298,48 +299,48 @@ void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx,
prop->value_length = strlen(value);
}
-void grpc_auth_property_reset(grpc_auth_property *property) {
+void grpc_auth_property_reset(grpc_auth_property* property) {
gpr_free(property->name);
gpr_free(property->value);
memset(property, 0, sizeof(grpc_auth_property));
}
-static void auth_context_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context *)p, "auth_context_pointer_arg");
+static void auth_context_pointer_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context*)p, "auth_context_pointer_arg");
}
-static void *auth_context_pointer_arg_copy(void *p) {
- return GRPC_AUTH_CONTEXT_REF((grpc_auth_context *)p,
+static void* auth_context_pointer_arg_copy(void* p) {
+ return GRPC_AUTH_CONTEXT_REF((grpc_auth_context*)p,
"auth_context_pointer_arg");
}
-static int auth_context_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+static int auth_context_pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
auth_context_pointer_arg_copy, auth_context_pointer_arg_destroy,
auth_context_pointer_cmp};
-grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
- return grpc_channel_arg_pointer_create((char *)GRPC_AUTH_CONTEXT_ARG, p,
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context* p) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_AUTH_CONTEXT_ARG, p,
&auth_context_pointer_vtable);
}
-grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) {
+grpc_auth_context* grpc_auth_context_from_arg(const grpc_arg* arg) {
if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_AUTH_CONTEXT_ARG);
return NULL;
}
- return (grpc_auth_context *)arg->value.pointer.p;
+ return (grpc_auth_context*)arg->value.pointer.p;
}
-grpc_auth_context *grpc_find_auth_context_in_args(
- const grpc_channel_args *args) {
+grpc_auth_context* grpc_find_auth_context_in_args(
+ const grpc_channel_args* args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
- grpc_auth_context *p = grpc_auth_context_from_arg(&args->args[i]);
+ grpc_auth_context* p = grpc_auth_context_from_arg(&args->args[i]);
if (p != NULL) return p;
}
return NULL;
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index 0df39257a7..4f049c4a3b 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -37,21 +37,21 @@ extern "C" {
/* Property names are always NULL terminated. */
typedef struct {
- grpc_auth_property *array;
+ grpc_auth_property* array;
size_t count;
size_t capacity;
} grpc_auth_property_array;
struct grpc_auth_context {
- struct grpc_auth_context *chained;
+ struct grpc_auth_context* chained;
grpc_auth_property_array properties;
gpr_refcount refcount;
- const char *peer_identity_property_name;
- grpc_pollset *pollset;
+ const char* peer_identity_property_name;
+ grpc_pollset* pollset;
};
/* Creation. */
-grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained);
+grpc_auth_context* grpc_auth_context_create(grpc_auth_context* chained);
/* Refcounting. */
#ifndef NDEBUG
@@ -59,19 +59,19 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained);
grpc_auth_context_ref((p), __FILE__, __LINE__, (r))
#define GRPC_AUTH_CONTEXT_UNREF(p, r) \
grpc_auth_context_unref((p), __FILE__, __LINE__, (r))
-grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *policy,
- const char *file, int line,
- const char *reason);
-void grpc_auth_context_unref(grpc_auth_context *policy, const char *file,
- int line, const char *reason);
+grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* policy,
+ const char* file, int line,
+ const char* reason);
+void grpc_auth_context_unref(grpc_auth_context* policy, const char* file,
+ int line, const char* reason);
#else
#define GRPC_AUTH_CONTEXT_REF(p, r) grpc_auth_context_ref((p))
#define GRPC_AUTH_CONTEXT_UNREF(p, r) grpc_auth_context_unref((p))
-grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *policy);
-void grpc_auth_context_unref(grpc_auth_context *policy);
+grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* policy);
+void grpc_auth_context_unref(grpc_auth_context* policy);
#endif
-void grpc_auth_property_reset(grpc_auth_property *property);
+void grpc_auth_property_reset(grpc_auth_property* property);
/* --- grpc_security_context_extension ---
@@ -79,8 +79,8 @@ void grpc_auth_property_reset(grpc_auth_property *property);
later by a higher level method on a grpc_call object. */
typedef struct {
- void *instance;
- void (*destroy)(void *);
+ void* instance;
+ void (*destroy)(void*);
} grpc_security_context_extension;
/* --- grpc_client_security_context ---
@@ -88,33 +88,33 @@ typedef struct {
Internal client-side security context. */
typedef struct {
- grpc_call_credentials *creds;
- grpc_auth_context *auth_context;
+ grpc_call_credentials* creds;
+ grpc_auth_context* auth_context;
grpc_security_context_extension extension;
} grpc_client_security_context;
-grpc_client_security_context *grpc_client_security_context_create(void);
-void grpc_client_security_context_destroy(void *ctx);
+grpc_client_security_context* grpc_client_security_context_create(void);
+void grpc_client_security_context_destroy(void* ctx);
/* --- grpc_server_security_context ---
Internal server-side security context. */
typedef struct {
- grpc_auth_context *auth_context;
+ grpc_auth_context* auth_context;
grpc_security_context_extension extension;
} grpc_server_security_context;
-grpc_server_security_context *grpc_server_security_context_create(void);
-void grpc_server_security_context_destroy(void *ctx);
+grpc_server_security_context* grpc_server_security_context_create(void);
+void grpc_server_security_context_destroy(void* ctx);
/* --- Channel args for auth context --- */
#define GRPC_AUTH_CONTEXT_ARG "grpc.auth_context"
-grpc_arg grpc_auth_context_to_arg(grpc_auth_context *c);
-grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg);
-grpc_auth_context *grpc_find_auth_context_in_args(
- const grpc_channel_args *args);
+grpc_arg grpc_auth_context_to_arg(grpc_auth_context* c);
+grpc_auth_context* grpc_auth_context_from_arg(const grpc_arg* arg);
+grpc_auth_context* grpc_find_auth_context_in_args(
+ const grpc_channel_args* args);
#ifdef __cplusplus
}
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.cc b/src/core/lib/security/credentials/composite/composite_credentials.cc
index 779300ac07..5eb7f9d09e 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.cc
+++ b/src/core/lib/security/credentials/composite/composite_credentials.cc
@@ -30,32 +30,32 @@
/* -- Composite call credentials. -- */
typedef struct {
- grpc_composite_call_credentials *composite_creds;
+ grpc_composite_call_credentials* composite_creds;
size_t creds_index;
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
grpc_auth_metadata_context auth_md_context;
- grpc_credentials_mdelem_array *md_array;
- grpc_closure *on_request_metadata;
+ grpc_credentials_mdelem_array* md_array;
+ grpc_closure* on_request_metadata;
grpc_closure internal_on_request_metadata;
} grpc_composite_call_credentials_metadata_context;
-static void composite_call_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
+static void composite_call_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
for (size_t i = 0; i < c->inner.num_creds; i++) {
grpc_call_credentials_unref(exec_ctx, c->inner.creds_array[i]);
}
gpr_free(c->inner.creds_array);
}
-static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_composite_call_credentials_metadata_context *ctx =
- (grpc_composite_call_credentials_metadata_context *)arg;
+static void composite_call_metadata_cb(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_composite_call_credentials_metadata_context* ctx =
+ (grpc_composite_call_credentials_metadata_context*)arg;
if (error == GRPC_ERROR_NONE) {
/* See if we need to get some more metadata. */
if (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
- grpc_call_credentials *inner_creds =
+ grpc_call_credentials* inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
if (grpc_call_credentials_get_request_metadata(
exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context,
@@ -73,13 +73,13 @@ static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *arg,
}
static bool composite_call_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context auth_md_context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
- grpc_composite_call_credentials_metadata_context *ctx;
- ctx = (grpc_composite_call_credentials_metadata_context *)gpr_zalloc(
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context auth_md_context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
+ grpc_composite_call_credentials_metadata_context* ctx;
+ ctx = (grpc_composite_call_credentials_metadata_context*)gpr_zalloc(
sizeof(grpc_composite_call_credentials_metadata_context));
ctx->composite_creds = c;
ctx->pollent = pollent;
@@ -90,7 +90,7 @@ static bool composite_call_get_request_metadata(
composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx);
bool synchronous = true;
while (ctx->creds_index < ctx->composite_creds->inner.num_creds) {
- grpc_call_credentials *inner_creds =
+ grpc_call_credentials* inner_creds =
ctx->composite_creds->inner.creds_array[ctx->creds_index++];
if (grpc_call_credentials_get_request_metadata(
exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context,
@@ -106,9 +106,9 @@ static bool composite_call_get_request_metadata(
}
static void composite_call_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
- grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+ grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
for (size_t i = 0; i < c->inner.num_creds; ++i) {
grpc_call_credentials_cancel_get_request_metadata(
exec_ctx, c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error));
@@ -121,9 +121,9 @@ static grpc_call_credentials_vtable composite_call_credentials_vtable = {
composite_call_cancel_get_request_metadata};
static grpc_call_credentials_array get_creds_array(
- grpc_call_credentials **creds_addr) {
+ grpc_call_credentials** creds_addr) {
grpc_call_credentials_array result;
- grpc_call_credentials *creds = *creds_addr;
+ grpc_call_credentials* creds = *creds_addr;
result.creds_array = creds_addr;
result.num_creds = 1;
if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) {
@@ -132,14 +132,14 @@ static grpc_call_credentials_array get_creds_array(
return result;
}
-grpc_call_credentials *grpc_composite_call_credentials_create(
- grpc_call_credentials *creds1, grpc_call_credentials *creds2,
- void *reserved) {
+grpc_call_credentials* grpc_composite_call_credentials_create(
+ grpc_call_credentials* creds1, grpc_call_credentials* creds2,
+ void* reserved) {
size_t i;
size_t creds_array_byte_size;
grpc_call_credentials_array creds1_array;
grpc_call_credentials_array creds2_array;
- grpc_composite_call_credentials *c;
+ grpc_composite_call_credentials* c;
GRPC_API_TRACE(
"grpc_composite_call_credentials_create(creds1=%p, creds2=%p, "
"reserved=%p)",
@@ -147,7 +147,7 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(creds1 != NULL);
GPR_ASSERT(creds2 != NULL);
- c = (grpc_composite_call_credentials *)gpr_zalloc(
+ c = (grpc_composite_call_credentials*)gpr_zalloc(
sizeof(grpc_composite_call_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE;
c->base.vtable = &composite_call_credentials_vtable;
@@ -155,38 +155,38 @@ grpc_call_credentials *grpc_composite_call_credentials_create(
creds1_array = get_creds_array(&creds1);
creds2_array = get_creds_array(&creds2);
c->inner.num_creds = creds1_array.num_creds + creds2_array.num_creds;
- creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials *);
+ creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials*);
c->inner.creds_array =
- (grpc_call_credentials **)gpr_zalloc(creds_array_byte_size);
+ (grpc_call_credentials**)gpr_zalloc(creds_array_byte_size);
for (i = 0; i < creds1_array.num_creds; i++) {
- grpc_call_credentials *cur_creds = creds1_array.creds_array[i];
+ grpc_call_credentials* cur_creds = creds1_array.creds_array[i];
c->inner.creds_array[i] = grpc_call_credentials_ref(cur_creds);
}
for (i = 0; i < creds2_array.num_creds; i++) {
- grpc_call_credentials *cur_creds = creds2_array.creds_array[i];
+ grpc_call_credentials* cur_creds = creds2_array.creds_array[i];
c->inner.creds_array[i + creds1_array.num_creds] =
grpc_call_credentials_ref(cur_creds);
}
return &c->base;
}
-const grpc_call_credentials_array *
-grpc_composite_call_credentials_get_credentials(grpc_call_credentials *creds) {
- const grpc_composite_call_credentials *c =
- (const grpc_composite_call_credentials *)creds;
+const grpc_call_credentials_array*
+grpc_composite_call_credentials_get_credentials(grpc_call_credentials* creds) {
+ const grpc_composite_call_credentials* c =
+ (const grpc_composite_call_credentials*)creds;
GPR_ASSERT(strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0);
return &c->inner;
}
-grpc_call_credentials *grpc_credentials_contains_type(
- grpc_call_credentials *creds, const char *type,
- grpc_call_credentials **composite_creds) {
+grpc_call_credentials* grpc_credentials_contains_type(
+ grpc_call_credentials* creds, const char* type,
+ grpc_call_credentials** composite_creds) {
size_t i;
if (strcmp(creds->type, type) == 0) {
if (composite_creds != NULL) *composite_creds = NULL;
return creds;
} else if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) {
- const grpc_call_credentials_array *inner_creds_array =
+ const grpc_call_credentials_array* inner_creds_array =
grpc_composite_call_credentials_get_credentials(creds);
for (i = 0; i < inner_creds_array->num_creds; i++) {
if (strcmp(type, inner_creds_array->creds_array[i]->type) == 0) {
@@ -200,21 +200,21 @@ grpc_call_credentials *grpc_credentials_contains_type(
/* -- Composite channel credentials. -- */
-static void composite_channel_destruct(grpc_exec_ctx *exec_ctx,
- grpc_channel_credentials *creds) {
- grpc_composite_channel_credentials *c =
- (grpc_composite_channel_credentials *)creds;
+static void composite_channel_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_channel_credentials* creds) {
+ grpc_composite_channel_credentials* c =
+ (grpc_composite_channel_credentials*)creds;
grpc_channel_credentials_unref(exec_ctx, c->inner_creds);
grpc_call_credentials_unref(exec_ctx, c->call_creds);
}
static grpc_security_status composite_channel_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds,
- grpc_call_credentials *call_creds, const char *target,
- const grpc_channel_args *args, grpc_channel_security_connector **sc,
- grpc_channel_args **new_args) {
- grpc_composite_channel_credentials *c =
- (grpc_composite_channel_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
+ grpc_call_credentials* call_creds, const char* target,
+ const grpc_channel_args* args, grpc_channel_security_connector** sc,
+ grpc_channel_args** new_args) {
+ grpc_composite_channel_credentials* c =
+ (grpc_composite_channel_credentials*)creds;
grpc_security_status status = GRPC_SECURITY_ERROR;
GPR_ASSERT(c->inner_creds != NULL && c->call_creds != NULL &&
@@ -223,7 +223,7 @@ static grpc_security_status composite_channel_create_security_connector(
/* If we are passed a call_creds, create a call composite to pass it
downstream. */
if (call_creds != NULL) {
- grpc_call_credentials *composite_call_creds =
+ grpc_call_credentials* composite_call_creds =
grpc_composite_call_credentials_create(c->call_creds, call_creds, NULL);
status = c->inner_creds->vtable->create_security_connector(
exec_ctx, c->inner_creds, composite_call_creds, target, args, sc,
@@ -236,11 +236,11 @@ static grpc_security_status composite_channel_create_security_connector(
return status;
}
-static grpc_channel_credentials *
+static grpc_channel_credentials*
composite_channel_duplicate_without_call_credentials(
- grpc_channel_credentials *creds) {
- grpc_composite_channel_credentials *c =
- (grpc_composite_channel_credentials *)creds;
+ grpc_channel_credentials* creds) {
+ grpc_composite_channel_credentials* c =
+ (grpc_composite_channel_credentials*)creds;
return grpc_channel_credentials_ref(c->inner_creds);
}
@@ -248,11 +248,11 @@ static grpc_channel_credentials_vtable composite_channel_credentials_vtable = {
composite_channel_destruct, composite_channel_create_security_connector,
composite_channel_duplicate_without_call_credentials};
-grpc_channel_credentials *grpc_composite_channel_credentials_create(
- grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds,
- void *reserved) {
- grpc_composite_channel_credentials *c =
- (grpc_composite_channel_credentials *)gpr_zalloc(sizeof(*c));
+grpc_channel_credentials* grpc_composite_channel_credentials_create(
+ grpc_channel_credentials* channel_creds, grpc_call_credentials* call_creds,
+ void* reserved) {
+ grpc_composite_channel_credentials* c =
+ (grpc_composite_channel_credentials*)gpr_zalloc(sizeof(*c));
GPR_ASSERT(channel_creds != NULL && call_creds != NULL && reserved == NULL);
GRPC_API_TRACE(
"grpc_composite_channel_credentials_create(channel_creds=%p, "
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index 6e9f9a8f6f..efb5f4f0c4 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -26,28 +26,28 @@ extern "C" {
#endif
typedef struct {
- grpc_call_credentials **creds_array;
+ grpc_call_credentials** creds_array;
size_t num_creds;
} grpc_call_credentials_array;
-const grpc_call_credentials_array *
+const grpc_call_credentials_array*
grpc_composite_call_credentials_get_credentials(
- grpc_call_credentials *composite_creds);
+ grpc_call_credentials* composite_creds);
/* Returns creds if creds is of the specified type or the inner creds of the
specified type (if found), if the creds is of type COMPOSITE.
If composite_creds is not NULL, *composite_creds will point to creds if of
type COMPOSITE in case of success. */
-grpc_call_credentials *grpc_credentials_contains_type(
- grpc_call_credentials *creds, const char *type,
- grpc_call_credentials **composite_creds);
+grpc_call_credentials* grpc_credentials_contains_type(
+ grpc_call_credentials* creds, const char* type,
+ grpc_call_credentials** composite_creds);
/* -- Composite channel credentials. -- */
typedef struct {
grpc_channel_credentials base;
- grpc_channel_credentials *inner_creds;
- grpc_call_credentials *call_creds;
+ grpc_channel_credentials* inner_creds;
+ grpc_call_credentials* call_creds;
} grpc_composite_channel_credentials;
/* -- Composite call credentials. -- */
@@ -62,4 +62,4 @@ typedef struct {
#endif
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \
- */
+ */
diff --git a/src/core/lib/security/credentials/credentials.cc b/src/core/lib/security/credentials/credentials.cc
index ebbf350865..e60d022f1b 100644
--- a/src/core/lib/security/credentials/credentials.cc
+++ b/src/core/lib/security/credentials/credentials.cc
@@ -37,31 +37,31 @@
/* -- Common. -- */
-grpc_credentials_metadata_request *grpc_credentials_metadata_request_create(
- grpc_call_credentials *creds) {
- grpc_credentials_metadata_request *r =
- (grpc_credentials_metadata_request *)gpr_zalloc(
+grpc_credentials_metadata_request* grpc_credentials_metadata_request_create(
+ grpc_call_credentials* creds) {
+ grpc_credentials_metadata_request* r =
+ (grpc_credentials_metadata_request*)gpr_zalloc(
sizeof(grpc_credentials_metadata_request));
r->creds = grpc_call_credentials_ref(creds);
return r;
}
void grpc_credentials_metadata_request_destroy(
- grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r) {
+ grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* r) {
grpc_call_credentials_unref(exec_ctx, r->creds);
grpc_http_response_destroy(&r->response);
gpr_free(r);
}
-grpc_channel_credentials *grpc_channel_credentials_ref(
- grpc_channel_credentials *creds) {
+grpc_channel_credentials* grpc_channel_credentials_ref(
+ grpc_channel_credentials* creds) {
if (creds == NULL) return NULL;
gpr_ref(&creds->refcount);
return creds;
}
-void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_channel_credentials *creds) {
+void grpc_channel_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_channel_credentials* creds) {
if (creds == NULL) return;
if (gpr_unref(&creds->refcount)) {
if (creds->vtable->destruct != NULL) {
@@ -71,21 +71,21 @@ void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_channel_credentials_release(grpc_channel_credentials *creds) {
+void grpc_channel_credentials_release(grpc_channel_credentials* creds) {
GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_channel_credentials_unref(&exec_ctx, creds);
grpc_exec_ctx_finish(&exec_ctx);
}
-grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds) {
+grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds) {
if (creds == NULL) return NULL;
gpr_ref(&creds->refcount);
return creds;
}
-void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
+void grpc_call_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
if (creds == NULL) return;
if (gpr_unref(&creds->refcount)) {
if (creds->vtable->destruct != NULL) {
@@ -95,7 +95,7 @@ void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_call_credentials_release(grpc_call_credentials *creds) {
+void grpc_call_credentials_release(grpc_call_credentials* creds) {
GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call_credentials_unref(&exec_ctx, creds);
@@ -103,10 +103,10 @@ void grpc_call_credentials_release(grpc_call_credentials *creds) {
}
bool grpc_call_credentials_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error) {
if (creds == NULL || creds->vtable->get_request_metadata == NULL) {
return true;
}
@@ -115,8 +115,8 @@ bool grpc_call_credentials_get_request_metadata(
}
void grpc_call_credentials_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
if (creds == NULL || creds->vtable->cancel_get_request_metadata == NULL) {
return;
}
@@ -124,9 +124,9 @@ void grpc_call_credentials_cancel_get_request_metadata(
}
grpc_security_status grpc_channel_credentials_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds,
- const char *target, const grpc_channel_args *args,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
+ const char* target, const grpc_channel_args* args,
+ grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
*new_args = NULL;
if (channel_creds == NULL) {
return GRPC_SECURITY_ERROR;
@@ -136,9 +136,9 @@ grpc_security_status grpc_channel_credentials_create_security_connector(
exec_ctx, channel_creds, NULL, target, args, sc, new_args);
}
-grpc_channel_credentials *
+grpc_channel_credentials*
grpc_channel_credentials_duplicate_without_call_credentials(
- grpc_channel_credentials *channel_creds) {
+ grpc_channel_credentials* channel_creds) {
if (channel_creds != NULL && channel_creds->vtable != NULL &&
channel_creds->vtable->duplicate_without_call_credentials != NULL) {
return channel_creds->vtable->duplicate_without_call_credentials(
@@ -148,59 +148,59 @@ grpc_channel_credentials_duplicate_without_call_credentials(
}
}
-static void credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_channel_credentials_unref(exec_ctx, (grpc_channel_credentials *)p);
+static void credentials_pointer_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ grpc_channel_credentials_unref(exec_ctx, (grpc_channel_credentials*)p);
}
-static void *credentials_pointer_arg_copy(void *p) {
- return grpc_channel_credentials_ref((grpc_channel_credentials *)p);
+static void* credentials_pointer_arg_copy(void* p) {
+ return grpc_channel_credentials_ref((grpc_channel_credentials*)p);
}
-static int credentials_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }
+static int credentials_pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
static const grpc_arg_pointer_vtable credentials_pointer_vtable = {
credentials_pointer_arg_copy, credentials_pointer_arg_destroy,
credentials_pointer_cmp};
grpc_arg grpc_channel_credentials_to_arg(
- grpc_channel_credentials *credentials) {
- return grpc_channel_arg_pointer_create((char *)GRPC_ARG_CHANNEL_CREDENTIALS,
+ grpc_channel_credentials* credentials) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CHANNEL_CREDENTIALS,
credentials,
&credentials_pointer_vtable);
}
-grpc_channel_credentials *grpc_channel_credentials_from_arg(
- const grpc_arg *arg) {
+grpc_channel_credentials* grpc_channel_credentials_from_arg(
+ const grpc_arg* arg) {
if (strcmp(arg->key, GRPC_ARG_CHANNEL_CREDENTIALS)) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_ARG_CHANNEL_CREDENTIALS);
return NULL;
}
- return (grpc_channel_credentials *)arg->value.pointer.p;
+ return (grpc_channel_credentials*)arg->value.pointer.p;
}
-grpc_channel_credentials *grpc_channel_credentials_find_in_args(
- const grpc_channel_args *args) {
+grpc_channel_credentials* grpc_channel_credentials_find_in_args(
+ const grpc_channel_args* args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
- grpc_channel_credentials *credentials =
+ grpc_channel_credentials* credentials =
grpc_channel_credentials_from_arg(&args->args[i]);
if (credentials != NULL) return credentials;
}
return NULL;
}
-grpc_server_credentials *grpc_server_credentials_ref(
- grpc_server_credentials *creds) {
+grpc_server_credentials* grpc_server_credentials_ref(
+ grpc_server_credentials* creds) {
if (creds == NULL) return NULL;
gpr_ref(&creds->refcount);
return creds;
}
-void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_server_credentials *creds) {
+void grpc_server_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_server_credentials* creds) {
if (creds == NULL) return;
if (gpr_unref(&creds->refcount)) {
if (creds->vtable->destruct != NULL) {
@@ -213,7 +213,7 @@ void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_server_credentials_release(grpc_server_credentials *creds) {
+void grpc_server_credentials_release(grpc_server_credentials* creds) {
GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_server_credentials_unref(&exec_ctx, creds);
@@ -221,8 +221,8 @@ void grpc_server_credentials_release(grpc_server_credentials *creds) {
}
grpc_security_status grpc_server_credentials_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
- grpc_server_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
+ grpc_server_security_connector** sc) {
if (creds == NULL || creds->vtable->create_security_connector == NULL) {
gpr_log(GPR_ERROR, "Server credentials cannot create security context.");
return GRPC_SECURITY_ERROR;
@@ -231,12 +231,12 @@ grpc_security_status grpc_server_credentials_create_security_connector(
}
void grpc_server_credentials_set_auth_metadata_processor(
- grpc_server_credentials *creds, grpc_auth_metadata_processor processor) {
+ grpc_server_credentials* creds, grpc_auth_metadata_processor processor) {
GRPC_API_TRACE(
"grpc_server_credentials_set_auth_metadata_processor("
"creds=%p, "
"processor=grpc_auth_metadata_processor { process: %p, state: %p })",
- 3, (creds, (void *)(intptr_t)processor.process, processor.state));
+ 3, (creds, (void*)(intptr_t)processor.process, processor.state));
if (creds == NULL) return;
if (creds->processor.destroy != NULL && creds->processor.state != NULL) {
creds->processor.destroy(creds->processor.state);
@@ -244,16 +244,16 @@ void grpc_server_credentials_set_auth_metadata_processor(
creds->processor = processor;
}
-static void server_credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx,
- void *p) {
- grpc_server_credentials_unref(exec_ctx, (grpc_server_credentials *)p);
+static void server_credentials_pointer_arg_destroy(grpc_exec_ctx* exec_ctx,
+ void* p) {
+ grpc_server_credentials_unref(exec_ctx, (grpc_server_credentials*)p);
}
-static void *server_credentials_pointer_arg_copy(void *p) {
- return grpc_server_credentials_ref((grpc_server_credentials *)p);
+static void* server_credentials_pointer_arg_copy(void* p) {
+ return grpc_server_credentials_ref((grpc_server_credentials*)p);
}
-static int server_credentials_pointer_cmp(void *a, void *b) {
+static int server_credentials_pointer_cmp(void* a, void* b) {
return GPR_ICMP(a, b);
}
@@ -261,27 +261,27 @@ static const grpc_arg_pointer_vtable cred_ptr_vtable = {
server_credentials_pointer_arg_copy, server_credentials_pointer_arg_destroy,
server_credentials_pointer_cmp};
-grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
- return grpc_channel_arg_pointer_create((char *)GRPC_SERVER_CREDENTIALS_ARG, p,
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials* p) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_SERVER_CREDENTIALS_ARG, p,
&cred_ptr_vtable);
}
-grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) {
+grpc_server_credentials* grpc_server_credentials_from_arg(const grpc_arg* arg) {
if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_SERVER_CREDENTIALS_ARG);
return NULL;
}
- return (grpc_server_credentials *)arg->value.pointer.p;
+ return (grpc_server_credentials*)arg->value.pointer.p;
}
-grpc_server_credentials *grpc_find_server_credentials_in_args(
- const grpc_channel_args *args) {
+grpc_server_credentials* grpc_find_server_credentials_in_args(
+ const grpc_channel_args* args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
- grpc_server_credentials *p =
+ grpc_server_credentials* p =
grpc_server_credentials_from_arg(&args->args[i]);
if (p != NULL) return p;
}
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index 73e39ae039..c65b9660ea 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -77,13 +77,13 @@ typedef enum {
/* --- Google utils --- */
/* It is the caller's responsibility to gpr_free the result if not NULL. */
-char *grpc_get_well_known_google_credentials_file_path(void);
+char* grpc_get_well_known_google_credentials_file_path(void);
/* Implementation function for the different platforms. */
-char *grpc_get_well_known_google_credentials_file_path_impl(void);
+char* grpc_get_well_known_google_credentials_file_path_impl(void);
/* Override for testing only. Not thread-safe */
-typedef char *(*grpc_well_known_credentials_path_getter)(void);
+typedef char* (*grpc_well_known_credentials_path_getter)(void);
void grpc_override_well_known_credentials_path_getter(
grpc_well_known_credentials_path_getter getter);
@@ -92,169 +92,169 @@ void grpc_override_well_known_credentials_path_getter(
#define GRPC_ARG_CHANNEL_CREDENTIALS "grpc.channel_credentials"
typedef struct {
- void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c);
+ void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c);
grpc_security_status (*create_security_connector)(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c,
- grpc_call_credentials *call_creds, const char *target,
- const grpc_channel_args *args, grpc_channel_security_connector **sc,
- grpc_channel_args **new_args);
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
+ grpc_call_credentials* call_creds, const char* target,
+ const grpc_channel_args* args, grpc_channel_security_connector** sc,
+ grpc_channel_args** new_args);
- grpc_channel_credentials *(*duplicate_without_call_credentials)(
- grpc_channel_credentials *c);
+ grpc_channel_credentials* (*duplicate_without_call_credentials)(
+ grpc_channel_credentials* c);
} grpc_channel_credentials_vtable;
struct grpc_channel_credentials {
- const grpc_channel_credentials_vtable *vtable;
- const char *type;
+ const grpc_channel_credentials_vtable* vtable;
+ const char* type;
gpr_refcount refcount;
};
-grpc_channel_credentials *grpc_channel_credentials_ref(
- grpc_channel_credentials *creds);
-void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_channel_credentials *creds);
+grpc_channel_credentials* grpc_channel_credentials_ref(
+ grpc_channel_credentials* creds);
+void grpc_channel_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_channel_credentials* creds);
/* Creates a security connector for the channel. May also create new channel
args for the channel to be used in place of the passed in const args if
returned non NULL. In that case the caller is responsible for destroying
new_args after channel creation. */
grpc_security_status grpc_channel_credentials_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds,
- const char *target, const grpc_channel_args *args,
- grpc_channel_security_connector **sc, grpc_channel_args **new_args);
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
+ const char* target, const grpc_channel_args* args,
+ grpc_channel_security_connector** sc, grpc_channel_args** new_args);
/* Creates a version of the channel credentials without any attached call
credentials. This can be used in order to open a channel to a non-trusted
gRPC load balancer. */
-grpc_channel_credentials *
+grpc_channel_credentials*
grpc_channel_credentials_duplicate_without_call_credentials(
- grpc_channel_credentials *creds);
+ grpc_channel_credentials* creds);
/* Util to encapsulate the channel credentials in a channel arg. */
-grpc_arg grpc_channel_credentials_to_arg(grpc_channel_credentials *credentials);
+grpc_arg grpc_channel_credentials_to_arg(grpc_channel_credentials* credentials);
/* Util to get the channel credentials from a channel arg. */
-grpc_channel_credentials *grpc_channel_credentials_from_arg(
- const grpc_arg *arg);
+grpc_channel_credentials* grpc_channel_credentials_from_arg(
+ const grpc_arg* arg);
/* Util to find the channel credentials from channel args. */
-grpc_channel_credentials *grpc_channel_credentials_find_in_args(
- const grpc_channel_args *args);
+grpc_channel_credentials* grpc_channel_credentials_find_in_args(
+ const grpc_channel_args* args);
/* --- grpc_credentials_mdelem_array. --- */
typedef struct {
- grpc_mdelem *md;
+ grpc_mdelem* md;
size_t size;
} grpc_credentials_mdelem_array;
/// Takes a new ref to \a md.
-void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list,
+void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array* list,
grpc_mdelem md);
/// Appends all elements from \a src to \a dst, taking a new ref to each one.
-void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst,
- grpc_credentials_mdelem_array *src);
+void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array* dst,
+ grpc_credentials_mdelem_array* src);
-void grpc_credentials_mdelem_array_destroy(grpc_exec_ctx *exec_ctx,
- grpc_credentials_mdelem_array *list);
+void grpc_credentials_mdelem_array_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_credentials_mdelem_array* list);
/* --- grpc_call_credentials. --- */
typedef struct {
- void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_call_credentials *c);
- bool (*get_request_metadata)(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *c,
- grpc_polling_entity *pollent,
+ void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_call_credentials* c);
+ bool (*get_request_metadata)(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* c,
+ grpc_polling_entity* pollent,
grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array,
- grpc_closure *on_request_metadata,
- grpc_error **error);
- void (*cancel_get_request_metadata)(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array,
- grpc_error *error);
+ grpc_credentials_mdelem_array* md_array,
+ grpc_closure* on_request_metadata,
+ grpc_error** error);
+ void (*cancel_get_request_metadata)(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array,
+ grpc_error* error);
} grpc_call_credentials_vtable;
struct grpc_call_credentials {
- const grpc_call_credentials_vtable *vtable;
- const char *type;
+ const grpc_call_credentials_vtable* vtable;
+ const char* type;
gpr_refcount refcount;
};
-grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds);
-void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds);
+grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds);
+void grpc_call_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds);
/// Returns true if completed synchronously, in which case \a error will
/// be set to indicate the result. Otherwise, \a on_request_metadata will
/// be invoked asynchronously when complete. \a md_array will be populated
/// with the resulting metadata once complete.
bool grpc_call_credentials_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error);
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error);
/// Cancels a pending asynchronous operation started by
/// grpc_call_credentials_get_request_metadata() with the corresponding
/// value of \a md_array.
void grpc_call_credentials_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array, grpc_error *error);
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error);
/* Metadata-only credentials with the specified key and value where
asynchronicity can be simulated for testing. */
-grpc_call_credentials *grpc_md_only_test_credentials_create(
- grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value,
+grpc_call_credentials* grpc_md_only_test_credentials_create(
+ grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
bool is_async);
/* --- grpc_server_credentials. --- */
typedef struct {
- void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_server_credentials *c);
+ void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_server_credentials* c);
grpc_security_status (*create_security_connector)(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *c,
- grpc_server_security_connector **sc);
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* c,
+ grpc_server_security_connector** sc);
} grpc_server_credentials_vtable;
struct grpc_server_credentials {
- const grpc_server_credentials_vtable *vtable;
- const char *type;
+ const grpc_server_credentials_vtable* vtable;
+ const char* type;
gpr_refcount refcount;
grpc_auth_metadata_processor processor;
};
grpc_security_status grpc_server_credentials_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
- grpc_server_security_connector **sc);
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
+ grpc_server_security_connector** sc);
-grpc_server_credentials *grpc_server_credentials_ref(
- grpc_server_credentials *creds);
+grpc_server_credentials* grpc_server_credentials_ref(
+ grpc_server_credentials* creds);
-void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx,
- grpc_server_credentials *creds);
+void grpc_server_credentials_unref(grpc_exec_ctx* exec_ctx,
+ grpc_server_credentials* creds);
#define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials"
-grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *c);
-grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg);
-grpc_server_credentials *grpc_find_server_credentials_in_args(
- const grpc_channel_args *args);
+grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials* c);
+grpc_server_credentials* grpc_server_credentials_from_arg(const grpc_arg* arg);
+grpc_server_credentials* grpc_find_server_credentials_in_args(
+ const grpc_channel_args* args);
/* -- Credentials Metadata Request. -- */
typedef struct {
- grpc_call_credentials *creds;
+ grpc_call_credentials* creds;
grpc_http_response response;
} grpc_credentials_metadata_request;
-grpc_credentials_metadata_request *grpc_credentials_metadata_request_create(
- grpc_call_credentials *creds);
+grpc_credentials_metadata_request* grpc_credentials_metadata_request_create(
+ grpc_call_credentials* creds);
void grpc_credentials_metadata_request_destroy(
- grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r);
+ grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* r);
#ifdef __cplusplus
}
diff --git a/src/core/lib/security/credentials/credentials_metadata.cc b/src/core/lib/security/credentials/credentials_metadata.cc
index 5ba98bda4e..a3623fa1d6 100644
--- a/src/core/lib/security/credentials/credentials_metadata.cc
+++ b/src/core/lib/security/credentials/credentials_metadata.cc
@@ -24,7 +24,7 @@
#include "src/core/lib/slice/slice_internal.h"
-static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array *list,
+static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array* list,
size_t additional_space_needed) {
size_t target_size = list->size + additional_space_needed;
// Find the next power of two greater than the target size (i.e.,
@@ -34,17 +34,17 @@ static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array *list,
new_size *= 2;
}
list->md =
- (grpc_mdelem *)gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size);
+ (grpc_mdelem*)gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size);
}
-void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list,
+void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array* list,
grpc_mdelem md) {
mdelem_list_ensure_capacity(list, 1);
list->md[list->size++] = GRPC_MDELEM_REF(md);
}
-void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst,
- grpc_credentials_mdelem_array *src) {
+void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array* dst,
+ grpc_credentials_mdelem_array* src) {
mdelem_list_ensure_capacity(dst, src->size);
for (size_t i = 0; i < src->size; ++i) {
dst->md[dst->size++] = GRPC_MDELEM_REF(src->md[i]);
@@ -52,7 +52,7 @@ void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst,
}
void grpc_credentials_mdelem_array_destroy(
- grpc_exec_ctx *exec_ctx, grpc_credentials_mdelem_array *list) {
+ grpc_exec_ctx* exec_ctx, grpc_credentials_mdelem_array* list) {
for (size_t i = 0; i < list->size; ++i) {
GRPC_MDELEM_UNREF(exec_ctx, list->md[i]);
}
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.cc b/src/core/lib/security/credentials/fake/fake_credentials.cc
index cf10bf24c8..17700f5651 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.cc
+++ b/src/core/lib/security/credentials/fake/fake_credentials.cc
@@ -34,10 +34,10 @@
"grpc.fake_security.expected_targets"
static grpc_security_status fake_transport_security_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c,
- grpc_call_credentials *call_creds, const char *target,
- const grpc_channel_args *args, grpc_channel_security_connector **sc,
- grpc_channel_args **new_args) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
+ grpc_call_credentials* call_creds, const char* target,
+ const grpc_channel_args* args, grpc_channel_security_connector** sc,
+ grpc_channel_args** new_args) {
*sc =
grpc_fake_channel_security_connector_create(c, call_creds, target, args);
return GRPC_SECURITY_OK;
@@ -45,8 +45,8 @@ static grpc_security_status fake_transport_security_create_security_connector(
static grpc_security_status
fake_transport_security_server_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *c,
- grpc_server_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* c,
+ grpc_server_security_connector** sc) {
*sc = grpc_fake_server_security_connector_create(c);
return GRPC_SECURITY_OK;
}
@@ -59,20 +59,20 @@ static grpc_server_credentials_vtable
fake_transport_security_server_credentials_vtable = {
NULL, fake_transport_security_server_create_security_connector};
-grpc_channel_credentials *grpc_fake_transport_security_credentials_create(
+grpc_channel_credentials* grpc_fake_transport_security_credentials_create(
void) {
- grpc_channel_credentials *c =
- (grpc_channel_credentials *)gpr_zalloc(sizeof(grpc_channel_credentials));
+ grpc_channel_credentials* c =
+ (grpc_channel_credentials*)gpr_zalloc(sizeof(grpc_channel_credentials));
c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
c->vtable = &fake_transport_security_credentials_vtable;
gpr_ref_init(&c->refcount, 1);
return c;
}
-grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
+grpc_server_credentials* grpc_fake_transport_security_server_credentials_create(
void) {
- grpc_server_credentials *c =
- (grpc_server_credentials *)gpr_malloc(sizeof(grpc_server_credentials));
+ grpc_server_credentials* c =
+ (grpc_server_credentials*)gpr_malloc(sizeof(grpc_server_credentials));
memset(c, 0, sizeof(grpc_server_credentials));
c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY;
gpr_ref_init(&c->refcount, 1);
@@ -80,14 +80,14 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
return c;
}
-grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets) {
+grpc_arg grpc_fake_transport_expected_targets_arg(char* expected_targets) {
return grpc_channel_arg_string_create(
- (char *)GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
+ (char*)GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
}
-const char *grpc_fake_transport_get_expected_targets(
- const grpc_channel_args *args) {
- const grpc_arg *expected_target_arg =
+const char* grpc_fake_transport_get_expected_targets(
+ const grpc_channel_args* args) {
+ const grpc_arg* expected_target_arg =
grpc_channel_args_find(args, GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS);
if (expected_target_arg != NULL &&
expected_target_arg->type == GRPC_ARG_STRING) {
@@ -98,18 +98,18 @@ const char *grpc_fake_transport_get_expected_targets(
/* -- Metadata-only test credentials. -- */
-static void md_only_test_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
+static void md_only_test_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)creds;
GRPC_MDELEM_UNREF(exec_ctx, c->md);
}
static bool md_only_test_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)creds;
grpc_credentials_mdelem_array_add(md_array, c->md);
if (c->is_async) {
GRPC_CLOSURE_SCHED(exec_ctx, on_request_metadata, GRPC_ERROR_NONE);
@@ -119,8 +119,8 @@ static bool md_only_test_get_request_metadata(
}
static void md_only_test_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
@@ -128,12 +128,11 @@ static grpc_call_credentials_vtable md_only_test_vtable = {
md_only_test_destruct, md_only_test_get_request_metadata,
md_only_test_cancel_get_request_metadata};
-grpc_call_credentials *grpc_md_only_test_credentials_create(
- grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value,
+grpc_call_credentials* grpc_md_only_test_credentials_create(
+ grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
bool is_async) {
- grpc_md_only_test_credentials *c =
- (grpc_md_only_test_credentials *)gpr_zalloc(
- sizeof(grpc_md_only_test_credentials));
+ grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)gpr_zalloc(
+ sizeof(grpc_md_only_test_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &md_only_test_vtable;
gpr_ref_init(&c->base.refcount, 1);
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.h b/src/core/lib/security/credentials/fake/fake_credentials.h
index ed3f893c58..b8b58cc8fd 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.h
+++ b/src/core/lib/security/credentials/fake/fake_credentials.h
@@ -28,10 +28,10 @@ extern "C" {
/* -- Fake transport security credentials. -- */
/* Creates a fake transport security credentials object for testing. */
-grpc_channel_credentials *grpc_fake_transport_security_credentials_create(void);
+grpc_channel_credentials* grpc_fake_transport_security_credentials_create(void);
/* Creates a fake server transport security credentials object for testing. */
-grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
+grpc_server_credentials* grpc_fake_transport_security_server_credentials_create(
void);
/* Used to verify the target names given to the fake transport security
@@ -46,11 +46,11 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create(
* That is to say, LB channels have a heading list of LB targets separated from
* the list of backend targets by a semicolon. For non-LB channels, only the
* latter is present. */
-grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets);
+grpc_arg grpc_fake_transport_expected_targets_arg(char* expected_targets);
/* Return the value associated with the expected targets channel arg or NULL */
-const char *grpc_fake_transport_get_expected_targets(
- const grpc_channel_args *args);
+const char* grpc_fake_transport_get_expected_targets(
+ const grpc_channel_args* args);
/* -- Metadata-only Test credentials. -- */
diff --git a/src/core/lib/security/credentials/google_default/credentials_generic.cc b/src/core/lib/security/credentials/google_default/credentials_generic.cc
index 4f79718f3d..c2a336ff07 100644
--- a/src/core/lib/security/credentials/google_default/credentials_generic.cc
+++ b/src/core/lib/security/credentials/google_default/credentials_generic.cc
@@ -25,9 +25,9 @@
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
-char *grpc_get_well_known_google_credentials_file_path_impl(void) {
- char *result = NULL;
- char *base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
+char* grpc_get_well_known_google_credentials_file_path_impl(void) {
+ char* result = NULL;
+ char* base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
if (base == NULL) {
gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_ENV_VAR
" environment variable.");
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index 5b2ddceb4a..3ce19e9a05 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -43,10 +43,10 @@
/* -- Default credentials. -- */
-static grpc_channel_credentials *default_credentials = NULL;
+static grpc_channel_credentials* default_credentials = NULL;
static int compute_engine_detection_done = 0;
static gpr_mu g_state_mu;
-static gpr_mu *g_polling_mu;
+static gpr_mu* g_polling_mu;
static gpr_once g_once = GPR_ONCE_INIT;
static void init_default_credentials(void) { gpr_mu_init(&g_state_mu); }
@@ -58,17 +58,17 @@ typedef struct {
grpc_http_response response;
} compute_engine_detector;
-static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_error *error) {
- compute_engine_detector *detector = (compute_engine_detector *)user_data;
+static void on_compute_engine_detection_http_response(grpc_exec_ctx* exec_ctx,
+ void* user_data,
+ grpc_error* error) {
+ compute_engine_detector* detector = (compute_engine_detector*)user_data;
if (error == GRPC_ERROR_NONE && detector->response.status == 200 &&
detector->response.hdr_count > 0) {
/* Internet providers can return a generic response to all requests, so
it is necessary to check that metadata header is present also. */
size_t i;
for (i = 0; i < detector->response.hdr_count; i++) {
- grpc_http_header *header = &detector->response.hdrs[i];
+ grpc_http_header* header = &detector->response.hdrs[i];
if (strcmp(header->key, "Metadata-Flavor") == 0 &&
strcmp(header->value, "Google") == 0) {
detector->success = 1;
@@ -85,11 +85,11 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(g_polling_mu);
}
-static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, grpc_error *e) {
- grpc_pollset_destroy(exec_ctx, (grpc_pollset *)p);
+static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p, grpc_error* e) {
+ grpc_pollset_destroy(exec_ctx, (grpc_pollset*)p);
}
-static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
+static int is_stack_running_on_compute_engine(grpc_exec_ctx* exec_ctx) {
compute_engine_detector detector;
grpc_httpcli_request request;
grpc_httpcli_context context;
@@ -99,7 +99,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
on compute engine. */
grpc_millis max_detection_delay = GPR_MS_PER_SEC;
- grpc_pollset *pollset = (grpc_pollset *)gpr_zalloc(grpc_pollset_size());
+ grpc_pollset* pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
grpc_pollset_init(pollset, &g_polling_mu);
detector.pollent = grpc_polling_entity_create_from_pollset(pollset);
detector.is_done = 0;
@@ -107,12 +107,12 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
memset(&detector.response, 0, sizeof(detector.response));
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = (char *)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
- request.http.path = (char *)"/";
+ request.host = (char*)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
+ request.http.path = (char*)"/";
grpc_httpcli_context_init(&context);
- grpc_resource_quota *resource_quota =
+ grpc_resource_quota* resource_quota =
grpc_resource_quota_create("google_default_credentials");
grpc_httpcli_get(
exec_ctx, &context, &detector.pollent, resource_quota, &request,
@@ -128,7 +128,7 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(g_polling_mu);
while (!detector.is_done) {
- grpc_pollset_worker *worker = NULL;
+ grpc_pollset_worker* worker = NULL;
if (!GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(exec_ctx,
@@ -157,14 +157,14 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) {
}
/* Takes ownership of creds_path if not NULL. */
-static grpc_error *create_default_creds_from_path(
- grpc_exec_ctx *exec_ctx, char *creds_path, grpc_call_credentials **creds) {
- grpc_json *json = NULL;
+static grpc_error* create_default_creds_from_path(
+ grpc_exec_ctx* exec_ctx, char* creds_path, grpc_call_credentials** creds) {
+ grpc_json* json = NULL;
grpc_auth_json_key key;
grpc_auth_refresh_token token;
- grpc_call_credentials *result = NULL;
+ grpc_call_credentials* result = NULL;
grpc_slice creds_data = grpc_empty_slice();
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (creds_path == NULL) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("creds_path unset");
goto end;
@@ -174,7 +174,7 @@ static grpc_error *create_default_creds_from_path(
goto end;
}
json = grpc_json_parse_string_with_len(
- (char *)GRPC_SLICE_START_PTR(creds_data), GRPC_SLICE_LENGTH(creds_data));
+ (char*)GRPC_SLICE_START_PTR(creds_data), GRPC_SLICE_LENGTH(creds_data));
if (json == NULL) {
error = grpc_error_set_str(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to parse JSON"),
@@ -218,12 +218,12 @@ end:
return error;
}
-grpc_channel_credentials *grpc_google_default_credentials_create(void) {
- grpc_channel_credentials *result = NULL;
- grpc_call_credentials *call_creds = NULL;
- grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+grpc_channel_credentials* grpc_google_default_credentials_create(void) {
+ grpc_channel_credentials* result = NULL;
+ grpc_call_credentials* call_creds = NULL;
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to create Google credentials");
- grpc_error *err;
+ grpc_error* err;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE("grpc_google_default_credentials_create(void)", 0, ());
@@ -272,7 +272,7 @@ end:
/* Blend with default ssl credentials and add a global reference so that
it
can be cached and re-served. */
- grpc_channel_credentials *ssl_creds =
+ grpc_channel_credentials* ssl_creds =
grpc_ssl_credentials_create(NULL, NULL, NULL);
default_credentials = grpc_channel_credentials_ref(
grpc_composite_channel_credentials_create(ssl_creds, call_creds,
@@ -312,7 +312,7 @@ void grpc_flush_cached_google_default_credentials(void) {
static grpc_well_known_credentials_path_getter creds_path_getter = NULL;
-char *grpc_get_well_known_google_credentials_file_path(void) {
+char* grpc_get_well_known_google_credentials_file_path(void) {
if (creds_path_getter != NULL) return creds_path_getter();
return grpc_get_well_known_google_credentials_file_path_impl();
}
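
Note on the detection logic above: the credentials code treats a host as running on Compute Engine only when the metadata server answers 200 and also echoes the Metadata-Flavor: Google header, because a captive portal or ISP proxy can return a generic 200 for any request. A minimal standalone sketch of that header check follows (plain C; the http_header struct and function name are illustrative stand-ins, not the real grpc_http_response types):

    #include <string.h>

    typedef struct {
      const char* key;
      const char* value;
    } http_header;

    /* Returns 1 only when the response carries "Metadata-Flavor: Google",
       mirroring the test in on_compute_engine_detection_http_response. */
    static int looks_like_gce_metadata(int status, const http_header* hdrs,
                                       size_t hdr_count) {
      if (status != 200) return 0;
      for (size_t i = 0; i < hdr_count; i++) {
        if (strcmp(hdrs[i].key, "Metadata-Flavor") == 0 &&
            strcmp(hdrs[i].value, "Google") == 0) {
          return 1;
        }
      }
      return 0;
    }
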
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index 66677873ca..a0f8dc954e 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -50,4 +50,4 @@ void grpc_flush_cached_google_default_credentials(void);
#endif
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
- */
+ */
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.cc b/src/core/lib/security/credentials/iam/iam_credentials.cc
index e9cf208c16..7410294a20 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.cc
+++ b/src/core/lib/security/credentials/iam/iam_credentials.cc
@@ -27,35 +27,35 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-static void iam_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
+static void iam_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_google_iam_credentials* c = (grpc_google_iam_credentials*)creds;
grpc_credentials_mdelem_array_destroy(exec_ctx, &c->md_array);
}
-static bool iam_get_request_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds,
- grpc_polling_entity *pollent,
+static bool iam_get_request_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds,
+ grpc_polling_entity* pollent,
grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array,
- grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds;
+ grpc_credentials_mdelem_array* md_array,
+ grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_google_iam_credentials* c = (grpc_google_iam_credentials*)creds;
grpc_credentials_mdelem_array_append(md_array, &c->md_array);
return true;
}
static void iam_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
static grpc_call_credentials_vtable iam_vtable = {
iam_destruct, iam_get_request_metadata, iam_cancel_get_request_metadata};
-grpc_call_credentials *grpc_google_iam_credentials_create(
- const char *token, const char *authority_selector, void *reserved) {
+grpc_call_credentials* grpc_google_iam_credentials_create(
+ const char* token, const char* authority_selector, void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE(
"grpc_iam_credentials_create(token=%s, authority_selector=%s, "
@@ -64,8 +64,8 @@ grpc_call_credentials *grpc_google_iam_credentials_create(
GPR_ASSERT(reserved == NULL);
GPR_ASSERT(token != NULL);
GPR_ASSERT(authority_selector != NULL);
- grpc_google_iam_credentials *c =
- (grpc_google_iam_credentials *)gpr_zalloc(sizeof(*c));
+ grpc_google_iam_credentials* c =
+ (grpc_google_iam_credentials*)gpr_zalloc(sizeof(*c));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_IAM;
c->base.vtable = &iam_vtable;
gpr_ref_init(&c->base.refcount, 1);
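
The IAM credentials in this file simply stash a pre-built metadata element (token plus authority selector) at creation time and replay it on every call. A hedged usage sketch with the public C API is below; the token and selector strings are hypothetical, and error handling is omitted for brevity:

    #include <grpc/grpc.h>
    #include <grpc/grpc_security.h>

    /* Attach IAM credentials to an existing call object. */
    void attach_iam_creds(grpc_call* call) {
      grpc_call_credentials* creds = grpc_google_iam_credentials_create(
          "my-iam-token" /* hypothetical token */,
          "my-authority-selector" /* hypothetical selector */, NULL);
      grpc_call_set_credentials(call, creds);
      grpc_call_credentials_release(creds);
    }
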
diff --git a/src/core/lib/security/credentials/jwt/json_token.cc b/src/core/lib/security/credentials/jwt/json_token.cc
index 8c30353470..e195ec7509 100644
--- a/src/core/lib/security/credentials/jwt/json_token.cc
+++ b/src/core/lib/security/credentials/jwt/json_token.cc
@@ -56,15 +56,15 @@ static grpc_jwt_encode_and_sign_override g_jwt_encode_and_sign_override = NULL;
/* --- grpc_auth_json_key. --- */
-int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key) {
+int grpc_auth_json_key_is_valid(const grpc_auth_json_key* json_key) {
return (json_key != NULL) &&
strcmp(json_key->type, GRPC_AUTH_JSON_TYPE_INVALID);
}
-grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) {
+grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json* json) {
grpc_auth_json_key result;
- BIO *bio = NULL;
- const char *prop_value;
+ BIO* bio = NULL;
+ const char* prop_value;
int success = 0;
memset(&result, 0, sizeof(grpc_auth_json_key));
@@ -99,7 +99,7 @@ grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) {
gpr_log(GPR_ERROR, "Could not write into openssl BIO.");
goto end;
}
- result.private_key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, (void *)"");
+ result.private_key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, (void*)"");
if (result.private_key == NULL) {
gpr_log(GPR_ERROR, "Could not deserialize private key.");
goto end;
@@ -113,16 +113,16 @@ end:
}
grpc_auth_json_key grpc_auth_json_key_create_from_string(
- const char *json_string) {
- char *scratchpad = gpr_strdup(json_string);
- grpc_json *json = grpc_json_parse_string(scratchpad);
+ const char* json_string) {
+ char* scratchpad = gpr_strdup(json_string);
+ grpc_json* json = grpc_json_parse_string(scratchpad);
grpc_auth_json_key result = grpc_auth_json_key_create_from_json(json);
if (json != NULL) grpc_json_destroy(json);
gpr_free(scratchpad);
return result;
}
-void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key) {
+void grpc_auth_json_key_destruct(grpc_auth_json_key* json_key) {
if (json_key == NULL) return;
json_key->type = GRPC_AUTH_JSON_TYPE_INVALID;
if (json_key->client_id != NULL) {
@@ -145,10 +145,10 @@ void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key) {
/* --- jwt encoding and signature. --- */
-static grpc_json *create_child(grpc_json *brother, grpc_json *parent,
- const char *key, const char *value,
+static grpc_json* create_child(grpc_json* brother, grpc_json* parent,
+ const char* key, const char* value,
grpc_json_type type) {
- grpc_json *child = grpc_json_create(type);
+ grpc_json* child = grpc_json_create(type);
if (brother) brother->next = child;
if (!parent->child) parent->child = child;
child->parent = parent;
@@ -157,11 +157,11 @@ static grpc_json *create_child(grpc_json *brother, grpc_json *parent,
return child;
}
-static char *encoded_jwt_header(const char *key_id, const char *algorithm) {
- grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT);
- grpc_json *child = NULL;
- char *json_str = NULL;
- char *result = NULL;
+static char* encoded_jwt_header(const char* key_id, const char* algorithm) {
+ grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT);
+ grpc_json* child = NULL;
+ char* json_str = NULL;
+ char* result = NULL;
child = create_child(NULL, json, "alg", algorithm, GRPC_JSON_STRING);
child = create_child(child, json, "typ", GRPC_JWT_TYPE, GRPC_JSON_STRING);
@@ -174,13 +174,13 @@ static char *encoded_jwt_header(const char *key_id, const char *algorithm) {
return result;
}
-static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
- const char *audience,
- gpr_timespec token_lifetime, const char *scope) {
- grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT);
- grpc_json *child = NULL;
- char *json_str = NULL;
- char *result = NULL;
+static char* encoded_jwt_claim(const grpc_auth_json_key* json_key,
+ const char* audience,
+ gpr_timespec token_lifetime, const char* scope) {
+ grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT);
+ grpc_json* child = NULL;
+ char* json_str = NULL;
+ char* result = NULL;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
gpr_timespec expiration = gpr_time_add(now, token_lifetime);
char now_str[GPR_LTOA_MIN_BUFSIZE];
@@ -213,12 +213,12 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
return result;
}
-static char *dot_concat_and_free_strings(char *str1, char *str2) {
+static char* dot_concat_and_free_strings(char* str1, char* str2) {
size_t str1_len = strlen(str1);
size_t str2_len = strlen(str2);
size_t result_len = str1_len + 1 /* dot */ + str2_len;
- char *result = (char *)gpr_malloc(result_len + 1 /* NULL terminated */);
- char *current = result;
+ char* result = (char*)gpr_malloc(result_len + 1 /* NULL terminated */);
+ char* current = result;
memcpy(current, str1, str1_len);
current += str1_len;
*(current++) = '.';
@@ -232,7 +232,7 @@ static char *dot_concat_and_free_strings(char *str1, char *str2) {
return result;
}
-const EVP_MD *openssl_digest_from_algorithm(const char *algorithm) {
+const EVP_MD* openssl_digest_from_algorithm(const char* algorithm) {
if (strcmp(algorithm, GRPC_JWT_RSA_SHA256_ALGORITHM) == 0) {
return EVP_sha256();
} else {
@@ -241,15 +241,15 @@ const EVP_MD *openssl_digest_from_algorithm(const char *algorithm) {
}
}
-char *compute_and_encode_signature(const grpc_auth_json_key *json_key,
- const char *signature_algorithm,
- const char *to_sign) {
- const EVP_MD *md = openssl_digest_from_algorithm(signature_algorithm);
- EVP_MD_CTX *md_ctx = NULL;
- EVP_PKEY *key = EVP_PKEY_new();
+char* compute_and_encode_signature(const grpc_auth_json_key* json_key,
+ const char* signature_algorithm,
+ const char* to_sign) {
+ const EVP_MD* md = openssl_digest_from_algorithm(signature_algorithm);
+ EVP_MD_CTX* md_ctx = NULL;
+ EVP_PKEY* key = EVP_PKEY_new();
size_t sig_len = 0;
- unsigned char *sig = NULL;
- char *result = NULL;
+ unsigned char* sig = NULL;
+ char* result = NULL;
if (md == NULL) return NULL;
md_ctx = EVP_MD_CTX_create();
if (md_ctx == NULL) {
@@ -269,7 +269,7 @@ char *compute_and_encode_signature(const grpc_auth_json_key *json_key,
gpr_log(GPR_ERROR, "DigestFinal (get signature length) failed.");
goto end;
}
- sig = (unsigned char *)gpr_malloc(sig_len);
+ sig = (unsigned char*)gpr_malloc(sig_len);
if (EVP_DigestSignFinal(md_ctx, sig, &sig_len) != 1) {
gpr_log(GPR_ERROR, "DigestFinal (signature compute) failed.");
goto end;
@@ -283,18 +283,18 @@ end:
return result;
}
-char *grpc_jwt_encode_and_sign(const grpc_auth_json_key *json_key,
- const char *audience,
- gpr_timespec token_lifetime, const char *scope) {
+char* grpc_jwt_encode_and_sign(const grpc_auth_json_key* json_key,
+ const char* audience,
+ gpr_timespec token_lifetime, const char* scope) {
if (g_jwt_encode_and_sign_override != NULL) {
return g_jwt_encode_and_sign_override(json_key, audience, token_lifetime,
scope);
} else {
- const char *sig_algo = GRPC_JWT_RSA_SHA256_ALGORITHM;
- char *to_sign = dot_concat_and_free_strings(
+ const char* sig_algo = GRPC_JWT_RSA_SHA256_ALGORITHM;
+ char* to_sign = dot_concat_and_free_strings(
encoded_jwt_header(json_key->private_key_id, sig_algo),
encoded_jwt_claim(json_key, audience, token_lifetime, scope));
- char *sig = compute_and_encode_signature(json_key, sig_algo, to_sign);
+ char* sig = compute_and_encode_signature(json_key, sig_algo, to_sign);
if (sig == NULL) {
gpr_free(to_sign);
return NULL;
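
The encode-and-sign path above assembles the token as base64url(header) "." base64url(claims) "." base64url(signature). A self-contained sketch of just the dot-concatenation step is shown below, using plain malloc instead of gpr_malloc; it is illustrative only and mirrors what dot_concat_and_free_strings does without the ownership transfer:

    #include <stdlib.h>
    #include <string.h>

    /* Joins two strings with a '.' separator, as used for <header>.<claims>
       and then <signed part>.<signature>. Caller frees the result. */
    static char* dot_concat(const char* a, const char* b) {
      size_t la = strlen(a), lb = strlen(b);
      char* out = malloc(la + 1 /* dot */ + lb + 1 /* NUL */);
      if (out == NULL) return NULL;
      memcpy(out, a, la);
      out[la] = '.';
      memcpy(out + la + 1, b, lb);
      out[la + 1 + lb] = '\0';
      return out;
    }
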
diff --git a/src/core/lib/security/credentials/jwt/json_token.h b/src/core/lib/security/credentials/jwt/json_token.h
index b923b02df6..b2c3c09c25 100644
--- a/src/core/lib/security/credentials/jwt/json_token.h
+++ b/src/core/lib/security/credentials/jwt/json_token.h
@@ -35,40 +35,40 @@ extern "C" {
/* --- auth_json_key parsing. --- */
typedef struct {
- const char *type;
- char *private_key_id;
- char *client_id;
- char *client_email;
- RSA *private_key;
+ const char* type;
+ char* private_key_id;
+ char* client_id;
+ char* client_email;
+ RSA* private_key;
} grpc_auth_json_key;
/* Returns 1 if the object is valid, 0 otherwise. */
-int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key);
+int grpc_auth_json_key_is_valid(const grpc_auth_json_key* json_key);
/* Creates a json_key object from string. Returns an invalid object if a parsing
error has been encountered. */
grpc_auth_json_key grpc_auth_json_key_create_from_string(
- const char *json_string);
+ const char* json_string);
/* Creates a json_key object from parsed json. Returns an invalid object if a
parsing error has been encountered. */
-grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json);
+grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json* json);
/* Destructs the object. */
-void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key);
+void grpc_auth_json_key_destruct(grpc_auth_json_key* json_key);
/* --- json token encoding and signing. --- */
/* Caller is responsible for calling gpr_free on the returned value. May return
NULL on invalid input. The scope parameter may be NULL. */
-char *grpc_jwt_encode_and_sign(const grpc_auth_json_key *json_key,
- const char *audience,
- gpr_timespec token_lifetime, const char *scope);
+char* grpc_jwt_encode_and_sign(const grpc_auth_json_key* json_key,
+ const char* audience,
+ gpr_timespec token_lifetime, const char* scope);
/* Override encode_and_sign function for testing. */
-typedef char *(*grpc_jwt_encode_and_sign_override)(
- const grpc_auth_json_key *json_key, const char *audience,
- gpr_timespec token_lifetime, const char *scope);
+typedef char* (*grpc_jwt_encode_and_sign_override)(
+ const grpc_auth_json_key* json_key, const char* audience,
+ gpr_timespec token_lifetime, const char* scope);
/* Set a custom encode_and_sign override for testing. */
void grpc_jwt_encode_and_sign_set_override(
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.cc b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
index 835dd677ed..e8baa7e053 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.cc
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
@@ -30,8 +30,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
-static void jwt_reset_cache(grpc_exec_ctx *exec_ctx,
- grpc_service_account_jwt_access_credentials *c) {
+static void jwt_reset_cache(grpc_exec_ctx* exec_ctx,
+ grpc_service_account_jwt_access_credentials* c) {
GRPC_MDELEM_UNREF(exec_ctx, c->cached.jwt_md);
c->cached.jwt_md = GRPC_MDNULL;
if (c->cached.service_url != NULL) {
@@ -41,24 +41,24 @@ static void jwt_reset_cache(grpc_exec_ctx *exec_ctx,
c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
}
-static void jwt_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_service_account_jwt_access_credentials *c =
- (grpc_service_account_jwt_access_credentials *)creds;
+static void jwt_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_service_account_jwt_access_credentials* c =
+ (grpc_service_account_jwt_access_credentials*)creds;
grpc_auth_json_key_destruct(&c->key);
jwt_reset_cache(exec_ctx, c);
gpr_mu_destroy(&c->cache_mu);
}
-static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds,
- grpc_polling_entity *pollent,
+static bool jwt_get_request_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds,
+ grpc_polling_entity* pollent,
grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array,
- grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_service_account_jwt_access_credentials *c =
- (grpc_service_account_jwt_access_credentials *)creds;
+ grpc_credentials_mdelem_array* md_array,
+ grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_service_account_jwt_access_credentials* c =
+ (grpc_service_account_jwt_access_credentials*)creds;
gpr_timespec refresh_threshold = gpr_time_from_seconds(
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
@@ -78,14 +78,14 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
}
if (GRPC_MDISNULL(jwt_md)) {
- char *jwt = NULL;
+ char* jwt = NULL;
/* Generate a new jwt. */
gpr_mu_lock(&c->cache_mu);
jwt_reset_cache(exec_ctx, c);
jwt = grpc_jwt_encode_and_sign(&c->key, context.service_url,
c->jwt_lifetime, NULL);
if (jwt != NULL) {
- char *md_value;
+ char* md_value;
gpr_asprintf(&md_value, "Bearer %s", jwt);
gpr_free(jwt);
c->cached.jwt_expiration =
@@ -111,24 +111,24 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx,
}
static void jwt_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
static grpc_call_credentials_vtable jwt_vtable = {
jwt_destruct, jwt_get_request_metadata, jwt_cancel_get_request_metadata};
-grpc_call_credentials *
+grpc_call_credentials*
grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
- grpc_exec_ctx *exec_ctx, grpc_auth_json_key key,
+ grpc_exec_ctx* exec_ctx, grpc_auth_json_key key,
gpr_timespec token_lifetime) {
- grpc_service_account_jwt_access_credentials *c;
+ grpc_service_account_jwt_access_credentials* c;
if (!grpc_auth_json_key_is_valid(&key)) {
gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation");
return NULL;
}
- c = (grpc_service_account_jwt_access_credentials *)gpr_zalloc(
+ c = (grpc_service_account_jwt_access_credentials*)gpr_zalloc(
sizeof(grpc_service_account_jwt_access_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_JWT;
gpr_ref_init(&c->base.refcount, 1);
@@ -147,33 +147,33 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
return &c->base;
}
-static char *redact_private_key(const char *json_key) {
- char *json_copy = gpr_strdup(json_key);
- grpc_json *json = grpc_json_parse_string(json_copy);
+static char* redact_private_key(const char* json_key) {
+ char* json_copy = gpr_strdup(json_key);
+ grpc_json* json = grpc_json_parse_string(json_copy);
if (!json) {
gpr_free(json_copy);
return gpr_strdup("<Json failed to parse.>");
}
- const char *redacted = "<redacted>";
- grpc_json *current = json->child;
+ const char* redacted = "<redacted>";
+ grpc_json* current = json->child;
while (current) {
if (current->type == GRPC_JSON_STRING &&
strcmp(current->key, "private_key") == 0) {
- current->value = (char *)redacted;
+ current->value = (char*)redacted;
break;
}
current = current->next;
}
- char *clean_json = grpc_json_dump_to_string(json, 2);
+ char* clean_json = grpc_json_dump_to_string(json, 2);
gpr_free(json_copy);
grpc_json_destroy(json);
return clean_json;
}
-grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
- const char *json_key, gpr_timespec token_lifetime, void *reserved) {
+grpc_call_credentials* grpc_service_account_jwt_access_credentials_create(
+ const char* json_key, gpr_timespec token_lifetime, void* reserved) {
if (GRPC_TRACER_ON(grpc_api_trace)) {
- char *clean_json = redact_private_key(json_key);
+ char* clean_json = redact_private_key(json_key);
gpr_log(GPR_INFO,
"grpc_service_account_jwt_access_credentials_create("
"json_key=%s, "
@@ -187,7 +187,7 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
}
GPR_ASSERT(reserved == NULL);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_call_credentials *creds =
+ grpc_call_credentials* creds =
grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
&exec_ctx, grpc_auth_json_key_create_from_string(json_key),
token_lifetime);
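
jwt_get_request_metadata above serves the cached JWT only while it is still comfortably inside its lifetime and re-signs otherwise. A simplified sketch of that freshness test follows, using plain seconds instead of gpr_timespec; names and types are hypothetical:

    #include <time.h>

    /* Returns 1 if the cached token is still usable, i.e. it does not
       expire within the refresh threshold (the code above uses
       GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS for the same purpose). */
    static int cached_jwt_is_fresh(time_t now, time_t jwt_expiration,
                                   time_t refresh_threshold_secs) {
      return now + refresh_threshold_secs < jwt_expiration;
    }
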
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.h b/src/core/lib/security/credentials/jwt/jwt_credentials.h
index 5cee6ed0da..d554613eed 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.h
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.h
@@ -34,7 +34,7 @@ typedef struct {
gpr_mu cache_mu;
struct {
grpc_mdelem jwt_md;
- char *service_url;
+ char* service_url;
gpr_timespec jwt_expiration;
} cached;
@@ -44,9 +44,9 @@ typedef struct {
// Private constructor for jwt credentials from an already parsed json key.
// Takes ownership of the key.
-grpc_call_credentials *
+grpc_call_credentials*
grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
- grpc_exec_ctx *exec_ctx, grpc_auth_json_key key,
+ grpc_exec_ctx* exec_ctx, grpc_auth_json_key key,
gpr_timespec token_lifetime);
#ifdef __cplusplus
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
index 39e72c195b..0fce5f5555 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
@@ -40,7 +40,7 @@ extern "C" {
/* --- Utils. --- */
-const char *grpc_jwt_verifier_status_to_string(
+const char* grpc_jwt_verifier_status_to_string(
grpc_jwt_verifier_status status) {
switch (status) {
case GRPC_JWT_VERIFIER_OK:
@@ -62,7 +62,7 @@ const char *grpc_jwt_verifier_status_to_string(
}
}
-static const EVP_MD *evp_md_from_alg(const char *alg) {
+static const EVP_MD* evp_md_from_alg(const char* alg) {
if (strcmp(alg, "RS256") == 0) {
return EVP_sha256();
} else if (strcmp(alg, "RS384") == 0) {
@@ -74,17 +74,17 @@ static const EVP_MD *evp_md_from_alg(const char *alg) {
}
}
-static grpc_json *parse_json_part_from_jwt(grpc_exec_ctx *exec_ctx,
- const char *str, size_t len,
- grpc_slice *buffer) {
- grpc_json *json;
+static grpc_json* parse_json_part_from_jwt(grpc_exec_ctx* exec_ctx,
+ const char* str, size_t len,
+ grpc_slice* buffer) {
+ grpc_json* json;
*buffer = grpc_base64_decode_with_len(exec_ctx, str, len, 1);
if (GRPC_SLICE_IS_EMPTY(*buffer)) {
gpr_log(GPR_ERROR, "Invalid base64.");
return NULL;
}
- json = grpc_json_parse_string_with_len((char *)GRPC_SLICE_START_PTR(*buffer),
+ json = grpc_json_parse_string_with_len((char*)GRPC_SLICE_START_PTR(*buffer),
GRPC_SLICE_LENGTH(*buffer));
if (json == NULL) {
grpc_slice_unref_internal(exec_ctx, *buffer);
@@ -93,8 +93,8 @@ static grpc_json *parse_json_part_from_jwt(grpc_exec_ctx *exec_ctx,
return json;
}
-static const char *validate_string_field(const grpc_json *json,
- const char *key) {
+static const char* validate_string_field(const grpc_json* json,
+ const char* key) {
if (json->type != GRPC_JSON_STRING) {
gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
return NULL;
@@ -102,8 +102,8 @@ static const char *validate_string_field(const grpc_json *json,
return json->value;
}
-static gpr_timespec validate_time_field(const grpc_json *json,
- const char *key) {
+static gpr_timespec validate_time_field(const grpc_json* json,
+ const char* key) {
gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME);
if (json->type != GRPC_JSON_NUMBER) {
gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
@@ -116,23 +116,23 @@ static gpr_timespec validate_time_field(const grpc_json *json,
/* --- JOSE header. see http://tools.ietf.org/html/rfc7515#section-4 --- */
typedef struct {
- const char *alg;
- const char *kid;
- const char *typ;
+ const char* alg;
+ const char* kid;
+ const char* typ;
/* TODO(jboeuf): Add others as needed (jku, jwk, x5u, x5c and so on...). */
grpc_slice buffer;
} jose_header;
-static void jose_header_destroy(grpc_exec_ctx *exec_ctx, jose_header *h) {
+static void jose_header_destroy(grpc_exec_ctx* exec_ctx, jose_header* h) {
grpc_slice_unref_internal(exec_ctx, h->buffer);
gpr_free(h);
}
/* Takes ownership of json and buffer. */
-static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx,
- grpc_json *json, grpc_slice buffer) {
- grpc_json *cur;
- jose_header *h = (jose_header *)gpr_zalloc(sizeof(jose_header));
+static jose_header* jose_header_from_json(grpc_exec_ctx* exec_ctx,
+ grpc_json* json, grpc_slice buffer) {
+ grpc_json* cur;
+ jose_header* h = (jose_header*)gpr_zalloc(sizeof(jose_header));
h->buffer = buffer;
for (cur = json->child; cur != NULL; cur = cur->next) {
if (strcmp(cur->key, "alg") == 0) {
@@ -172,70 +172,70 @@ error:
struct grpc_jwt_claims {
/* Well known properties already parsed. */
- const char *sub;
- const char *iss;
- const char *aud;
- const char *jti;
+ const char* sub;
+ const char* iss;
+ const char* aud;
+ const char* jti;
gpr_timespec iat;
gpr_timespec exp;
gpr_timespec nbf;
- grpc_json *json;
+ grpc_json* json;
grpc_slice buffer;
};
-void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims) {
+void grpc_jwt_claims_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_claims* claims) {
grpc_json_destroy(claims->json);
grpc_slice_unref_internal(exec_ctx, claims->buffer);
gpr_free(claims);
}
-const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims) {
+const grpc_json* grpc_jwt_claims_json(const grpc_jwt_claims* claims) {
if (claims == NULL) return NULL;
return claims->json;
}
-const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims) {
+const char* grpc_jwt_claims_subject(const grpc_jwt_claims* claims) {
if (claims == NULL) return NULL;
return claims->sub;
}
-const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims) {
+const char* grpc_jwt_claims_issuer(const grpc_jwt_claims* claims) {
if (claims == NULL) return NULL;
return claims->iss;
}
-const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims) {
+const char* grpc_jwt_claims_id(const grpc_jwt_claims* claims) {
if (claims == NULL) return NULL;
return claims->jti;
}
-const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) {
+const char* grpc_jwt_claims_audience(const grpc_jwt_claims* claims) {
if (claims == NULL) return NULL;
return claims->aud;
}
-gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) {
+gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims* claims) {
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->iat;
}
-gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) {
+gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims* claims) {
if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME);
return claims->exp;
}
-gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
+gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims) {
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->nbf;
}
/* Takes ownership of json and buffer even in case of failure. */
-grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx,
- grpc_json *json, grpc_slice buffer) {
- grpc_json *cur;
- grpc_jwt_claims *claims =
- (grpc_jwt_claims *)gpr_malloc(sizeof(grpc_jwt_claims));
+grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_exec_ctx* exec_ctx,
+ grpc_json* json, grpc_slice buffer) {
+ grpc_json* cur;
+ grpc_jwt_claims* claims =
+ (grpc_jwt_claims*)gpr_malloc(sizeof(grpc_jwt_claims));
memset(claims, 0, sizeof(grpc_jwt_claims));
claims->json = json;
claims->buffer = buffer;
@@ -278,8 +278,8 @@ error:
return NULL;
}
-grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
- const char *audience) {
+grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims,
+ const char* audience) {
gpr_timespec skewed_now;
int audience_ok;
@@ -332,26 +332,26 @@ typedef enum {
} http_response_index;
typedef struct {
- grpc_jwt_verifier *verifier;
+ grpc_jwt_verifier* verifier;
grpc_polling_entity pollent;
- jose_header *header;
- grpc_jwt_claims *claims;
- char *audience;
+ jose_header* header;
+ grpc_jwt_claims* claims;
+ char* audience;
grpc_slice signature;
grpc_slice signed_data;
- void *user_data;
+ void* user_data;
grpc_jwt_verification_done_cb user_cb;
grpc_http_response responses[HTTP_RESPONSE_COUNT];
} verifier_cb_ctx;
/* Takes ownership of the header, claims and signature. */
-static verifier_cb_ctx *verifier_cb_ctx_create(
- grpc_jwt_verifier *verifier, grpc_pollset *pollset, jose_header *header,
- grpc_jwt_claims *claims, const char *audience, grpc_slice signature,
- const char *signed_jwt, size_t signed_jwt_len, void *user_data,
+static verifier_cb_ctx* verifier_cb_ctx_create(
+ grpc_jwt_verifier* verifier, grpc_pollset* pollset, jose_header* header,
+ grpc_jwt_claims* claims, const char* audience, grpc_slice signature,
+ const char* signed_jwt, size_t signed_jwt_len, void* user_data,
grpc_jwt_verification_done_cb cb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- verifier_cb_ctx *ctx = (verifier_cb_ctx *)gpr_zalloc(sizeof(verifier_cb_ctx));
+ verifier_cb_ctx* ctx = (verifier_cb_ctx*)gpr_zalloc(sizeof(verifier_cb_ctx));
ctx->verifier = verifier;
ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
ctx->header = header;
@@ -365,7 +365,7 @@ static verifier_cb_ctx *verifier_cb_ctx_create(
return ctx;
}
-void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) {
+void verifier_cb_ctx_destroy(grpc_exec_ctx* exec_ctx, verifier_cb_ctx* ctx) {
if (ctx->audience != NULL) gpr_free(ctx->audience);
if (ctx->claims != NULL) grpc_jwt_claims_destroy(exec_ctx, ctx->claims);
grpc_slice_unref_internal(exec_ctx, ctx->signature);
@@ -387,19 +387,19 @@ gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC;
typedef struct {
- char *email_domain;
- char *key_url_prefix;
+ char* email_domain;
+ char* key_url_prefix;
} email_key_mapping;
struct grpc_jwt_verifier {
- email_key_mapping *mappings;
+ email_key_mapping* mappings;
size_t num_mappings; /* Should be very few, linear search ok. */
size_t allocated_mappings;
grpc_httpcli_context http_ctx;
};
-static grpc_json *json_from_http(const grpc_httpcli_response *response) {
- grpc_json *json = NULL;
+static grpc_json* json_from_http(const grpc_httpcli_response* response) {
+ grpc_json* json = NULL;
if (response == NULL) {
gpr_log(GPR_ERROR, "HTTP response is NULL.");
@@ -418,19 +418,19 @@ static grpc_json *json_from_http(const grpc_httpcli_response *response) {
return json;
}
-static const grpc_json *find_property_by_name(const grpc_json *json,
- const char *name) {
- const grpc_json *cur;
+static const grpc_json* find_property_by_name(const grpc_json* json,
+ const char* name) {
+ const grpc_json* cur;
for (cur = json->child; cur != NULL; cur = cur->next) {
if (strcmp(cur->key, name) == 0) return cur;
}
return NULL;
}
-static EVP_PKEY *extract_pkey_from_x509(const char *x509_str) {
- X509 *x509 = NULL;
- EVP_PKEY *result = NULL;
- BIO *bio = BIO_new(BIO_s_mem());
+static EVP_PKEY* extract_pkey_from_x509(const char* x509_str) {
+ X509* x509 = NULL;
+ EVP_PKEY* result = NULL;
+ BIO* bio = BIO_new(BIO_s_mem());
size_t len = strlen(x509_str);
GPR_ASSERT(len < INT_MAX);
BIO_write(bio, x509_str, (int)len);
@@ -450,8 +450,8 @@ end:
return result;
}
-static BIGNUM *bignum_from_base64(grpc_exec_ctx *exec_ctx, const char *b64) {
- BIGNUM *result = NULL;
+static BIGNUM* bignum_from_base64(grpc_exec_ctx* exec_ctx, const char* b64) {
+ BIGNUM* result = NULL;
grpc_slice bin;
if (b64 == NULL) return NULL;
@@ -469,7 +469,7 @@ static BIGNUM *bignum_from_base64(grpc_exec_ctx *exec_ctx, const char *b64) {
#if OPENSSL_VERSION_NUMBER < 0x10100000L
// Provide compatibility across OpenSSL 1.02 and 1.1.
-static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) {
+static int RSA_set0_key(RSA* r, BIGNUM* n, BIGNUM* e, BIGNUM* d) {
/* If the fields n and e in r are NULL, the corresponding input
* parameters MUST be non-NULL for n and e. d may be
* left NULL (in case only the public key is used).
@@ -495,13 +495,13 @@ static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) {
}
#endif // OPENSSL_VERSION_NUMBER < 0x10100000L
-static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json,
- const char *kty) {
- const grpc_json *key_prop;
- RSA *rsa = NULL;
- EVP_PKEY *result = NULL;
- BIGNUM *tmp_n = NULL;
- BIGNUM *tmp_e = NULL;
+static EVP_PKEY* pkey_from_jwk(grpc_exec_ctx* exec_ctx, const grpc_json* json,
+ const char* kty) {
+ const grpc_json* key_prop;
+ RSA* rsa = NULL;
+ EVP_PKEY* result = NULL;
+ BIGNUM* tmp_n = NULL;
+ BIGNUM* tmp_e = NULL;
GPR_ASSERT(kty != NULL && json != NULL);
if (strcmp(kty, "RSA") != 0) {
@@ -545,19 +545,19 @@ end:
return result;
}
-static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx,
- const grpc_json *json,
- const char *header_alg,
- const char *header_kid) {
- const grpc_json *jkey;
- const grpc_json *jwk_keys;
+static EVP_PKEY* find_verification_key(grpc_exec_ctx* exec_ctx,
+ const grpc_json* json,
+ const char* header_alg,
+ const char* header_kid) {
+ const grpc_json* jkey;
+ const grpc_json* jwk_keys;
/* Try to parse the json as a JWK set:
https://tools.ietf.org/html/rfc7517#section-5. */
jwk_keys = find_property_by_name(json, "keys");
if (jwk_keys == NULL) {
/* Use the google proprietary format which is:
{ <kid1>: <x5091>, <kid2>: <x5092>, ... } */
- const grpc_json *cur = find_property_by_name(json, header_kid);
+ const grpc_json* cur = find_property_by_name(json, header_kid);
if (cur == NULL) return NULL;
return extract_pkey_from_x509(cur->value);
}
@@ -570,10 +570,10 @@ static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx,
/* Key format is specified in:
https://tools.ietf.org/html/rfc7518#section-6. */
for (jkey = jwk_keys->child; jkey != NULL; jkey = jkey->next) {
- grpc_json *key_prop;
- const char *alg = NULL;
- const char *kid = NULL;
- const char *kty = NULL;
+ grpc_json* key_prop;
+ const char* alg = NULL;
+ const char* kid = NULL;
+ const char* kty = NULL;
if (jkey->type != GRPC_JSON_OBJECT) continue;
for (key_prop = jkey->child; key_prop != NULL; key_prop = key_prop->next) {
@@ -599,10 +599,10 @@ static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx,
return NULL;
}
-static int verify_jwt_signature(EVP_PKEY *key, const char *alg,
+static int verify_jwt_signature(EVP_PKEY* key, const char* alg,
grpc_slice signature, grpc_slice signed_data) {
- EVP_MD_CTX *md_ctx = EVP_MD_CTX_create();
- const EVP_MD *md = evp_md_from_alg(alg);
+ EVP_MD_CTX* md_ctx = EVP_MD_CTX_create();
+ const EVP_MD* md = evp_md_from_alg(alg);
int result = 0;
GPR_ASSERT(md != NULL); /* Checked before. */
@@ -631,13 +631,13 @@ end:
return result;
}
-static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *error) {
- verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
- grpc_json *json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]);
- EVP_PKEY *verification_key = NULL;
+static void on_keys_retrieved(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* error) {
+ verifier_cb_ctx* ctx = (verifier_cb_ctx*)user_data;
+ grpc_json* json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]);
+ EVP_PKEY* verification_key = NULL;
grpc_jwt_verifier_status status = GRPC_JWT_VERIFIER_GENERIC_ERROR;
- grpc_jwt_claims *claims = NULL;
+ grpc_jwt_claims* claims = NULL;
if (json == NULL) {
status = GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR;
@@ -672,15 +672,15 @@ end:
verifier_cb_ctx_destroy(exec_ctx, ctx);
}
-static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *error) {
- const grpc_json *cur;
- verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
- const grpc_http_response *response = &ctx->responses[HTTP_RESPONSE_OPENID];
- grpc_json *json = json_from_http(response);
+static void on_openid_config_retrieved(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* error) {
+ const grpc_json* cur;
+ verifier_cb_ctx* ctx = (verifier_cb_ctx*)user_data;
+ const grpc_http_response* response = &ctx->responses[HTTP_RESPONSE_OPENID];
+ grpc_json* json = json_from_http(response);
grpc_httpcli_request req;
- const char *jwks_uri;
- grpc_resource_quota *resource_quota = NULL;
+ const char* jwks_uri;
+ grpc_resource_quota* resource_quota = NULL;
/* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. */
if (json == NULL) goto error;
@@ -698,9 +698,9 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
jwks_uri += 8;
req.handshaker = &grpc_httpcli_ssl;
req.host = gpr_strdup(jwks_uri);
- req.http.path = (char *)strchr(jwks_uri, '/');
+ req.http.path = (char*)strchr(jwks_uri, '/');
if (req.http.path == NULL) {
- req.http.path = (char *)"";
+ req.http.path = (char*)"";
} else {
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
@@ -726,8 +726,8 @@ error:
verifier_cb_ctx_destroy(exec_ctx, ctx);
}
-static email_key_mapping *verifier_get_mapping(grpc_jwt_verifier *v,
- const char *email_domain) {
+static email_key_mapping* verifier_get_mapping(grpc_jwt_verifier* v,
+ const char* email_domain) {
size_t i;
if (v->mappings == NULL) return NULL;
for (i = 0; i < v->num_mappings; i++) {
@@ -738,9 +738,9 @@ static email_key_mapping *verifier_get_mapping(grpc_jwt_verifier *v,
return NULL;
}
-static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain,
- const char *key_url_prefix) {
- email_key_mapping *mapping = verifier_get_mapping(v, email_domain);
+static void verifier_put_mapping(grpc_jwt_verifier* v, const char* email_domain,
+ const char* key_url_prefix) {
+ email_key_mapping* mapping = verifier_get_mapping(v, email_domain);
GPR_ASSERT(v->num_mappings < v->allocated_mappings);
if (mapping != NULL) {
gpr_free(mapping->key_url_prefix);
@@ -755,30 +755,30 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain,
/* Very non-sophisticated way to detect an email address. Should be good
enough for now... */
-const char *grpc_jwt_issuer_email_domain(const char *issuer) {
- const char *at_sign = strchr(issuer, '@');
+const char* grpc_jwt_issuer_email_domain(const char* issuer) {
+ const char* at_sign = strchr(issuer, '@');
if (at_sign == NULL) return NULL;
- const char *email_domain = at_sign + 1;
+ const char* email_domain = at_sign + 1;
if (*email_domain == '\0') return NULL;
- const char *dot = strrchr(email_domain, '.');
+ const char* dot = strrchr(email_domain, '.');
if (dot == NULL || dot == email_domain) return email_domain;
GPR_ASSERT(dot > email_domain);
/* There may be a subdomain, we just want the domain. */
- dot = (const char *)gpr_memrchr((void *)email_domain, '.',
- (size_t)(dot - email_domain));
+ dot = (const char*)gpr_memrchr((void*)email_domain, '.',
+ (size_t)(dot - email_domain));
if (dot == NULL) return email_domain;
return dot + 1;
}
/* Takes ownership of ctx. */
-static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
- verifier_cb_ctx *ctx) {
- const char *email_domain;
- grpc_closure *http_cb;
- char *path_prefix = NULL;
- const char *iss;
+static void retrieve_key_and_verify(grpc_exec_ctx* exec_ctx,
+ verifier_cb_ctx* ctx) {
+ const char* email_domain;
+ grpc_closure* http_cb;
+ char* path_prefix = NULL;
+ const char* iss;
grpc_httpcli_request req;
- grpc_resource_quota *resource_quota = NULL;
+ grpc_resource_quota* resource_quota = NULL;
memset(&req, 0, sizeof(grpc_httpcli_request));
req.handshaker = &grpc_httpcli_ssl;
http_response_index rsp_idx;
@@ -801,7 +801,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
Part 4, on the other hand is implemented by both google and salesforce. */
email_domain = grpc_jwt_issuer_email_domain(iss);
if (email_domain != NULL) {
- email_key_mapping *mapping;
+ email_key_mapping* mapping;
GPR_ASSERT(ctx->verifier != NULL);
mapping = verifier_get_mapping(ctx->verifier, email_domain);
if (mapping == NULL) {
@@ -853,21 +853,21 @@ error:
verifier_cb_ctx_destroy(exec_ctx, ctx);
}
-void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
- grpc_jwt_verifier *verifier,
- grpc_pollset *pollset, const char *jwt,
- const char *audience,
+void grpc_jwt_verifier_verify(grpc_exec_ctx* exec_ctx,
+ grpc_jwt_verifier* verifier,
+ grpc_pollset* pollset, const char* jwt,
+ const char* audience,
grpc_jwt_verification_done_cb cb,
- void *user_data) {
- const char *dot = NULL;
- grpc_json *json;
- jose_header *header = NULL;
- grpc_jwt_claims *claims = NULL;
+ void* user_data) {
+ const char* dot = NULL;
+ grpc_json* json;
+ jose_header* header = NULL;
+ grpc_jwt_claims* claims = NULL;
grpc_slice header_buffer;
grpc_slice claims_buffer;
grpc_slice signature;
size_t signed_jwt_len;
- const char *cur = jwt;
+ const char* cur = jwt;
GPR_ASSERT(verifier != NULL && jwt != NULL && audience != NULL && cb != NULL);
dot = strchr(cur, '.');
@@ -903,17 +903,17 @@ error:
cb(exec_ctx, user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, NULL);
}
-grpc_jwt_verifier *grpc_jwt_verifier_create(
- const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
+grpc_jwt_verifier* grpc_jwt_verifier_create(
+ const grpc_jwt_verifier_email_domain_key_url_mapping* mappings,
size_t num_mappings) {
- grpc_jwt_verifier *v =
- (grpc_jwt_verifier *)gpr_zalloc(sizeof(grpc_jwt_verifier));
+ grpc_jwt_verifier* v =
+ (grpc_jwt_verifier*)gpr_zalloc(sizeof(grpc_jwt_verifier));
grpc_httpcli_context_init(&v->http_ctx);
/* We know at least of one mapping. */
v->allocated_mappings = 1 + num_mappings;
- v->mappings = (email_key_mapping *)gpr_malloc(v->allocated_mappings *
- sizeof(email_key_mapping));
+ v->mappings = (email_key_mapping*)gpr_malloc(v->allocated_mappings *
+ sizeof(email_key_mapping));
verifier_put_mapping(v, GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN,
GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX);
/* User-Provided mappings. */
@@ -927,7 +927,7 @@ grpc_jwt_verifier *grpc_jwt_verifier_create(
return v;
}
-void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_verifier *v) {
+void grpc_jwt_verifier_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_verifier* v) {
size_t i;
if (v == NULL) return;
grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx);
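
The claims check in this verifier accepts a token only if the audience matches exactly and the validity window (nbf/exp) still holds after allowing grpc_jwt_verifier_clock_skew. A condensed sketch of that time-window test is given below with plain time_t arithmetic; the helper name and parameters are illustrative, not part of the verifier API:

    #include <string.h>
    #include <time.h>

    /* 1 = claims usable, 0 = reject. skew_secs mirrors the 60-second
       default of grpc_jwt_verifier_clock_skew used above. */
    static int claims_window_ok(const char* aud, const char* expected_aud,
                                time_t nbf, time_t exp, time_t now,
                                time_t skew_secs) {
      if (aud == NULL || strcmp(aud, expected_aud) != 0) return 0;
      if (now + skew_secs < nbf) return 0; /* not valid yet */
      if (now - skew_secs > exp) return 0; /* already expired */
      return 1;
    }
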
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index 998365e75c..8083cf9beb 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -49,25 +49,25 @@ typedef enum {
GRPC_JWT_VERIFIER_GENERIC_ERROR
} grpc_jwt_verifier_status;
-const char *grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status);
+const char* grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status);
/* --- grpc_jwt_claims. --- */
typedef struct grpc_jwt_claims grpc_jwt_claims;
-void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims);
+void grpc_jwt_claims_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_claims* claims);
/* Returns the whole JSON tree of the claims. */
-const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims);
+const grpc_json* grpc_jwt_claims_json(const grpc_jwt_claims* claims);
/* Access to registered claims in https://tools.ietf.org/html/rfc7519#page-9 */
-const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims);
-const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims);
-const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims);
-const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims);
-gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims);
-gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims);
-gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims);
+const char* grpc_jwt_claims_subject(const grpc_jwt_claims* claims);
+const char* grpc_jwt_claims_issuer(const grpc_jwt_claims* claims);
+const char* grpc_jwt_claims_id(const grpc_jwt_claims* claims);
+const char* grpc_jwt_claims_audience(const grpc_jwt_claims* claims);
+gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims* claims);
+gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims* claims);
+gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims);
/* --- grpc_jwt_verifier. --- */
@@ -75,12 +75,12 @@ typedef struct grpc_jwt_verifier grpc_jwt_verifier;
typedef struct {
/* The email domain is the part after the @ sign. */
- const char *email_domain;
+ const char* email_domain;
/* The key url prefix will be used to get the public key from the issuer:
https://<key_url_prefix>/<issuer_email>
Therefore the key_url_prefix must NOT contain https://. */
- const char *key_url_prefix;
+ const char* key_url_prefix;
} grpc_jwt_verifier_email_domain_key_url_mapping;
/* Globals to control the verifier. Not thread-safe. */
@@ -93,38 +93,38 @@ extern grpc_millis grpc_jwt_verifier_max_delay;
A verifier object has one built-in mapping (unless overridden):
GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN ->
GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX.*/
-grpc_jwt_verifier *grpc_jwt_verifier_create(
- const grpc_jwt_verifier_email_domain_key_url_mapping *mappings,
+grpc_jwt_verifier* grpc_jwt_verifier_create(
+ const grpc_jwt_verifier_email_domain_key_url_mapping* mappings,
size_t num_mappings);
/*The verifier must not be destroyed if there are still outstanding callbacks.*/
-void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx,
- grpc_jwt_verifier *verifier);
+void grpc_jwt_verifier_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_jwt_verifier* verifier);
/* User provided callback that will be called when the verification of the JWT
is done (maybe in another thread).
It is the responsibility of the callee to call grpc_jwt_claims_destroy on
the claims. */
-typedef void (*grpc_jwt_verification_done_cb)(grpc_exec_ctx *exec_ctx,
- void *user_data,
+typedef void (*grpc_jwt_verification_done_cb)(grpc_exec_ctx* exec_ctx,
+ void* user_data,
grpc_jwt_verifier_status status,
- grpc_jwt_claims *claims);
+ grpc_jwt_claims* claims);
/* Verifies for the JWT for the given expected audience. */
-void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx,
- grpc_jwt_verifier *verifier,
- grpc_pollset *pollset, const char *jwt,
- const char *audience,
+void grpc_jwt_verifier_verify(grpc_exec_ctx* exec_ctx,
+ grpc_jwt_verifier* verifier,
+ grpc_pollset* pollset, const char* jwt,
+ const char* audience,
grpc_jwt_verification_done_cb cb,
- void *user_data);
+ void* user_data);
/* --- TESTING ONLY exposed functions. --- */
-grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx,
- grpc_json *json, grpc_slice buffer);
-grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims,
- const char *audience);
-const char *grpc_jwt_issuer_email_domain(const char *issuer);
+grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_exec_ctx* exec_ctx,
+ grpc_json* json, grpc_slice buffer);
+grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims,
+ const char* audience);
+const char* grpc_jwt_issuer_email_domain(const char* issuer);
#ifdef __cplusplus
}
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
index 7867105f56..2a44211228 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
@@ -32,15 +32,15 @@
//
int grpc_auth_refresh_token_is_valid(
- const grpc_auth_refresh_token *refresh_token) {
+ const grpc_auth_refresh_token* refresh_token) {
return (refresh_token != NULL) &&
strcmp(refresh_token->type, GRPC_AUTH_JSON_TYPE_INVALID);
}
grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json(
- const grpc_json *json) {
+ const grpc_json* json) {
grpc_auth_refresh_token result;
- const char *prop_value;
+ const char* prop_value;
int success = 0;
memset(&result, 0, sizeof(grpc_auth_refresh_token));
@@ -72,9 +72,9 @@ end:
}
grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
- const char *json_string) {
- char *scratchpad = gpr_strdup(json_string);
- grpc_json *json = grpc_json_parse_string(scratchpad);
+ const char* json_string) {
+ char* scratchpad = gpr_strdup(json_string);
+ grpc_json* json = grpc_json_parse_string(scratchpad);
grpc_auth_refresh_token result =
grpc_auth_refresh_token_create_from_json(json);
if (json != NULL) grpc_json_destroy(json);
@@ -82,7 +82,7 @@ grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
return result;
}
-void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) {
+void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token* refresh_token) {
if (refresh_token == NULL) return;
refresh_token->type = GRPC_AUTH_JSON_TYPE_INVALID;
if (refresh_token->client_id != NULL) {
@@ -103,10 +103,10 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) {
// Oauth2 Token Fetcher credentials.
//
-static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_oauth2_token_fetcher_credentials *c =
- (grpc_oauth2_token_fetcher_credentials *)creds;
+static void oauth2_token_fetcher_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_oauth2_token_fetcher_credentials* c =
+ (grpc_oauth2_token_fetcher_credentials*)creds;
GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md);
gpr_mu_destroy(&c->mu);
grpc_pollset_set_destroy(exec_ctx,
@@ -116,12 +116,12 @@ static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx,
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
- grpc_exec_ctx *exec_ctx, const grpc_http_response *response,
- grpc_mdelem *token_md, grpc_millis *token_lifetime) {
- char *null_terminated_body = NULL;
- char *new_access_token = NULL;
+ grpc_exec_ctx* exec_ctx, const grpc_http_response* response,
+ grpc_mdelem* token_md, grpc_millis* token_lifetime) {
+ char* null_terminated_body = NULL;
+ char* new_access_token = NULL;
grpc_credentials_status status = GRPC_CREDENTIALS_OK;
- grpc_json *json = NULL;
+ grpc_json* json = NULL;
if (response == NULL) {
gpr_log(GPR_ERROR, "Received NULL response.");
@@ -130,7 +130,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
}
if (response->body_length > 0) {
- null_terminated_body = (char *)gpr_malloc(response->body_length + 1);
+ null_terminated_body = (char*)gpr_malloc(response->body_length + 1);
null_terminated_body[response->body_length] = '\0';
memcpy(null_terminated_body, response->body, response->body_length);
}
@@ -142,10 +142,10 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
status = GRPC_CREDENTIALS_ERROR;
goto end;
} else {
- grpc_json *access_token = NULL;
- grpc_json *token_type = NULL;
- grpc_json *expires_in = NULL;
- grpc_json *ptr;
+ grpc_json* access_token = NULL;
+ grpc_json* token_type = NULL;
+ grpc_json* expires_in = NULL;
+ grpc_json* ptr;
json = grpc_json_parse_string(null_terminated_body);
if (json == NULL) {
gpr_log(GPR_ERROR, "Could not parse JSON from %s", null_terminated_body);
@@ -203,14 +203,14 @@ end:
return status;
}
-static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
- void *user_data,
- grpc_error *error) {
+static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx* exec_ctx,
+ void* user_data,
+ grpc_error* error) {
GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error));
- grpc_credentials_metadata_request *r =
- (grpc_credentials_metadata_request *)user_data;
- grpc_oauth2_token_fetcher_credentials *c =
- (grpc_oauth2_token_fetcher_credentials *)r->creds;
+ grpc_credentials_metadata_request* r =
+ (grpc_credentials_metadata_request*)user_data;
+ grpc_oauth2_token_fetcher_credentials* c =
+ (grpc_oauth2_token_fetcher_credentials*)r->creds;
grpc_mdelem access_token_md = GRPC_MDNULL;
grpc_millis token_lifetime;
grpc_credentials_status status =
@@ -223,7 +223,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
c->token_expiration = status == GRPC_CREDENTIALS_OK
? grpc_exec_ctx_now(exec_ctx) + token_lifetime
: 0;
- grpc_oauth2_pending_get_request_metadata *pending_request =
+ grpc_oauth2_pending_get_request_metadata* pending_request =
c->pending_requests;
c->pending_requests = NULL;
gpr_mu_unlock(&c->mu);
@@ -240,7 +240,7 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
grpc_polling_entity_del_from_pollset_set(
exec_ctx, pending_request->pollent,
grpc_polling_entity_pollset_set(&c->pollent));
- grpc_oauth2_pending_get_request_metadata *prev = pending_request;
+ grpc_oauth2_pending_get_request_metadata* prev = pending_request;
pending_request = pending_request->next;
gpr_free(prev);
}
@@ -250,12 +250,12 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx,
}
static bool oauth2_token_fetcher_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_oauth2_token_fetcher_credentials *c =
- (grpc_oauth2_token_fetcher_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_oauth2_token_fetcher_credentials* c =
+ (grpc_oauth2_token_fetcher_credentials*)creds;
// Check if we can use the cached token.
grpc_millis refresh_threshold =
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC;
@@ -273,8 +273,8 @@ static bool oauth2_token_fetcher_get_request_metadata(
}
// Couldn't get the token from the cache.
// Add request to c->pending_requests and start a new fetch if needed.
- grpc_oauth2_pending_get_request_metadata *pending_request =
- (grpc_oauth2_pending_get_request_metadata *)gpr_malloc(
+ grpc_oauth2_pending_get_request_metadata* pending_request =
+ (grpc_oauth2_pending_get_request_metadata*)gpr_malloc(
sizeof(*pending_request));
pending_request->md_array = md_array;
pending_request->on_request_metadata = on_request_metadata;
@@ -300,13 +300,13 @@ static bool oauth2_token_fetcher_get_request_metadata(
}
static void oauth2_token_fetcher_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
- grpc_oauth2_token_fetcher_credentials *c =
- (grpc_oauth2_token_fetcher_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+ grpc_oauth2_token_fetcher_credentials* c =
+ (grpc_oauth2_token_fetcher_credentials*)creds;
gpr_mu_lock(&c->mu);
- grpc_oauth2_pending_get_request_metadata *prev = NULL;
- grpc_oauth2_pending_get_request_metadata *pending_request =
+ grpc_oauth2_pending_get_request_metadata* prev = NULL;
+ grpc_oauth2_pending_get_request_metadata* pending_request =
c->pending_requests;
while (pending_request != NULL) {
if (pending_request->md_array == md_array) {
@@ -329,7 +329,7 @@ static void oauth2_token_fetcher_cancel_get_request_metadata(
GRPC_ERROR_UNREF(error);
}
-static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
+static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials* c,
grpc_fetch_oauth2_func fetch_func) {
memset(c, 0, sizeof(grpc_oauth2_token_fetcher_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
@@ -351,20 +351,20 @@ static grpc_call_credentials_vtable compute_engine_vtable = {
oauth2_token_fetcher_cancel_get_request_metadata};
static void compute_engine_fetch_oauth2(
- grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
- grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
+ grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* metadata_req,
+ grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent,
grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
- grpc_http_header header = {(char *)"Metadata-Flavor", (char *)"Google"};
+ grpc_http_header header = {(char*)"Metadata-Flavor", (char*)"Google"};
grpc_httpcli_request request;
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = (char *)GRPC_COMPUTE_ENGINE_METADATA_HOST;
- request.http.path = (char *)GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
+ request.host = (char*)GRPC_COMPUTE_ENGINE_METADATA_HOST;
+ request.http.path = (char*)GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
- grpc_resource_quota *resource_quota =
+ grpc_resource_quota* resource_quota =
grpc_resource_quota_create("oauth2_credentials");
grpc_httpcli_get(
exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
@@ -373,10 +373,10 @@ static void compute_engine_fetch_oauth2(
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
}
-grpc_call_credentials *grpc_google_compute_engine_credentials_create(
- void *reserved) {
- grpc_oauth2_token_fetcher_credentials *c =
- (grpc_oauth2_token_fetcher_credentials *)gpr_malloc(
+grpc_call_credentials* grpc_google_compute_engine_credentials_create(
+ void* reserved) {
+ grpc_oauth2_token_fetcher_credentials* c =
+ (grpc_oauth2_token_fetcher_credentials*)gpr_malloc(
sizeof(grpc_oauth2_token_fetcher_credentials));
GRPC_API_TRACE("grpc_compute_engine_credentials_create(reserved=%p)", 1,
(reserved));
@@ -390,10 +390,10 @@ grpc_call_credentials *grpc_google_compute_engine_credentials_create(
// Google Refresh Token credentials.
//
-static void refresh_token_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_google_refresh_token_credentials *c =
- (grpc_google_refresh_token_credentials *)creds;
+static void refresh_token_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_google_refresh_token_credentials* c =
+ (grpc_google_refresh_token_credentials*)creds;
grpc_auth_refresh_token_destruct(&c->refresh_token);
oauth2_token_fetcher_destruct(exec_ctx, &c->base.base);
}
@@ -403,28 +403,28 @@ static grpc_call_credentials_vtable refresh_token_vtable = {
oauth2_token_fetcher_cancel_get_request_metadata};
static void refresh_token_fetch_oauth2(
- grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req,
- grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent,
+ grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* metadata_req,
+ grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent,
grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
- grpc_google_refresh_token_credentials *c =
- (grpc_google_refresh_token_credentials *)metadata_req->creds;
- grpc_http_header header = {(char *)"Content-Type",
- (char *)"application/x-www-form-urlencoded"};
+ grpc_google_refresh_token_credentials* c =
+ (grpc_google_refresh_token_credentials*)metadata_req->creds;
+ grpc_http_header header = {(char*)"Content-Type",
+ (char*)"application/x-www-form-urlencoded"};
grpc_httpcli_request request;
- char *body = NULL;
+ char* body = NULL;
gpr_asprintf(&body, GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING,
c->refresh_token.client_id, c->refresh_token.client_secret,
c->refresh_token.refresh_token);
memset(&request, 0, sizeof(grpc_httpcli_request));
- request.host = (char *)GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
- request.http.path = (char *)GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
+ request.host = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
+ request.http.path = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
- grpc_resource_quota *resource_quota =
+ grpc_resource_quota* resource_quota =
grpc_resource_quota_create("oauth2_credentials_refresh");
grpc_httpcli_post(
exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
@@ -435,15 +435,15 @@ static void refresh_token_fetch_oauth2(
gpr_free(body);
}
-grpc_call_credentials *
+grpc_call_credentials*
grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_auth_refresh_token refresh_token) {
- grpc_google_refresh_token_credentials *c;
+ grpc_google_refresh_token_credentials* c;
if (!grpc_auth_refresh_token_is_valid(&refresh_token)) {
gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation");
return NULL;
}
- c = (grpc_google_refresh_token_credentials *)gpr_zalloc(
+ c = (grpc_google_refresh_token_credentials*)gpr_zalloc(
sizeof(grpc_google_refresh_token_credentials));
init_oauth2_token_fetcher(&c->base, refresh_token_fetch_oauth2);
c->base.base.vtable = &refresh_token_vtable;
@@ -451,11 +451,11 @@ grpc_refresh_token_credentials_create_from_auth_refresh_token(
return &c->base.base;
}
-static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) {
+static char* create_loggable_refresh_token(grpc_auth_refresh_token* token) {
if (strcmp(token->type, GRPC_AUTH_JSON_TYPE_INVALID) == 0) {
return gpr_strdup("<Invalid json token>");
}
- char *loggable_token = NULL;
+ char* loggable_token = NULL;
gpr_asprintf(&loggable_token,
"{\n type: %s\n client_id: %s\n client_secret: "
"<redacted>\n refresh_token: <redacted>\n}",
@@ -463,12 +463,12 @@ static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) {
return loggable_token;
}
-grpc_call_credentials *grpc_google_refresh_token_credentials_create(
- const char *json_refresh_token, void *reserved) {
+grpc_call_credentials* grpc_google_refresh_token_credentials_create(
+ const char* json_refresh_token, void* reserved) {
grpc_auth_refresh_token token =
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (GRPC_TRACER_ON(grpc_api_trace)) {
- char *loggable_token = create_loggable_refresh_token(&token);
+ char* loggable_token = create_loggable_refresh_token(&token);
gpr_log(GPR_INFO,
"grpc_refresh_token_credentials_create(json_refresh_token=%s, "
"reserved=%p)",
@@ -483,25 +483,25 @@ grpc_call_credentials *grpc_google_refresh_token_credentials_create(
// Oauth2 Access Token credentials.
//
-static void access_token_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
+static void access_token_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_access_token_credentials* c = (grpc_access_token_credentials*)creds;
GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md);
}
static bool access_token_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_polling_entity *pollent, grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_polling_entity* pollent, grpc_auth_metadata_context context,
+ grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_access_token_credentials* c = (grpc_access_token_credentials*)creds;
grpc_credentials_mdelem_array_add(md_array, c->access_token_md);
return true;
}
static void access_token_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *c,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
@@ -509,11 +509,10 @@ static grpc_call_credentials_vtable access_token_vtable = {
access_token_destruct, access_token_get_request_metadata,
access_token_cancel_get_request_metadata};
-grpc_call_credentials *grpc_access_token_credentials_create(
- const char *access_token, void *reserved) {
- grpc_access_token_credentials *c =
- (grpc_access_token_credentials *)gpr_zalloc(
- sizeof(grpc_access_token_credentials));
+grpc_call_credentials* grpc_access_token_credentials_create(
+ const char* access_token, void* reserved) {
+ grpc_access_token_credentials* c = (grpc_access_token_credentials*)gpr_zalloc(
+ sizeof(grpc_access_token_credentials));
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=<redacted>, "
"reserved=%p)",
@@ -522,7 +521,7 @@ grpc_call_credentials *grpc_access_token_credentials_create(
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &access_token_vtable;
gpr_ref_init(&c->base.refcount, 1);
- char *token_md_value;
+ char* token_md_value;
gpr_asprintf(&token_md_value, "Bearer %s", access_token);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
c->access_token_md = grpc_mdelem_from_slices(
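The oauth2 token-fetcher hunks above only rewrap pointer declarations; the logic they touch is a mutex-guarded token cache plus a singly linked list of pending get-request-metadata calls, with cancellation walking the list for a matching md_array. A minimal sketch of that list discipline, using hypothetical stand-in types rather than the gRPC API:

#include <stdlib.h>

/* Sketch only: "pending" stands in for grpc_oauth2_pending_get_request_metadata
 * and "key" for the md_array pointer that identifies a request. */
typedef struct pending {
  const void* key;
  struct pending* next;
} pending;

/* New requests are pushed at the head of the pending list. */
static pending* push(pending* head, const void* key) {
  pending* p = (pending*)malloc(sizeof(*p));
  p->key = key;
  p->next = head;
  return p;
}

/* Cancellation keeps a trailing prev pointer and unlinks the first node whose
 * key matches, mirroring the cancel path shown in the hunks above. */
static pending* cancel(pending* head, const void* key) {
  pending* prev = NULL;
  pending* cur = head;
  while (cur != NULL) {
    if (cur->key == key) {
      if (prev == NULL) {
        head = cur->next;
      } else {
        prev->next = cur->next;
      }
      free(cur);
      break;
    }
    prev = cur;
    cur = cur->next;
  }
  return head;
}

int main(void) {
  int a, b;
  pending* head = push(push(NULL, &a), &b);
  head = cancel(head, &a); /* removes the tail node keyed by &a */
  head = cancel(head, &b); /* list is now empty */
  return head == NULL ? 0 : 1;
}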
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
index c12db896f3..32d3ff760d 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
@@ -28,46 +28,46 @@ extern "C" {
// auth_refresh_token parsing.
typedef struct {
- const char *type;
- char *client_id;
- char *client_secret;
- char *refresh_token;
+ const char* type;
+ char* client_id;
+ char* client_secret;
+ char* refresh_token;
} grpc_auth_refresh_token;
/// Returns 1 if the object is valid, 0 otherwise.
int grpc_auth_refresh_token_is_valid(
- const grpc_auth_refresh_token *refresh_token);
+ const grpc_auth_refresh_token* refresh_token);
/// Creates a refresh token object from string. Returns an invalid object if a
/// parsing error has been encountered.
grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string(
- const char *json_string);
+ const char* json_string);
/// Creates a refresh token object from parsed json. Returns an invalid object
/// if a parsing error has been encountered.
grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json(
- const grpc_json *json);
+ const grpc_json* json);
/// Destructs the object.
-void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token);
+void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token* refresh_token);
// -- Oauth2 Token Fetcher credentials --
//
// This object is a base for credentials that need to acquire an oauth2 token
// from an http service.
-typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx,
- grpc_credentials_metadata_request *req,
- grpc_httpcli_context *http_context,
- grpc_polling_entity *pollent,
+typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx* exec_ctx,
+ grpc_credentials_metadata_request* req,
+ grpc_httpcli_context* http_context,
+ grpc_polling_entity* pollent,
grpc_iomgr_cb_func cb,
grpc_millis deadline);
typedef struct grpc_oauth2_pending_get_request_metadata {
- grpc_credentials_mdelem_array *md_array;
- grpc_closure *on_request_metadata;
- grpc_polling_entity *pollent;
- struct grpc_oauth2_pending_get_request_metadata *next;
+ grpc_credentials_mdelem_array* md_array;
+ grpc_closure* on_request_metadata;
+ grpc_polling_entity* pollent;
+ struct grpc_oauth2_pending_get_request_metadata* next;
} grpc_oauth2_pending_get_request_metadata;
typedef struct {
@@ -76,7 +76,7 @@ typedef struct {
grpc_mdelem access_token_md;
grpc_millis token_expiration;
bool token_fetch_pending;
- grpc_oauth2_pending_get_request_metadata *pending_requests;
+ grpc_oauth2_pending_get_request_metadata* pending_requests;
grpc_httpcli_context httpcli_context;
grpc_fetch_oauth2_func fetch_func;
grpc_polling_entity pollent;
@@ -96,15 +96,15 @@ typedef struct {
// Private constructor for refresh token credentials from an already parsed
// refresh token. Takes ownership of the refresh token.
-grpc_call_credentials *
+grpc_call_credentials*
grpc_refresh_token_credentials_create_from_auth_refresh_token(
grpc_auth_refresh_token token);
// Exposed for testing only.
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
- grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response,
- grpc_mdelem *token_md, grpc_millis *token_lifetime);
+ grpc_exec_ctx* exec_ctx, const struct grpc_http_response* response,
+ grpc_mdelem* token_md, grpc_millis* token_lifetime);
#ifdef __cplusplus
}
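The header keeps the same shape: a base token-fetcher object stores a grpc_fetch_oauth2_func and a token_fetch_pending flag, so each concrete credential type only supplies its own fetch routine. A small illustrative sketch of that callback dispatch, with hypothetical types rather than the gRPC API:

#include <stdio.h>

/* Sketch only: "fetch_fn" stands in for grpc_fetch_oauth2_func and the flag
 * for token_fetch_pending; none of this is the real gRPC API. */
typedef void (*fetch_fn)(const char* reason);

typedef struct {
  fetch_fn fetch;
  int token_fetch_pending;
} token_fetcher;

static void compute_engine_fetch(const char* reason) {
  printf("GET metadata-server token (%s)\n", reason);
}

static void refresh_token_fetch(const char* reason) {
  printf("POST refresh token (%s)\n", reason);
}

/* Start a fetch only if one is not already in flight. */
static void maybe_fetch(token_fetcher* f, const char* reason) {
  if (!f->token_fetch_pending) {
    f->token_fetch_pending = 1;
    f->fetch(reason);
  }
}

int main(void) {
  token_fetcher gce = {compute_engine_fetch, 0};
  token_fetcher rt = {refresh_token_fetch, 0};
  maybe_fetch(&gce, "cache miss");
  maybe_fetch(&rt, "cache miss");
  maybe_fetch(&gce, "second caller"); /* suppressed: a fetch is pending */
  return 0;
}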
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.cc b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
index 8106a730fe..e75b00c01a 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.cc
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
@@ -34,9 +34,9 @@
grpc_tracer_flag grpc_plugin_credentials_trace =
GRPC_TRACER_INITIALIZER(false, "plugin_credentials");
-static void plugin_destruct(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds) {
- grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+static void plugin_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds) {
+ grpc_plugin_credentials* c = (grpc_plugin_credentials*)creds;
gpr_mu_destroy(&c->mu);
if (c->plugin.state != NULL && c->plugin.destroy != NULL) {
c->plugin.destroy(c->plugin.state);
@@ -44,8 +44,8 @@ static void plugin_destruct(grpc_exec_ctx *exec_ctx,
}
static void pending_request_remove_locked(
- grpc_plugin_credentials *c,
- grpc_plugin_credentials_pending_request *pending_request) {
+ grpc_plugin_credentials* c,
+ grpc_plugin_credentials_pending_request* pending_request) {
if (pending_request->prev == NULL) {
c->pending_requests = pending_request->next;
} else {
@@ -62,7 +62,7 @@ static void pending_request_remove_locked(
// When this returns, r->cancelled indicates whether the request was
// cancelled before completion.
static void pending_request_complete(
- grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) {
+ grpc_exec_ctx* exec_ctx, grpc_plugin_credentials_pending_request* r) {
gpr_mu_lock(&r->creds->mu);
if (!r->cancelled) pending_request_remove_locked(r->creds, r);
gpr_mu_unlock(&r->creds->mu);
@@ -70,13 +70,13 @@ static void pending_request_complete(
grpc_call_credentials_unref(exec_ctx, &r->creds->base);
}
-static grpc_error *process_plugin_result(
- grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r,
- const grpc_metadata *md, size_t num_md, grpc_status_code status,
- const char *error_details) {
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* process_plugin_result(
+ grpc_exec_ctx* exec_ctx, grpc_plugin_credentials_pending_request* r,
+ const grpc_metadata* md, size_t num_md, grpc_status_code status,
+ const char* error_details) {
+ grpc_error* error = GRPC_ERROR_NONE;
if (status != GRPC_STATUS_OK) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s",
error_details);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
@@ -112,17 +112,17 @@ static grpc_error *process_plugin_result(
return error;
}
-static void plugin_md_request_metadata_ready(void *request,
- const grpc_metadata *md,
+static void plugin_md_request_metadata_ready(void* request,
+ const grpc_metadata* md,
size_t num_md,
grpc_status_code status,
- const char *error_details) {
+ const char* error_details) {
/* called from application code */
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(
GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP,
NULL, NULL);
- grpc_plugin_credentials_pending_request *r =
- (grpc_plugin_credentials_pending_request *)request;
+ grpc_plugin_credentials_pending_request* r =
+ (grpc_plugin_credentials_pending_request*)request;
if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
@@ -133,7 +133,7 @@ static void plugin_md_request_metadata_ready(void *request,
pending_request_complete(&exec_ctx, r);
// If it has not been cancelled, process it.
if (!r->cancelled) {
- grpc_error *error =
+ grpc_error* error =
process_plugin_result(&exec_ctx, r, md, num_md, status, error_details);
GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error);
} else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) {
@@ -146,19 +146,19 @@ static void plugin_md_request_metadata_ready(void *request,
grpc_exec_ctx_finish(&exec_ctx);
}
-static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_credentials *creds,
- grpc_polling_entity *pollent,
+static bool plugin_get_request_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_credentials* creds,
+ grpc_polling_entity* pollent,
grpc_auth_metadata_context context,
- grpc_credentials_mdelem_array *md_array,
- grpc_closure *on_request_metadata,
- grpc_error **error) {
- grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+ grpc_credentials_mdelem_array* md_array,
+ grpc_closure* on_request_metadata,
+ grpc_error** error) {
+ grpc_plugin_credentials* c = (grpc_plugin_credentials*)creds;
bool retval = true; // Synchronous return.
if (c->plugin.get_metadata != NULL) {
// Create pending_request object.
- grpc_plugin_credentials_pending_request *pending_request =
- (grpc_plugin_credentials_pending_request *)gpr_zalloc(
+ grpc_plugin_credentials_pending_request* pending_request =
+ (grpc_plugin_credentials_pending_request*)gpr_zalloc(
sizeof(*pending_request));
pending_request->creds = c;
pending_request->md_array = md_array;
@@ -180,7 +180,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0;
grpc_status_code status = GRPC_STATUS_OK;
- const char *error_details = NULL;
+ const char* error_details = NULL;
if (!c->plugin.get_metadata(c->plugin.state, context,
plugin_md_request_metadata_ready,
pending_request, creds_md, &num_creds_md,
@@ -222,18 +222,18 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx,
grpc_slice_unref_internal(exec_ctx, creds_md[i].key);
grpc_slice_unref_internal(exec_ctx, creds_md[i].value);
}
- gpr_free((void *)error_details);
+ gpr_free((void*)error_details);
gpr_free(pending_request);
}
return retval;
}
static void plugin_cancel_get_request_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds,
- grpc_credentials_mdelem_array *md_array, grpc_error *error) {
- grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
+ grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+ grpc_plugin_credentials* c = (grpc_plugin_credentials*)creds;
gpr_mu_lock(&c->mu);
- for (grpc_plugin_credentials_pending_request *pending_request =
+ for (grpc_plugin_credentials_pending_request* pending_request =
c->pending_requests;
pending_request != NULL; pending_request = pending_request->next) {
if (pending_request->md_array == md_array) {
@@ -256,10 +256,9 @@ static grpc_call_credentials_vtable plugin_vtable = {
plugin_destruct, plugin_get_request_metadata,
plugin_cancel_get_request_metadata};
-grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
- grpc_metadata_credentials_plugin plugin, void *reserved) {
- grpc_plugin_credentials *c =
- (grpc_plugin_credentials *)gpr_zalloc(sizeof(*c));
+grpc_call_credentials* grpc_metadata_credentials_create_from_plugin(
+ grpc_metadata_credentials_plugin plugin, void* reserved) {
+ grpc_plugin_credentials* c = (grpc_plugin_credentials*)gpr_zalloc(sizeof(*c));
GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);
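plugin_credentials.cc tracks outstanding plugin calls on a doubly linked list so pending_request_remove_locked can unlink in O(1) whether or not the node is at the head. A minimal sketch of that unlink step, with stand-in types rather than the gRPC structures:

#include <stddef.h>

/* Sketch only: "node" stands in for grpc_plugin_credentials_pending_request;
 * the unlink mirrors pending_request_remove_locked above. */
typedef struct node {
  struct node* prev;
  struct node* next;
} node;

typedef struct {
  node* head;
} request_list;

static void remove_node(request_list* l, node* n) {
  if (n->prev == NULL) {
    l->head = n->next;        /* unlinking the current head */
  } else {
    n->prev->next = n->next;
  }
  if (n->next != NULL) {
    n->next->prev = n->prev;
  }
}

int main(void) {
  node a = {NULL, NULL};
  node b = {NULL, NULL};
  request_list l = {&a};
  a.next = &b;
  b.prev = &a;
  remove_node(&l, &a); /* head becomes &b */
  remove_node(&l, &b); /* list is now empty */
  return l.head == NULL ? 0 : 1;
}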
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.h b/src/core/lib/security/credentials/plugin/plugin_credentials.h
index f56df9eac5..fc0955c695 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.h
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.h
@@ -27,18 +27,18 @@ struct grpc_plugin_credentials;
typedef struct grpc_plugin_credentials_pending_request {
bool cancelled;
- struct grpc_plugin_credentials *creds;
- grpc_credentials_mdelem_array *md_array;
- grpc_closure *on_request_metadata;
- struct grpc_plugin_credentials_pending_request *prev;
- struct grpc_plugin_credentials_pending_request *next;
+ struct grpc_plugin_credentials* creds;
+ grpc_credentials_mdelem_array* md_array;
+ grpc_closure* on_request_metadata;
+ struct grpc_plugin_credentials_pending_request* prev;
+ struct grpc_plugin_credentials_pending_request* next;
} grpc_plugin_credentials_pending_request;
typedef struct grpc_plugin_credentials {
grpc_call_credentials base;
grpc_metadata_credentials_plugin plugin;
gpr_mu mu;
- grpc_plugin_credentials_pending_request *pending_requests;
+ grpc_plugin_credentials_pending_request* pending_requests;
} grpc_plugin_credentials;
#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H */
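As everywhere else in this patch, the header changes are purely clang-format 5.0's pointer alignment: the asterisk now binds to the type instead of the declarator. A compilable side-by-side of the two layouts (plain C, not taken from the tree):

#include <stdio.h>

/* Pre-5.0 layout: the '*' hugs the declarator. */
static const char *greeting_old(void) { return "hello"; }

/* clang-format 5.0 layout, used throughout this patch: the '*' hugs the type. */
static const char* greeting_new(void) { return "hello, reformatted"; }

int main(void) {
  printf("%s / %s\n", greeting_old(), greeting_new());
  return 0;
}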
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.cc b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
index 2085e2b8e7..79e223ddcb 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.cc
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
@@ -31,33 +31,33 @@
// SSL Channel Credentials.
//
-void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair *kp,
+void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp,
size_t num_key_cert_pairs) {
if (kp == NULL) return;
for (size_t i = 0; i < num_key_cert_pairs; i++) {
- gpr_free((void *)kp[i].private_key);
- gpr_free((void *)kp[i].cert_chain);
+ gpr_free((void*)kp[i].private_key);
+ gpr_free((void*)kp[i].cert_chain);
}
gpr_free(kp);
}
-static void ssl_destruct(grpc_exec_ctx *exec_ctx,
- grpc_channel_credentials *creds) {
- grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
+static void ssl_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_channel_credentials* creds) {
+ grpc_ssl_credentials* c = (grpc_ssl_credentials*)creds;
gpr_free(c->config.pem_root_certs);
grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pair, 1);
}
static grpc_security_status ssl_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds,
- grpc_call_credentials *call_creds, const char *target,
- const grpc_channel_args *args, grpc_channel_security_connector **sc,
- grpc_channel_args **new_args) {
- grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds;
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
+ grpc_call_credentials* call_creds, const char* target,
+ const grpc_channel_args* args, grpc_channel_security_connector** sc,
+ grpc_channel_args** new_args) {
+ grpc_ssl_credentials* c = (grpc_ssl_credentials*)creds;
grpc_security_status status = GRPC_SECURITY_OK;
- const char *overridden_target_name = NULL;
+ const char* overridden_target_name = NULL;
for (size_t i = 0; args && i < args->num_args; i++) {
- grpc_arg *arg = &args->args[i];
+ grpc_arg* arg = &args->args[i];
if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 &&
arg->type == GRPC_ARG_STRING) {
overridden_target_name = arg->value.string;
@@ -71,7 +71,7 @@ static grpc_security_status ssl_create_security_connector(
return status;
}
grpc_arg new_arg = grpc_channel_arg_string_create(
- (char *)GRPC_ARG_HTTP2_SCHEME, (char *)"https");
+ (char*)GRPC_ARG_HTTP2_SCHEME, (char*)"https");
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return status;
}
@@ -79,16 +79,16 @@ static grpc_security_status ssl_create_security_connector(
static grpc_channel_credentials_vtable ssl_vtable = {
ssl_destruct, ssl_create_security_connector, NULL};
-static void ssl_build_config(const char *pem_root_certs,
- grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
- grpc_ssl_config *config) {
+static void ssl_build_config(const char* pem_root_certs,
+ grpc_ssl_pem_key_cert_pair* pem_key_cert_pair,
+ grpc_ssl_config* config) {
if (pem_root_certs != NULL) {
config->pem_root_certs = gpr_strdup(pem_root_certs);
}
if (pem_key_cert_pair != NULL) {
GPR_ASSERT(pem_key_cert_pair->private_key != NULL);
GPR_ASSERT(pem_key_cert_pair->cert_chain != NULL);
- config->pem_key_cert_pair = (tsi_ssl_pem_key_cert_pair *)gpr_zalloc(
+ config->pem_key_cert_pair = (tsi_ssl_pem_key_cert_pair*)gpr_zalloc(
sizeof(tsi_ssl_pem_key_cert_pair));
config->pem_key_cert_pair->cert_chain =
gpr_strdup(pem_key_cert_pair->cert_chain);
@@ -97,11 +97,11 @@ static void ssl_build_config(const char *pem_root_certs,
}
}
-grpc_channel_credentials *grpc_ssl_credentials_create(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
- void *reserved) {
- grpc_ssl_credentials *c =
- (grpc_ssl_credentials *)gpr_zalloc(sizeof(grpc_ssl_credentials));
+grpc_channel_credentials* grpc_ssl_credentials_create(
+ const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair,
+ void* reserved) {
+ grpc_ssl_credentials* c =
+ (grpc_ssl_credentials*)gpr_zalloc(sizeof(grpc_ssl_credentials));
GRPC_API_TRACE(
"grpc_ssl_credentials_create(pem_root_certs=%s, "
"pem_key_cert_pair=%p, "
@@ -121,34 +121,34 @@ grpc_channel_credentials *grpc_ssl_credentials_create(
struct grpc_ssl_server_credentials_options {
grpc_ssl_client_certificate_request_type client_certificate_request;
- grpc_ssl_server_certificate_config *certificate_config;
- grpc_ssl_server_certificate_config_fetcher *certificate_config_fetcher;
+ grpc_ssl_server_certificate_config* certificate_config;
+ grpc_ssl_server_certificate_config_fetcher* certificate_config_fetcher;
};
-static void ssl_server_destruct(grpc_exec_ctx *exec_ctx,
- grpc_server_credentials *creds) {
- grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds;
+static void ssl_server_destruct(grpc_exec_ctx* exec_ctx,
+ grpc_server_credentials* creds) {
+ grpc_ssl_server_credentials* c = (grpc_ssl_server_credentials*)creds;
grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pairs,
c->config.num_key_cert_pairs);
gpr_free(c->config.pem_root_certs);
}
static grpc_security_status ssl_server_create_security_connector(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds,
- grpc_server_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
+ grpc_server_security_connector** sc) {
return grpc_ssl_server_security_connector_create(exec_ctx, creds, sc);
}
static grpc_server_credentials_vtable ssl_server_vtable = {
ssl_server_destruct, ssl_server_create_security_connector};
-tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(
- const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+tsi_ssl_pem_key_cert_pair* grpc_convert_grpc_to_tsi_cert_pairs(
+ const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs) {
- tsi_ssl_pem_key_cert_pair *tsi_pairs = NULL;
+ tsi_ssl_pem_key_cert_pair* tsi_pairs = NULL;
if (num_key_cert_pairs > 0) {
GPR_ASSERT(pem_key_cert_pairs != NULL);
- tsi_pairs = (tsi_ssl_pem_key_cert_pair *)gpr_zalloc(
+ tsi_pairs = (tsi_ssl_pem_key_cert_pair*)gpr_zalloc(
num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair));
}
for (size_t i = 0; i < num_key_cert_pairs; i++) {
@@ -161,10 +161,10 @@ tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(
}
static void ssl_build_server_config(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs,
grpc_ssl_client_certificate_request_type client_certificate_request,
- grpc_ssl_server_config *config) {
+ grpc_ssl_server_config* config) {
config->client_certificate_request = client_certificate_request;
if (pem_root_certs != NULL) {
config->pem_root_certs = gpr_strdup(pem_root_certs);
@@ -174,19 +174,19 @@ static void ssl_build_server_config(
config->num_key_cert_pairs = num_key_cert_pairs;
}
-grpc_ssl_server_certificate_config *grpc_ssl_server_certificate_config_create(
- const char *pem_root_certs,
- const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+grpc_ssl_server_certificate_config* grpc_ssl_server_certificate_config_create(
+ const char* pem_root_certs,
+ const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs) {
- grpc_ssl_server_certificate_config *config =
- (grpc_ssl_server_certificate_config *)gpr_zalloc(
+ grpc_ssl_server_certificate_config* config =
+ (grpc_ssl_server_certificate_config*)gpr_zalloc(
sizeof(grpc_ssl_server_certificate_config));
if (pem_root_certs != NULL) {
config->pem_root_certs = gpr_strdup(pem_root_certs);
}
if (num_key_cert_pairs > 0) {
GPR_ASSERT(pem_key_cert_pairs != NULL);
- config->pem_key_cert_pairs = (grpc_ssl_pem_key_cert_pair *)gpr_zalloc(
+ config->pem_key_cert_pairs = (grpc_ssl_pem_key_cert_pair*)gpr_zalloc(
num_key_cert_pairs * sizeof(grpc_ssl_pem_key_cert_pair));
}
config->num_key_cert_pairs = num_key_cert_pairs;
@@ -202,27 +202,27 @@ grpc_ssl_server_certificate_config *grpc_ssl_server_certificate_config_create(
}
void grpc_ssl_server_certificate_config_destroy(
- grpc_ssl_server_certificate_config *config) {
+ grpc_ssl_server_certificate_config* config) {
if (config == NULL) return;
for (size_t i = 0; i < config->num_key_cert_pairs; i++) {
- gpr_free((void *)config->pem_key_cert_pairs[i].private_key);
- gpr_free((void *)config->pem_key_cert_pairs[i].cert_chain);
+ gpr_free((void*)config->pem_key_cert_pairs[i].private_key);
+ gpr_free((void*)config->pem_key_cert_pairs[i].cert_chain);
}
gpr_free(config->pem_key_cert_pairs);
gpr_free(config->pem_root_certs);
gpr_free(config);
}
-grpc_ssl_server_credentials_options *
+grpc_ssl_server_credentials_options*
grpc_ssl_server_credentials_create_options_using_config(
grpc_ssl_client_certificate_request_type client_certificate_request,
- grpc_ssl_server_certificate_config *config) {
- grpc_ssl_server_credentials_options *options = NULL;
+ grpc_ssl_server_certificate_config* config) {
+ grpc_ssl_server_credentials_options* options = NULL;
if (config == NULL) {
gpr_log(GPR_ERROR, "Certificate config must not be NULL.");
goto done;
}
- options = (grpc_ssl_server_credentials_options *)gpr_zalloc(
+ options = (grpc_ssl_server_credentials_options*)gpr_zalloc(
sizeof(grpc_ssl_server_credentials_options));
options->client_certificate_request = client_certificate_request;
options->certificate_config = config;
@@ -230,23 +230,23 @@ done:
return options;
}
-grpc_ssl_server_credentials_options *
+grpc_ssl_server_credentials_options*
grpc_ssl_server_credentials_create_options_using_config_fetcher(
grpc_ssl_client_certificate_request_type client_certificate_request,
- grpc_ssl_server_certificate_config_callback cb, void *user_data) {
+ grpc_ssl_server_certificate_config_callback cb, void* user_data) {
if (cb == NULL) {
gpr_log(GPR_ERROR, "Invalid certificate config callback parameter.");
return NULL;
}
- grpc_ssl_server_certificate_config_fetcher *fetcher =
- (grpc_ssl_server_certificate_config_fetcher *)gpr_zalloc(
+ grpc_ssl_server_certificate_config_fetcher* fetcher =
+ (grpc_ssl_server_certificate_config_fetcher*)gpr_zalloc(
sizeof(grpc_ssl_server_certificate_config_fetcher));
fetcher->cb = cb;
fetcher->user_data = user_data;
- grpc_ssl_server_credentials_options *options =
- (grpc_ssl_server_credentials_options *)gpr_zalloc(
+ grpc_ssl_server_credentials_options* options =
+ (grpc_ssl_server_credentials_options*)gpr_zalloc(
sizeof(grpc_ssl_server_credentials_options));
options->client_certificate_request = client_certificate_request;
options->certificate_config_fetcher = fetcher;
@@ -254,9 +254,9 @@ grpc_ssl_server_credentials_create_options_using_config_fetcher(
return options;
}
-grpc_server_credentials *grpc_ssl_server_credentials_create(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, int force_client_auth, void *reserved) {
+grpc_server_credentials* grpc_ssl_server_credentials_create(
+ const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
+ size_t num_key_cert_pairs, int force_client_auth, void* reserved) {
return grpc_ssl_server_credentials_create_ex(
pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs,
force_client_auth
@@ -265,33 +265,34 @@ grpc_server_credentials *grpc_ssl_server_credentials_create(
reserved);
}
-grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
- const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+grpc_server_credentials* grpc_ssl_server_credentials_create_ex(
+ const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs,
grpc_ssl_client_certificate_request_type client_certificate_request,
- void *reserved) {
+ void* reserved) {
GRPC_API_TRACE(
"grpc_ssl_server_credentials_create_ex("
"pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
"client_certificate_request=%d, reserved=%p)",
- 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
- client_certificate_request, reserved));
+ 5,
+ (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
+ client_certificate_request, reserved));
GPR_ASSERT(reserved == NULL);
- grpc_ssl_server_certificate_config *cert_config =
+ grpc_ssl_server_certificate_config* cert_config =
grpc_ssl_server_certificate_config_create(
pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs);
- grpc_ssl_server_credentials_options *options =
+ grpc_ssl_server_credentials_options* options =
grpc_ssl_server_credentials_create_options_using_config(
client_certificate_request, cert_config);
return grpc_ssl_server_credentials_create_with_options(options);
}
-grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
- grpc_ssl_server_credentials_options *options) {
- grpc_server_credentials *retval = NULL;
- grpc_ssl_server_credentials *c = NULL;
+grpc_server_credentials* grpc_ssl_server_credentials_create_with_options(
+ grpc_ssl_server_credentials_options* options) {
+ grpc_server_credentials* retval = NULL;
+ grpc_ssl_server_credentials* c = NULL;
if (options == NULL) {
gpr_log(GPR_ERROR,
@@ -311,7 +312,7 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
goto done;
}
- c = (grpc_ssl_server_credentials *)gpr_zalloc(
+ c = (grpc_ssl_server_credentials*)gpr_zalloc(
sizeof(grpc_ssl_server_credentials));
c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL;
gpr_ref_init(&c->base.refcount, 1);
@@ -335,7 +336,7 @@ done:
}
void grpc_ssl_server_credentials_options_destroy(
- grpc_ssl_server_credentials_options *o) {
+ grpc_ssl_server_credentials_options* o) {
if (o == NULL) return;
gpr_free(o->certificate_config_fetcher);
grpc_ssl_server_certificate_config_destroy(o->certificate_config);
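The SSL credentials code repeatedly converts an array of PEM key/cert pairs into a zero-initialized destination array, duplicating each string, and later frees field by field (grpc_convert_grpc_to_tsi_cert_pairs and grpc_tsi_ssl_pem_key_cert_pairs_destroy above). A self-contained sketch of that allocate/duplicate/destroy pattern, with hypothetical pair types rather than the gRPC/TSI structs:

#include <stdlib.h>
#include <string.h>

/* Sketch only: src_pair/dst_pair stand in for grpc_ssl_pem_key_cert_pair and
 * tsi_ssl_pem_key_cert_pair; this is not the gRPC/TSI API. */
typedef struct {
  const char* private_key;
  const char* cert_chain;
} src_pair;

typedef struct {
  char* private_key;
  char* cert_chain;
} dst_pair;

/* Allocate a zeroed destination array and duplicate each PEM string. */
static dst_pair* convert_pairs(const src_pair* src, size_t n) {
  if (n == 0) return NULL;
  dst_pair* dst = (dst_pair*)calloc(n, sizeof(dst_pair));
  for (size_t i = 0; i < n; i++) {
    dst[i].private_key = strdup(src[i].private_key);
    dst[i].cert_chain = strdup(src[i].cert_chain);
  }
  return dst;
}

/* Free field by field, then the array itself. */
static void destroy_pairs(dst_pair* dst, size_t n) {
  if (dst == NULL) return;
  for (size_t i = 0; i < n; i++) {
    free(dst[i].private_key);
    free(dst[i].cert_chain);
  }
  free(dst);
}

int main(void) {
  src_pair src[1] = {{"-----BEGIN PRIVATE KEY-----",
                      "-----BEGIN CERTIFICATE-----"}};
  dst_pair* dst = convert_pairs(src, 1);
  destroy_pairs(dst, 1);
  return 0;
}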
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.h b/src/core/lib/security/credentials/ssl/ssl_credentials.h
index 5542484aae..82b9ce11f6 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.h
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.h
@@ -30,14 +30,14 @@ typedef struct {
} grpc_ssl_credentials;
struct grpc_ssl_server_certificate_config {
- grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs;
+ grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs;
size_t num_key_cert_pairs;
- char *pem_root_certs;
+ char* pem_root_certs;
};
typedef struct {
grpc_ssl_server_certificate_config_callback cb;
- void *user_data;
+ void* user_data;
} grpc_ssl_server_certificate_config_fetcher;
typedef struct {
@@ -46,11 +46,11 @@ typedef struct {
grpc_ssl_server_certificate_config_fetcher certificate_config_fetcher;
} grpc_ssl_server_credentials;
-tsi_ssl_pem_key_cert_pair *grpc_convert_grpc_to_tsi_cert_pairs(
- const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+tsi_ssl_pem_key_cert_pair* grpc_convert_grpc_to_tsi_cert_pairs(
+ const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
size_t num_key_cert_pairs);
-void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair *kp,
+void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp,
size_t num_key_cert_pairs);
#ifdef __cplusplus
diff --git a/src/core/lib/security/transport/auth_filters.h b/src/core/lib/security/transport/auth_filters.h
index b49bd554de..6376929890 100644
--- a/src/core/lib/security/transport/auth_filters.h
+++ b/src/core/lib/security/transport/auth_filters.h
@@ -30,11 +30,11 @@ extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
void grpc_auth_metadata_context_build(
- const char *url_scheme, grpc_slice call_host, grpc_slice call_method,
- grpc_auth_context *auth_context,
- grpc_auth_metadata_context *auth_md_context);
+ const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
+ grpc_auth_context* auth_context,
+ grpc_auth_metadata_context* auth_md_context);
-void grpc_auth_metadata_context_reset(grpc_auth_metadata_context *context);
+void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context);
#ifdef __cplusplus
}
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index 0bbfa471d2..8f7530ed27 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -39,9 +39,9 @@
/* We can have a per-call credentials. */
typedef struct {
- grpc_call_stack *owning_call;
- grpc_call_combiner *call_combiner;
- grpc_call_credentials *creds;
+ grpc_call_stack* owning_call;
+ grpc_call_combiner* call_combiner;
+ grpc_call_credentials* creds;
bool have_host;
bool have_method;
grpc_slice host;
@@ -50,7 +50,7 @@ typedef struct {
network requests, they should be done under a pollset added to this
pollset_set so that work can progress when this call wants work to progress
*/
- grpc_polling_entity *pollent;
+ grpc_polling_entity* pollent;
grpc_credentials_mdelem_array md_array;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
grpc_auth_metadata_context auth_md_context;
@@ -61,27 +61,27 @@ typedef struct {
/* We can have a per-channel credentials. */
typedef struct {
- grpc_channel_security_connector *security_connector;
- grpc_auth_context *auth_context;
+ grpc_channel_security_connector* security_connector;
+ grpc_auth_context* auth_context;
} channel_data;
void grpc_auth_metadata_context_reset(
- grpc_auth_metadata_context *auth_md_context) {
+ grpc_auth_metadata_context* auth_md_context) {
if (auth_md_context->service_url != NULL) {
- gpr_free((char *)auth_md_context->service_url);
+ gpr_free((char*)auth_md_context->service_url);
auth_md_context->service_url = NULL;
}
if (auth_md_context->method_name != NULL) {
- gpr_free((char *)auth_md_context->method_name);
+ gpr_free((char*)auth_md_context->method_name);
auth_md_context->method_name = NULL;
}
GRPC_AUTH_CONTEXT_UNREF(
- (grpc_auth_context *)auth_md_context->channel_auth_context,
+ (grpc_auth_context*)auth_md_context->channel_auth_context,
"grpc_auth_metadata_context");
auth_md_context->channel_auth_context = NULL;
}
-static void add_error(grpc_error **combined, grpc_error *error) {
+static void add_error(grpc_error** combined, grpc_error* error) {
if (error == GRPC_ERROR_NONE) return;
if (*combined == GRPC_ERROR_NONE) {
*combined = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -90,18 +90,18 @@ static void add_error(grpc_error **combined, grpc_error *error) {
*combined = grpc_error_add_child(*combined, error);
}
-static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *input_error) {
- grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
- grpc_call_element *elem =
- (grpc_call_element *)batch->handler_private.extra_arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_credentials_metadata(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* input_error) {
+ grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
+ grpc_call_element* elem =
+ (grpc_call_element*)batch->handler_private.extra_arg;
+ call_data* calld = (call_data*)elem->call_data;
grpc_auth_metadata_context_reset(&calld->auth_md_context);
- grpc_error *error = GRPC_ERROR_REF(input_error);
+ grpc_error* error = GRPC_ERROR_REF(input_error);
if (error == GRPC_ERROR_NONE) {
GPR_ASSERT(calld->md_array.size <= MAX_CREDENTIALS_METADATA_COUNT);
GPR_ASSERT(batch->send_initial_metadata);
- grpc_metadata_batch *mdb =
+ grpc_metadata_batch* mdb =
batch->payload->send_initial_metadata.send_initial_metadata;
for (size_t i = 0; i < calld->md_array.size; ++i) {
add_error(&error, grpc_metadata_batch_add_tail(
@@ -120,13 +120,13 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
}
void grpc_auth_metadata_context_build(
- const char *url_scheme, grpc_slice call_host, grpc_slice call_method,
- grpc_auth_context *auth_context,
- grpc_auth_metadata_context *auth_md_context) {
- char *service = grpc_slice_to_c_string(call_method);
- char *last_slash = strrchr(service, '/');
- char *method_name = NULL;
- char *service_url = NULL;
+ const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
+ grpc_auth_context* auth_context,
+ grpc_auth_metadata_context* auth_md_context) {
+ char* service = grpc_slice_to_c_string(call_method);
+ char* last_slash = strrchr(service, '/');
+ char* method_name = NULL;
+ char* service_url = NULL;
grpc_auth_metadata_context_reset(auth_md_context);
if (last_slash == NULL) {
gpr_log(GPR_ERROR, "No '/' found in fully qualified method name");
@@ -138,10 +138,10 @@ void grpc_auth_metadata_context_build(
*last_slash = '\0';
method_name = gpr_strdup(last_slash + 1);
}
- char *host_and_port = grpc_slice_to_c_string(call_host);
+ char* host_and_port = grpc_slice_to_c_string(call_host);
if (strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
/* Remove the port if it is 443. */
- char *port_delimiter = strrchr(host_and_port, ':');
+ char* port_delimiter = strrchr(host_and_port, ':');
if (port_delimiter != NULL && strcmp(port_delimiter + 1, "443") == 0) {
*port_delimiter = '\0';
}
@@ -156,10 +156,10 @@ void grpc_auth_metadata_context_build(
gpr_free(host_and_port);
}
-static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void cancel_get_request_metadata(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error != GRPC_ERROR_NONE) {
grpc_call_credentials_cancel_get_request_metadata(
exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
@@ -168,16 +168,16 @@ static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg,
"cancel_get_request_metadata");
}
-static void send_security_metadata(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
- grpc_client_security_context *ctx =
- (grpc_client_security_context *)batch->payload
+static void send_security_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ grpc_client_security_context* ctx =
+ (grpc_client_security_context*)batch->payload
->context[GRPC_CONTEXT_SECURITY]
.value;
- grpc_call_credentials *channel_call_creds =
+ grpc_call_credentials* channel_call_creds =
chand->security_connector->request_metadata_creds;
int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL);
@@ -213,7 +213,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata,
batch, grpc_schedule_on_exec_ctx);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (grpc_call_credentials_get_request_metadata(
exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
&calld->md_array, &calld->async_result_closure, &error)) {
@@ -231,17 +231,17 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
}
}
-static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
- grpc_call_element *elem =
- (grpc_call_element *)batch->handler_private.extra_arg;
- call_data *calld = (call_data *)elem->call_data;
+static void on_host_checked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
+ grpc_call_element* elem =
+ (grpc_call_element*)batch->handler_private.extra_arg;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
send_security_metadata(exec_ctx, elem, batch);
} else {
- char *error_msg;
- char *host = grpc_slice_to_c_string(calld->host);
+ char* error_msg;
+ char* host = grpc_slice_to_c_string(calld->host);
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
host);
gpr_free(host);
@@ -255,11 +255,11 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static void cancel_check_call_host(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (error != GRPC_ERROR_NONE) {
grpc_channel_security_connector_cancel_check_call_host(
exec_ctx, chand->security_connector, &calld->async_result_closure,
@@ -269,13 +269,13 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg,
}
static void auth_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0);
/* grab pointers to our data from the call element */
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (!batch->cancel_stream) {
GPR_ASSERT(batch->payload->context != NULL);
@@ -285,8 +285,8 @@ static void auth_start_transport_stream_op_batch(
batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
- grpc_client_security_context *sec_ctx =
- (grpc_client_security_context *)batch->payload
+ grpc_client_security_context* sec_ctx =
+ (grpc_client_security_context*)batch->payload
->context[GRPC_CONTEXT_SECURITY]
.value;
GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter");
@@ -295,7 +295,7 @@ static void auth_start_transport_stream_op_batch(
}
if (batch->send_initial_metadata) {
- for (grpc_linked_mdelem *l = batch->payload->send_initial_metadata
+ for (grpc_linked_mdelem* l = batch->payload->send_initial_metadata
.send_initial_metadata->list.head;
l != NULL; l = l->next) {
grpc_mdelem md = l->md;
@@ -319,8 +319,8 @@ static void auth_start_transport_stream_op_batch(
batch->handler_private.extra_arg = elem;
GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch,
grpc_schedule_on_exec_ctx);
- char *call_host = grpc_slice_to_c_string(calld->host);
- grpc_error *error = GRPC_ERROR_NONE;
+ char* call_host = grpc_slice_to_c_string(calld->host);
+ grpc_error* error = GRPC_ERROR_NONE;
if (grpc_channel_security_connector_check_call_host(
exec_ctx, chand->security_connector, call_host,
chand->auth_context, &calld->async_result_closure, &error)) {
@@ -348,27 +348,27 @@ static void auth_start_transport_stream_op_batch(
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
calld->owning_call = args->call_stack;
calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
-static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- grpc_polling_entity *pollent) {
- call_data *calld = (call_data *)elem->call_data;
+static void set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
+ call_data* calld = (call_data*)elem->call_data;
calld->pollent = pollent;
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ call_data* calld = (call_data*)elem->call_data;
grpc_credentials_mdelem_array_destroy(exec_ctx, &calld->md_array);
grpc_call_credentials_unref(exec_ctx, calld->creds);
if (calld->have_host) {
@@ -381,16 +381,16 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- grpc_security_connector *sc =
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ grpc_security_connector* sc =
grpc_security_connector_find_in_args(args->channel_args);
if (sc == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Security connector missing from client auth filter args");
}
- grpc_auth_context *auth_context =
+ grpc_auth_context* auth_context =
grpc_find_auth_context_in_args(args->channel_args);
if (auth_context == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -398,7 +398,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* grab pointers to our data from the channel element */
- channel_data *chand = (channel_data *)elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
@@ -407,7 +407,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* initialize members */
chand->security_connector =
- (grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
+ (grpc_channel_security_connector*)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter");
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter");
@@ -415,11 +415,11 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
/* grab pointers to our data from the channel element */
- channel_data *chand = (channel_data *)elem->channel_data;
- grpc_channel_security_connector *sc = chand->security_connector;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ grpc_channel_security_connector* sc = chand->security_connector;
if (sc != NULL) {
GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "client_auth_filter");
}
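grpc_auth_metadata_context_build, touched above, is plain C string work: split the fully qualified method name at its last '/', drop an explicit ':443' port, and assemble the https service URL. A standalone sketch of those steps; build_service_url and its output format are hypothetical helpers for illustration, not the gRPC API:

#include <stdio.h>
#include <string.h>

static void build_service_url(const char* scheme, const char* host_and_port,
                              const char* full_method, char* out, size_t n) {
  char service[128];
  char host[128];
  snprintf(service, sizeof(service), "%s", full_method);
  snprintf(host, sizeof(host), "%s", host_and_port);

  /* Keep only the ".../Service" prefix of "/pkg.Service/Method". */
  char* last_slash = strrchr(service, '/');
  if (last_slash != NULL) *last_slash = '\0';

  /* Remove the port if it is the default 443. */
  char* port = strrchr(host, ':');
  if (port != NULL && strcmp(port + 1, "443") == 0) *port = '\0';

  snprintf(out, n, "%s://%s%s", scheme, host, service);
}

int main(void) {
  char url[256];
  build_service_url("https", "example.com:443",
                    "/helloworld.Greeter/SayHello", url, sizeof(url));
  puts(url); /* prints https://example.com/helloworld.Greeter */
  return 0;
}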
diff --git a/src/core/lib/security/transport/lb_targets_info.cc b/src/core/lib/security/transport/lb_targets_info.cc
index 947fc1addf..1655e18f37 100644
--- a/src/core/lib/security/transport/lb_targets_info.cc
+++ b/src/core/lib/security/transport/lb_targets_info.cc
@@ -25,33 +25,33 @@
* secure naming purposes. */
#define GRPC_ARG_LB_SECURE_NAMING_MAP "grpc.lb_secure_naming_map"
-static void *targets_info_copy(void *p) {
- return grpc_slice_hash_table_ref((grpc_slice_hash_table *)p);
+static void* targets_info_copy(void* p) {
+ return grpc_slice_hash_table_ref((grpc_slice_hash_table*)p);
}
-static void targets_info_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- grpc_slice_hash_table_unref(exec_ctx, (grpc_slice_hash_table *)p);
+static void targets_info_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ grpc_slice_hash_table_unref(exec_ctx, (grpc_slice_hash_table*)p);
}
-static int targets_info_cmp(void *a, void *b) {
- return grpc_slice_hash_table_cmp((const grpc_slice_hash_table *)a,
- (const grpc_slice_hash_table *)b);
+static int targets_info_cmp(void* a, void* b) {
+ return grpc_slice_hash_table_cmp((const grpc_slice_hash_table*)a,
+ (const grpc_slice_hash_table*)b);
}
static const grpc_arg_pointer_vtable server_to_balancer_names_vtable = {
targets_info_copy, targets_info_destroy, targets_info_cmp};
grpc_arg grpc_lb_targets_info_create_channel_arg(
- grpc_slice_hash_table *targets_info) {
- return grpc_channel_arg_pointer_create((char *)GRPC_ARG_LB_SECURE_NAMING_MAP,
+ grpc_slice_hash_table* targets_info) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_LB_SECURE_NAMING_MAP,
targets_info,
&server_to_balancer_names_vtable);
}
-grpc_slice_hash_table *grpc_lb_targets_info_find_in_args(
- const grpc_channel_args *args) {
- const grpc_arg *targets_info_arg =
+grpc_slice_hash_table* grpc_lb_targets_info_find_in_args(
+ const grpc_channel_args* args) {
+ const grpc_arg* targets_info_arg =
grpc_channel_args_find(args, GRPC_ARG_LB_SECURE_NAMING_MAP);
if (targets_info_arg != NULL) {
GPR_ASSERT(targets_info_arg->type == GRPC_ARG_POINTER);
- return (grpc_slice_hash_table *)targets_info_arg->value.pointer.p;
+ return (grpc_slice_hash_table*)targets_info_arg->value.pointer.p;
}
return NULL;
}
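lb_targets_info.cc stores a refcounted hash table in a channel argument by pairing it with a copy/destroy/cmp vtable. A minimal sketch of that pointer-vtable pattern with a hypothetical refcounted payload (not the gRPC API):

#include <stdint.h>
#include <stdlib.h>

/* Sketch only: "table" stands in for the refcounted grpc_slice_hash_table and
 * pointer_vtable for the channel-arg pointer vtable. */
typedef struct {
  int refs;
} table;

typedef struct {
  void* (*copy)(void* p);
  void (*destroy)(void* p);
  int (*cmp)(void* a, void* b);
} pointer_vtable;

static void* table_copy(void* p) {
  ((table*)p)->refs++; /* a copy is just another reference */
  return p;
}

static void table_destroy(void* p) {
  table* t = (table*)p;
  if (--t->refs == 0) free(t);
}

static int table_cmp(void* a, void* b) {
  uintptr_t x = (uintptr_t)a, y = (uintptr_t)b;
  return (x > y) - (x < y);
}

static const pointer_vtable table_vtable = {table_copy, table_destroy,
                                            table_cmp};

int main(void) {
  table* t = (table*)malloc(sizeof(*t));
  t->refs = 1;
  void* dup = table_vtable.copy(t);          /* refs == 2 */
  int same = table_vtable.cmp(t, dup) == 0;  /* same underlying table */
  table_vtable.destroy(dup);                 /* refs == 1 */
  table_vtable.destroy(t);                   /* freed */
  return same ? 0 : 1;
}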
diff --git a/src/core/lib/security/transport/lb_targets_info.h b/src/core/lib/security/transport/lb_targets_info.h
index 43f0e64556..b4a0bc91da 100644
--- a/src/core/lib/security/transport/lb_targets_info.h
+++ b/src/core/lib/security/transport/lb_targets_info.h
@@ -27,11 +27,11 @@ extern "C" {
/** Return a channel argument containing \a targets_info. */
grpc_arg grpc_lb_targets_info_create_channel_arg(
- grpc_slice_hash_table *targets_info);
+ grpc_slice_hash_table* targets_info);
/** Return the instance of targets info in \a args or NULL */
-grpc_slice_hash_table *grpc_lb_targets_info_find_in_args(
- const grpc_channel_args *args);
+grpc_slice_hash_table* grpc_lb_targets_info_find_in_args(
+ const grpc_channel_args* args);
#ifdef __cplusplus
}
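The lookup declared here, grpc_lb_targets_info_find_in_args, amounts to finding the channel arg keyed "grpc.lb_secure_naming_map" and returning its pointer payload after a type check. A small stand-alone sketch of that find-by-key pattern, with hypothetical arg types rather than the gRPC API:

#include <stddef.h>
#include <string.h>

/* Sketch only: "arg"/"arg_list" stand in for grpc_arg/grpc_channel_args, and
 * find_pointer_arg for the lookup behind grpc_lb_targets_info_find_in_args. */
typedef struct {
  const char* key;
  void* pointer;
} arg;

typedef struct {
  size_t num_args;
  const arg* args;
} arg_list;

static void* find_pointer_arg(const arg_list* args, const char* key) {
  for (size_t i = 0; args != NULL && i < args->num_args; i++) {
    if (strcmp(args->args[i].key, key) == 0) {
      return args->args[i].pointer;
    }
  }
  return NULL;
}

int main(void) {
  int table = 42;
  const arg items[1] = {{"grpc.lb_secure_naming_map", &table}};
  const arg_list args = {1, items};
  return find_pointer_arg(&args, "grpc.lb_secure_naming_map") == &table ? 0 : 1;
}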
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index 859d04ae5a..9a29e05715 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -40,15 +40,15 @@
typedef struct {
grpc_endpoint base;
- grpc_endpoint *wrapped_ep;
- struct tsi_frame_protector *protector;
- struct tsi_zero_copy_grpc_protector *zero_copy_protector;
+ grpc_endpoint* wrapped_ep;
+ struct tsi_frame_protector* protector;
+ struct tsi_zero_copy_grpc_protector* zero_copy_protector;
gpr_mu protector_mu;
/* saved upper level callbacks and user_data. */
- grpc_closure *read_cb;
- grpc_closure *write_cb;
+ grpc_closure* read_cb;
+ grpc_closure* write_cb;
grpc_closure on_read;
- grpc_slice_buffer *read_buffer;
+ grpc_slice_buffer* read_buffer;
grpc_slice_buffer source_buffer;
/* saved handshaker leftover data to unprotect. */
grpc_slice_buffer leftover_bytes;
@@ -64,8 +64,8 @@ typedef struct {
grpc_tracer_flag grpc_trace_secure_endpoint =
GRPC_TRACER_INITIALIZER(false, "secure_endpoint");
-static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
- secure_endpoint *ep = secure_ep;
+static void destroy(grpc_exec_ctx* exec_ctx, secure_endpoint* secure_ep) {
+ secure_endpoint* ep = secure_ep;
grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
tsi_frame_protector_destroy(ep->protector);
tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector);
@@ -83,8 +83,8 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
#define SECURE_ENDPOINT_REF(ep, reason) \
secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
-static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
- const char *reason, const char *file,
+static void secure_endpoint_unref(grpc_exec_ctx* exec_ctx, secure_endpoint* ep,
+ const char* reason, const char* file,
int line) {
if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
@@ -97,8 +97,8 @@ static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
}
}
-static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
- const char *file, int line) {
+static void secure_endpoint_ref(secure_endpoint* ep, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -111,30 +111,30 @@ static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
secure_endpoint_unref((exec_ctx), (ep))
#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep))
-static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx,
- secure_endpoint *ep) {
+static void secure_endpoint_unref(grpc_exec_ctx* exec_ctx,
+ secure_endpoint* ep) {
if (gpr_unref(&ep->ref)) {
destroy(exec_ctx, ep);
}
}
-static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); }
+static void secure_endpoint_ref(secure_endpoint* ep) { gpr_ref(&ep->ref); }
#endif
-static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur,
- uint8_t **end) {
+static void flush_read_staging_buffer(secure_endpoint* ep, uint8_t** cur,
+ uint8_t** end) {
grpc_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer);
ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
*cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
*end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
}
-static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
- grpc_error *error) {
+static void call_read_cb(grpc_exec_ctx* exec_ctx, secure_endpoint* ep,
+ grpc_error* error) {
if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
- char *data = grpc_dump_slice(ep->read_buffer->slices[i],
+ char* data = grpc_dump_slice(ep->read_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
gpr_free(data);
@@ -145,19 +145,20 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
-static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_error *error) {
+static void on_read(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_error* error) {
unsigned i;
uint8_t keep_looping = 0;
tsi_result result = TSI_OK;
- secure_endpoint *ep = (secure_endpoint *)user_data;
- uint8_t *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
- uint8_t *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
+ secure_endpoint* ep = (secure_endpoint*)user_data;
+ uint8_t* cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
+ uint8_t* end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
- call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Secure read failed", &error, 1));
+ call_read_cb(exec_ctx, ep,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Secure read failed", &error, 1));
return;
}
@@ -170,7 +171,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
/* TODO(yangg) check error, maybe bail out early */
for (i = 0; i < ep->source_buffer.count; i++) {
grpc_slice encrypted = ep->source_buffer.slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
+ uint8_t* message_bytes = GRPC_SLICE_START_PTR(encrypted);
size_t message_size = GRPC_SLICE_LENGTH(encrypted);
while (message_size > 0 || keep_looping) {
@@ -231,9 +232,9 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
-static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
- grpc_slice_buffer *slices, grpc_closure *cb) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
ep->read_cb = cb;
ep->read_buffer = slices;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
@@ -250,29 +251,29 @@ static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
&ep->on_read);
}
-static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur,
- uint8_t **end) {
+static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
+ uint8_t** end) {
grpc_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer);
ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
*cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
*end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
}
-static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
- grpc_slice_buffer *slices, grpc_closure *cb) {
+static void endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
+ grpc_slice_buffer* slices, grpc_closure* cb) {
GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0);
unsigned i;
tsi_result result = TSI_OK;
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
- uint8_t *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
- uint8_t *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
+ uint8_t* cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
+ uint8_t* end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
for (i = 0; i < slices->count; i++) {
- char *data =
+ char* data =
grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
gpr_free(data);
@@ -287,7 +288,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
// Use frame protector to protect.
for (i = 0; i < slices->count; i++) {
grpc_slice plain = slices->slices[i];
- uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
+ uint8_t* message_bytes = GRPC_SLICE_START_PTR(plain);
size_t message_size = GRPC_SLICE_LENGTH(plain);
while (message_size > 0) {
size_t protected_buffer_size_to_send = (size_t)(end - cur);
@@ -353,52 +354,52 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
}
-static void endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
- grpc_error *why) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
+ grpc_error* why) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep, why);
}
-static void endpoint_destroy(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *secure_ep) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy");
}
-static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *secure_ep,
- grpc_pollset *pollset) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* secure_ep,
+ grpc_pollset* pollset) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset);
}
-static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *secure_ep,
- grpc_pollset_set *pollset_set) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* secure_ep,
+ grpc_pollset_set* pollset_set) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
}
-static void endpoint_delete_from_pollset_set(grpc_exec_ctx *exec_ctx,
- grpc_endpoint *secure_ep,
- grpc_pollset_set *pollset_set) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static void endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
+ grpc_endpoint* secure_ep,
+ grpc_pollset_set* pollset_set) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
grpc_endpoint_delete_from_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
}
-static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static char* endpoint_get_peer(grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
return grpc_endpoint_get_peer(ep->wrapped_ep);
}
-static int endpoint_get_fd(grpc_endpoint *secure_ep) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static int endpoint_get_fd(grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
return grpc_endpoint_get_fd(ep->wrapped_ep);
}
-static grpc_resource_user *endpoint_get_resource_user(
- grpc_endpoint *secure_ep) {
- secure_endpoint *ep = (secure_endpoint *)secure_ep;
+static grpc_resource_user* endpoint_get_resource_user(
+ grpc_endpoint* secure_ep) {
+ secure_endpoint* ep = (secure_endpoint*)secure_ep;
return grpc_endpoint_get_resource_user(ep->wrapped_ep);
}
@@ -413,13 +414,13 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_get_peer,
endpoint_get_fd};
-grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector,
- struct tsi_zero_copy_grpc_protector *zero_copy_protector,
- grpc_endpoint *transport, grpc_slice *leftover_slices,
+grpc_endpoint* grpc_secure_endpoint_create(
+ struct tsi_frame_protector* protector,
+ struct tsi_zero_copy_grpc_protector* zero_copy_protector,
+ grpc_endpoint* transport, grpc_slice* leftover_slices,
size_t leftover_nslices) {
size_t i;
- secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
+ secure_endpoint* ep = (secure_endpoint*)gpr_malloc(sizeof(secure_endpoint));
ep->base.vtable = &vtable;
ep->wrapped_ep = transport;
ep->protector = protector;
diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h
index 980449c03e..db8233f6e6 100644
--- a/src/core/lib/security/transport/secure_endpoint.h
+++ b/src/core/lib/security/transport/secure_endpoint.h
@@ -34,10 +34,10 @@ extern grpc_tracer_flag grpc_trace_secure_endpoint;
/* Takes ownership of protector, zero_copy_protector, and to_wrap, and refs
* leftover_slices. If zero_copy_protector is not NULL, protector will never be
* used. */
-grpc_endpoint *grpc_secure_endpoint_create(
- struct tsi_frame_protector *protector,
- struct tsi_zero_copy_grpc_protector *zero_copy_protector,
- grpc_endpoint *to_wrap, grpc_slice *leftover_slices,
+grpc_endpoint* grpc_secure_endpoint_create(
+ struct tsi_frame_protector* protector,
+ struct tsi_zero_copy_grpc_protector* zero_copy_protector,
+ grpc_endpoint* to_wrap, grpc_slice* leftover_slices,
size_t leftover_nslices);
#ifdef __cplusplus
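
A minimal usage sketch of the declaration above (assumes an already-connected endpoint 'tcp' and a handshake-produced 'protector'; both names are illustrative, not from this patch):

    /* Wrap an existing endpoint. Ownership of protector and tcp passes to the
       secure endpoint; a NULL zero_copy_protector means the regular frame
       protector is used, and zero leftover slices means nothing is queued for
       unprotection. */
    grpc_endpoint* secure_ep = grpc_secure_endpoint_create(
        protector, /* zero_copy_protector */ NULL, tcp,
        /* leftover_slices */ NULL, 0);
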
diff --git a/src/core/lib/security/transport/security_connector.cc b/src/core/lib/security/transport/security_connector.cc
index 06160d0caa..b5822d7454 100644
--- a/src/core/lib/security/transport/security_connector.cc
+++ b/src/core/lib/security/transport/security_connector.cc
@@ -52,9 +52,9 @@ grpc_tracer_flag grpc_trace_security_connector_refcount =
/* -- Constants. -- */
#ifndef INSTALL_PREFIX
-static const char *installed_roots_path = "/usr/share/grpc/roots.pem";
+static const char* installed_roots_path = "/usr/share/grpc/roots.pem";
#else
-static const char *installed_roots_path =
+static const char* installed_roots_path =
INSTALL_PREFIX "/share/grpc/roots.pem";
#endif
@@ -74,14 +74,14 @@ void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb) {
"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384"
static gpr_once cipher_suites_once = GPR_ONCE_INIT;
-static const char *cipher_suites = NULL;
+static const char* cipher_suites = NULL;
static void init_cipher_suites(void) {
- char *overridden = gpr_getenv("GRPC_SSL_CIPHER_SUITES");
+ char* overridden = gpr_getenv("GRPC_SSL_CIPHER_SUITES");
cipher_suites = overridden != NULL ? overridden : GRPC_SSL_CIPHER_SUITES;
}
-static const char *ssl_cipher_suites(void) {
+static const char* ssl_cipher_suites(void) {
gpr_once_init(&cipher_suites_once, init_cipher_suites);
return cipher_suites;
}
@@ -89,12 +89,12 @@ static const char *ssl_cipher_suites(void) {
/* -- Common methods. -- */
/* Returns the first property with that name. */
-const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
- const char *name) {
+const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer,
+ const char* name) {
size_t i;
if (peer == NULL) return NULL;
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property *property = &peer->properties[i];
+ const tsi_peer_property* property = &peer->properties[i];
if (name == NULL && property->name == NULL) {
return property;
}
@@ -107,26 +107,26 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
}
void grpc_channel_security_connector_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
- grpc_handshake_manager *handshake_mgr) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* connector,
+ grpc_handshake_manager* handshake_mgr) {
if (connector != NULL) {
connector->add_handshakers(exec_ctx, connector, handshake_mgr);
}
}
void grpc_server_security_connector_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector,
- grpc_handshake_manager *handshake_mgr) {
+ grpc_exec_ctx* exec_ctx, grpc_server_security_connector* connector,
+ grpc_handshake_manager* handshake_mgr) {
if (connector != NULL) {
connector->add_handshakers(exec_ctx, connector, handshake_mgr);
}
}
-void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc,
+void grpc_security_connector_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc,
tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
if (sc == NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -137,43 +137,43 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
}
}
-int grpc_security_connector_cmp(grpc_security_connector *sc,
- grpc_security_connector *other) {
+int grpc_security_connector_cmp(grpc_security_connector* sc,
+ grpc_security_connector* other) {
if (sc == NULL || other == NULL) return GPR_ICMP(sc, other);
int c = GPR_ICMP(sc->vtable, other->vtable);
if (c != 0) return c;
return sc->vtable->cmp(sc, other);
}
-int grpc_channel_security_connector_cmp(grpc_channel_security_connector *sc1,
- grpc_channel_security_connector *sc2) {
+int grpc_channel_security_connector_cmp(grpc_channel_security_connector* sc1,
+ grpc_channel_security_connector* sc2) {
GPR_ASSERT(sc1->channel_creds != NULL);
GPR_ASSERT(sc2->channel_creds != NULL);
int c = GPR_ICMP(sc1->channel_creds, sc2->channel_creds);
if (c != 0) return c;
c = GPR_ICMP(sc1->request_metadata_creds, sc2->request_metadata_creds);
if (c != 0) return c;
- c = GPR_ICMP((void *)sc1->check_call_host, (void *)sc2->check_call_host);
+ c = GPR_ICMP((void*)sc1->check_call_host, (void*)sc2->check_call_host);
if (c != 0) return c;
- c = GPR_ICMP((void *)sc1->cancel_check_call_host,
- (void *)sc2->cancel_check_call_host);
+ c = GPR_ICMP((void*)sc1->cancel_check_call_host,
+ (void*)sc2->cancel_check_call_host);
if (c != 0) return c;
- return GPR_ICMP((void *)sc1->add_handshakers, (void *)sc2->add_handshakers);
+ return GPR_ICMP((void*)sc1->add_handshakers, (void*)sc2->add_handshakers);
}
-int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1,
- grpc_server_security_connector *sc2) {
+int grpc_server_security_connector_cmp(grpc_server_security_connector* sc1,
+ grpc_server_security_connector* sc2) {
GPR_ASSERT(sc1->server_creds != NULL);
GPR_ASSERT(sc2->server_creds != NULL);
int c = GPR_ICMP(sc1->server_creds, sc2->server_creds);
if (c != 0) return c;
- return GPR_ICMP((void *)sc1->add_handshakers, (void *)sc2->add_handshakers);
+ return GPR_ICMP((void*)sc1->add_handshakers, (void*)sc2->add_handshakers);
}
bool grpc_channel_security_connector_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- const char *host, grpc_auth_context *auth_context,
- grpc_closure *on_call_host_checked, grpc_error **error) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ const char* host, grpc_auth_context* auth_context,
+ grpc_closure* on_call_host_checked, grpc_error** error) {
if (sc == NULL || sc->check_call_host == NULL) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"cannot check call host -- no security connector");
@@ -184,8 +184,8 @@ bool grpc_channel_security_connector_check_call_host(
}
void grpc_channel_security_connector_cancel_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_closure *on_call_host_checked, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ grpc_closure* on_call_host_checked, grpc_error* error) {
if (sc == NULL || sc->cancel_check_call_host == NULL) {
GRPC_ERROR_UNREF(error);
return;
@@ -194,9 +194,9 @@ void grpc_channel_security_connector_cancel_check_call_host(
}
#ifndef NDEBUG
-grpc_security_connector *grpc_security_connector_ref(
- grpc_security_connector *sc, const char *file, int line,
- const char *reason) {
+grpc_security_connector* grpc_security_connector_ref(
+ grpc_security_connector* sc, const char* file, int line,
+ const char* reason) {
if (sc == NULL) return NULL;
if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
@@ -205,8 +205,8 @@ grpc_security_connector *grpc_security_connector_ref(
val, val + 1, reason);
}
#else
-grpc_security_connector *grpc_security_connector_ref(
- grpc_security_connector *sc) {
+grpc_security_connector* grpc_security_connector_ref(
+ grpc_security_connector* sc) {
if (sc == NULL) return NULL;
#endif
gpr_ref(&sc->refcount);
@@ -214,10 +214,10 @@ grpc_security_connector *grpc_security_connector_ref(
}
#ifndef NDEBUG
-void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc,
- const char *file, int line,
- const char *reason) {
+void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc,
+ const char* file, int line,
+ const char* reason) {
if (sc == NULL) return;
if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count);
@@ -226,52 +226,52 @@ void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
val, val - 1, reason);
}
#else
-void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
+void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
if (sc == NULL) return;
#endif
if (gpr_unref(&sc->refcount)) sc->vtable->destroy(exec_ctx, sc);
}
-static void connector_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
- GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, (grpc_security_connector *)p,
+static void connector_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+ GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, (grpc_security_connector*)p,
"connector_arg_destroy");
}
-static void *connector_arg_copy(void *p) {
- return GRPC_SECURITY_CONNECTOR_REF((grpc_security_connector *)p,
+static void* connector_arg_copy(void* p) {
+ return GRPC_SECURITY_CONNECTOR_REF((grpc_security_connector*)p,
"connector_arg_copy");
}
-static int connector_cmp(void *a, void *b) {
- return grpc_security_connector_cmp((grpc_security_connector *)a,
- (grpc_security_connector *)b);
+static int connector_cmp(void* a, void* b) {
+ return grpc_security_connector_cmp((grpc_security_connector*)a,
+ (grpc_security_connector*)b);
}
static const grpc_arg_pointer_vtable connector_arg_vtable = {
connector_arg_copy, connector_arg_destroy, connector_cmp};
-grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) {
- return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SECURITY_CONNECTOR,
- sc, &connector_arg_vtable);
+grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc) {
+ return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SECURITY_CONNECTOR, sc,
+ &connector_arg_vtable);
}
-grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg) {
+grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg) {
if (strcmp(arg->key, GRPC_ARG_SECURITY_CONNECTOR)) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_ARG_SECURITY_CONNECTOR);
return NULL;
}
- return (grpc_security_connector *)arg->value.pointer.p;
+ return (grpc_security_connector*)arg->value.pointer.p;
}
-grpc_security_connector *grpc_security_connector_find_in_args(
- const grpc_channel_args *args) {
+grpc_security_connector* grpc_security_connector_find_in_args(
+ const grpc_channel_args* args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
- grpc_security_connector *sc =
+ grpc_security_connector* sc =
grpc_security_connector_from_arg(&args->args[i]);
if (sc != NULL) return sc;
}
@@ -306,31 +306,31 @@ get_tsi_client_certificate_request_type(
typedef struct {
grpc_channel_security_connector base;
- char *target;
- char *expected_targets;
+ char* target;
+ char* expected_targets;
bool is_lb_channel;
} grpc_fake_channel_security_connector;
-static void fake_channel_destroy(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
- grpc_fake_channel_security_connector *c =
- (grpc_fake_channel_security_connector *)sc;
+static void fake_channel_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
+ grpc_fake_channel_security_connector* c =
+ (grpc_fake_channel_security_connector*)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
gpr_free(c->target);
gpr_free(c->expected_targets);
gpr_free(c);
}
-static void fake_server_destroy(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
+static void fake_server_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
gpr_free(sc);
}
-static bool fake_check_target(const char *target_type, const char *target,
- const char *set_str) {
+static bool fake_check_target(const char* target_type, const char* target,
+ const char* set_str) {
GPR_ASSERT(target_type != NULL);
GPR_ASSERT(target != NULL);
- char **set = NULL;
+ char** set = NULL;
size_t set_size = 0;
gpr_string_split(set_str, ",", &set, &set_size);
bool found = false;
@@ -344,11 +344,11 @@ static bool fake_check_target(const char *target_type, const char *target,
return found;
}
-static void fake_secure_name_check(const char *target,
- const char *expected_targets,
+static void fake_secure_name_check(const char* target,
+ const char* expected_targets,
bool is_lb_channel) {
if (expected_targets == NULL) return;
- char **lbs_and_backends = NULL;
+ char** lbs_and_backends = NULL;
size_t lbs_and_backends_size = 0;
bool success = false;
gpr_string_split(expected_targets, ";", &lbs_and_backends,
@@ -388,12 +388,12 @@ done:
if (!success) abort();
}
-static void fake_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
- const char *prop_name;
- grpc_error *error = GRPC_ERROR_NONE;
+static void fake_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
+ const char* prop_name;
+ grpc_error* error = GRPC_ERROR_NONE;
*auth_context = NULL;
if (peer.property_count != 1) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -403,7 +403,7 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
prop_name = peer.properties[0].name;
if (prop_name == NULL ||
strcmp(prop_name, TSI_CERTIFICATE_TYPE_PEER_PROPERTY)) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Unexpected property in fake peer: %s.",
prop_name == NULL ? "<EMPTY>" : prop_name);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
@@ -425,29 +425,29 @@ end:
tsi_peer_destruct(&peer);
}
-static void fake_channel_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
+static void fake_channel_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
- grpc_fake_channel_security_connector *c =
- (grpc_fake_channel_security_connector *)sc;
+ grpc_fake_channel_security_connector* c =
+ (grpc_fake_channel_security_connector*)sc;
fake_secure_name_check(c->target, c->expected_targets, c->is_lb_channel);
}
-static void fake_server_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
+static void fake_server_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
}
-static int fake_channel_cmp(grpc_security_connector *sc1,
- grpc_security_connector *sc2) {
- grpc_fake_channel_security_connector *c1 =
- (grpc_fake_channel_security_connector *)sc1;
- grpc_fake_channel_security_connector *c2 =
- (grpc_fake_channel_security_connector *)sc2;
+static int fake_channel_cmp(grpc_security_connector* sc1,
+ grpc_security_connector* sc2) {
+ grpc_fake_channel_security_connector* c1 =
+ (grpc_fake_channel_security_connector*)sc1;
+ grpc_fake_channel_security_connector* c2 =
+ (grpc_fake_channel_security_connector*)sc2;
int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base);
if (c != 0) return c;
c = strcmp(c1->target, c2->target);
@@ -461,31 +461,31 @@ static int fake_channel_cmp(grpc_security_connector *sc1,
return GPR_ICMP(c1->is_lb_channel, c2->is_lb_channel);
}
-static int fake_server_cmp(grpc_security_connector *sc1,
- grpc_security_connector *sc2) {
+static int fake_server_cmp(grpc_security_connector* sc1,
+ grpc_security_connector* sc2) {
return grpc_server_security_connector_cmp(
- (grpc_server_security_connector *)sc1,
- (grpc_server_security_connector *)sc2);
+ (grpc_server_security_connector*)sc1,
+ (grpc_server_security_connector*)sc2);
}
-static bool fake_channel_check_call_host(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- const char *host,
- grpc_auth_context *auth_context,
- grpc_closure *on_call_host_checked,
- grpc_error **error) {
+static bool fake_channel_check_call_host(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ const char* host,
+ grpc_auth_context* auth_context,
+ grpc_closure* on_call_host_checked,
+ grpc_error** error) {
return true;
}
static void fake_channel_cancel_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_closure *on_call_host_checked, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ grpc_closure* on_call_host_checked, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
static void fake_channel_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_handshake_manager *handshake_mgr) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr) {
grpc_handshake_manager_add(
handshake_mgr,
grpc_security_handshaker_create(
@@ -493,9 +493,9 @@ static void fake_channel_add_handshakers(
&sc->base));
}
-static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx,
- grpc_server_security_connector *sc,
- grpc_handshake_manager *handshake_mgr) {
+static void fake_server_add_handshakers(grpc_exec_ctx* exec_ctx,
+ grpc_server_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr) {
grpc_handshake_manager_add(
handshake_mgr,
grpc_security_handshaker_create(
@@ -509,12 +509,12 @@ static grpc_security_connector_vtable fake_channel_vtable = {
static grpc_security_connector_vtable fake_server_vtable = {
fake_server_destroy, fake_server_check_peer, fake_server_cmp};
-grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
- grpc_channel_credentials *channel_creds,
- grpc_call_credentials *request_metadata_creds, const char *target,
- const grpc_channel_args *args) {
- grpc_fake_channel_security_connector *c =
- (grpc_fake_channel_security_connector *)gpr_zalloc(sizeof(*c));
+grpc_channel_security_connector* grpc_fake_channel_security_connector_create(
+ grpc_channel_credentials* channel_creds,
+ grpc_call_credentials* request_metadata_creds, const char* target,
+ const grpc_channel_args* args) {
+ grpc_fake_channel_security_connector* c =
+ (grpc_fake_channel_security_connector*)gpr_zalloc(sizeof(*c));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
c->base.base.vtable = &fake_channel_vtable;
@@ -525,16 +525,16 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
c->base.cancel_check_call_host = fake_channel_cancel_check_call_host;
c->base.add_handshakers = fake_channel_add_handshakers;
c->target = gpr_strdup(target);
- const char *expected_targets = grpc_fake_transport_get_expected_targets(args);
+ const char* expected_targets = grpc_fake_transport_get_expected_targets(args);
c->expected_targets = gpr_strdup(expected_targets);
c->is_lb_channel = (grpc_lb_targets_info_find_in_args(args) != NULL);
return &c->base;
}
-grpc_server_security_connector *grpc_fake_server_security_connector_create(
- grpc_server_credentials *server_creds) {
- grpc_server_security_connector *c =
- (grpc_server_security_connector *)gpr_zalloc(
+grpc_server_security_connector* grpc_fake_server_security_connector_create(
+ grpc_server_credentials* server_creds) {
+ grpc_server_security_connector* c =
+ (grpc_server_security_connector*)gpr_zalloc(
sizeof(grpc_server_security_connector));
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &fake_server_vtable;
@@ -548,29 +548,29 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *client_handshaker_factory;
- char *target_name;
- char *overridden_target_name;
+ tsi_ssl_client_handshaker_factory* client_handshaker_factory;
+ char* target_name;
+ char* overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_server_handshaker_factory *server_handshaker_factory;
+ tsi_ssl_server_handshaker_factory* server_handshaker_factory;
} grpc_ssl_server_security_connector;
static bool server_connector_has_cert_config_fetcher(
- grpc_ssl_server_security_connector *c) {
+ grpc_ssl_server_security_connector* c) {
GPR_ASSERT(c != NULL);
- grpc_ssl_server_credentials *server_creds =
- (grpc_ssl_server_credentials *)c->base.server_creds;
+ grpc_ssl_server_credentials* server_creds =
+ (grpc_ssl_server_credentials*)c->base.server_creds;
GPR_ASSERT(server_creds != NULL);
return server_creds->certificate_config_fetcher.cb != NULL;
}
-static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
- grpc_ssl_channel_security_connector *c =
- (grpc_ssl_channel_security_connector *)sc;
+static void ssl_channel_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
+ grpc_ssl_channel_security_connector* c =
+ (grpc_ssl_channel_security_connector*)sc;
grpc_channel_credentials_unref(exec_ctx, c->base.channel_creds);
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
@@ -580,23 +580,23 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
gpr_free(sc);
}
-static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc) {
- grpc_ssl_server_security_connector *c =
- (grpc_ssl_server_security_connector *)sc;
+static void ssl_server_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc) {
+ grpc_ssl_server_security_connector* c =
+ (grpc_ssl_server_security_connector*)sc;
grpc_server_credentials_unref(exec_ctx, c->base.server_creds);
tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
c->server_handshaker_factory = NULL;
gpr_free(sc);
}
-static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_handshake_manager *handshake_mgr) {
- grpc_ssl_channel_security_connector *c =
- (grpc_ssl_channel_security_connector *)sc;
+static void ssl_channel_add_handshakers(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr) {
+ grpc_ssl_channel_security_connector* c =
+ (grpc_ssl_channel_security_connector*)sc;
// Instantiate TSI handshaker.
- tsi_handshaker *tsi_hs = NULL;
+ tsi_handshaker* tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
@@ -614,11 +614,11 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
}
-static const char **fill_alpn_protocol_strings(size_t *num_alpn_protocols) {
+static const char** fill_alpn_protocol_strings(size_t* num_alpn_protocols) {
GPR_ASSERT(num_alpn_protocols != NULL);
*num_alpn_protocols = grpc_chttp2_num_alpn_versions();
- const char **alpn_protocol_strings =
- (const char **)gpr_malloc(sizeof(const char *) * (*num_alpn_protocols));
+ const char** alpn_protocol_strings =
+ (const char**)gpr_malloc(sizeof(const char*) * (*num_alpn_protocols));
for (size_t i = 0; i < *num_alpn_protocols; i++) {
alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i);
}
@@ -630,8 +630,8 @@ static const char **fill_alpn_protocol_strings(size_t *num_alpn_protocols) {
* fail, the existing factory will not be replaced. Returns true on success (new
* factory created). */
static bool try_replace_server_handshaker_factory(
- grpc_ssl_server_security_connector *sc,
- const grpc_ssl_server_certificate_config *config) {
+ grpc_ssl_server_security_connector* sc,
+ const grpc_ssl_server_certificate_config* config) {
if (config == NULL) {
gpr_log(GPR_ERROR,
"Server certificate config callback returned invalid (NULL) "
@@ -641,13 +641,13 @@ static bool try_replace_server_handshaker_factory(
gpr_log(GPR_DEBUG, "Using new server certificate config (%p).", config);
size_t num_alpn_protocols = 0;
- const char **alpn_protocol_strings =
+ const char** alpn_protocol_strings =
fill_alpn_protocol_strings(&num_alpn_protocols);
- tsi_ssl_pem_key_cert_pair *cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
+ tsi_ssl_pem_key_cert_pair* cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs(
config->pem_key_cert_pairs, config->num_key_cert_pairs);
- tsi_ssl_server_handshaker_factory *new_handshaker_factory = NULL;
- grpc_ssl_server_credentials *server_creds =
- (grpc_ssl_server_credentials *)sc->base.server_creds;
+ tsi_ssl_server_handshaker_factory* new_handshaker_factory = NULL;
+ grpc_ssl_server_credentials* server_creds =
+ (grpc_ssl_server_credentials*)sc->base.server_creds;
tsi_result result = tsi_create_ssl_server_handshaker_factory_ex(
cert_pairs, config->num_key_cert_pairs, config->pem_root_certs,
get_tsi_client_certificate_request_type(
@@ -655,7 +655,7 @@ static bool try_replace_server_handshaker_factory(
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
&new_handshaker_factory);
gpr_free(cert_pairs);
- gpr_free((void *)alpn_protocol_strings);
+ gpr_free((void*)alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
@@ -671,15 +671,15 @@ static bool try_replace_server_handshaker_factory(
* Current certificate config will continue to be used if the callback returns
 * an error. Returns true if new credentials were successfully loaded. */
static bool try_fetch_ssl_server_credentials(
- grpc_ssl_server_security_connector *sc) {
- grpc_ssl_server_certificate_config *certificate_config = NULL;
+ grpc_ssl_server_security_connector* sc) {
+ grpc_ssl_server_certificate_config* certificate_config = NULL;
bool status;
GPR_ASSERT(sc != NULL);
if (!server_connector_has_cert_config_fetcher(sc)) return false;
- grpc_ssl_server_credentials *server_creds =
- (grpc_ssl_server_credentials *)sc->base.server_creds;
+ grpc_ssl_server_credentials* server_creds =
+ (grpc_ssl_server_credentials*)sc->base.server_creds;
grpc_ssl_certificate_config_reload_status cb_result =
server_creds->certificate_config_fetcher.cb(
server_creds->certificate_config_fetcher.user_data,
@@ -703,14 +703,14 @@ static bool try_fetch_ssl_server_credentials(
return status;
}
-static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
- grpc_server_security_connector *sc,
- grpc_handshake_manager *handshake_mgr) {
- grpc_ssl_server_security_connector *c =
- (grpc_ssl_server_security_connector *)sc;
+static void ssl_server_add_handshakers(grpc_exec_ctx* exec_ctx,
+ grpc_server_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr) {
+ grpc_ssl_server_security_connector* c =
+ (grpc_ssl_server_security_connector*)sc;
// Instantiate TSI handshaker.
try_fetch_ssl_server_credentials(c);
- tsi_handshaker *tsi_hs = NULL;
+ tsi_handshaker* tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
@@ -725,12 +725,12 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
}
-static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
- char *allocated_name = NULL;
+static int ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
+ char* allocated_name = NULL;
int r;
if (strchr(peer_name, ':') != NULL) {
- char *ignored_port;
+ char* ignored_port;
gpr_split_host_port(peer_name, &allocated_name, &ignored_port);
gpr_free(ignored_port);
peer_name = allocated_name;
@@ -741,10 +741,10 @@ static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
return r;
}
-grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
+grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer) {
size_t i;
- grpc_auth_context *ctx = NULL;
- const char *peer_identity_property_name = NULL;
+ grpc_auth_context* ctx = NULL;
+ const char* peer_identity_property_name = NULL;
/* The caller has checked the certificate type property. */
GPR_ASSERT(peer->property_count >= 1);
@@ -753,7 +753,7 @@ grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_SSL_TRANSPORT_SECURITY_TYPE);
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property *prop = &peer->properties[i];
+ const tsi_peer_property* prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
/* If there is no subject alt name, have the CN as the identity. */
@@ -779,11 +779,11 @@ grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
return ctx;
}
-static grpc_error *ssl_check_peer(grpc_security_connector *sc,
- const char *peer_name, const tsi_peer *peer,
- grpc_auth_context **auth_context) {
+static grpc_error* ssl_check_peer(grpc_security_connector* sc,
+ const char* peer_name, const tsi_peer* peer,
+ grpc_auth_context** auth_context) {
/* Check the ALPN. */
- const tsi_peer_property *p =
+ const tsi_peer_property* p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == NULL) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -796,9 +796,9 @@ static grpc_error *ssl_check_peer(grpc_security_connector *sc,
/* Check the peer name if specified. */
if (peer_name != NULL && !ssl_host_matches_name(peer, peer_name)) {
- char *msg;
+ char* msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
- grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
@@ -806,35 +806,36 @@ static grpc_error *ssl_check_peer(grpc_security_connector *sc,
return GRPC_ERROR_NONE;
}
-static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
- grpc_ssl_channel_security_connector *c =
- (grpc_ssl_channel_security_connector *)sc;
- grpc_error *error = ssl_check_peer(sc, c->overridden_target_name != NULL
- ? c->overridden_target_name
- : c->target_name,
+static void ssl_channel_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
+ grpc_ssl_channel_security_connector* c =
+ (grpc_ssl_channel_security_connector*)sc;
+ grpc_error* error = ssl_check_peer(sc,
+ c->overridden_target_name != NULL
+ ? c->overridden_target_name
+ : c->target_name,
&peer, auth_context);
GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
tsi_peer_destruct(&peer);
}
-static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc, tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked) {
- grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
+static void ssl_server_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc, tsi_peer peer,
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked) {
+ grpc_error* error = ssl_check_peer(sc, NULL, &peer, auth_context);
tsi_peer_destruct(&peer);
GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
}
-static int ssl_channel_cmp(grpc_security_connector *sc1,
- grpc_security_connector *sc2) {
- grpc_ssl_channel_security_connector *c1 =
- (grpc_ssl_channel_security_connector *)sc1;
- grpc_ssl_channel_security_connector *c2 =
- (grpc_ssl_channel_security_connector *)sc2;
+static int ssl_channel_cmp(grpc_security_connector* sc1,
+ grpc_security_connector* sc2) {
+ grpc_ssl_channel_security_connector* c1 =
+ (grpc_ssl_channel_security_connector*)sc1;
+ grpc_ssl_channel_security_connector* c2 =
+ (grpc_ssl_channel_security_connector*)sc2;
int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base);
if (c != 0) return c;
c = strcmp(c1->target_name, c2->target_name);
@@ -845,27 +846,27 @@ static int ssl_channel_cmp(grpc_security_connector *sc1,
: strcmp(c1->overridden_target_name, c2->overridden_target_name);
}
-static int ssl_server_cmp(grpc_security_connector *sc1,
- grpc_security_connector *sc2) {
+static int ssl_server_cmp(grpc_security_connector* sc1,
+ grpc_security_connector* sc2) {
return grpc_server_security_connector_cmp(
- (grpc_server_security_connector *)sc1,
- (grpc_server_security_connector *)sc2);
+ (grpc_server_security_connector*)sc1,
+ (grpc_server_security_connector*)sc2);
}
-static void add_shallow_auth_property_to_peer(tsi_peer *peer,
- const grpc_auth_property *prop,
- const char *tsi_prop_name) {
- tsi_peer_property *tsi_prop = &peer->properties[peer->property_count++];
- tsi_prop->name = (char *)tsi_prop_name;
+static void add_shallow_auth_property_to_peer(tsi_peer* peer,
+ const grpc_auth_property* prop,
+ const char* tsi_prop_name) {
+ tsi_peer_property* tsi_prop = &peer->properties[peer->property_count++];
+ tsi_prop->name = (char*)tsi_prop_name;
tsi_prop->value.data = prop->value;
tsi_prop->value.length = prop->value_length;
}
tsi_peer tsi_shallow_peer_from_ssl_auth_context(
- const grpc_auth_context *auth_context) {
+ const grpc_auth_context* auth_context) {
size_t max_num_props = 0;
grpc_auth_property_iterator it;
- const grpc_auth_property *prop;
+ const grpc_auth_property* prop;
tsi_peer peer;
memset(&peer, 0, sizeof(peer));
@@ -873,8 +874,8 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++;
if (max_num_props > 0) {
- peer.properties = (tsi_peer_property *)gpr_malloc(
- max_num_props * sizeof(tsi_peer_property));
+ peer.properties = (tsi_peer_property*)gpr_malloc(max_num_props *
+ sizeof(tsi_peer_property));
it = grpc_auth_context_property_iterator(auth_context);
while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) {
@@ -892,18 +893,18 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
return peer;
}
-void tsi_shallow_peer_destruct(tsi_peer *peer) {
+void tsi_shallow_peer_destruct(tsi_peer* peer) {
if (peer->properties != NULL) gpr_free(peer->properties);
}
-static bool ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- const char *host,
- grpc_auth_context *auth_context,
- grpc_closure *on_call_host_checked,
- grpc_error **error) {
- grpc_ssl_channel_security_connector *c =
- (grpc_ssl_channel_security_connector *)sc;
+static bool ssl_channel_check_call_host(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ const char* host,
+ grpc_auth_context* auth_context,
+ grpc_closure* on_call_host_checked,
+ grpc_error** error) {
+ grpc_ssl_channel_security_connector* c =
+ (grpc_ssl_channel_security_connector*)sc;
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = tsi_shallow_peer_from_ssl_auth_context(auth_context);
if (ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK;
@@ -922,8 +923,8 @@ static bool ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx,
}
static void ssl_channel_cancel_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_closure *on_call_host_checked, grpc_error *error) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ grpc_closure* on_call_host_checked, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
@@ -938,7 +939,7 @@ static grpc_slice compute_default_pem_root_certs_once(void) {
grpc_slice result = grpc_empty_slice();
/* First try to load the roots from the environment. */
- char *default_root_certs_path =
+ char* default_root_certs_path =
gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR);
if (default_root_certs_path != NULL) {
GRPC_LOG_IF_ERROR("load_file",
@@ -949,7 +950,7 @@ static grpc_slice compute_default_pem_root_certs_once(void) {
/* Try overridden roots if needed. */
grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL;
if (GRPC_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) {
- char *pem_root_certs = NULL;
+ char* pem_root_certs = NULL;
ovrd_res = ssl_roots_override_cb(&pem_root_certs);
if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) {
GPR_ASSERT(pem_root_certs != NULL);
@@ -979,28 +980,28 @@ grpc_slice grpc_get_default_ssl_roots_for_testing(void) {
return compute_default_pem_root_certs_once();
}
-const char *grpc_get_default_ssl_roots(void) {
+const char* grpc_get_default_ssl_roots(void) {
/* TODO(jboeuf@google.com): Maybe revisit the approach which consists in
loading all the roots once for the lifetime of the process. */
static gpr_once once = GPR_ONCE_INIT;
gpr_once_init(&once, init_default_pem_root_certs);
return GRPC_SLICE_IS_EMPTY(default_pem_root_certs)
? NULL
- : (const char *)GRPC_SLICE_START_PTR(default_pem_root_certs);
+ : (const char*)GRPC_SLICE_START_PTR(default_pem_root_certs);
}
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds,
- grpc_call_credentials *request_metadata_creds,
- const grpc_ssl_config *config, const char *target_name,
- const char *overridden_target_name, grpc_channel_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
+ grpc_call_credentials* request_metadata_creds,
+ const grpc_ssl_config* config, const char* target_name,
+ const char* overridden_target_name, grpc_channel_security_connector** sc) {
size_t num_alpn_protocols = 0;
- const char **alpn_protocol_strings =
+ const char** alpn_protocol_strings =
fill_alpn_protocol_strings(&num_alpn_protocols);
tsi_result result = TSI_OK;
- grpc_ssl_channel_security_connector *c;
- const char *pem_root_certs;
- char *port;
+ grpc_ssl_channel_security_connector* c;
+ const char* pem_root_certs;
+ char* port;
bool has_key_cert_pair;
if (config == NULL || target_name == NULL) {
@@ -1017,7 +1018,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
pem_root_certs = config->pem_root_certs;
}
- c = (grpc_ssl_channel_security_connector *)gpr_zalloc(
+ c = (grpc_ssl_channel_security_connector*)gpr_zalloc(
sizeof(grpc_ssl_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
@@ -1050,19 +1051,19 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
goto error;
}
*sc = &c->base;
- gpr_free((void *)alpn_protocol_strings);
+ gpr_free((void*)alpn_protocol_strings);
return GRPC_SECURITY_OK;
error:
- gpr_free((void *)alpn_protocol_strings);
+ gpr_free((void*)alpn_protocol_strings);
return GRPC_SECURITY_ERROR;
}
-static grpc_ssl_server_security_connector *
+static grpc_ssl_server_security_connector*
grpc_ssl_server_security_connector_initialize(
- grpc_server_credentials *server_creds) {
- grpc_ssl_server_security_connector *c =
- (grpc_ssl_server_security_connector *)gpr_zalloc(
+ grpc_server_credentials* server_creds) {
+ grpc_ssl_server_security_connector* c =
+ (grpc_ssl_server_security_connector*)gpr_zalloc(
sizeof(grpc_ssl_server_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
@@ -1073,17 +1074,17 @@ grpc_ssl_server_security_connector_initialize(
}
grpc_security_status grpc_ssl_server_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *gsc,
- grpc_server_security_connector **sc) {
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* gsc,
+ grpc_server_security_connector** sc) {
tsi_result result = TSI_OK;
- grpc_ssl_server_credentials *server_credentials =
- (grpc_ssl_server_credentials *)gsc;
+ grpc_ssl_server_credentials* server_credentials =
+ (grpc_ssl_server_credentials*)gsc;
grpc_security_status retval = GRPC_SECURITY_OK;
GPR_ASSERT(server_credentials != NULL);
GPR_ASSERT(sc != NULL);
- grpc_ssl_server_security_connector *c =
+ grpc_ssl_server_security_connector* c =
grpc_ssl_server_security_connector_initialize(gsc);
if (server_connector_has_cert_config_fetcher(c)) {
// Load initial credentials from certificate_config_fetcher:
@@ -1093,7 +1094,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
}
} else {
size_t num_alpn_protocols = 0;
- const char **alpn_protocol_strings =
+ const char** alpn_protocol_strings =
fill_alpn_protocol_strings(&num_alpn_protocols);
result = tsi_create_ssl_server_handshaker_factory_ex(
server_credentials->config.pem_key_cert_pairs,
@@ -1103,7 +1104,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
server_credentials->config.client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings,
(uint16_t)num_alpn_protocols, &c->server_handshaker_factory);
- gpr_free((void *)alpn_protocol_strings);
+ gpr_free((void*)alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
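
A short sketch of the channel-arg round trip defined in this file (assumes an existing connector 'sc' and a grpc_channel_args 'args' built from that arg; the names are illustrative):

    /* Encapsulate the connector in a channel arg; the arg's vtable refs and
       unrefs the connector when the channel args are copied or destroyed. */
    grpc_arg arg = grpc_security_connector_to_arg(sc);

    /* ... later, recover it from the resulting channel args. */
    grpc_security_connector* found = grpc_security_connector_find_in_args(args);
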
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 54a563bb2c..79fdbc1a66 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -56,17 +56,17 @@ typedef struct grpc_security_connector grpc_security_connector;
#define GRPC_ARG_SECURITY_CONNECTOR "grpc.security_connector"
typedef struct {
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc);
- void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc,
- tsi_peer peer, grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked);
- int (*cmp)(grpc_security_connector *sc, grpc_security_connector *other);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_security_connector* sc);
+ void (*check_peer)(grpc_exec_ctx* exec_ctx, grpc_security_connector* sc,
+ tsi_peer peer, grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked);
+ int (*cmp)(grpc_security_connector* sc, grpc_security_connector* other);
} grpc_security_connector_vtable;
struct grpc_security_connector {
- const grpc_security_connector_vtable *vtable;
+ const grpc_security_connector_vtable* vtable;
gpr_refcount refcount;
- const char *url_scheme;
+ const char* url_scheme;
};
/* Refcounting. */
@@ -75,44 +75,44 @@ struct grpc_security_connector {
grpc_security_connector_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
grpc_security_connector_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-grpc_security_connector *grpc_security_connector_ref(
- grpc_security_connector *policy, const char *file, int line,
- const char *reason);
-void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *policy,
- const char *file, int line,
- const char *reason);
+grpc_security_connector* grpc_security_connector_ref(
+ grpc_security_connector* policy, const char* file, int line,
+ const char* reason);
+void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* policy,
+ const char* file, int line,
+ const char* reason);
#else
#define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p))
#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
grpc_security_connector_unref((exec_ctx), (p))
-grpc_security_connector *grpc_security_connector_ref(
- grpc_security_connector *policy);
-void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *policy);
+grpc_security_connector* grpc_security_connector_ref(
+ grpc_security_connector* policy);
+void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* policy);
#endif
/* Check the peer. Callee takes ownership of the peer object.
When done, sets *auth_context and invokes on_peer_checked. */
-void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
- grpc_security_connector *sc,
+void grpc_security_connector_check_peer(grpc_exec_ctx* exec_ctx,
+ grpc_security_connector* sc,
tsi_peer peer,
- grpc_auth_context **auth_context,
- grpc_closure *on_peer_checked);
+ grpc_auth_context** auth_context,
+ grpc_closure* on_peer_checked);
/* Compares two security connectors. */
-int grpc_security_connector_cmp(grpc_security_connector *sc,
- grpc_security_connector *other);
+int grpc_security_connector_cmp(grpc_security_connector* sc,
+ grpc_security_connector* other);
/* Util to encapsulate the connector in a channel arg. */
-grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);
+grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc);
/* Util to get the connector from a channel arg. */
-grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg);
+grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg);
/* Util to find the connector from channel args. */
-grpc_security_connector *grpc_security_connector_find_in_args(
- const grpc_channel_args *args);
+grpc_security_connector* grpc_security_connector_find_in_args(
+ const grpc_channel_args* args);
/* --- channel_security_connector object. ---
@@ -123,46 +123,46 @@ typedef struct grpc_channel_security_connector grpc_channel_security_connector;
struct grpc_channel_security_connector {
grpc_security_connector base;
- grpc_channel_credentials *channel_creds;
- grpc_call_credentials *request_metadata_creds;
- bool (*check_call_host)(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc, const char *host,
- grpc_auth_context *auth_context,
- grpc_closure *on_call_host_checked,
- grpc_error **error);
- void (*cancel_check_call_host)(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_closure *on_call_host_checked,
- grpc_error *error);
- void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
- grpc_channel_security_connector *sc,
- grpc_handshake_manager *handshake_mgr);
+ grpc_channel_credentials* channel_creds;
+ grpc_call_credentials* request_metadata_creds;
+ bool (*check_call_host)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc, const char* host,
+ grpc_auth_context* auth_context,
+ grpc_closure* on_call_host_checked,
+ grpc_error** error);
+ void (*cancel_check_call_host)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ grpc_closure* on_call_host_checked,
+ grpc_error* error);
+ void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr);
};
/// A helper function for use in grpc_security_connector_cmp() implementations.
-int grpc_channel_security_connector_cmp(grpc_channel_security_connector *sc1,
- grpc_channel_security_connector *sc2);
+int grpc_channel_security_connector_cmp(grpc_channel_security_connector* sc1,
+ grpc_channel_security_connector* sc2);
/// Checks that the host that will be set for a call is acceptable.
/// Returns true if completed synchronously, in which case \a error will
/// be set to indicate the result. Otherwise, \a on_call_host_checked
/// will be invoked when complete.
bool grpc_channel_security_connector_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- const char *host, grpc_auth_context *auth_context,
- grpc_closure *on_call_host_checked, grpc_error **error);
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ const char* host, grpc_auth_context* auth_context,
+ grpc_closure* on_call_host_checked, grpc_error** error);
/// Cancels a pending asynchronous call to
/// grpc_channel_security_connector_check_call_host() with
/// \a on_call_host_checked as its callback.
void grpc_channel_security_connector_cancel_check_call_host(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc,
- grpc_closure *on_call_host_checked, grpc_error *error);
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+ grpc_closure* on_call_host_checked, grpc_error* error);
/* Registers handshakers with \a handshake_mgr. */
void grpc_channel_security_connector_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector,
- grpc_handshake_manager *handshake_mgr);
+ grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* connector,
+ grpc_handshake_manager* handshake_mgr);
/* --- server_security_connector object. ---
@@ -173,39 +173,39 @@ typedef struct grpc_server_security_connector grpc_server_security_connector;
struct grpc_server_security_connector {
grpc_security_connector base;
- grpc_server_credentials *server_creds;
- void (*add_handshakers)(grpc_exec_ctx *exec_ctx,
- grpc_server_security_connector *sc,
- grpc_handshake_manager *handshake_mgr);
+ grpc_server_credentials* server_creds;
+ void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
+ grpc_server_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr);
};
/// A helper function for use in grpc_security_connector_cmp() implementations.
-int grpc_server_security_connector_cmp(grpc_server_security_connector *sc1,
- grpc_server_security_connector *sc2);
+int grpc_server_security_connector_cmp(grpc_server_security_connector* sc1,
+ grpc_server_security_connector* sc2);
void grpc_server_security_connector_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc,
- grpc_handshake_manager *handshake_mgr);
+ grpc_exec_ctx* exec_ctx, grpc_server_security_connector* sc,
+ grpc_handshake_manager* handshake_mgr);
/* --- Creation security connectors. --- */
/* For TESTING ONLY!
Creates a fake connector that emulates real channel security. */
-grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
- grpc_channel_credentials *channel_creds,
- grpc_call_credentials *request_metadata_creds, const char *target,
- const grpc_channel_args *args);
+grpc_channel_security_connector* grpc_fake_channel_security_connector_create(
+ grpc_channel_credentials* channel_creds,
+ grpc_call_credentials* request_metadata_creds, const char* target,
+ const grpc_channel_args* args);
/* For TESTING ONLY!
Creates a fake connector that emulates real server security. */
-grpc_server_security_connector *grpc_fake_server_security_connector_create(
- grpc_server_credentials *server_creds);
+grpc_server_security_connector* grpc_fake_server_security_connector_create(
+ grpc_server_credentials* server_creds);
/* Config for ssl clients. */
typedef struct {
- tsi_ssl_pem_key_cert_pair *pem_key_cert_pair;
- char *pem_root_certs;
+ tsi_ssl_pem_key_cert_pair* pem_key_cert_pair;
+ char* pem_root_certs;
} grpc_ssl_config;
/* Creates an SSL channel_security_connector.
@@ -222,22 +222,22 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_channel_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds,
- grpc_call_credentials *request_metadata_creds,
- const grpc_ssl_config *config, const char *target_name,
- const char *overridden_target_name, grpc_channel_security_connector **sc);
+ grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
+ grpc_call_credentials* request_metadata_creds,
+ const grpc_ssl_config* config, const char* target_name,
+ const char* overridden_target_name, grpc_channel_security_connector** sc);
/* Gets the default ssl roots. Returns NULL if not found. */
-const char *grpc_get_default_ssl_roots(void);
+const char* grpc_get_default_ssl_roots(void);
/* Exposed for TESTING ONLY!. */
grpc_slice grpc_get_default_ssl_roots_for_testing(void);
/* Config for ssl servers. */
typedef struct {
- tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs;
+ tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs;
size_t num_key_cert_pairs;
- char *pem_root_certs;
+ char* pem_root_certs;
grpc_ssl_client_certificate_request_type client_certificate_request;
} grpc_ssl_server_config;
@@ -248,18 +248,18 @@ typedef struct {
specific error code otherwise.
*/
grpc_security_status grpc_ssl_server_security_connector_create(
- grpc_exec_ctx *exec_ctx, grpc_server_credentials *server_credentials,
- grpc_server_security_connector **sc);
+ grpc_exec_ctx* exec_ctx, grpc_server_credentials* server_credentials,
+ grpc_server_security_connector** sc);
/* Util. */
-const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
- const char *name);
+const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer,
+ const char* name);
/* Exposed for testing only. */
-grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer);
+grpc_auth_context* tsi_ssl_peer_to_auth_context(const tsi_peer* peer);
tsi_peer tsi_shallow_peer_from_ssl_auth_context(
- const grpc_auth_context *auth_context);
-void tsi_shallow_peer_destruct(tsi_peer *peer);
+ const grpc_auth_context* auth_context);
+void tsi_shallow_peer_destruct(tsi_peer* peer);
#ifdef __cplusplus
}
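
Every hunk in this header, as everywhere in this merge, makes the same mechanical change: clang-format 5.0 binds the `*` of a pointer declaration to the type rather than to the name, with no effect on meaning. A minimal stand-alone sketch of the two equivalent spellings follows; parse_flag_old and parse_flag_new are made-up names used only for illustration, and if the repository drives this from a .clang-format file the relevant option would be PointerAlignment: Left, though that file is not part of this excerpt.

/* Both prototypes are identical to the compiler; this merge simply moves the
   codebase from the first spelling to the second. */
int parse_flag_old(const char *arg, int *out); /* "type *name": pre-change style */
int parse_flag_new(const char* arg, int* out); /* "type* name": post-change style */
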
diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc
index 3d19605617..63706f142b 100644
--- a/src/core/lib/security/transport/security_handshaker.cc
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -40,33 +40,33 @@ typedef struct {
grpc_handshaker base;
// State set at creation time.
- tsi_handshaker *handshaker;
- grpc_security_connector *connector;
+ tsi_handshaker* handshaker;
+ grpc_security_connector* connector;
gpr_mu mu;
gpr_refcount refs;
bool shutdown;
// Endpoint and read buffer to destroy after a shutdown.
- grpc_endpoint *endpoint_to_destroy;
- grpc_slice_buffer *read_buffer_to_destroy;
+ grpc_endpoint* endpoint_to_destroy;
+ grpc_slice_buffer* read_buffer_to_destroy;
// State saved while performing the handshake.
- grpc_handshaker_args *args;
- grpc_closure *on_handshake_done;
+ grpc_handshaker_args* args;
+ grpc_closure* on_handshake_done;
- unsigned char *handshake_buffer;
+ unsigned char* handshake_buffer;
size_t handshake_buffer_size;
grpc_slice_buffer outgoing;
grpc_closure on_handshake_data_sent_to_peer;
grpc_closure on_handshake_data_received_from_peer;
grpc_closure on_peer_checked;
- grpc_auth_context *auth_context;
- tsi_handshaker_result *handshaker_result;
+ grpc_auth_context* auth_context;
+ tsi_handshaker_result* handshaker_result;
} security_handshaker;
-static void security_handshaker_unref(grpc_exec_ctx *exec_ctx,
- security_handshaker *h) {
+static void security_handshaker_unref(grpc_exec_ctx* exec_ctx,
+ security_handshaker* h) {
if (gpr_unref(&h->refs)) {
gpr_mu_destroy(&h->mu);
tsi_handshaker_destroy(h->handshaker);
@@ -88,8 +88,8 @@ static void security_handshaker_unref(grpc_exec_ctx *exec_ctx,
// Set args fields to NULL, saving the endpoint and read buffer for
// later destruction.
-static void cleanup_args_for_failure_locked(grpc_exec_ctx *exec_ctx,
- security_handshaker *h) {
+static void cleanup_args_for_failure_locked(grpc_exec_ctx* exec_ctx,
+ security_handshaker* h) {
h->endpoint_to_destroy = h->args->endpoint;
h->args->endpoint = NULL;
h->read_buffer_to_destroy = h->args->read_buffer;
@@ -100,15 +100,15 @@ static void cleanup_args_for_failure_locked(grpc_exec_ctx *exec_ctx,
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
-static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
- security_handshaker *h,
- grpc_error *error) {
+static void security_handshake_failed_locked(grpc_exec_ctx* exec_ctx,
+ security_handshaker* h,
+ grpc_error* error) {
if (error == GRPC_ERROR_NONE) {
// If we were shut down after the handshake succeeded but before an
// endpoint callback was invoked, we need to generate our own error.
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
}
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
if (!h->shutdown) {
@@ -128,14 +128,14 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
-static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
- security_handshaker *h, grpc_error *error) {
+static void on_peer_checked_inner(grpc_exec_ctx* exec_ctx,
+ security_handshaker* h, grpc_error* error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
return;
}
// Create zero-copy frame protector, if implemented.
- tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
+ tsi_zero_copy_grpc_protector* zero_copy_protector = NULL;
tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
exec_ctx, h->handshaker_result, NULL, &zero_copy_protector);
if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
@@ -147,7 +147,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
return;
}
// Create frame protector if zero-copy frame protector is NULL.
- tsi_frame_protector *protector = NULL;
+ tsi_frame_protector* protector = NULL;
if (zero_copy_protector == NULL) {
result = tsi_handshaker_result_create_frame_protector(h->handshaker_result,
NULL, &protector);
@@ -160,14 +160,14 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
}
}
// Get unused bytes.
- const unsigned char *unused_bytes = NULL;
+ const unsigned char* unused_bytes = NULL;
size_t unused_bytes_size = 0;
result = tsi_handshaker_result_get_unused_bytes(
h->handshaker_result, &unused_bytes, &unused_bytes_size);
// Create secure endpoint.
if (unused_bytes_size > 0) {
grpc_slice slice =
- grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size);
+ grpc_slice_from_copied_buffer((char*)unused_bytes, unused_bytes_size);
h->args->endpoint = grpc_secure_endpoint_create(
protector, zero_copy_protector, h->args->endpoint, &slice, 1);
grpc_slice_unref_internal(exec_ctx, slice);
@@ -181,7 +181,7 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer);
// Add auth context to channel args.
grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context);
- grpc_channel_args *tmp_args = h->args->args;
+ grpc_channel_args* tmp_args = h->args->args;
h->args->args =
grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
grpc_channel_args_destroy(exec_ctx, tmp_args);
@@ -192,17 +192,17 @@ static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
h->shutdown = true;
}
-static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = (security_handshaker *)arg;
+static void on_peer_checked(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ security_handshaker* h = (security_handshaker*)arg;
gpr_mu_lock(&h->mu);
on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
-static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx,
- security_handshaker *h) {
+static grpc_error* check_peer_locked(grpc_exec_ctx* exec_ctx,
+ security_handshaker* h) {
tsi_peer peer;
tsi_result result =
tsi_handshaker_result_extract_peer(h->handshaker_result, &peer);
@@ -215,11 +215,11 @@ static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *on_handshake_next_done_locked(
- grpc_exec_ctx *exec_ctx, security_handshaker *h, tsi_result result,
- const unsigned char *bytes_to_send, size_t bytes_to_send_size,
- tsi_handshaker_result *handshaker_result) {
- grpc_error *error = GRPC_ERROR_NONE;
+static grpc_error* on_handshake_next_done_locked(
+ grpc_exec_ctx* exec_ctx, security_handshaker* h, tsi_result result,
+ const unsigned char* bytes_to_send, size_t bytes_to_send_size,
+ tsi_handshaker_result* handshaker_result) {
+ grpc_error* error = GRPC_ERROR_NONE;
// Read more if we need to.
if (result == TSI_INCOMPLETE_DATA) {
GPR_ASSERT(bytes_to_send_size == 0);
@@ -239,7 +239,7 @@ static grpc_error *on_handshake_next_done_locked(
if (bytes_to_send_size > 0) {
// Send data to peer, if needed.
grpc_slice to_send = grpc_slice_from_copied_buffer(
- (const char *)bytes_to_send, bytes_to_send_size);
+ (const char*)bytes_to_send, bytes_to_send_size);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing);
grpc_slice_buffer_add(&h->outgoing, to_send);
grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing,
@@ -256,14 +256,14 @@ static grpc_error *on_handshake_next_done_locked(
}
static void on_handshake_next_done_grpc_wrapper(
- tsi_result result, void *user_data, const unsigned char *bytes_to_send,
- size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
- security_handshaker *h = (security_handshaker *)user_data;
+ tsi_result result, void* user_data, const unsigned char* bytes_to_send,
+ size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result) {
+ security_handshaker* h = (security_handshaker*)user_data;
// This callback will be invoked by TSI in a non-grpc thread, so it's
// safe to create our own exec_ctx here.
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(&h->mu);
- grpc_error *error =
+ grpc_error* error =
on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send,
bytes_to_send_size, handshaker_result);
if (error != GRPC_ERROR_NONE) {
@@ -276,13 +276,13 @@ static void on_handshake_next_done_grpc_wrapper(
grpc_exec_ctx_finish(&exec_ctx);
}
-static grpc_error *do_handshaker_next_locked(
- grpc_exec_ctx *exec_ctx, security_handshaker *h,
- const unsigned char *bytes_received, size_t bytes_received_size) {
+static grpc_error* do_handshaker_next_locked(
+ grpc_exec_ctx* exec_ctx, security_handshaker* h,
+ const unsigned char* bytes_received, size_t bytes_received_size) {
// Invoke TSI handshaker.
- const unsigned char *bytes_to_send = NULL;
+ const unsigned char* bytes_to_send = NULL;
size_t bytes_to_send_size = 0;
- tsi_handshaker_result *handshaker_result = NULL;
+ tsi_handshaker_result* handshaker_result = NULL;
tsi_result result = tsi_handshaker_next(
h->handshaker, bytes_received, bytes_received_size, &bytes_to_send,
&bytes_to_send_size, &handshaker_result,
@@ -298,14 +298,15 @@ static grpc_error *do_handshaker_next_locked(
bytes_to_send_size, handshaker_result);
}
-static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
- void *arg, grpc_error *error) {
- security_handshaker *h = (security_handshaker *)arg;
+static void on_handshake_data_received_from_peer(grpc_exec_ctx* exec_ctx,
+ void* arg, grpc_error* error) {
+ security_handshaker* h = (security_handshaker*)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
- exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Handshake read failed", &error, 1));
+ exec_ctx, h,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Handshake read failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
return;
@@ -318,7 +319,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
}
if (bytes_received_size > h->handshake_buffer_size) {
h->handshake_buffer =
- (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
+ (uint8_t*)gpr_realloc(h->handshake_buffer, bytes_received_size);
h->handshake_buffer_size = bytes_received_size;
}
size_t offset = 0;
@@ -341,14 +342,15 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
}
}
-static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- security_handshaker *h = (security_handshaker *)arg;
+static void on_handshake_data_sent_to_peer(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ security_handshaker* h = (security_handshaker*)arg;
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
- exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Handshake write failed", &error, 1));
+ exec_ctx, h,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Handshake write failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
return;
@@ -373,16 +375,16 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
// public handshaker API
//
-static void security_handshaker_destroy(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker) {
- security_handshaker *h = (security_handshaker *)handshaker;
+static void security_handshaker_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker) {
+ security_handshaker* h = (security_handshaker*)handshaker;
security_handshaker_unref(exec_ctx, h);
}
-static void security_handshaker_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker,
- grpc_error *why) {
- security_handshaker *h = (security_handshaker *)handshaker;
+static void security_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker,
+ grpc_error* why) {
+ security_handshaker* h = (security_handshaker*)handshaker;
gpr_mu_lock(&h->mu);
if (!h->shutdown) {
h->shutdown = true;
@@ -393,17 +395,17 @@ static void security_handshaker_shutdown(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(why);
}
-static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker,
- grpc_tcp_server_acceptor *acceptor,
- grpc_closure *on_handshake_done,
- grpc_handshaker_args *args) {
- security_handshaker *h = (security_handshaker *)handshaker;
+static void security_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker,
+ grpc_tcp_server_acceptor* acceptor,
+ grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args) {
+ security_handshaker* h = (security_handshaker*)handshaker;
gpr_mu_lock(&h->mu);
h->args = args;
h->on_handshake_done = on_handshake_done;
gpr_ref(&h->refs);
- grpc_error *error = do_handshaker_next_locked(exec_ctx, h, NULL, 0);
+ grpc_error* error = do_handshaker_next_locked(exec_ctx, h, NULL, 0);
if (error != GRPC_ERROR_NONE) {
security_handshake_failed_locked(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
@@ -417,18 +419,18 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
security_handshaker_destroy, security_handshaker_shutdown,
security_handshaker_do_handshake};
-static grpc_handshaker *security_handshaker_create(
- grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
- grpc_security_connector *connector) {
- security_handshaker *h =
- (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
+static grpc_handshaker* security_handshaker_create(
+ grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
+ grpc_security_connector* connector) {
+ security_handshaker* h =
+ (security_handshaker*)gpr_zalloc(sizeof(security_handshaker));
grpc_handshaker_init(&security_handshaker_vtable, &h->base);
h->handshaker = handshaker;
h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
gpr_mu_init(&h->mu);
gpr_ref_init(&h->refs, 1);
h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
- h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
+ h->handshake_buffer = (uint8_t*)gpr_malloc(h->handshake_buffer_size);
GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
on_handshake_data_sent_to_peer, h,
grpc_schedule_on_exec_ctx);
@@ -445,22 +447,22 @@ static grpc_handshaker *security_handshaker_create(
// fail_handshaker
//
-static void fail_handshaker_destroy(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker) {
+static void fail_handshaker_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker) {
gpr_free(handshaker);
}
-static void fail_handshaker_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker,
- grpc_error *why) {
+static void fail_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker,
+ grpc_error* why) {
GRPC_ERROR_UNREF(why);
}
-static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
- grpc_handshaker *handshaker,
- grpc_tcp_server_acceptor *acceptor,
- grpc_closure *on_handshake_done,
- grpc_handshaker_args *args) {
+static void fail_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
+ grpc_handshaker* handshaker,
+ grpc_tcp_server_acceptor* acceptor,
+ grpc_closure* on_handshake_done,
+ grpc_handshaker_args* args) {
GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to create security handshaker"));
@@ -470,8 +472,8 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
fail_handshaker_destroy, fail_handshaker_shutdown,
fail_handshaker_do_handshake};
-static grpc_handshaker *fail_handshaker_create() {
- grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
+static grpc_handshaker* fail_handshaker_create() {
+ grpc_handshaker* h = (grpc_handshaker*)gpr_malloc(sizeof(*h));
grpc_handshaker_init(&fail_handshaker_vtable, h);
return h;
}
@@ -481,27 +483,27 @@ static grpc_handshaker *fail_handshaker_create() {
//
static void client_handshaker_factory_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory,
- const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) {
- grpc_channel_security_connector *security_connector =
- (grpc_channel_security_connector *)grpc_security_connector_find_in_args(
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
+ const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+ grpc_channel_security_connector* security_connector =
+ (grpc_channel_security_connector*)grpc_security_connector_find_in_args(
args);
grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector,
handshake_mgr);
}
static void server_handshaker_factory_add_handshakers(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *hf,
- const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) {
- grpc_server_security_connector *security_connector =
- (grpc_server_security_connector *)grpc_security_connector_find_in_args(
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* hf,
+ const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+ grpc_server_security_connector* security_connector =
+ (grpc_server_security_connector*)grpc_security_connector_find_in_args(
args);
grpc_server_security_connector_add_handshakers(exec_ctx, security_connector,
handshake_mgr);
}
static void handshaker_factory_destroy(
- grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory) {}
+ grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory) {}
static const grpc_handshaker_factory_vtable client_handshaker_factory_vtable = {
client_handshaker_factory_add_handshakers, handshaker_factory_destroy};
@@ -519,9 +521,9 @@ static grpc_handshaker_factory server_handshaker_factory = {
// exported functions
//
-grpc_handshaker *grpc_security_handshaker_create(
- grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
- grpc_security_connector *connector) {
+grpc_handshaker* grpc_security_handshaker_create(
+ grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
+ grpc_security_connector* connector) {
// If no TSI handshaker was created, return a handshaker that always fails.
// Otherwise, return a real security handshaker.
if (handshaker == NULL) {
diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h
index 178099bb94..174f70f0dd 100644
--- a/src/core/lib/security/transport/security_handshaker.h
+++ b/src/core/lib/security/transport/security_handshaker.h
@@ -28,9 +28,9 @@ extern "C" {
#endif
/// Creates a security handshaker using \a handshaker.
-grpc_handshaker *grpc_security_handshaker_create(
- grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
- grpc_security_connector *connector);
+grpc_handshaker* grpc_security_handshaker_create(
+ grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
+ grpc_security_connector* connector);
/// Registers security handshaker factories.
void grpc_security_register_handshaker_factories();
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index f5e02f42fe..e1307410d6 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -33,37 +33,37 @@ typedef enum {
} async_state;
typedef struct call_data {
- grpc_call_combiner *call_combiner;
- grpc_call_stack *owning_call;
- grpc_transport_stream_op_batch *recv_initial_metadata_batch;
- grpc_closure *original_recv_initial_metadata_ready;
+ grpc_call_combiner* call_combiner;
+ grpc_call_stack* owning_call;
+ grpc_transport_stream_op_batch* recv_initial_metadata_batch;
+ grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
grpc_metadata_array md;
- const grpc_metadata *consumed_md;
+ const grpc_metadata* consumed_md;
size_t num_consumed_md;
- grpc_auth_context *auth_context;
+ grpc_auth_context* auth_context;
grpc_closure cancel_closure;
gpr_atm state; // async_state
} call_data;
typedef struct channel_data {
- grpc_auth_context *auth_context;
- grpc_server_credentials *creds;
+ grpc_auth_context* auth_context;
+ grpc_server_credentials* creds;
} channel_data;
static grpc_metadata_array metadata_batch_to_md_array(
- const grpc_metadata_batch *batch) {
- grpc_linked_mdelem *l;
+ const grpc_metadata_batch* batch) {
+ grpc_linked_mdelem* l;
grpc_metadata_array result;
grpc_metadata_array_init(&result);
for (l = batch->list.head; l != NULL; l = l->next) {
- grpc_metadata *usr_md = NULL;
+ grpc_metadata* usr_md = NULL;
grpc_mdelem md = l->md;
grpc_slice key = GRPC_MDKEY(md);
grpc_slice value = GRPC_MDVALUE(md);
if (result.count == result.capacity) {
result.capacity = GPR_MAX(result.capacity + 8, result.capacity * 2);
- result.metadata = (grpc_metadata *)gpr_realloc(
+ result.metadata = (grpc_metadata*)gpr_realloc(
result.metadata, result.capacity * sizeof(grpc_metadata));
}
usr_md = &result.metadata[result.count++];
@@ -73,14 +73,14 @@ static grpc_metadata_array metadata_batch_to_md_array(
return result;
}
-static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
- void *user_data,
+static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx* exec_ctx,
+ void* user_data,
grpc_mdelem md) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
size_t i;
for (i = 0; i < calld->num_consumed_md; i++) {
- const grpc_metadata *consumed_md = &calld->consumed_md[i];
+ const grpc_metadata* consumed_md = &calld->consumed_md[i];
if (grpc_slice_eq(GRPC_MDKEY(md), consumed_md->key) &&
grpc_slice_eq(GRPC_MDVALUE(md), consumed_md->value))
return GRPC_FILTERED_REMOVE();
@@ -88,15 +88,15 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
return GRPC_FILTERED_MDELEM(md);
}
-static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_metadata *consumed_md,
+static void on_md_processing_done_inner(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_metadata* consumed_md,
size_t num_consumed_md,
- const grpc_metadata *response_md,
+ const grpc_metadata* response_md,
size_t num_response_md,
- grpc_error *error) {
- call_data *calld = (call_data *)elem->call_data;
- grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
+ grpc_error* error) {
+ call_data* calld = (call_data*)elem->call_data;
+ grpc_transport_stream_op_batch* batch = calld->recv_initial_metadata_batch;
/* TODO(jboeuf): Implement support for response_md. */
if (response_md != NULL && num_response_md > 0) {
gpr_log(GPR_INFO,
@@ -116,16 +116,16 @@ static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx,
// Called from application code.
static void on_md_processing_done(
- void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
- const grpc_metadata *response_md, size_t num_response_md,
- grpc_status_code status, const char *error_details) {
- grpc_call_element *elem = (grpc_call_element *)user_data;
- call_data *calld = (call_data *)elem->call_data;
+ void* user_data, const grpc_metadata* consumed_md, size_t num_consumed_md,
+ const grpc_metadata* response_md, size_t num_response_md,
+ grpc_status_code status, const char* error_details) {
+ grpc_call_element* elem = (grpc_call_element*)user_data;
+ call_data* calld = (call_data*)elem->call_data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
// If the call was not cancelled while we were in flight, process the result.
if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
(gpr_atm)STATE_DONE)) {
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
if (status != GRPC_STATUS_OK) {
if (error_details == NULL) {
error_details = "Authentication metadata processing failed.";
@@ -147,9 +147,9 @@ static void on_md_processing_done(
grpc_exec_ctx_finish(&exec_ctx);
}
-static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)elem->call_data;
+static void cancel_call(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)elem->call_data;
// If the result was not already processed, invoke the callback now.
if (error != GRPC_ERROR_NONE &&
gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
@@ -160,12 +160,12 @@ static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
}
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)arg;
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
- grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)arg;
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
+ grpc_transport_stream_op_batch* batch = calld->recv_initial_metadata_batch;
if (error == GRPC_ERROR_NONE) {
if (chand->creds != NULL && chand->creds->processor.process != NULL) {
// We're calling out to the application, so we need to make sure
@@ -189,9 +189,9 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
}
static void auth_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *batch) {
- call_data *calld = (call_data *)elem->call_data;
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* batch) {
+ call_data* calld = (call_data*)elem->call_data;
if (batch->recv_initial_metadata) {
// Inject our callback.
calld->recv_initial_metadata_batch = batch;
@@ -204,11 +204,11 @@ static void auth_start_transport_stream_op_batch(
}
/* Constructor for call_data */
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
calld->call_combiner = args->call_combiner;
calld->owning_call = args->call_stack;
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
@@ -216,7 +216,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_schedule_on_exec_ctx);
// Create server security context. Set its auth context from channel
// data and save it in the call context.
- grpc_server_security_context *server_ctx =
+ grpc_server_security_context* server_ctx =
grpc_server_security_context_create();
server_ctx->auth_context = grpc_auth_context_create(chand->auth_context);
calld->auth_context = server_ctx->auth_context;
@@ -231,31 +231,31 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
}
/* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {}
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {}
/* Constructor for channel_data */
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(!args->is_last);
- channel_data *chand = (channel_data *)elem->channel_data;
- grpc_auth_context *auth_context =
+ channel_data* chand = (channel_data*)elem->channel_data;
+ grpc_auth_context* auth_context =
grpc_find_auth_context_in_args(args->channel_args);
GPR_ASSERT(auth_context != NULL);
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
- grpc_server_credentials *creds =
+ grpc_server_credentials* creds =
grpc_find_server_credentials_in_args(args->channel_args);
chand->creds = grpc_server_credentials_ref(creds);
return GRPC_ERROR_NONE;
}
/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
grpc_server_credentials_unref(exec_ctx, chand->creds);
}
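
The filter above only forwards metadata to whatever processor the application attached to its server credentials, and the callback it hands out has the same shape as on_md_processing_done() above. Below is a hedged application-side sketch, assuming the public grpc_auth_metadata_processor / grpc_process_auth_metadata_done_cb API from <grpc/grpc_security.h>; my_process and the "authorization" key are illustrative only.

#include <stddef.h>

#include <grpc/grpc_security.h>
#include <grpc/slice.h>
#include <grpc/status.h>

/* Consume the "authorization" entry (so remove_consumed_md() strips it from
   recv_initial_metadata) and report success; fail the call otherwise. */
static void my_process(void* state, grpc_auth_context* ctx,
                       const grpc_metadata* md, size_t num_md,
                       grpc_process_auth_metadata_done_cb cb, void* user_data) {
  (void)state;
  (void)ctx;
  for (size_t i = 0; i < num_md; i++) {
    if (grpc_slice_str_cmp(md[i].key, "authorization") == 0) {
      cb(user_data, &md[i], 1, /*response_md=*/NULL, 0, GRPC_STATUS_OK, NULL);
      return;
    }
  }
  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED,
     "missing authorization metadata");
}

An application would install such a processor through grpc_server_credentials_set_auth_metadata_processor(), also part of the public API; nothing in this sketch is taken from the change itself.
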
diff --git a/src/core/lib/security/transport/tsi_error.cc b/src/core/lib/security/transport/tsi_error.cc
index 72f9600e84..f71696d35d 100644
--- a/src/core/lib/security/transport/tsi_error.cc
+++ b/src/core/lib/security/transport/tsi_error.cc
@@ -18,7 +18,7 @@
#include "src/core/lib/security/transport/tsi_error.h"
-grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result) {
+grpc_error* grpc_set_tsi_error_result(grpc_error* error, tsi_result result) {
return grpc_error_set_int(
grpc_error_set_str(
error, GRPC_ERROR_STR_TSI_ERROR,
diff --git a/src/core/lib/security/transport/tsi_error.h b/src/core/lib/security/transport/tsi_error.h
index 4e19daf796..4e8418f3fd 100644
--- a/src/core/lib/security/transport/tsi_error.h
+++ b/src/core/lib/security/transport/tsi_error.h
@@ -26,7 +26,7 @@
extern "C" {
#endif
-grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result);
+grpc_error* grpc_set_tsi_error_result(grpc_error* error, tsi_result result);
#ifdef __cplusplus
}
diff --git a/src/core/lib/security/util/json_util.cc b/src/core/lib/security/util/json_util.cc
index d847addef9..365bd1537c 100644
--- a/src/core/lib/security/util/json_util.cc
+++ b/src/core/lib/security/util/json_util.cc
@@ -23,9 +23,9 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-const char *grpc_json_get_string_property(const grpc_json *json,
- const char *prop_name) {
- grpc_json *child;
+const char* grpc_json_get_string_property(const grpc_json* json,
+ const char* prop_name) {
+ grpc_json* child;
for (child = json->child; child != NULL; child = child->next) {
if (strcmp(child->key, prop_name) == 0) break;
}
@@ -36,10 +36,10 @@ const char *grpc_json_get_string_property(const grpc_json *json,
return child->value;
}
-bool grpc_copy_json_string_property(const grpc_json *json,
- const char *prop_name,
- char **copied_value) {
- const char *prop_value = grpc_json_get_string_property(json, prop_name);
+bool grpc_copy_json_string_property(const grpc_json* json,
+ const char* prop_name,
+ char** copied_value) {
+ const char* prop_value = grpc_json_get_string_property(json, prop_name);
if (prop_value == NULL) return false;
*copied_value = gpr_strdup(prop_value);
return true;
diff --git a/src/core/lib/security/util/json_util.h b/src/core/lib/security/util/json_util.h
index cdd8a7198a..7538f76120 100644
--- a/src/core/lib/security/util/json_util.h
+++ b/src/core/lib/security/util/json_util.h
@@ -33,13 +33,13 @@ extern "C" {
#endif
// Gets a child property from a json node.
-const char *grpc_json_get_string_property(const grpc_json *json,
- const char *prop_name);
+const char* grpc_json_get_string_property(const grpc_json* json,
+ const char* prop_name);
// Copies the value of the json child property specified by prop_name.
// Returns false if the property was not found.
-bool grpc_copy_json_string_property(const grpc_json *json,
- const char *prop_name, char **copied_value);
+bool grpc_copy_json_string_property(const grpc_json* json,
+ const char* prop_name, char** copied_value);
#ifdef __cplusplus
}
diff --git a/src/core/lib/slice/b64.cc b/src/core/lib/slice/b64.cc
index 50264719a4..fe7a86ef84 100644
--- a/src/core/lib/slice/b64.cc
+++ b/src/core/lib/slice/b64.cc
@@ -54,11 +54,11 @@ static const char base64_url_safe_chars[] =
/* --- base64 functions. --- */
-char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
+char* grpc_base64_encode(const void* vdata, size_t data_size, int url_safe,
int multiline) {
size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
- char *result = (char *)gpr_malloc(result_projected_size);
+ char* result = (char*)gpr_malloc(result_projected_size);
grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
return result;
}
@@ -73,15 +73,15 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
return result_projected_size;
}
-void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
+void grpc_base64_encode_core(char* result, const void* vdata, size_t data_size,
int url_safe, int multiline) {
- const unsigned char *data = (const unsigned char *)vdata;
- const char *base64_chars =
+ const unsigned char* data = (const unsigned char*)vdata;
+ const char* base64_chars =
url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
const size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
- char *current = result;
+ char* current = result;
size_t num_blocks = 0;
size_t i = 0;
@@ -122,27 +122,27 @@ void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
result[current - result] = '\0';
}
-grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64,
+grpc_slice grpc_base64_decode(grpc_exec_ctx* exec_ctx, const char* b64,
int url_safe) {
return grpc_base64_decode_with_len(exec_ctx, b64, strlen(b64), url_safe);
}
-static void decode_one_char(const unsigned char *codes, unsigned char *result,
- size_t *result_offset) {
+static void decode_one_char(const unsigned char* codes, unsigned char* result,
+ size_t* result_offset) {
uint32_t packed = ((uint32_t)codes[0] << 2) | ((uint32_t)codes[1] >> 4);
result[(*result_offset)++] = (unsigned char)packed;
}
-static void decode_two_chars(const unsigned char *codes, unsigned char *result,
- size_t *result_offset) {
+static void decode_two_chars(const unsigned char* codes, unsigned char* result,
+ size_t* result_offset) {
uint32_t packed = ((uint32_t)codes[0] << 10) | ((uint32_t)codes[1] << 4) |
((uint32_t)codes[2] >> 2);
result[(*result_offset)++] = (unsigned char)(packed >> 8);
result[(*result_offset)++] = (unsigned char)(packed);
}
-static int decode_group(const unsigned char *codes, size_t num_codes,
- unsigned char *result, size_t *result_offset) {
+static int decode_group(const unsigned char* codes, size_t num_codes,
+ unsigned char* result, size_t* result_offset) {
GPR_ASSERT(num_codes <= 4);
/* Short end groups that may not have padding. */
@@ -185,10 +185,10 @@ static int decode_group(const unsigned char *codes, size_t num_codes,
return 1;
}
-grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64,
+grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx* exec_ctx, const char* b64,
size_t b64_len, int url_safe) {
grpc_slice result = GRPC_SLICE_MALLOC(b64_len);
- unsigned char *current = GRPC_SLICE_START_PTR(result);
+ unsigned char* current = GRPC_SLICE_START_PTR(result);
size_t result_size = 0;
unsigned char codes[4];
size_t num_codes = 0;
diff --git a/src/core/lib/slice/b64.h b/src/core/lib/slice/b64.h
index 9b4dc65dbb..467f5d848a 100644
--- a/src/core/lib/slice/b64.h
+++ b/src/core/lib/slice/b64.h
@@ -28,7 +28,7 @@ extern "C" {
/* Encodes data using base64. It is the caller's responsibility to free
the returned char * using gpr_free. Returns NULL on NULL input.
TODO(makdharma) : change the flags to bool from int */
-char *grpc_base64_encode(const void *data, size_t data_size, int url_safe,
+char* grpc_base64_encode(const void* data, size_t data_size, int url_safe,
int multiline);
/* estimate the upper bound on size of base64 encoded data. The actual size
@@ -39,16 +39,16 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
/* Encodes data using base64 and writes it to memory pointed to by result. It is
 * the caller's responsibility to allocate enough memory in |result| to fit the
* encoded data. */
-void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
+void grpc_base64_encode_core(char* result, const void* vdata, size_t data_size,
int url_safe, int multiline);
/* Decodes data according to the base64 specification. Returns an empty
slice in case of failure. */
-grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64,
+grpc_slice grpc_base64_decode(grpc_exec_ctx* exec_ctx, const char* b64,
int url_safe);
/* Same as above except that the length is provided by the caller. */
-grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64,
+grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx* exec_ctx, const char* b64,
size_t b64_len, int url_safe);
#ifdef __cplusplus
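
The hunks above expose the full signatures of the base64 helpers, which is enough to sketch how they fit together. In the sketch below, base64_round_trip is a made-up name, the <grpc/...> include paths are assumed from the rest of the tree, and error handling is omitted; everything else comes from declarations visible in this diff.

#include <string.h>

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/b64.h"

/* Encode a NUL-terminated message and decode it back, checking the length. */
static void base64_round_trip(const char* msg) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  char* encoded = grpc_base64_encode(msg, strlen(msg), /*url_safe=*/1,
                                     /*multiline=*/0);
  grpc_slice decoded = grpc_base64_decode(&exec_ctx, encoded, /*url_safe=*/1);
  GPR_ASSERT(GRPC_SLICE_LENGTH(decoded) == strlen(msg));
  grpc_slice_unref(decoded);
  gpr_free(encoded);
  grpc_exec_ctx_finish(&exec_ctx);
}
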
diff --git a/src/core/lib/slice/percent_encoding.cc b/src/core/lib/slice/percent_encoding.cc
index effc8d7ad6..894e43b191 100644
--- a/src/core/lib/slice/percent_encoding.cc
+++ b/src/core/lib/slice/percent_encoding.cc
@@ -32,19 +32,19 @@ const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static bool is_unreserved_character(uint8_t c,
- const uint8_t *unreserved_bytes) {
+ const uint8_t* unreserved_bytes) {
return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0;
}
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
- const uint8_t *unreserved_bytes) {
+ const uint8_t* unreserved_bytes) {
static const uint8_t hex[] = "0123456789ABCDEF";
// first pass: count the number of bytes needed to output this string
size_t output_length = 0;
- const uint8_t *slice_start = GRPC_SLICE_START_PTR(slice);
- const uint8_t *slice_end = GRPC_SLICE_END_PTR(slice);
- const uint8_t *p;
+ const uint8_t* slice_start = GRPC_SLICE_START_PTR(slice);
+ const uint8_t* slice_end = GRPC_SLICE_END_PTR(slice);
+ const uint8_t* p;
bool any_reserved_bytes = false;
for (p = slice_start; p < slice_end; p++) {
bool unres = is_unreserved_character(*p, unreserved_bytes);
@@ -57,7 +57,7 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice,
}
// second pass: actually encode
grpc_slice out = GRPC_SLICE_MALLOC(output_length);
- uint8_t *q = GRPC_SLICE_START_PTR(out);
+ uint8_t* q = GRPC_SLICE_START_PTR(out);
for (p = slice_start; p < slice_end; p++) {
if (is_unreserved_character(*p, unreserved_bytes)) {
*q++ = *p;
@@ -71,7 +71,7 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice,
return out;
}
-static bool valid_hex(const uint8_t *p, const uint8_t *end) {
+static bool valid_hex(const uint8_t* p, const uint8_t* end) {
if (p >= end) return false;
return (*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f') ||
(*p >= 'A' && *p <= 'F');
@@ -85,10 +85,10 @@ static uint8_t dehex(uint8_t c) {
}
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
- const uint8_t *unreserved_bytes,
- grpc_slice *slice_out) {
- const uint8_t *p = GRPC_SLICE_START_PTR(slice_in);
- const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in);
+ const uint8_t* unreserved_bytes,
+ grpc_slice* slice_out) {
+ const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
+ const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;
bool any_percent_encoded_stuff = false;
while (p != in_end) {
@@ -111,7 +111,7 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
}
p = GRPC_SLICE_START_PTR(slice_in);
*slice_out = GRPC_SLICE_MALLOC(out_length);
- uint8_t *q = GRPC_SLICE_START_PTR(*slice_out);
+ uint8_t* q = GRPC_SLICE_START_PTR(*slice_out);
while (p != in_end) {
if (*p == '%') {
*q++ = (uint8_t)(dehex(p[1]) << 4) | (dehex(p[2]));
@@ -125,8 +125,8 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
}
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) {
- const uint8_t *p = GRPC_SLICE_START_PTR(slice_in);
- const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in);
+ const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
+ const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;
bool any_percent_encoded_stuff = false;
while (p != in_end) {
@@ -149,7 +149,7 @@ grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) {
}
p = GRPC_SLICE_START_PTR(slice_in);
grpc_slice out = GRPC_SLICE_MALLOC(out_length);
- uint8_t *q = GRPC_SLICE_START_PTR(out);
+ uint8_t* q = GRPC_SLICE_START_PTR(out);
while (p != in_end) {
if (*p == '%') {
if (!valid_hex(p + 1, in_end) || !valid_hex(p + 2, in_end)) {
diff --git a/src/core/lib/slice/percent_encoding.h b/src/core/lib/slice/percent_encoding.h
index 14a4deb44b..22b5e8df31 100644
--- a/src/core/lib/slice/percent_encoding.h
+++ b/src/core/lib/slice/percent_encoding.h
@@ -49,7 +49,7 @@ extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8];
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
- const uint8_t *unreserved_bytes);
+ const uint8_t* unreserved_bytes);
/* Percent-decode a slice, strictly.
If the input is legal (contains no unreserved bytes, and legal % encodings),
returns true and sets *slice_out to the decoded slice.
@@ -57,8 +57,8 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice,
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
- const uint8_t *unreserved_bytes,
- grpc_slice *slice_out);
+ const uint8_t* unreserved_bytes,
+ grpc_slice* slice_out);
/* Percent-decode a slice, permissively.
If a % triplet can not be decoded, pass it through verbatim.
This cannot fail. */
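
Taken together, the strict encode/decode pair declared above round-trips any slice whose reserved bytes get escaped. A minimal sketch using only symbols visible in these hunks; percent_round_trip is a made-up name, and the include paths mirror the file paths shown in the diff.

#include <grpc/slice.h>
#include <grpc/support/log.h>

#include "src/core/lib/slice/percent_encoding.h"

/* Encode with the "compatible" unreserved-byte table, then strictly decode
   and verify the original slice comes back. */
static void percent_round_trip(void) {
  grpc_slice in = grpc_slice_from_static_string("hello world");
  grpc_slice encoded = grpc_percent_encode_slice(
      in, grpc_compatible_percent_encoding_unreserved_bytes);
  grpc_slice decoded;
  GPR_ASSERT(grpc_strict_percent_decode_slice(
      encoded, grpc_compatible_percent_encoding_unreserved_bytes, &decoded));
  GPR_ASSERT(grpc_slice_eq(in, decoded));
  grpc_slice_unref(encoded);
  grpc_slice_unref(decoded);
}
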
diff --git a/src/core/lib/slice/slice.cc b/src/core/lib/slice/slice.cc
index 0764eda052..d8e3200136 100644
--- a/src/core/lib/slice/slice.cc
+++ b/src/core/lib/slice/slice.cc
@@ -26,8 +26,8 @@
#include "src/core/lib/iomgr/exec_ctx.h"
-char *grpc_slice_to_c_string(grpc_slice slice) {
- char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+char* grpc_slice_to_c_string(grpc_slice slice) {
+ char* out = (char*)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
out[GRPC_SLICE_LENGTH(slice)] = 0;
return out;
@@ -54,7 +54,7 @@ grpc_slice grpc_slice_ref_internal(grpc_slice slice) {
return slice;
}
-void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice) {
+void grpc_slice_unref_internal(grpc_exec_ctx* exec_ctx, grpc_slice slice) {
if (slice.refcount) {
slice.refcount->vtable->unref(exec_ctx, slice.refcount);
}
@@ -74,8 +74,8 @@ void grpc_slice_unref(grpc_slice slice) {
/* grpc_slice_from_static_string support structure - a refcount that does
nothing */
-static void noop_ref(void *unused) {}
-static void noop_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
+static void noop_ref(void* unused) {}
+static void noop_unref(grpc_exec_ctx* exec_ctx, void* unused) {}
static const grpc_slice_refcount_vtable noop_refcount_vtable = {
noop_ref, noop_unref, grpc_slice_default_eq_impl,
@@ -83,15 +83,15 @@ static const grpc_slice_refcount_vtable noop_refcount_vtable = {
static grpc_slice_refcount noop_refcount = {&noop_refcount_vtable,
&noop_refcount};
-grpc_slice grpc_slice_from_static_buffer(const void *s, size_t len) {
+grpc_slice grpc_slice_from_static_buffer(const void* s, size_t len) {
grpc_slice slice;
slice.refcount = &noop_refcount;
- slice.data.refcounted.bytes = (uint8_t *)s;
+ slice.data.refcounted.bytes = (uint8_t*)s;
slice.data.refcounted.length = len;
return slice;
}
-grpc_slice grpc_slice_from_static_string(const char *s) {
+grpc_slice grpc_slice_from_static_string(const char* s) {
return grpc_slice_from_static_buffer(s, strlen(s));
}
@@ -100,17 +100,17 @@ grpc_slice grpc_slice_from_static_string(const char *s) {
typedef struct new_slice_refcount {
grpc_slice_refcount rc;
gpr_refcount refs;
- void (*user_destroy)(void *);
- void *user_data;
+ void (*user_destroy)(void*);
+ void* user_data;
} new_slice_refcount;
-static void new_slice_ref(void *p) {
- new_slice_refcount *r = (new_slice_refcount *)p;
+static void new_slice_ref(void* p) {
+ new_slice_refcount* r = (new_slice_refcount*)p;
gpr_ref(&r->refs);
}
-static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_slice_refcount *r = (new_slice_refcount *)p;
+static void new_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+ new_slice_refcount* r = (new_slice_refcount*)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data);
gpr_free(r);
@@ -121,12 +121,12 @@ static const grpc_slice_refcount_vtable new_slice_vtable = {
new_slice_ref, new_slice_unref, grpc_slice_default_eq_impl,
grpc_slice_default_hash_impl};
-grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
- void (*destroy)(void *),
- void *user_data) {
+grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
+ void (*destroy)(void*),
+ void* user_data) {
grpc_slice slice;
- new_slice_refcount *rc =
- (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount));
+ new_slice_refcount* rc =
+ (new_slice_refcount*)gpr_malloc(sizeof(new_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_slice_vtable;
rc->rc.sub_refcount = &rc->rc;
@@ -134,12 +134,12 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
rc->user_data = user_data;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = (uint8_t *)p;
+ slice.data.refcounted.bytes = (uint8_t*)p;
slice.data.refcounted.length = len;
return slice;
}
-grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) {
+grpc_slice grpc_slice_new(void* p, size_t len, void (*destroy)(void*)) {
/* Pass "p" to *destroy when the slice is no longer needed. */
return grpc_slice_new_with_user_data(p, len, destroy, p);
}
@@ -149,18 +149,18 @@ grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) {
typedef struct new_with_len_slice_refcount {
grpc_slice_refcount rc;
gpr_refcount refs;
- void *user_data;
+ void* user_data;
size_t user_length;
- void (*user_destroy)(void *, size_t);
+ void (*user_destroy)(void*, size_t);
} new_with_len_slice_refcount;
-static void new_with_len_ref(void *p) {
- new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
+static void new_with_len_ref(void* p) {
+ new_with_len_slice_refcount* r = (new_with_len_slice_refcount*)p;
gpr_ref(&r->refs);
}
-static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
- new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
+static void new_with_len_unref(grpc_exec_ctx* exec_ctx, void* p) {
+ new_with_len_slice_refcount* r = (new_with_len_slice_refcount*)p;
if (gpr_unref(&r->refs)) {
r->user_destroy(r->user_data, r->user_length);
gpr_free(r);
@@ -171,10 +171,10 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = {
new_with_len_ref, new_with_len_unref, grpc_slice_default_eq_impl,
grpc_slice_default_hash_impl};
-grpc_slice grpc_slice_new_with_len(void *p, size_t len,
- void (*destroy)(void *, size_t)) {
+grpc_slice grpc_slice_new_with_len(void* p, size_t len,
+ void (*destroy)(void*, size_t)) {
grpc_slice slice;
- new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc(
+ new_with_len_slice_refcount* rc = (new_with_len_slice_refcount*)gpr_malloc(
sizeof(new_with_len_slice_refcount));
gpr_ref_init(&rc->refs, 1);
rc->rc.vtable = &new_with_len_vtable;
@@ -184,19 +184,19 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len,
rc->user_length = len;
slice.refcount = &rc->rc;
- slice.data.refcounted.bytes = (uint8_t *)p;
+ slice.data.refcounted.bytes = (uint8_t*)p;
slice.data.refcounted.length = len;
return slice;
}
-grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t length) {
+grpc_slice grpc_slice_from_copied_buffer(const char* source, size_t length) {
if (length == 0) return grpc_empty_slice();
grpc_slice slice = GRPC_SLICE_MALLOC(length);
memcpy(GRPC_SLICE_START_PTR(slice), source, length);
return slice;
}
-grpc_slice grpc_slice_from_copied_string(const char *source) {
+grpc_slice grpc_slice_from_copied_string(const char* source) {
return grpc_slice_from_copied_buffer(source, strlen(source));
}
@@ -205,13 +205,13 @@ typedef struct {
gpr_refcount refs;
} malloc_refcount;
-static void malloc_ref(void *p) {
- malloc_refcount *r = (malloc_refcount *)p;
+static void malloc_ref(void* p) {
+ malloc_refcount* r = (malloc_refcount*)p;
gpr_ref(&r->refs);
}
-static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
- malloc_refcount *r = (malloc_refcount *)p;
+static void malloc_unref(grpc_exec_ctx* exec_ctx, void* p) {
+ malloc_refcount* r = (malloc_refcount*)p;
if (gpr_unref(&r->refs)) {
gpr_free(r);
}
@@ -233,8 +233,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
refcount is a malloc_refcount
bytes is an array of bytes of the requested length
Both parts are placed in the same allocation returned from gpr_malloc */
- malloc_refcount *rc =
- (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length);
+ malloc_refcount* rc =
+ (malloc_refcount*)gpr_malloc(sizeof(malloc_refcount) + length);
/* Initial refcount on rc is 1 - and it's up to the caller to release
this reference. */
@@ -247,7 +247,7 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
/* The slices refcount points back to the allocated block. */
slice.refcount = &rc->base;
/* The data bytes are placed immediately after the refcount struct */
- slice.data.refcounted.bytes = (uint8_t *)(rc + 1);
+ slice.data.refcounted.bytes = (uint8_t*)(rc + 1);
/* And the length of the block is set to the requested length */
slice.data.refcounted.length = length;
return slice;
@@ -307,7 +307,7 @@ grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) {
return subset;
}
-grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *source, size_t split,
+grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice* source, size_t split,
grpc_slice_ref_whom ref_whom) {
grpc_slice tail;
@@ -358,11 +358,11 @@ grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *source, size_t split,
return tail;
}
-grpc_slice grpc_slice_split_tail(grpc_slice *source, size_t split) {
+grpc_slice grpc_slice_split_tail(grpc_slice* source, size_t split) {
return grpc_slice_split_tail_maybe_ref(source, split, GRPC_SLICE_REF_BOTH);
}
-grpc_slice grpc_slice_split_head(grpc_slice *source, size_t split) {
+grpc_slice grpc_slice_split_head(grpc_slice* source, size_t split) {
grpc_slice head;
if (source->refcount == NULL) {
@@ -423,7 +423,7 @@ int grpc_slice_cmp(grpc_slice a, grpc_slice b) {
GRPC_SLICE_LENGTH(a));
}
-int grpc_slice_str_cmp(grpc_slice a, const char *b) {
+int grpc_slice_str_cmp(grpc_slice a, const char* b) {
size_t b_length = strlen(b);
int d = (int)(GRPC_SLICE_LENGTH(a) - b_length);
if (d != 0) return d;
@@ -438,13 +438,13 @@ int grpc_slice_is_equivalent(grpc_slice a, grpc_slice b) {
a.data.refcounted.bytes == b.data.refcounted.bytes;
}
-int grpc_slice_buf_start_eq(grpc_slice a, const void *b, size_t len) {
+int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t len) {
if (GRPC_SLICE_LENGTH(a) < len) return 0;
return 0 == memcmp(GRPC_SLICE_START_PTR(a), b, len);
}
int grpc_slice_rchr(grpc_slice s, char c) {
- const char *b = (const char *)GRPC_SLICE_START_PTR(s);
+ const char* b = (const char*)GRPC_SLICE_START_PTR(s);
int i;
for (i = (int)GRPC_SLICE_LENGTH(s) - 1; i != -1 && b[i] != c; i--)
;
@@ -452,16 +452,16 @@ int grpc_slice_rchr(grpc_slice s, char c) {
}
int grpc_slice_chr(grpc_slice s, char c) {
- const char *b = (const char *)GRPC_SLICE_START_PTR(s);
- const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s));
+ const char* b = (const char*)GRPC_SLICE_START_PTR(s);
+ const char* p = (const char*)memchr(b, c, GRPC_SLICE_LENGTH(s));
return p == NULL ? -1 : (int)(p - b);
}
int grpc_slice_slice(grpc_slice haystack, grpc_slice needle) {
size_t haystack_len = GRPC_SLICE_LENGTH(haystack);
- const uint8_t *haystack_bytes = GRPC_SLICE_START_PTR(haystack);
+ const uint8_t* haystack_bytes = GRPC_SLICE_START_PTR(haystack);
size_t needle_len = GRPC_SLICE_LENGTH(needle);
- const uint8_t *needle_bytes = GRPC_SLICE_START_PTR(needle);
+ const uint8_t* needle_bytes = GRPC_SLICE_START_PTR(needle);
if (haystack_len == 0 || needle_len == 0) return -1;
if (haystack_len < needle_len) return -1;
@@ -469,8 +469,8 @@ int grpc_slice_slice(grpc_slice haystack, grpc_slice needle) {
return grpc_slice_eq(haystack, needle) ? 0 : -1;
if (needle_len == 1) return grpc_slice_chr(haystack, (char)*needle_bytes);
- const uint8_t *last = haystack_bytes + haystack_len - needle_len;
- for (const uint8_t *cur = haystack_bytes; cur != last; ++cur) {
+ const uint8_t* last = haystack_bytes + haystack_len - needle_len;
+ for (const uint8_t* cur = haystack_bytes; cur != last; ++cur) {
if (0 == memcmp(cur, needle_bytes, needle_len)) {
return (int)(cur - haystack_bytes);
}
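
For reference, a minimal usage sketch of the slice API reformatted above (the public <grpc/slice.h> header and grpc_slice_unref are assumptions here, not part of this diff):

  #include <grpc/slice.h>

  static void slice_api_sketch(void) {
    grpc_slice haystack = grpc_slice_from_copied_string("hello, world");
    grpc_slice needle = grpc_slice_from_copied_string("world");
    int pos = grpc_slice_slice(haystack, needle);     /* 7: offset of "world" */
    grpc_slice head = grpc_slice_sub(haystack, 0, 5); /* view of "hello" */
    int cmp = grpc_slice_str_cmp(head, "hello");      /* 0 when equal */
    (void)pos;
    (void)cmp;
    grpc_slice_unref(head);
    grpc_slice_unref(needle);
    grpc_slice_unref(haystack);
  }
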
diff --git a/src/core/lib/slice/slice_buffer.cc b/src/core/lib/slice/slice_buffer.cc
index 63ffc0b00d..3b71fdd4ea 100644
--- a/src/core/lib/slice/slice_buffer.cc
+++ b/src/core/lib/slice/slice_buffer.cc
@@ -30,7 +30,7 @@
/* grow a buffer; requires GRPC_SLICE_BUFFER_INLINE_ELEMENTS > 1 */
#define GROW(x) (3 * (x) / 2)
-static void maybe_embiggen(grpc_slice_buffer *sb) {
+static void maybe_embiggen(grpc_slice_buffer* sb) {
/* How far away from sb->base_slices is sb->slices pointer */
size_t slice_offset = (size_t)(sb->slices - sb->base_slices);
size_t slice_count = sb->count + slice_offset;
@@ -46,10 +46,10 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
GPR_ASSERT(sb->capacity > slice_count);
if (sb->base_slices == sb->inlined) {
sb->base_slices =
- (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice));
+ (grpc_slice*)gpr_malloc(sb->capacity * sizeof(grpc_slice));
memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
} else {
- sb->base_slices = (grpc_slice *)gpr_realloc(
+ sb->base_slices = (grpc_slice*)gpr_realloc(
sb->base_slices, sb->capacity * sizeof(grpc_slice));
}
@@ -58,30 +58,30 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
}
}
-void grpc_slice_buffer_init(grpc_slice_buffer *sb) {
+void grpc_slice_buffer_init(grpc_slice_buffer* sb) {
sb->count = 0;
sb->length = 0;
sb->capacity = GRPC_SLICE_BUFFER_INLINE_ELEMENTS;
sb->base_slices = sb->slices = sb->inlined;
}
-void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *sb) {
+void grpc_slice_buffer_destroy_internal(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* sb) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, sb);
if (sb->base_slices != sb->inlined) {
gpr_free(sb->base_slices);
}
}
-void grpc_slice_buffer_destroy(grpc_slice_buffer *sb) {
+void grpc_slice_buffer_destroy(grpc_slice_buffer* sb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_buffer_destroy_internal(&exec_ctx, sb);
grpc_exec_ctx_finish(&exec_ctx);
}
-uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t n) {
- grpc_slice *back;
- uint8_t *out;
+uint8_t* grpc_slice_buffer_tiny_add(grpc_slice_buffer* sb, size_t n) {
+ grpc_slice* back;
+ uint8_t* out;
sb->length += n;
@@ -103,7 +103,7 @@ add_new:
return back->data.inlined.bytes;
}
-size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, grpc_slice s) {
+size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer* sb, grpc_slice s) {
size_t out = sb->count;
maybe_embiggen(sb);
sb->slices[out] = s;
@@ -112,7 +112,7 @@ size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, grpc_slice s) {
return out;
}
-void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) {
+void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice s) {
size_t n = sb->count;
/* if both the last slice in the slice buffer and the slice being added
are inlined (that is, that they carry their data inside the slice data
@@ -120,7 +120,7 @@ void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) {
into the back slice, preventing many small slices being passed into
writes */
if (!s.refcount && n) {
- grpc_slice *back = &sb->slices[n - 1];
+ grpc_slice* back = &sb->slices[n - 1];
if (!back->refcount &&
back->data.inlined.length < GRPC_SLICE_INLINED_SIZE) {
if (s.data.inlined.length + back->data.inlined.length <=
@@ -149,22 +149,22 @@ void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) {
grpc_slice_buffer_add_indexed(sb, s);
}
-void grpc_slice_buffer_addn(grpc_slice_buffer *sb, grpc_slice *s, size_t n) {
+void grpc_slice_buffer_addn(grpc_slice_buffer* sb, grpc_slice* s, size_t n) {
size_t i;
for (i = 0; i < n; i++) {
grpc_slice_buffer_add(sb, s[i]);
}
}
-void grpc_slice_buffer_pop(grpc_slice_buffer *sb) {
+void grpc_slice_buffer_pop(grpc_slice_buffer* sb) {
if (sb->count != 0) {
size_t count = --sb->count;
sb->length -= GRPC_SLICE_LENGTH(sb->slices[count]);
}
}
-void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *sb) {
+void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* sb) {
size_t i;
for (i = 0; i < sb->count; i++) {
grpc_slice_unref_internal(exec_ctx, sb->slices[i]);
@@ -174,13 +174,13 @@ void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx,
sb->length = 0;
}
-void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb) {
+void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer* sb) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, sb);
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) {
+void grpc_slice_buffer_swap(grpc_slice_buffer* a, grpc_slice_buffer* b) {
size_t a_offset = (size_t)(a->slices - a->base_slices);
size_t b_offset = (size_t)(b->slices - b->base_slices);
@@ -207,7 +207,7 @@ void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) {
memcpy(a->base_slices, b->inlined, b_count * sizeof(grpc_slice));
} else {
/* no inlining: easy swap */
- GPR_SWAP(grpc_slice *, a->base_slices, b->base_slices);
+ GPR_SWAP(grpc_slice*, a->base_slices, b->base_slices);
}
/* Update the slices pointers (cannot do a GPR_SWAP on slices fields here).
@@ -222,8 +222,8 @@ void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) {
GPR_SWAP(size_t, a->length, b->length);
}
-void grpc_slice_buffer_move_into(grpc_slice_buffer *src,
- grpc_slice_buffer *dst) {
+void grpc_slice_buffer_move_into(grpc_slice_buffer* src,
+ grpc_slice_buffer* dst) {
/* anything to move? */
if (src->count == 0) {
return;
@@ -239,8 +239,8 @@ void grpc_slice_buffer_move_into(grpc_slice_buffer *src,
src->length = 0;
}
-static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer *src, size_t n,
- grpc_slice_buffer *dst,
+static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer* src, size_t n,
+ grpc_slice_buffer* dst,
bool incref) {
GPR_ASSERT(src->length >= n);
if (src->length == n) {
@@ -279,20 +279,20 @@ static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer *src, size_t n,
GPR_ASSERT(src->count > 0);
}
-void grpc_slice_buffer_move_first(grpc_slice_buffer *src, size_t n,
- grpc_slice_buffer *dst) {
+void grpc_slice_buffer_move_first(grpc_slice_buffer* src, size_t n,
+ grpc_slice_buffer* dst) {
slice_buffer_move_first_maybe_ref(src, n, dst, true);
}
-void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n,
- grpc_slice_buffer *dst) {
+void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer* src, size_t n,
+ grpc_slice_buffer* dst) {
slice_buffer_move_first_maybe_ref(src, n, dst, false);
}
-void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *src, size_t n,
- void *dst) {
- char *dstp = (char *)dst;
+void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* src, size_t n,
+ void* dst) {
+ char* dstp = (char*)dst;
GPR_ASSERT(src->length >= n);
while (n > 0) {
@@ -316,8 +316,8 @@ void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
}
}
-void grpc_slice_buffer_trim_end(grpc_slice_buffer *sb, size_t n,
- grpc_slice_buffer *garbage) {
+void grpc_slice_buffer_trim_end(grpc_slice_buffer* sb, size_t n,
+ grpc_slice_buffer* garbage) {
GPR_ASSERT(n <= sb->length);
sb->length -= n;
for (;;) {
@@ -340,7 +340,7 @@ void grpc_slice_buffer_trim_end(grpc_slice_buffer *sb, size_t n,
}
}
-grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *sb) {
+grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer* sb) {
grpc_slice slice;
GPR_ASSERT(sb->count > 0);
slice = sb->slices[0];
@@ -351,7 +351,7 @@ grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *sb) {
return slice;
}
-void grpc_slice_buffer_undo_take_first(grpc_slice_buffer *sb,
+void grpc_slice_buffer_undo_take_first(grpc_slice_buffer* sb,
grpc_slice slice) {
sb->slices--;
sb->slices[0] = slice;
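
The comment in grpc_slice_buffer_add above explains that small inlined slices are coalesced into the previous slice so that writes do not see many tiny fragments. A hedged usage sketch of the buffer API in this file (assuming the public <grpc/slice_buffer.h> header):

  #include <grpc/slice.h>
  #include <grpc/slice_buffer.h>

  static void slice_buffer_sketch(void) {
    grpc_slice_buffer sb;
    grpc_slice_buffer_init(&sb);
    /* Short inlined slices such as these may be merged into one element. */
    grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("abc"));
    grpc_slice_buffer_add(&sb, grpc_slice_from_copied_string("def"));
    grpc_slice first = grpc_slice_buffer_take_first(&sb);
    grpc_slice_unref(first);
    grpc_slice_buffer_destroy(&sb); /* unrefs any remaining slices */
  }
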
diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index 41250df738..f86f25ea7c 100644
--- a/src/core/lib/slice/slice_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -39,7 +39,7 @@ typedef struct grpc_slice_hash_table grpc_slice_hash_table;
typedef struct grpc_slice_hash_table_entry {
grpc_slice key;
- void *value; /* Must not be NULL. */
+ void* value; /* Must not be NULL. */
} grpc_slice_hash_table_entry;
/** Creates a new hash table containing \a entries, which is an array

@@ -48,18 +48,18 @@ typedef struct grpc_slice_hash_table_entry {
value_cmp will be used to compare values in the context of \a
grpc_slice_hash_table_cmp. If NULL, raw pointer (\a GPR_ICMP) comparison
will be used. */
-grpc_slice_hash_table *grpc_slice_hash_table_create(
- size_t num_entries, grpc_slice_hash_table_entry *entries,
- void (*destroy_value)(grpc_exec_ctx *exec_ctx, void *value),
- int (*value_cmp)(void *a, void *b));
+grpc_slice_hash_table* grpc_slice_hash_table_create(
+ size_t num_entries, grpc_slice_hash_table_entry* entries,
+ void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
+ int (*value_cmp)(void* a, void* b));
-grpc_slice_hash_table *grpc_slice_hash_table_ref(grpc_slice_hash_table *table);
-void grpc_slice_hash_table_unref(grpc_exec_ctx *exec_ctx,
- grpc_slice_hash_table *table);
+grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table);
+void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx,
+ grpc_slice_hash_table* table);
/** Returns the value from \a table associated with \a key.
Returns NULL if \a key is not found. */
-void *grpc_slice_hash_table_get(const grpc_slice_hash_table *table,
+void* grpc_slice_hash_table_get(const grpc_slice_hash_table* table,
const grpc_slice key);
/** Compares \a a vs. \a b.
@@ -68,8 +68,8 @@ void *grpc_slice_hash_table_get(const grpc_slice_hash_table *table,
* - else, it contains fewer (resp. more) entries,
* - else, if strcmp(a_key, b_key) < 1 (resp. > 1),
* - else, if value_cmp(a_value, b_value) < 1 (resp. > 1). */
-int grpc_slice_hash_table_cmp(const grpc_slice_hash_table *a,
- const grpc_slice_hash_table *b);
+int grpc_slice_hash_table_cmp(const grpc_slice_hash_table* a,
+ const grpc_slice_hash_table* b);
#ifdef __cplusplus
}
diff --git a/src/core/lib/slice/slice_intern.cc b/src/core/lib/slice/slice_intern.cc
index 1ea9a2aa67..50a0eba49c 100644
--- a/src/core/lib/slice/slice_intern.cc
+++ b/src/core/lib/slice/slice_intern.cc
@@ -43,12 +43,12 @@ typedef struct interned_slice_refcount {
size_t length;
gpr_atm refcnt;
uint32_t hash;
- struct interned_slice_refcount *bucket_next;
+ struct interned_slice_refcount* bucket_next;
} interned_slice_refcount;
typedef struct slice_shard {
gpr_mu mu;
- interned_slice_refcount **strs;
+ interned_slice_refcount** strs;
size_t count;
size_t capacity;
} slice_shard;
@@ -69,17 +69,17 @@ static static_metadata_hash_ent
static uint32_t max_static_metadata_hash_probe;
static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];
-static void interned_slice_ref(void *p) {
- interned_slice_refcount *s = (interned_slice_refcount *)p;
+static void interned_slice_ref(void* p) {
+ interned_slice_refcount* s = (interned_slice_refcount*)p;
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
}
-static void interned_slice_destroy(interned_slice_refcount *s) {
- slice_shard *shard = &g_shards[SHARD_IDX(s->hash)];
+static void interned_slice_destroy(interned_slice_refcount* s) {
+ slice_shard* shard = &g_shards[SHARD_IDX(s->hash)];
gpr_mu_lock(&shard->mu);
GPR_ASSERT(0 == gpr_atm_no_barrier_load(&s->refcnt));
- interned_slice_refcount **prev_next;
- interned_slice_refcount *cur;
+ interned_slice_refcount** prev_next;
+ interned_slice_refcount* cur;
for (prev_next = &shard->strs[TABLE_IDX(s->hash, shard->capacity)],
cur = *prev_next;
cur != s; prev_next = &cur->bucket_next, cur = cur->bucket_next)
@@ -90,24 +90,24 @@ static void interned_slice_destroy(interned_slice_refcount *s) {
gpr_mu_unlock(&shard->mu);
}
-static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
- interned_slice_refcount *s = (interned_slice_refcount *)p;
+static void interned_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+ interned_slice_refcount* s = (interned_slice_refcount*)p;
if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
interned_slice_destroy(s);
}
}
-static void interned_slice_sub_ref(void *p) {
- interned_slice_ref(((char *)p) - offsetof(interned_slice_refcount, sub));
+static void interned_slice_sub_ref(void* p) {
+ interned_slice_ref(((char*)p) - offsetof(interned_slice_refcount, sub));
}
-static void interned_slice_sub_unref(grpc_exec_ctx *exec_ctx, void *p) {
+static void interned_slice_sub_unref(grpc_exec_ctx* exec_ctx, void* p) {
interned_slice_unref(exec_ctx,
- ((char *)p) - offsetof(interned_slice_refcount, sub));
+ ((char*)p) - offsetof(interned_slice_refcount, sub));
}
static uint32_t interned_slice_hash(grpc_slice slice) {
- interned_slice_refcount *s = (interned_slice_refcount *)slice.refcount;
+ interned_slice_refcount* s = (interned_slice_refcount*)slice.refcount;
return s->hash;
}
@@ -122,16 +122,16 @@ static const grpc_slice_refcount_vtable interned_slice_sub_vtable = {
interned_slice_sub_ref, interned_slice_sub_unref,
grpc_slice_default_eq_impl, grpc_slice_default_hash_impl};
-static void grow_shard(slice_shard *shard) {
+static void grow_shard(slice_shard* shard) {
size_t capacity = shard->capacity * 2;
size_t i;
- interned_slice_refcount **strtab;
+ interned_slice_refcount** strtab;
interned_slice_refcount *s, *next;
GPR_TIMER_BEGIN("grow_strtab", 0);
- strtab = (interned_slice_refcount **)gpr_zalloc(
- sizeof(interned_slice_refcount *) * capacity);
+ strtab = (interned_slice_refcount**)gpr_zalloc(
+ sizeof(interned_slice_refcount*) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (s = shard->strs[i]; s; s = next) {
@@ -149,10 +149,10 @@ static void grow_shard(slice_shard *shard) {
GPR_TIMER_END("grow_strtab", 0);
}
-static grpc_slice materialize(interned_slice_refcount *s) {
+static grpc_slice materialize(interned_slice_refcount* s) {
grpc_slice slice;
slice.refcount = &s->base;
- slice.data.refcounted.bytes = (uint8_t *)(s + 1);
+ slice.data.refcounted.bytes = (uint8_t*)(s + 1);
slice.data.refcounted.length = s->length;
return slice;
}
@@ -176,7 +176,7 @@ uint32_t grpc_slice_hash(grpc_slice s) {
}
grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
- bool *returned_slice_is_different) {
+ bool* returned_slice_is_different) {
if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
return slice;
}
@@ -218,8 +218,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
}
}
- interned_slice_refcount *s;
- slice_shard *shard = &g_shards[SHARD_IDX(hash)];
+ interned_slice_refcount* s;
+ slice_shard* shard = &g_shards[SHARD_IDX(hash)];
gpr_mu_lock(&shard->mu);
@@ -244,8 +244,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
/* not found: create a new string */
/* string data goes after the internal_string header */
- s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
- GRPC_SLICE_LENGTH(slice));
+ s = (interned_slice_refcount*)gpr_malloc(sizeof(*s) +
+ GRPC_SLICE_LENGTH(slice));
gpr_atm_rel_store(&s->refcnt, 1);
s->length = GRPC_SLICE_LENGTH(slice);
s->hash = hash;
@@ -279,12 +279,12 @@ void grpc_slice_intern_init(void) {
g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
for (size_t i = 0; i < SHARD_COUNT; i++) {
- slice_shard *shard = &g_shards[i];
+ slice_shard* shard = &g_shards[i];
gpr_mu_init(&shard->mu);
shard->count = 0;
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) *
- shard->capacity);
+ shard->strs = (interned_slice_refcount**)gpr_zalloc(sizeof(*shard->strs) *
+ shard->capacity);
}
for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
static_metadata_hash[i].hash = 0;
@@ -311,16 +311,16 @@ void grpc_slice_intern_init(void) {
void grpc_slice_intern_shutdown(void) {
for (size_t i = 0; i < SHARD_COUNT; i++) {
- slice_shard *shard = &g_shards[i];
+ slice_shard* shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
/* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
if (shard->count != 0) {
gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked",
shard->count);
for (size_t j = 0; j < shard->capacity; j++) {
- for (interned_slice_refcount *s = shard->strs[j]; s;
+ for (interned_slice_refcount* s = shard->strs[j]; s;
s = s->bucket_next) {
- char *text =
+ char* text =
grpc_dump_slice(materialize(s), GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "LEAKED: %s", text);
gpr_free(text);
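
The interning code above deduplicates slices per hash shard, so equal byte strings share one refcounted allocation. A rough sketch, assuming grpc_slice_intern and grpc_slice_eq are reachable through the public slice header and that the shards have already been initialised via grpc_init():

  #include <grpc/slice.h>
  #include <grpc/support/log.h>

  static void intern_sketch(void) {
    /* assumes grpc_init() has been called */
    grpc_slice raw = grpc_slice_from_copied_string("content-type");
    grpc_slice a = grpc_slice_intern(raw); /* does not take ownership of raw */
    grpc_slice b = grpc_slice_intern(raw);
    GPR_ASSERT(grpc_slice_eq(a, b)); /* same interned storage, same hash */
    grpc_slice_unref(a);
    grpc_slice_unref(b);
    grpc_slice_unref(raw);
  }
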
diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h
index fcf70a0e55..2439fc0826 100644
--- a/src/core/lib/slice/slice_internal.h
+++ b/src/core/lib/slice/slice_internal.h
@@ -29,11 +29,11 @@ extern "C" {
#endif
grpc_slice grpc_slice_ref_internal(grpc_slice slice);
-void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice);
-void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *sb);
-void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx,
- grpc_slice_buffer *sb);
+void grpc_slice_unref_internal(grpc_exec_ctx* exec_ctx, grpc_slice slice);
+void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* sb);
+void grpc_slice_buffer_destroy_internal(grpc_exec_ctx* exec_ctx,
+ grpc_slice_buffer* sb);
/* Check if a slice is interned */
bool grpc_slice_is_interned(grpc_slice slice);
@@ -46,7 +46,7 @@ void grpc_test_only_set_slice_hash_seed(uint32_t key);
// used for surface boundaries where we might receive an un-interned static
// string
grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
- bool *returned_slice_is_different);
+ bool* returned_slice_is_different);
uint32_t grpc_static_slice_hash(grpc_slice s);
int grpc_static_slice_eq(grpc_slice a, grpc_slice b);
diff --git a/src/core/lib/slice/slice_string_helpers.cc b/src/core/lib/slice/slice_string_helpers.cc
index d461c474d2..5385be9866 100644
--- a/src/core/lib/slice/slice_string_helpers.cc
+++ b/src/core/lib/slice/slice_string_helpers.cc
@@ -25,8 +25,8 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
-char *grpc_dump_slice(grpc_slice s, uint32_t flags) {
- return gpr_dump((const char *)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s),
+char* grpc_dump_slice(grpc_slice s, uint32_t flags) {
+ return gpr_dump((const char*)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s),
flags);
}
@@ -35,11 +35,11 @@ char *grpc_dump_slice(grpc_slice s, uint32_t flags) {
* str.
*
* Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
-static int slice_find_separator_offset(const grpc_slice str, const char *sep,
- const size_t read_offset, size_t *begin,
- size_t *end) {
+static int slice_find_separator_offset(const grpc_slice str, const char* sep,
+ const size_t read_offset, size_t* begin,
+ size_t* end) {
size_t i;
- const uint8_t *str_ptr = GRPC_SLICE_START_PTR(str) + read_offset;
+ const uint8_t* str_ptr = GRPC_SLICE_START_PTR(str) + read_offset;
const size_t str_len = GRPC_SLICE_LENGTH(str) - read_offset;
const size_t sep_len = strlen(sep);
if (str_len < sep_len) {
@@ -56,7 +56,7 @@ static int slice_find_separator_offset(const grpc_slice str, const char *sep,
return 0;
}
-void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst) {
+void grpc_slice_split(grpc_slice str, const char* sep, grpc_slice_buffer* dst) {
const size_t sep_len = strlen(sep);
size_t begin, end;
@@ -74,7 +74,7 @@ void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst) {
}
}
-bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result) {
- return gpr_parse_bytes_to_uint32((const char *)GRPC_SLICE_START_PTR(str),
+bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t* result) {
+ return gpr_parse_bytes_to_uint32((const char*)GRPC_SLICE_START_PTR(str),
GRPC_SLICE_LENGTH(str), result) != 0;
}
diff --git a/src/core/lib/slice/slice_string_helpers.h b/src/core/lib/slice/slice_string_helpers.h
index bcfb33bfb3..acbc41e711 100644
--- a/src/core/lib/slice/slice_string_helpers.h
+++ b/src/core/lib/slice/slice_string_helpers.h
@@ -33,13 +33,13 @@ extern "C" {
#endif
/* Calls gpr_dump on a slice. */
-char *grpc_dump_slice(grpc_slice slice, uint32_t flags);
+char* grpc_dump_slice(grpc_slice slice, uint32_t flags);
/** Split \a str by the separator \a sep. Results are stored in \a dst, which
* should be a properly initialized instance. */
-void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst);
+void grpc_slice_split(grpc_slice str, const char* sep, grpc_slice_buffer* dst);
-bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result);
+bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t* result);
#ifdef __cplusplus
}
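
A short sketch of the split helpers declared above (the internal header path is taken from this diff; the values in the comments are just the expected results for this input):

  #include <stdint.h>
  #include <grpc/slice.h>
  #include <grpc/slice_buffer.h>
  #include "src/core/lib/slice/slice_string_helpers.h"

  static void split_sketch(void) {
    grpc_slice_buffer parts;
    grpc_slice_buffer_init(&parts);
    grpc_slice csv = grpc_slice_from_copied_string("10,20,30");
    grpc_slice_split(csv, ",", &parts); /* parts: "10", "20", "30" */
    uint32_t first = 0;
    if (parts.count > 0 && grpc_parse_slice_to_uint32(parts.slices[0], &first)) {
      /* first == 10 */
    }
    grpc_slice_unref(csv);
    grpc_slice_buffer_destroy(&parts); /* unrefs the split pieces */
  }
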
diff --git a/src/core/lib/support/alloc.cc b/src/core/lib/support/alloc.cc
index 886d69d64c..aef4cb8d51 100644
--- a/src/core/lib/support/alloc.cc
+++ b/src/core/lib/support/alloc.cc
@@ -24,10 +24,10 @@
#include <string.h>
#include "src/core/lib/profiling/timers.h"
-static void *zalloc_with_calloc(size_t sz) { return calloc(sz, 1); }
+static void* zalloc_with_calloc(size_t sz) { return calloc(sz, 1); }
-static void *zalloc_with_gpr_malloc(size_t sz) {
- void *p = gpr_malloc(sz);
+static void* zalloc_with_gpr_malloc(size_t sz) {
+ void* p = gpr_malloc(sz);
memset(p, 0, sz);
return p;
}
@@ -49,8 +49,8 @@ void gpr_set_allocation_functions(gpr_allocation_functions functions) {
g_alloc_functions = functions;
}
-void *gpr_malloc(size_t size) {
- void *p;
+void* gpr_malloc(size_t size) {
+ void* p;
if (size == 0) return NULL;
GPR_TIMER_BEGIN("gpr_malloc", 0);
p = g_alloc_functions.malloc_fn(size);
@@ -61,8 +61,8 @@ void *gpr_malloc(size_t size) {
return p;
}
-void *gpr_zalloc(size_t size) {
- void *p;
+void* gpr_zalloc(size_t size) {
+ void* p;
if (size == 0) return NULL;
GPR_TIMER_BEGIN("gpr_zalloc", 0);
p = g_alloc_functions.zalloc_fn(size);
@@ -73,13 +73,13 @@ void *gpr_zalloc(size_t size) {
return p;
}
-void gpr_free(void *p) {
+void gpr_free(void* p) {
GPR_TIMER_BEGIN("gpr_free", 0);
g_alloc_functions.free_fn(p);
GPR_TIMER_END("gpr_free", 0);
}
-void *gpr_realloc(void *p, size_t size) {
+void* gpr_realloc(void* p, size_t size) {
if ((size == 0) && (p == NULL)) return NULL;
GPR_TIMER_BEGIN("gpr_realloc", 0);
p = g_alloc_functions.realloc_fn(p, size);
@@ -90,13 +90,13 @@ void *gpr_realloc(void *p, size_t size) {
return p;
}
-void *gpr_malloc_aligned(size_t size, size_t alignment_log) {
+void* gpr_malloc_aligned(size_t size, size_t alignment_log) {
size_t alignment = ((size_t)1) << alignment_log;
- size_t extra = alignment - 1 + sizeof(void *);
- void *p = gpr_malloc(size + extra);
- void **ret = (void **)(((uintptr_t)p + extra) & ~(alignment - 1));
+ size_t extra = alignment - 1 + sizeof(void*);
+ void* p = gpr_malloc(size + extra);
+ void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1));
ret[-1] = p;
- return (void *)ret;
+ return (void*)ret;
}
-void gpr_free_aligned(void *ptr) { gpr_free(((void **)ptr)[-1]); }
+void gpr_free_aligned(void* ptr) { gpr_free(((void**)ptr)[-1]); }
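
gpr_malloc_aligned above over-allocates by alignment - 1 + sizeof(void*), rounds the result up to the requested boundary, and stashes the raw gpr_malloc pointer in the slot just before the returned block; gpr_free_aligned reads that slot back. A sketch (the public <grpc/support/alloc.h> header is an assumption):

  #include <string.h>
  #include <grpc/support/alloc.h>

  static void aligned_alloc_sketch(void) {
    /* alignment_log is an exponent: 6 means a 64-byte boundary. */
    void* block = gpr_malloc_aligned(256, 6);
    memset(block, 0, 256);
    gpr_free_aligned(block); /* frees via the stashed ((void**)block)[-1] */
  }
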
diff --git a/src/core/lib/support/arena.cc b/src/core/lib/support/arena.cc
index 9e0f73ae3d..0b37a88230 100644
--- a/src/core/lib/support/arena.cc
+++ b/src/core/lib/support/arena.cc
@@ -36,40 +36,40 @@ struct gpr_arena {
zone initial_zone;
};
-gpr_arena *gpr_arena_create(size_t initial_size) {
+gpr_arena* gpr_arena_create(size_t initial_size) {
initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
- gpr_arena *a = (gpr_arena *)gpr_zalloc(sizeof(gpr_arena) + initial_size);
+ gpr_arena* a = (gpr_arena*)gpr_zalloc(sizeof(gpr_arena) + initial_size);
a->initial_zone.size_end = initial_size;
return a;
}
-size_t gpr_arena_destroy(gpr_arena *arena) {
+size_t gpr_arena_destroy(gpr_arena* arena) {
gpr_atm size = gpr_atm_no_barrier_load(&arena->size_so_far);
- zone *z = (zone *)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm);
+ zone* z = (zone*)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm);
gpr_free(arena);
while (z) {
- zone *next_z = (zone *)gpr_atm_no_barrier_load(&z->next_atm);
+ zone* next_z = (zone*)gpr_atm_no_barrier_load(&z->next_atm);
gpr_free(z);
z = next_z;
}
return (size_t)size;
}
-void *gpr_arena_alloc(gpr_arena *arena, size_t size) {
+void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
size = ROUND_UP_TO_ALIGNMENT_SIZE(size);
size_t start =
(size_t)gpr_atm_no_barrier_fetch_add(&arena->size_so_far, size);
- zone *z = &arena->initial_zone;
+ zone* z = &arena->initial_zone;
while (start > z->size_end) {
- zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
+ zone* next_z = (zone*)gpr_atm_acq_load(&z->next_atm);
if (next_z == NULL) {
size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far);
- next_z = (zone *)gpr_zalloc(sizeof(zone) + next_z_size);
+ next_z = (zone*)gpr_zalloc(sizeof(zone) + next_z_size);
next_z->size_begin = z->size_end;
next_z->size_end = z->size_end + next_z_size;
if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) {
gpr_free(next_z);
- next_z = (zone *)gpr_atm_acq_load(&z->next_atm);
+ next_z = (zone*)gpr_atm_acq_load(&z->next_atm);
}
}
z = next_z;
@@ -79,5 +79,5 @@ void *gpr_arena_alloc(gpr_arena *arena, size_t size) {
}
GPR_ASSERT(start >= z->size_begin);
GPR_ASSERT(start + size <= z->size_end);
- return ((char *)(z + 1)) + start - z->size_begin;
+ return ((char*)(z + 1)) + start - z->size_begin;
}
diff --git a/src/core/lib/support/arena.h b/src/core/lib/support/arena.h
index 8a50786348..4d43c56bb9 100644
--- a/src/core/lib/support/arena.h
+++ b/src/core/lib/support/arena.h
@@ -34,11 +34,11 @@ extern "C" {
typedef struct gpr_arena gpr_arena;
// Create an arena, with \a initial_size bytes in the first allocated buffer
-gpr_arena *gpr_arena_create(size_t initial_size);
+gpr_arena* gpr_arena_create(size_t initial_size);
// Allocate \a size bytes from the arena
-void *gpr_arena_alloc(gpr_arena *arena, size_t size);
+void* gpr_arena_alloc(gpr_arena* arena, size_t size);
// Destroy an arena, returning the total number of bytes allocated
-size_t gpr_arena_destroy(gpr_arena *arena);
+size_t gpr_arena_destroy(gpr_arena* arena);
#ifdef __cplusplus
}
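
Putting the arena declarations above together, a minimal usage sketch (include path taken from this diff):

  #include "src/core/lib/support/arena.h"

  static void arena_sketch(void) {
    gpr_arena* arena = gpr_arena_create(1024); /* 1024-byte initial zone */
    void* a = gpr_arena_alloc(arena, 64);
    void* b = gpr_arena_alloc(arena, 128);
    (void)a;
    (void)b;
    /* Allocations that overflow the current zone cause a new zone to be
       chained on, as the loop in gpr_arena_alloc above shows. Individual
       allocations are never freed; destroying the arena releases every zone
       and reports the total number of bytes handed out. */
    size_t total = gpr_arena_destroy(arena);
    (void)total;
  }
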
diff --git a/src/core/lib/support/atm.cc b/src/core/lib/support/atm.cc
index 2f37d62f76..15bfe52d64 100644
--- a/src/core/lib/support/atm.cc
+++ b/src/core/lib/support/atm.cc
@@ -19,7 +19,7 @@
#include <grpc/support/atm.h>
#include <grpc/support/useful.h>
-gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta,
+gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm* value, gpr_atm delta,
gpr_atm min, gpr_atm max) {
gpr_atm current_value;
gpr_atm new_value;
diff --git a/src/core/lib/support/avl.cc b/src/core/lib/support/avl.cc
index 0e28b24c98..4ba101b74a 100644
--- a/src/core/lib/support/avl.cc
+++ b/src/core/lib/support/avl.cc
@@ -25,22 +25,22 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable) {
+gpr_avl gpr_avl_create(const gpr_avl_vtable* vtable) {
gpr_avl out;
out.vtable = vtable;
out.root = NULL;
return out;
}
-static gpr_avl_node *ref_node(gpr_avl_node *node) {
+static gpr_avl_node* ref_node(gpr_avl_node* node) {
if (node) {
gpr_ref(&node->refs);
}
return node;
}
-static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node,
- void *user_data) {
+static void unref_node(const gpr_avl_vtable* vtable, gpr_avl_node* node,
+ void* user_data) {
if (node == NULL) {
return;
}
@@ -53,17 +53,18 @@ static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node,
}
}
-static long node_height(gpr_avl_node *node) {
+static long node_height(gpr_avl_node* node) {
return node == NULL ? 0 : node->height;
}
#ifndef NDEBUG
-static long calculate_height(gpr_avl_node *node) {
- return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left),
- calculate_height(node->right));
+static long calculate_height(gpr_avl_node* node) {
+ return node == NULL ? 0
+ : 1 + GPR_MAX(calculate_height(node->left),
+ calculate_height(node->right));
}
-static gpr_avl_node *assert_invariants(gpr_avl_node *n) {
+static gpr_avl_node* assert_invariants(gpr_avl_node* n) {
if (n == NULL) return NULL;
assert_invariants(n->left);
assert_invariants(n->right);
@@ -72,12 +73,12 @@ static gpr_avl_node *assert_invariants(gpr_avl_node *n) {
return n;
}
#else
-static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; }
+static gpr_avl_node* assert_invariants(gpr_avl_node* n) { return n; }
#endif
-gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
- gpr_avl_node *right) {
- gpr_avl_node *node = (gpr_avl_node *)gpr_malloc(sizeof(*node));
+gpr_avl_node* new_node(void* key, void* value, gpr_avl_node* left,
+ gpr_avl_node* right) {
+ gpr_avl_node* node = (gpr_avl_node*)gpr_malloc(sizeof(*node));
gpr_ref_init(&node->refs, 1);
node->key = key;
node->value = value;
@@ -87,8 +88,8 @@ gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left,
return node;
}
-static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node,
- void *key, void *user_data) {
+static gpr_avl_node* get(const gpr_avl_vtable* vtable, gpr_avl_node* node,
+ void* key, void* user_data) {
long cmp;
if (node == NULL) {
@@ -105,13 +106,13 @@ static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node,
}
}
-void *gpr_avl_get(gpr_avl avl, void *key, void *user_data) {
- gpr_avl_node *node = get(avl.vtable, avl.root, key, user_data);
+void* gpr_avl_get(gpr_avl avl, void* key, void* user_data) {
+ gpr_avl_node* node = get(avl.vtable, avl.root, key, user_data);
return node ? node->value : NULL;
}
-int gpr_avl_maybe_get(gpr_avl avl, void *key, void **value, void *user_data) {
- gpr_avl_node *node = get(avl.vtable, avl.root, key, user_data);
+int gpr_avl_maybe_get(gpr_avl avl, void* key, void** value, void* user_data) {
+ gpr_avl_node* node = get(avl.vtable, avl.root, key, user_data);
if (node != NULL) {
*value = node->value;
return 1;
@@ -119,10 +120,10 @@ int gpr_avl_maybe_get(gpr_avl avl, void *key, void **value, void *user_data) {
return 0;
}
-static gpr_avl_node *rotate_left(const gpr_avl_vtable *vtable, void *key,
- void *value, gpr_avl_node *left,
- gpr_avl_node *right, void *user_data) {
- gpr_avl_node *n = new_node(vtable->copy_key(right->key, user_data),
+static gpr_avl_node* rotate_left(const gpr_avl_vtable* vtable, void* key,
+ void* value, gpr_avl_node* left,
+ gpr_avl_node* right, void* user_data) {
+ gpr_avl_node* n = new_node(vtable->copy_key(right->key, user_data),
vtable->copy_value(right->value, user_data),
new_node(key, value, left, ref_node(right->left)),
ref_node(right->right));
@@ -130,10 +131,10 @@ static gpr_avl_node *rotate_left(const gpr_avl_vtable *vtable, void *key,
return n;
}
-static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key,
- void *value, gpr_avl_node *left,
- gpr_avl_node *right, void *user_data) {
- gpr_avl_node *n =
+static gpr_avl_node* rotate_right(const gpr_avl_vtable* vtable, void* key,
+ void* value, gpr_avl_node* left,
+ gpr_avl_node* right, void* user_data) {
+ gpr_avl_node* n =
new_node(vtable->copy_key(left->key, user_data),
vtable->copy_value(left->value, user_data), ref_node(left->left),
new_node(key, value, ref_node(left->right), right));
@@ -141,11 +142,11 @@ static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key,
return n;
}
-static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key,
- void *value, gpr_avl_node *left,
- gpr_avl_node *right, void *user_data) {
+static gpr_avl_node* rotate_left_right(const gpr_avl_vtable* vtable, void* key,
+ void* value, gpr_avl_node* left,
+ gpr_avl_node* right, void* user_data) {
/* rotate_right(..., rotate_left(left), right) */
- gpr_avl_node *n =
+ gpr_avl_node* n =
new_node(vtable->copy_key(left->right->key, user_data),
vtable->copy_value(left->right->value, user_data),
new_node(vtable->copy_key(left->key, user_data),
@@ -156,11 +157,11 @@ static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key,
return n;
}
-static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key,
- void *value, gpr_avl_node *left,
- gpr_avl_node *right, void *user_data) {
+static gpr_avl_node* rotate_right_left(const gpr_avl_vtable* vtable, void* key,
+ void* value, gpr_avl_node* left,
+ gpr_avl_node* right, void* user_data) {
/* rotate_left(..., left, rotate_right(right)) */
- gpr_avl_node *n =
+ gpr_avl_node* n =
new_node(vtable->copy_key(right->left->key, user_data),
vtable->copy_value(right->left->value, user_data),
new_node(key, value, left, ref_node(right->left->left)),
@@ -171,9 +172,9 @@ static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key,
return n;
}
-static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key,
- void *value, gpr_avl_node *left,
- gpr_avl_node *right, void *user_data) {
+static gpr_avl_node* rebalance(const gpr_avl_vtable* vtable, void* key,
+ void* value, gpr_avl_node* left,
+ gpr_avl_node* right, void* user_data) {
switch (node_height(left) - node_height(right)) {
case 2:
if (node_height(left->left) - node_height(left->right) == -1) {
@@ -196,8 +197,8 @@ static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key,
}
}
-static gpr_avl_node *add_key(const gpr_avl_vtable *vtable, gpr_avl_node *node,
- void *key, void *value, void *user_data) {
+static gpr_avl_node* add_key(const gpr_avl_vtable* vtable, gpr_avl_node* node,
+ void* key, void* value, void* user_data) {
long cmp;
if (node == NULL) {
return new_node(key, value, NULL, NULL);
@@ -218,31 +219,31 @@ static gpr_avl_node *add_key(const gpr_avl_vtable *vtable, gpr_avl_node *node,
}
}
-gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value, void *user_data) {
- gpr_avl_node *old_root = avl.root;
+gpr_avl gpr_avl_add(gpr_avl avl, void* key, void* value, void* user_data) {
+ gpr_avl_node* old_root = avl.root;
avl.root = add_key(avl.vtable, avl.root, key, value, user_data);
assert_invariants(avl.root);
unref_node(avl.vtable, old_root, user_data);
return avl;
}
-static gpr_avl_node *in_order_head(gpr_avl_node *node) {
+static gpr_avl_node* in_order_head(gpr_avl_node* node) {
while (node->left != NULL) {
node = node->left;
}
return node;
}
-static gpr_avl_node *in_order_tail(gpr_avl_node *node) {
+static gpr_avl_node* in_order_tail(gpr_avl_node* node) {
while (node->right != NULL) {
node = node->right;
}
return node;
}
-static gpr_avl_node *remove_key(const gpr_avl_vtable *vtable,
- gpr_avl_node *node, void *key,
- void *user_data) {
+static gpr_avl_node* remove_key(const gpr_avl_vtable* vtable,
+ gpr_avl_node* node, void* key,
+ void* user_data) {
long cmp;
if (node == NULL) {
return NULL;
@@ -254,13 +255,13 @@ static gpr_avl_node *remove_key(const gpr_avl_vtable *vtable,
} else if (node->right == NULL) {
return ref_node(node->left);
} else if (node->left->height < node->right->height) {
- gpr_avl_node *h = in_order_head(node->right);
+ gpr_avl_node* h = in_order_head(node->right);
return rebalance(
vtable, vtable->copy_key(h->key, user_data),
vtable->copy_value(h->value, user_data), ref_node(node->left),
remove_key(vtable, node->right, h->key, user_data), user_data);
} else {
- gpr_avl_node *h = in_order_tail(node->left);
+ gpr_avl_node* h = in_order_tail(node->left);
return rebalance(vtable, vtable->copy_key(h->key, user_data),
vtable->copy_value(h->value, user_data),
remove_key(vtable, node->left, h->key, user_data),
@@ -279,20 +280,20 @@ static gpr_avl_node *remove_key(const gpr_avl_vtable *vtable,
}
}
-gpr_avl gpr_avl_remove(gpr_avl avl, void *key, void *user_data) {
- gpr_avl_node *old_root = avl.root;
+gpr_avl gpr_avl_remove(gpr_avl avl, void* key, void* user_data) {
+ gpr_avl_node* old_root = avl.root;
avl.root = remove_key(avl.vtable, avl.root, key, user_data);
assert_invariants(avl.root);
unref_node(avl.vtable, old_root, user_data);
return avl;
}
-gpr_avl gpr_avl_ref(gpr_avl avl, void *user_data) {
+gpr_avl gpr_avl_ref(gpr_avl avl, void* user_data) {
ref_node(avl.root);
return avl;
}
-void gpr_avl_unref(gpr_avl avl, void *user_data) {
+void gpr_avl_unref(gpr_avl avl, void* user_data) {
unref_node(avl.vtable, avl.root, user_data);
}
diff --git a/src/core/lib/support/cmdline.cc b/src/core/lib/support/cmdline.cc
index 9fb80d4460..49b34194c3 100644
--- a/src/core/lib/support/cmdline.cc
+++ b/src/core/lib/support/cmdline.cc
@@ -30,33 +30,33 @@
typedef enum { ARGTYPE_INT, ARGTYPE_BOOL, ARGTYPE_STRING } argtype;
typedef struct arg {
- const char *name;
- const char *help;
+ const char* name;
+ const char* help;
argtype type;
- void *value;
- struct arg *next;
+ void* value;
+ struct arg* next;
} arg;
struct gpr_cmdline {
- const char *description;
- arg *args;
- const char *argv0;
+ const char* description;
+ arg* args;
+ const char* argv0;
- const char *extra_arg_name;
- const char *extra_arg_help;
- void (*extra_arg)(void *user_data, const char *arg);
- void *extra_arg_user_data;
+ const char* extra_arg_name;
+ const char* extra_arg_help;
+ void (*extra_arg)(void* user_data, const char* arg);
+ void* extra_arg_user_data;
- int (*state)(gpr_cmdline *cl, char *arg);
- arg *cur_arg;
+ int (*state)(gpr_cmdline* cl, char* arg);
+ arg* cur_arg;
int survive_failure;
};
-static int normal_state(gpr_cmdline *cl, char *arg);
+static int normal_state(gpr_cmdline* cl, char* arg);
-gpr_cmdline *gpr_cmdline_create(const char *description) {
- gpr_cmdline *cl = (gpr_cmdline *)gpr_zalloc(sizeof(gpr_cmdline));
+gpr_cmdline* gpr_cmdline_create(const char* description) {
+ gpr_cmdline* cl = (gpr_cmdline*)gpr_zalloc(sizeof(gpr_cmdline));
cl->description = description;
cl->state = normal_state;
@@ -64,28 +64,28 @@ gpr_cmdline *gpr_cmdline_create(const char *description) {
return cl;
}
-void gpr_cmdline_set_survive_failure(gpr_cmdline *cl) {
+void gpr_cmdline_set_survive_failure(gpr_cmdline* cl) {
cl->survive_failure = 1;
}
-void gpr_cmdline_destroy(gpr_cmdline *cl) {
+void gpr_cmdline_destroy(gpr_cmdline* cl) {
while (cl->args) {
- arg *a = cl->args;
+ arg* a = cl->args;
cl->args = a->next;
gpr_free(a);
}
gpr_free(cl);
}
-static void add_arg(gpr_cmdline *cl, const char *name, const char *help,
- argtype type, void *value) {
- arg *a;
+static void add_arg(gpr_cmdline* cl, const char* name, const char* help,
+ argtype type, void* value) {
+ arg* a;
for (a = cl->args; a; a = a->next) {
GPR_ASSERT(0 != strcmp(a->name, name));
}
- a = (arg *)gpr_zalloc(sizeof(arg));
+ a = (arg*)gpr_zalloc(sizeof(arg));
a->name = name;
a->help = help;
a->type = type;
@@ -94,24 +94,24 @@ static void add_arg(gpr_cmdline *cl, const char *name, const char *help,
cl->args = a;
}
-void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name, const char *help,
- int *value) {
+void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name, const char* help,
+ int* value) {
add_arg(cl, name, help, ARGTYPE_INT, value);
}
-void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name, const char *help,
- int *value) {
+void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
+ int* value) {
add_arg(cl, name, help, ARGTYPE_BOOL, value);
}
-void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, const char *help,
- char **value) {
+void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
+ char** value) {
add_arg(cl, name, help, ARGTYPE_STRING, value);
}
void gpr_cmdline_on_extra_arg(
- gpr_cmdline *cl, const char *name, const char *help,
- void (*on_extra_arg)(void *user_data, const char *arg), void *user_data) {
+ gpr_cmdline* cl, const char* name, const char* help,
+ void (*on_extra_arg)(void* user_data, const char* arg), void* user_data) {
GPR_ASSERT(!cl->extra_arg);
GPR_ASSERT(on_extra_arg);
@@ -124,8 +124,8 @@ void gpr_cmdline_on_extra_arg(
/* recursively descend argument list, adding the last element
to s first - so that arguments are added in the order they were
added to the list by api calls */
-static void add_args_to_usage(gpr_strvec *s, arg *a) {
- char *tmp;
+static void add_args_to_usage(gpr_strvec* s, arg* a) {
+ char* tmp;
if (!a) return;
add_args_to_usage(s, a->next);
@@ -146,11 +146,11 @@ static void add_args_to_usage(gpr_strvec *s, arg *a) {
}
}
-char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
+char* gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0) {
/* TODO(ctiller): make this prettier */
gpr_strvec s;
- char *tmp;
- const char *name = strrchr(argv0, '/');
+ char* tmp;
+ const char* name = strrchr(argv0, '/');
if (name) {
name++;
@@ -174,8 +174,8 @@ char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
return tmp;
}
-static int print_usage_and_die(gpr_cmdline *cl) {
- char *usage = gpr_cmdline_usage_string(cl, cl->argv0);
+static int print_usage_and_die(gpr_cmdline* cl) {
+ char* usage = gpr_cmdline_usage_string(cl, cl->argv0);
fprintf(stderr, "%s", usage);
gpr_free(usage);
if (!cl->survive_failure) {
@@ -184,7 +184,7 @@ static int print_usage_and_die(gpr_cmdline *cl) {
return 0;
}
-static int extra_state(gpr_cmdline *cl, char *str) {
+static int extra_state(gpr_cmdline* cl, char* str) {
if (!cl->extra_arg) {
return print_usage_and_die(cl);
}
@@ -192,8 +192,8 @@ static int extra_state(gpr_cmdline *cl, char *str) {
return 1;
}
-static arg *find_arg(gpr_cmdline *cl, char *name) {
- arg *a;
+static arg* find_arg(gpr_cmdline* cl, char* name) {
+ arg* a;
for (a = cl->args; a; a = a->next) {
if (0 == strcmp(a->name, name)) {
@@ -209,9 +209,9 @@ static arg *find_arg(gpr_cmdline *cl, char *name) {
return a;
}
-static int value_state(gpr_cmdline *cl, char *str) {
+static int value_state(gpr_cmdline* cl, char* str) {
long intval;
- char *end;
+ char* end;
GPR_ASSERT(cl->cur_arg);
@@ -223,13 +223,13 @@ static int value_state(gpr_cmdline *cl, char *str) {
cl->cur_arg->name);
return print_usage_and_die(cl);
}
- *(int *)cl->cur_arg->value = (int)intval;
+ *(int*)cl->cur_arg->value = (int)intval;
break;
case ARGTYPE_BOOL:
if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) {
- *(int *)cl->cur_arg->value = 1;
+ *(int*)cl->cur_arg->value = 1;
} else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) {
- *(int *)cl->cur_arg->value = 0;
+ *(int*)cl->cur_arg->value = 0;
} else {
fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
cl->cur_arg->name);
@@ -237,7 +237,7 @@ static int value_state(gpr_cmdline *cl, char *str) {
}
break;
case ARGTYPE_STRING:
- *(char **)cl->cur_arg->value = str;
+ *(char**)cl->cur_arg->value = str;
break;
}
@@ -245,10 +245,10 @@ static int value_state(gpr_cmdline *cl, char *str) {
return 1;
}
-static int normal_state(gpr_cmdline *cl, char *str) {
- char *eq = NULL;
- char *tmp = NULL;
- char *arg_name = NULL;
+static int normal_state(gpr_cmdline* cl, char* str) {
+ char* eq = NULL;
+ char* tmp = NULL;
+ char* arg_name = NULL;
int r = 1;
if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
@@ -281,13 +281,13 @@ static int normal_state(gpr_cmdline *cl, char *str) {
fprintf(stderr, "%s is not a flag argument\n", str);
return print_usage_and_die(cl);
}
- *(int *)cl->cur_arg->value = 0;
+ *(int*)cl->cur_arg->value = 0;
return 1; /* early out */
}
eq = strchr(str, '=');
if (eq != NULL) {
/* copy the string into a temp buffer and extract the name */
- tmp = arg_name = (char *)gpr_malloc((size_t)(eq - str + 1));
+ tmp = arg_name = (char*)gpr_malloc((size_t)(eq - str + 1));
memcpy(arg_name, str, (size_t)(eq - str));
arg_name[eq - str] = 0;
} else {
@@ -305,7 +305,7 @@ static int normal_state(gpr_cmdline *cl, char *str) {
cl->state = value_state;
} else {
/* flag parameter: just set the value */
- *(int *)cl->cur_arg->value = 1;
+ *(int*)cl->cur_arg->value = 1;
}
} else {
r = extra_state(cl, str);
@@ -315,7 +315,7 @@ static int normal_state(gpr_cmdline *cl, char *str) {
return r;
}
-int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
+int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv) {
int i;
GPR_ASSERT(argc >= 1);
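
A minimal sketch of the parser above in use (the public <grpc/support/cmdline.h> header is an assumption, not shown in this diff):

  #include <grpc/support/cmdline.h>

  int cmdline_sketch(int argc, char** argv) {
    int port = 8080;
    int verbose = 0;
    char* name = NULL;
    gpr_cmdline* cl = gpr_cmdline_create("example tool");
    gpr_cmdline_add_int(cl, "port", "Port to listen on", &port);
    gpr_cmdline_add_flag(cl, "verbose", "Enable verbose output", &verbose);
    gpr_cmdline_add_string(cl, "name", "Display name", &name);
    /* Accepts forms such as --port=1234, --verbose, --no-verbose, --name foo. */
    int ok = gpr_cmdline_parse(cl, argc, argv);
    gpr_cmdline_destroy(cl);
    return ok ? port : -1;
  }
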
diff --git a/src/core/lib/support/env.h b/src/core/lib/support/env.h
index e2c012a728..f50d7bcb7a 100644
--- a/src/core/lib/support/env.h
+++ b/src/core/lib/support/env.h
@@ -31,16 +31,16 @@ extern "C" {
Returns a newly allocated string. It is the responsibility of the caller to
gpr_free the return value if not NULL (which means that the environment
variable exists). */
-char *gpr_getenv(const char *name);
+char* gpr_getenv(const char* name);
/* Sets the environment with the specified name to the specified value. */
-void gpr_setenv(const char *name, const char *value);
+void gpr_setenv(const char* name, const char* value);
/* This is a version of gpr_getenv that does not produce any output if it has to
use an insecure version of the function. It is ONLY to be used to solve the
problem in which we need to check an env variable to configure the verbosity
level of logging. So DO NOT USE THIS. */
-const char *gpr_getenv_silent(const char *name, char **dst);
+const char* gpr_getenv_silent(const char* name, char** dst);
#ifdef __cplusplus
}
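
Following the contract documented above (the returned string is newly allocated and owned by the caller), a short sketch:

  #include <grpc/support/alloc.h>
  #include "src/core/lib/support/env.h"

  static void env_sketch(void) {
    gpr_setenv("GRPC_VERBOSITY", "DEBUG");
    char* value = gpr_getenv("GRPC_VERBOSITY"); /* copy, or NULL if unset */
    if (value != NULL) {
      /* ... use value ... */
      gpr_free(value); /* caller must free the returned copy */
    }
  }
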
diff --git a/src/core/lib/support/env_linux.cc b/src/core/lib/support/env_linux.cc
index 4c45a977ca..012ef63eff 100644
--- a/src/core/lib/support/env_linux.cc
+++ b/src/core/lib/support/env_linux.cc
@@ -38,15 +38,15 @@
#include "src/core/lib/support/string.h"
-const char *gpr_getenv_silent(const char *name, char **dst) {
- const char *insecure_func_used = NULL;
- char *result = NULL;
+const char* gpr_getenv_silent(const char* name, char** dst) {
+ const char* insecure_func_used = NULL;
+ char* result = NULL;
#if defined(GPR_BACKWARDS_COMPATIBILITY_MODE)
- typedef char *(*getenv_type)(const char *);
+ typedef char* (*getenv_type)(const char*);
static getenv_type getenv_func = NULL;
/* Check to see which getenv variant is supported (go from most
* to least secure) */
- const char *names[] = {"secure_getenv", "__secure_getenv", "getenv"};
+ const char* names[] = {"secure_getenv", "__secure_getenv", "getenv"};
for (size_t i = 0; getenv_func == NULL && i < GPR_ARRAY_SIZE(names); i++) {
getenv_func = (getenv_type)dlsym(RTLD_DEFAULT, names[i]);
if (getenv_func != NULL && strstr(names[i], "secure") == NULL) {
@@ -64,9 +64,9 @@ const char *gpr_getenv_silent(const char *name, char **dst) {
return insecure_func_used;
}
-char *gpr_getenv(const char *name) {
- char *result = NULL;
- const char *insecure_func_used = gpr_getenv_silent(name, &result);
+char* gpr_getenv(const char* name) {
+ char* result = NULL;
+ const char* insecure_func_used = gpr_getenv_silent(name, &result);
if (insecure_func_used != NULL) {
gpr_log(GPR_DEBUG, "Warning: insecure environment read function '%s' used",
insecure_func_used);
@@ -74,7 +74,7 @@ char *gpr_getenv(const char *name) {
return result;
}
-void gpr_setenv(const char *name, const char *value) {
+void gpr_setenv(const char* name, const char* value) {
int res = setenv(name, value, 1);
GPR_ASSERT(res == 0);
}
diff --git a/src/core/lib/support/env_posix.cc b/src/core/lib/support/env_posix.cc
index b88822ca02..7bea31ca55 100644
--- a/src/core/lib/support/env_posix.cc
+++ b/src/core/lib/support/env_posix.cc
@@ -29,17 +29,17 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/support/string.h"
-const char *gpr_getenv_silent(const char *name, char **dst) {
+const char* gpr_getenv_silent(const char* name, char** dst) {
*dst = gpr_getenv(name);
return NULL;
}
-char *gpr_getenv(const char *name) {
- char *result = getenv(name);
+char* gpr_getenv(const char* name) {
+ char* result = getenv(name);
return result == NULL ? result : gpr_strdup(result);
}
-void gpr_setenv(const char *name, const char *value) {
+void gpr_setenv(const char* name, const char* value) {
int res = setenv(name, value, 1);
GPR_ASSERT(res == 0);
}
diff --git a/src/core/lib/support/env_windows.cc b/src/core/lib/support/env_windows.cc
index c5a25dc201..cdb1d58ccd 100644
--- a/src/core/lib/support/env_windows.cc
+++ b/src/core/lib/support/env_windows.cc
@@ -30,13 +30,13 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-const char *gpr_getenv_silent(const char *name, char **dst) {
+const char* gpr_getenv_silent(const char* name, char** dst) {
*dst = gpr_getenv(name);
return NULL;
}
-char *gpr_getenv(const char *name) {
- char *result = NULL;
+char* gpr_getenv(const char* name) {
+ char* result = NULL;
DWORD size;
LPTSTR tresult = NULL;
LPTSTR tname = gpr_char_to_tchar(name);
@@ -60,7 +60,7 @@ char *gpr_getenv(const char *name) {
return result;
}
-void gpr_setenv(const char *name, const char *value) {
+void gpr_setenv(const char* name, const char* value) {
LPTSTR tname = gpr_char_to_tchar(name);
LPTSTR tvalue = gpr_char_to_tchar(value);
BOOL res = SetEnvironmentVariable(tname, tvalue);
diff --git a/src/core/lib/support/histogram.cc b/src/core/lib/support/histogram.cc
index 6d5ead9aa6..73c821a28b 100644
--- a/src/core/lib/support/histogram.cc
+++ b/src/core/lib/support/histogram.cc
@@ -51,29 +51,29 @@ struct gpr_histogram {
/* number of buckets */
size_t num_buckets;
/* the buckets themselves */
- uint32_t *buckets;
+ uint32_t* buckets;
};
/* determine a bucket index given a value - does no bounds checking */
-static size_t bucket_for_unchecked(gpr_histogram *h, double x) {
+static size_t bucket_for_unchecked(gpr_histogram* h, double x) {
return (size_t)(log(x) * h->one_on_log_multiplier);
}
/* bounds checked version of the above */
-static size_t bucket_for(gpr_histogram *h, double x) {
+static size_t bucket_for(gpr_histogram* h, double x) {
size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 1.0, h->max_possible));
GPR_ASSERT(bucket < h->num_buckets);
return bucket;
}
/* at what value does a bucket start? */
-static double bucket_start(gpr_histogram *h, double x) {
+static double bucket_start(gpr_histogram* h, double x) {
return pow(h->multiplier, x);
}
-gpr_histogram *gpr_histogram_create(double resolution,
+gpr_histogram* gpr_histogram_create(double resolution,
double max_bucket_start) {
- gpr_histogram *h = (gpr_histogram *)gpr_malloc(sizeof(gpr_histogram));
+ gpr_histogram* h = (gpr_histogram*)gpr_malloc(sizeof(gpr_histogram));
GPR_ASSERT(resolution > 0.0);
GPR_ASSERT(max_bucket_start > resolution);
h->sum = 0.0;
@@ -87,16 +87,16 @@ gpr_histogram *gpr_histogram_create(double resolution,
h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
GPR_ASSERT(h->num_buckets > 1);
GPR_ASSERT(h->num_buckets < 100000000);
- h->buckets = (uint32_t *)gpr_zalloc(sizeof(uint32_t) * h->num_buckets);
+ h->buckets = (uint32_t*)gpr_zalloc(sizeof(uint32_t) * h->num_buckets);
return h;
}
-void gpr_histogram_destroy(gpr_histogram *h) {
+void gpr_histogram_destroy(gpr_histogram* h) {
gpr_free(h->buckets);
gpr_free(h);
}
-void gpr_histogram_add(gpr_histogram *h, double x) {
+void gpr_histogram_add(gpr_histogram* h, double x) {
h->sum += x;
h->sum_of_squares += x * x;
h->count++;
@@ -109,7 +109,7 @@ void gpr_histogram_add(gpr_histogram *h, double x) {
h->buckets[bucket_for(h, x)]++;
}
-int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src) {
+int gpr_histogram_merge(gpr_histogram* dst, const gpr_histogram* src) {
if ((dst->num_buckets != src->num_buckets) ||
(dst->multiplier != src->multiplier)) {
/* Fail because these histograms don't match */
@@ -121,7 +121,7 @@ int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src) {
return 1;
}
-void gpr_histogram_merge_contents(gpr_histogram *dst, const uint32_t *data,
+void gpr_histogram_merge_contents(gpr_histogram* dst, const uint32_t* data,
size_t data_count, double min_seen,
double max_seen, double sum,
double sum_of_squares, double count) {
@@ -141,7 +141,7 @@ void gpr_histogram_merge_contents(gpr_histogram *dst, const uint32_t *data,
}
}
-static double threshold_for_count_below(gpr_histogram *h, double count_below) {
+static double threshold_for_count_below(gpr_histogram* h, double count_below) {
double count_so_far;
double lower_bound;
double upper_bound;
@@ -183,46 +183,45 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
should lie */
lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1));
- return GPR_CLAMP(upper_bound -
- (upper_bound - lower_bound) *
- (count_so_far - count_below) /
- h->buckets[lower_idx],
+ return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) /
+ h->buckets[lower_idx],
h->min_seen, h->max_seen);
}
}
-double gpr_histogram_percentile(gpr_histogram *h, double percentile) {
+double gpr_histogram_percentile(gpr_histogram* h, double percentile) {
return threshold_for_count_below(h, h->count * percentile / 100.0);
}
-double gpr_histogram_mean(gpr_histogram *h) {
+double gpr_histogram_mean(gpr_histogram* h) {
GPR_ASSERT(h->count != 0);
return h->sum / h->count;
}
-double gpr_histogram_stddev(gpr_histogram *h) {
+double gpr_histogram_stddev(gpr_histogram* h) {
return sqrt(gpr_histogram_variance(h));
}
-double gpr_histogram_variance(gpr_histogram *h) {
+double gpr_histogram_variance(gpr_histogram* h) {
if (h->count == 0) return 0.0;
return (h->sum_of_squares * h->count - h->sum * h->sum) /
(h->count * h->count);
}
-double gpr_histogram_maximum(gpr_histogram *h) { return h->max_seen; }
+double gpr_histogram_maximum(gpr_histogram* h) { return h->max_seen; }
-double gpr_histogram_minimum(gpr_histogram *h) { return h->min_seen; }
+double gpr_histogram_minimum(gpr_histogram* h) { return h->min_seen; }
-double gpr_histogram_count(gpr_histogram *h) { return h->count; }
+double gpr_histogram_count(gpr_histogram* h) { return h->count; }
-double gpr_histogram_sum(gpr_histogram *h) { return h->sum; }
+double gpr_histogram_sum(gpr_histogram* h) { return h->sum; }
-double gpr_histogram_sum_of_squares(gpr_histogram *h) {
+double gpr_histogram_sum_of_squares(gpr_histogram* h) {
return h->sum_of_squares;
}
-const uint32_t *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) {
+const uint32_t* gpr_histogram_get_contents(gpr_histogram* h, size_t* size) {
*size = h->num_buckets;
return h->buckets;
}
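
The histogram above stores counts in exponentially sized buckets, so percentiles are interpolated within the bucket where the target count falls. A usage sketch (the public <grpc/support/histogram.h> header is an assumption):

  #include <grpc/support/histogram.h>

  static void histogram_sketch(void) {
    /* resolution 0.01 gives roughly 1%-wide buckets; recorded values are
       clamped to [1.0, max_bucket_start]. */
    gpr_histogram* h = gpr_histogram_create(0.01, 60e3);
    for (int i = 1; i <= 1000; i++) {
      gpr_histogram_add(h, (double)i);
    }
    double p50 = gpr_histogram_percentile(h, 50.0);
    double mean = gpr_histogram_mean(h); /* 500.5 for this input */
    (void)p50;
    (void)mean;
    gpr_histogram_destroy(h);
  }
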
diff --git a/src/core/lib/support/host_port.cc b/src/core/lib/support/host_port.cc
index 3302e574ab..1927d5507d 100644
--- a/src/core/lib/support/host_port.cc
+++ b/src/core/lib/support/host_port.cc
@@ -25,7 +25,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/support/string.h"
-int gpr_join_host_port(char **out, const char *host, int port) {
+int gpr_join_host_port(char** out, const char* host, int port) {
if (host[0] != '[' && strchr(host, ':') != NULL) {
/* IPv6 literals must be enclosed in brackets. */
return gpr_asprintf(out, "[%s]:%d", host, port);
@@ -35,17 +35,17 @@ int gpr_join_host_port(char **out, const char *host, int port) {
}
}
-int gpr_split_host_port(const char *name, char **host, char **port) {
- const char *host_start;
+int gpr_split_host_port(const char* name, char** host, char** port) {
+ const char* host_start;
size_t host_len;
- const char *port_start;
+ const char* port_start;
*host = NULL;
*port = NULL;
if (name[0] == '[') {
/* Parse a bracketed host, typically an IPv6 literal. */
- const char *rbracket = strchr(name, ']');
+ const char* rbracket = strchr(name, ']');
if (rbracket == NULL) {
/* Unmatched [ */
return 0;
@@ -68,7 +68,7 @@ int gpr_split_host_port(const char *name, char **host, char **port) {
return 0;
}
} else {
- const char *colon = strchr(name, ':');
+ const char* colon = strchr(name, ':');
if (colon != NULL && strchr(colon + 1, ':') == NULL) {
/* Exactly 1 colon. Split into host:port. */
host_start = name;
@@ -83,7 +83,7 @@ int gpr_split_host_port(const char *name, char **host, char **port) {
}
/* Allocate return values. */
- *host = (char *)gpr_malloc(host_len + 1);
+ *host = (char*)gpr_malloc(host_len + 1);
memcpy(*host, host_start, host_len);
(*host)[host_len] = '\0';
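gpr_join_host_port and gpr_split_host_port are inverses for the "[v6]:port" / "host:port" conventions: join brackets any host containing a colon, and split strips the brackets back off and hands the pieces back as freshly allocated strings (returning 0 and leaving both outputs NULL on a parse failure). A short round-trip sketch:

    #include <grpc/support/alloc.h>
    #include <grpc/support/host_port.h>

    void host_port_roundtrip(void) {
      char* joined;
      gpr_join_host_port(&joined, "::1", 443); /* -> "[::1]:443" */
      char* host;
      char* port;
      if (gpr_split_host_port(joined, &host, &port)) {
        /* host == "::1", port == "443"; the caller frees both */
        gpr_free(host);
        gpr_free(port);
      }
      gpr_free(joined);
    }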
diff --git a/src/core/lib/support/log.cc b/src/core/lib/support/log.cc
index 69f92e001c..2140e4bd69 100644
--- a/src/core/lib/support/log.cc
+++ b/src/core/lib/support/log.cc
@@ -27,11 +27,11 @@
#include <stdio.h>
#include <string.h>
-extern "C" void gpr_default_log(gpr_log_func_args *args);
+extern "C" void gpr_default_log(gpr_log_func_args* args);
static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
static gpr_atm g_min_severity_to_print = GPR_LOG_VERBOSITY_UNSET;
-const char *gpr_log_severity_string(gpr_log_severity severity) {
+const char* gpr_log_severity_string(gpr_log_severity severity) {
switch (severity) {
case GPR_LOG_SEVERITY_DEBUG:
return "D";
@@ -43,8 +43,8 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-void gpr_log_message(const char *file, int line, gpr_log_severity severity,
- const char *message) {
+void gpr_log_message(const char* file, int line, gpr_log_severity severity,
+ const char* message) {
if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) {
return;
}
@@ -64,8 +64,8 @@ void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) {
}
void gpr_log_verbosity_init() {
- char *verbosity = NULL;
- const char *insecure_getenv = gpr_getenv_silent("GRPC_VERBOSITY", &verbosity);
+ char* verbosity = NULL;
+ const char* insecure_getenv = gpr_getenv_silent("GRPC_VERBOSITY", &verbosity);
gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR;
if (verbosity != NULL) {
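The verbosity threshold is a process-wide minimum: gpr_log_verbosity_init reads GRPC_VERBOSITY from the environment (ERROR being the default), and gpr_log_message drops anything below the stored minimum. A sketch of raising verbosity at runtime instead of via the environment, assuming the usual GPR_DEBUG convenience macro from the same header:

    #include <grpc/support/log.h>

    void enable_debug_logging(void) {
      gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
      gpr_log(GPR_DEBUG, "debug logging enabled");
    }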
diff --git a/src/core/lib/support/log_android.cc b/src/core/lib/support/log_android.cc
index 9e8529cbac..73d24cd84d 100644
--- a/src/core/lib/support/log_android.cc
+++ b/src/core/lib/support/log_android.cc
@@ -39,9 +39,9 @@ static android_LogPriority severity_to_log_priority(gpr_log_severity severity) {
return ANDROID_LOG_DEFAULT;
}
-extern "C" void gpr_log(const char *file, int line, gpr_log_severity severity,
- const char *format, ...) {
- char *message = NULL;
+extern "C" void gpr_log(const char* file, int line, gpr_log_severity severity,
+ const char* format, ...) {
+ char* message = NULL;
va_list args;
va_start(args, format);
vasprintf(&message, format, args);
@@ -50,10 +50,10 @@ extern "C" void gpr_log(const char *file, int line, gpr_log_severity severity,
free(message);
}
-extern "C" void gpr_default_log(gpr_log_func_args *args) {
- const char *final_slash;
- const char *display_file;
- char *output = NULL;
+extern "C" void gpr_default_log(gpr_log_func_args* args) {
+ const char* final_slash;
+ const char* display_file;
+ char* output = NULL;
final_slash = strrchr(args->file, '/');
if (final_slash == NULL)
diff --git a/src/core/lib/support/log_linux.cc b/src/core/lib/support/log_linux.cc
index 0914acedf4..e9be970305 100644
--- a/src/core/lib/support/log_linux.cc
+++ b/src/core/lib/support/log_linux.cc
@@ -41,9 +41,9 @@
static long gettid(void) { return syscall(__NR_gettid); }
-void gpr_log(const char *file, int line, gpr_log_severity severity,
- const char *format, ...) {
- char *message = NULL;
+void gpr_log(const char* file, int line, gpr_log_severity severity,
+ const char* format, ...) {
+ char* message = NULL;
va_list args;
va_start(args, format);
if (vasprintf(&message, format, args) == -1) {
@@ -56,10 +56,10 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
free(message);
}
-extern "C" void gpr_default_log(gpr_log_func_args *args) {
- const char *final_slash;
- char *prefix;
- const char *display_file;
+extern "C" void gpr_default_log(gpr_log_func_args* args) {
+ const char* final_slash;
+ char* prefix;
+ const char* display_file;
char time_buffer[64];
time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
diff --git a/src/core/lib/support/log_posix.cc b/src/core/lib/support/log_posix.cc
index 29530c858f..e765f91390 100644
--- a/src/core/lib/support/log_posix.cc
+++ b/src/core/lib/support/log_posix.cc
@@ -27,17 +27,16 @@
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
-#include <stdio.h>
#include <string.h>
#include <time.h>
static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
-void gpr_log(const char *file, int line, gpr_log_severity severity,
- const char *format, ...) {
+void gpr_log(const char* file, int line, gpr_log_severity severity,
+ const char* format, ...) {
char buf[64];
- char *allocated = NULL;
- char *message = NULL;
+ char* allocated = NULL;
+ char* message = NULL;
int ret;
va_list args;
va_start(args, format);
@@ -48,7 +47,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
- message = allocated = (char *)gpr_malloc((size_t)ret + 1);
+ message = allocated = (char*)gpr_malloc((size_t)ret + 1);
va_start(args, format);
vsnprintf(message, (size_t)(ret + 1), format, args);
va_end(args);
@@ -57,9 +56,9 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
gpr_free(allocated);
}
-extern "C" void gpr_default_log(gpr_log_func_args *args) {
- const char *final_slash;
- const char *display_file;
+extern "C" void gpr_default_log(gpr_log_func_args* args) {
+ const char* final_slash;
+ const char* display_file;
char time_buffer[64];
time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
@@ -79,7 +78,7 @@ extern "C" void gpr_default_log(gpr_log_func_args *args) {
strcpy(time_buffer, "error:strftime");
}
- char *prefix;
+ char* prefix;
gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]",
gpr_log_severity_string(args->severity), time_buffer,
(int)(now.tv_nsec), gettid(), display_file, args->line);
diff --git a/src/core/lib/support/log_windows.cc b/src/core/lib/support/log_windows.cc
index ee52abea73..d4481791e2 100644
--- a/src/core/lib/support/log_windows.cc
+++ b/src/core/lib/support/log_windows.cc
@@ -32,9 +32,9 @@
#include "src/core/lib/support/string.h"
#include "src/core/lib/support/string_windows.h"
-void gpr_log(const char *file, int line, gpr_log_severity severity,
- const char *format, ...) {
- char *message = NULL;
+void gpr_log(const char* file, int line, gpr_log_severity severity,
+ const char* format, ...) {
+ char* message = NULL;
va_list args;
int ret;
@@ -47,7 +47,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
} else {
/* Allocate a new buffer, with space for the NUL terminator. */
size_t strp_buflen = (size_t)ret + 1;
- message = (char *)gpr_malloc(strp_buflen);
+ message = (char*)gpr_malloc(strp_buflen);
/* Print to the buffer. */
va_start(args, format);
@@ -65,9 +65,9 @@ void gpr_log(const char *file, int line, gpr_log_severity severity,
}
/* Simple starter implementation */
-extern "C" void gpr_default_log(gpr_log_func_args *args) {
- const char *final_slash;
- const char *display_file;
+extern "C" void gpr_default_log(gpr_log_func_args* args) {
+ const char* final_slash;
+ const char* display_file;
char time_buffer[64];
time_t timer;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
diff --git a/src/core/lib/support/mpscq.cc b/src/core/lib/support/mpscq.cc
index e9f893988d..db25f24264 100644
--- a/src/core/lib/support/mpscq.cc
+++ b/src/core/lib/support/mpscq.cc
@@ -20,32 +20,32 @@
#include <grpc/support/log.h>
-void gpr_mpscq_init(gpr_mpscq *q) {
+void gpr_mpscq_init(gpr_mpscq* q) {
gpr_atm_no_barrier_store(&q->head, (gpr_atm)&q->stub);
q->tail = &q->stub;
gpr_atm_no_barrier_store(&q->stub.next, (gpr_atm)NULL);
}
-void gpr_mpscq_destroy(gpr_mpscq *q) {
+void gpr_mpscq_destroy(gpr_mpscq* q) {
GPR_ASSERT(gpr_atm_no_barrier_load(&q->head) == (gpr_atm)&q->stub);
GPR_ASSERT(q->tail == &q->stub);
}
-void gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n) {
+void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) {
gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL);
- gpr_mpscq_node *prev =
- (gpr_mpscq_node *)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
+ gpr_mpscq_node* prev =
+ (gpr_mpscq_node*)gpr_atm_full_xchg(&q->head, (gpr_atm)n);
gpr_atm_rel_store(&prev->next, (gpr_atm)n);
}
-gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q) {
+gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q) {
bool empty;
return gpr_mpscq_pop_and_check_end(q, &empty);
}
-gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty) {
- gpr_mpscq_node *tail = q->tail;
- gpr_mpscq_node *next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
+gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty) {
+ gpr_mpscq_node* tail = q->tail;
+ gpr_mpscq_node* next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next);
if (tail == &q->stub) {
// indicates the list is actually (ephemerally) empty
if (next == NULL) {
@@ -54,21 +54,21 @@ gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty) {
}
q->tail = next;
tail = next;
- next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
+ next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next);
}
if (next != NULL) {
*empty = false;
q->tail = next;
return tail;
}
- gpr_mpscq_node *head = (gpr_mpscq_node *)gpr_atm_acq_load(&q->head);
+ gpr_mpscq_node* head = (gpr_mpscq_node*)gpr_atm_acq_load(&q->head);
if (tail != head) {
*empty = false;
// indicates a retry is in order: we're still adding
return NULL;
}
gpr_mpscq_push(q, &q->stub);
- next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
+ next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next);
if (next != NULL) {
q->tail = next;
return tail;
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index ca63a044bb..1cc9d89feb 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -33,26 +33,28 @@ extern "C" {
// List node (include this in a data structure at the top, and add application
// fields after it - to simulate inheritance)
-typedef struct gpr_mpscq_node { gpr_atm next; } gpr_mpscq_node;
+typedef struct gpr_mpscq_node {
+ gpr_atm next;
+} gpr_mpscq_node;
// Actual queue type
typedef struct gpr_mpscq {
gpr_atm head;
// make sure head & tail don't share a cacheline
char padding[GPR_CACHELINE_SIZE];
- gpr_mpscq_node *tail;
+ gpr_mpscq_node* tail;
gpr_mpscq_node stub;
} gpr_mpscq;
-void gpr_mpscq_init(gpr_mpscq *q);
-void gpr_mpscq_destroy(gpr_mpscq *q);
+void gpr_mpscq_init(gpr_mpscq* q);
+void gpr_mpscq_destroy(gpr_mpscq* q);
// Push a node
-void gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n);
+void gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n);
// Pop a node (returns NULL if no node is ready - which doesn't indicate that
// the queue is empty!!)
-gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q);
+gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q);
// Pop a node; sets *empty to true if the queue is empty, or false if it is not
-gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty);
+gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty);
#ifdef __cplusplus
}
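The pop contract is the subtle part of this queue: gpr_mpscq_pop may return NULL while a producer is mid-push, so only the *empty out-parameter of gpr_mpscq_pop_and_check_end distinguishes "truly empty" from "retry". A consumer-side sketch, with a hypothetical work_item type embedding the node as its first field:

    #include "src/core/lib/support/mpscq.h"

    typedef struct {
      gpr_mpscq_node node; /* must come first: simulated inheritance */
      int payload;         /* hypothetical application field */
    } work_item;

    void drain(gpr_mpscq* q) {
      for (;;) {
        bool empty;
        gpr_mpscq_node* n = gpr_mpscq_pop_and_check_end(q, &empty);
        if (n == NULL) {
          if (empty) break; /* the queue is really drained */
          continue;         /* a push is in flight; retry */
        }
        work_item* item = (work_item*)n;
        /* ... process item->payload ... */
        (void)item;
      }
    }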
diff --git a/src/core/lib/support/murmur_hash.cc b/src/core/lib/support/murmur_hash.cc
index f06b970de7..4e08579a1d 100644
--- a/src/core/lib/support/murmur_hash.cc
+++ b/src/core/lib/support/murmur_hash.cc
@@ -29,8 +29,8 @@
(h) *= 0xc2b2ae35; \
(h) ^= (h) >> 16;
-uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) {
- const uint8_t *data = (const uint8_t *)key;
+uint32_t gpr_murmur_hash3(const void* key, size_t len, uint32_t seed) {
+ const uint8_t* data = (const uint8_t*)key;
const size_t nblocks = len / 4;
int i;
@@ -40,8 +40,8 @@ uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) {
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
- const uint32_t *blocks = ((const uint32_t *)key) + nblocks;
- const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
+ const uint32_t* blocks = ((const uint32_t*)key) + nblocks;
+ const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
/* body */
for (i = -(int)nblocks; i; i++) {
diff --git a/src/core/lib/support/murmur_hash.h b/src/core/lib/support/murmur_hash.h
index a4c642e49f..d02bba6962 100644
--- a/src/core/lib/support/murmur_hash.h
+++ b/src/core/lib/support/murmur_hash.h
@@ -28,7 +28,7 @@ extern "C" {
#endif
/* compute the hash of key (length len) */
-uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed);
+uint32_t gpr_murmur_hash3(const void* key, size_t len, uint32_t seed);
#ifdef __cplusplus
}
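gpr_murmur_hash3 computes the 32-bit MurmurHash3 of a byte range; it is a fast, non-cryptographic hash, so it suits hash tables rather than anything security-sensitive, and the seed only needs to stay fixed per table. A small bucketing sketch:

    #include <stdint.h>
    #include <string.h>
    #include "src/core/lib/support/murmur_hash.h"

    size_t bucket_for(const char* key, size_t nbuckets) {
      uint32_t h = gpr_murmur_hash3(key, strlen(key), 0xdeadbeef /* fixed seed */);
      return h % nbuckets;
    }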
diff --git a/src/core/lib/support/spinlock.h b/src/core/lib/support/spinlock.h
index 47584f6279..8b439642e9 100644
--- a/src/core/lib/support/spinlock.h
+++ b/src/core/lib/support/spinlock.h
@@ -23,7 +23,9 @@
/* Simple spinlock. No backoff strategy, gpr_spinlock_lock is almost always
a concurrency code smell. */
-typedef struct { gpr_atm atm; } gpr_spinlock;
+typedef struct {
+ gpr_atm atm;
+} gpr_spinlock;
#ifdef __cplusplus
#define GPR_SPINLOCK_INITIALIZER (gpr_spinlock{0})
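Per the comment, unconditional spinning is discouraged; the lock is meant for tiny critical sections where a trylock can be paired with a fallback path. A sketch, assuming the gpr_spinlock_trylock/gpr_spinlock_unlock macros defined alongside GPR_SPINLOCK_INITIALIZER in this header:

    #include "src/core/lib/support/spinlock.h"

    static gpr_spinlock g_counter_lock = GPR_SPINLOCK_INITIALIZER;
    static int g_counter;

    bool try_bump_counter(void) {
      if (!gpr_spinlock_trylock(&g_counter_lock)) return false; /* caller retries later */
      ++g_counter;
      gpr_spinlock_unlock(&g_counter_lock);
      return true;
    }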
diff --git a/src/core/lib/support/stack_lockfree.cc b/src/core/lib/support/stack_lockfree.cc
index 0fb64ed001..7a4ede3b92 100644
--- a/src/core/lib/support/stack_lockfree.cc
+++ b/src/core/lib/support/stack_lockfree.cc
@@ -55,18 +55,18 @@ typedef union lockfree_node {
#define INVALID_ENTRY_INDEX ((1 << 16) - 1)
struct gpr_stack_lockfree {
- lockfree_node *entries;
+ lockfree_node* entries;
lockfree_node head; /* An atomic entry describing curr head */
};
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
- gpr_stack_lockfree *stack;
- stack = (gpr_stack_lockfree *)gpr_malloc(sizeof(*stack));
+gpr_stack_lockfree* gpr_stack_lockfree_create(size_t entries) {
+ gpr_stack_lockfree* stack;
+ stack = (gpr_stack_lockfree*)gpr_malloc(sizeof(*stack));
/* Since we only allocate 16 bits to represent an entry number,
* make sure that we are within the desired range */
/* Reserve the highest entry number as a dummy */
GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
- stack->entries = (lockfree_node *)gpr_malloc_aligned(
+ stack->entries = (lockfree_node*)gpr_malloc_aligned(
entries * sizeof(stack->entries[0]), ENTRY_ALIGNMENT_BITS);
/* Clear out all entries */
memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
@@ -84,12 +84,12 @@ gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) {
return stack;
}
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack) {
gpr_free_aligned(stack->entries);
gpr_free(stack);
}
-int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
+int gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry) {
lockfree_node head;
lockfree_node newhead;
lockfree_node curent;
@@ -119,7 +119,7 @@ int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
return head.contents.index == INVALID_ENTRY_INDEX;
}
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
+int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack) {
lockfree_node head;
lockfree_node newhead;
diff --git a/src/core/lib/support/stack_lockfree.h b/src/core/lib/support/stack_lockfree.h
index 706f63fbf6..337ecc2b17 100644
--- a/src/core/lib/support/stack_lockfree.h
+++ b/src/core/lib/support/stack_lockfree.h
@@ -29,15 +29,15 @@ typedef struct gpr_stack_lockfree gpr_stack_lockfree;
/* This stack must specify the maximum number of entries to track.
The current implementation only allows up to 65534 entries */
-gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries);
-void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack);
+gpr_stack_lockfree* gpr_stack_lockfree_create(size_t entries);
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack);
/* Pass in a valid entry number for the next stack entry */
/* Returns 1 if this is the first element on the stack, 0 otherwise */
-int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry);
+int gpr_stack_lockfree_push(gpr_stack_lockfree*, int entry);
/* Returns -1 on empty or the actual entry number */
-int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack);
+int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack);
#ifdef __cplusplus
}
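Because entries are small 16-bit indices rather than pointers, the natural use of this stack is as a lock-free free-list over a preallocated array of slots. A sketch:

    #include "src/core/lib/support/stack_lockfree.h"

    #define NSLOTS 128 /* must stay below the 65534-entry limit */

    gpr_stack_lockfree* freelist_create(void) {
      gpr_stack_lockfree* s = gpr_stack_lockfree_create(NSLOTS);
      for (int i = 0; i < NSLOTS; i++) gpr_stack_lockfree_push(s, i);
      return s;
    }

    int freelist_acquire(gpr_stack_lockfree* s) {
      return gpr_stack_lockfree_pop(s); /* -1 when every slot is in use */
    }

    void freelist_release(gpr_stack_lockfree* s, int slot) {
      gpr_stack_lockfree_push(s, slot);
    }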
diff --git a/src/core/lib/support/string.cc b/src/core/lib/support/string.cc
index d55863892f..6dc4fbc921 100644
--- a/src/core/lib/support/string.cc
+++ b/src/core/lib/support/string.cc
@@ -30,8 +30,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
-char *gpr_strdup(const char *src) {
- char *dst;
+char* gpr_strdup(const char* src) {
+ char* dst;
size_t len;
if (!src) {
@@ -39,7 +39,7 @@ char *gpr_strdup(const char *src) {
}
len = strlen(src) + 1;
- dst = (char *)gpr_malloc(len);
+ dst = (char*)gpr_malloc(len);
memcpy(dst, src, len);
@@ -49,7 +49,7 @@ char *gpr_strdup(const char *src) {
typedef struct {
size_t capacity;
size_t length;
- char *data;
+ char* data;
} dump_out;
static dump_out dump_out_create(void) {
@@ -57,20 +57,20 @@ static dump_out dump_out_create(void) {
return r;
}
-static void dump_out_append(dump_out *out, char c) {
+static void dump_out_append(dump_out* out, char c) {
if (out->length == out->capacity) {
out->capacity = GPR_MAX(8, 2 * out->capacity);
- out->data = (char *)gpr_realloc(out->data, out->capacity);
+ out->data = (char*)gpr_realloc(out->data, out->capacity);
}
out->data[out->length++] = c;
}
-static void hexdump(dump_out *out, const char *buf, size_t len) {
- static const char *hex = "0123456789abcdef";
+static void hexdump(dump_out* out, const char* buf, size_t len) {
+ static const char* hex = "0123456789abcdef";
- const uint8_t *const beg = (const uint8_t *)buf;
- const uint8_t *const end = beg + len;
- const uint8_t *cur;
+ const uint8_t* const beg = (const uint8_t*)buf;
+ const uint8_t* const end = beg + len;
+ const uint8_t* cur;
for (cur = beg; cur != end; ++cur) {
if (cur != beg) dump_out_append(out, ' ');
@@ -79,24 +79,24 @@ static void hexdump(dump_out *out, const char *buf, size_t len) {
}
}
-static void asciidump(dump_out *out, const char *buf, size_t len) {
- const uint8_t *const beg = (const uint8_t *)buf;
- const uint8_t *const end = beg + len;
- const uint8_t *cur;
+static void asciidump(dump_out* out, const char* buf, size_t len) {
+ const uint8_t* const beg = (const uint8_t*)buf;
+ const uint8_t* const end = beg + len;
+ const uint8_t* cur;
int out_was_empty = (out->length == 0);
if (!out_was_empty) {
dump_out_append(out, ' ');
dump_out_append(out, '\'');
}
for (cur = beg; cur != end; ++cur) {
- dump_out_append(out, (char)(isprint(*cur) ? *(char *)cur : '.'));
+ dump_out_append(out, (char)(isprint(*cur) ? *(char*)cur : '.'));
}
if (!out_was_empty) {
dump_out_append(out, '\'');
}
}
-char *gpr_dump(const char *buf, size_t len, uint32_t flags) {
+char* gpr_dump(const char* buf, size_t len, uint32_t flags) {
dump_out out = dump_out_create();
if (flags & GPR_DUMP_HEX) {
hexdump(&out, buf, len);
@@ -108,7 +108,7 @@ char *gpr_dump(const char *buf, size_t len, uint32_t flags) {
return out.data;
}
-int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) {
+int gpr_parse_bytes_to_uint32(const char* buf, size_t len, uint32_t* result) {
uint32_t out = 0;
uint32_t new_val;
size_t i;
@@ -126,7 +126,7 @@ int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) {
return 1;
}
-void gpr_reverse_bytes(char *str, int len) {
+void gpr_reverse_bytes(char* str, int len) {
char *p1, *p2;
for (p1 = str, p2 = str + len - 1; p2 > p1; ++p1, --p2) {
char temp = *p1;
@@ -135,7 +135,7 @@ void gpr_reverse_bytes(char *str, int len) {
}
}
-int gpr_ltoa(long value, char *string) {
+int gpr_ltoa(long value, char* string) {
long sign;
int i = 0;
@@ -156,7 +156,7 @@ int gpr_ltoa(long value, char *string) {
return i;
}
-int int64_ttoa(int64_t value, char *string) {
+int int64_ttoa(int64_t value, char* string) {
int64_t sign;
int i = 0;
@@ -177,33 +177,33 @@ int int64_ttoa(int64_t value, char *string) {
return i;
}
-int gpr_parse_nonnegative_int(const char *value) {
- char *end;
+int gpr_parse_nonnegative_int(const char* value) {
+ char* end;
long result = strtol(value, &end, 0);
if (*end != '\0' || result < 0 || result > INT_MAX) return -1;
return (int)result;
}
-char *gpr_leftpad(const char *str, char flag, size_t length) {
+char* gpr_leftpad(const char* str, char flag, size_t length) {
const size_t str_length = strlen(str);
const size_t out_length = str_length > length ? str_length : length;
- char *out = (char *)gpr_malloc(out_length + 1);
+ char* out = (char*)gpr_malloc(out_length + 1);
memset(out, flag, out_length - str_length);
memcpy(out + out_length - str_length, str, str_length);
out[out_length] = 0;
return out;
}
-char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
+char* gpr_strjoin(const char** strs, size_t nstrs, size_t* final_length) {
return gpr_strjoin_sep(strs, nstrs, "", final_length);
}
-char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
- size_t *final_length) {
+char* gpr_strjoin_sep(const char** strs, size_t nstrs, const char* sep,
+ size_t* final_length) {
const size_t sep_len = strlen(sep);
size_t out_length = 0;
size_t i;
- char *out;
+ char* out;
for (i = 0; i < nstrs; i++) {
out_length += strlen(strs[i]);
}
@@ -211,7 +211,7 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
if (nstrs > 0) {
out_length += sep_len * (nstrs - 1); /* separators */
}
- out = (char *)gpr_malloc(out_length);
+ out = (char*)gpr_malloc(out_length);
out_length = 0;
for (i = 0; i < nstrs; i++) {
const size_t slen = strlen(strs[i]);
@@ -229,9 +229,9 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
return out;
}
-void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); }
+void gpr_strvec_init(gpr_strvec* sv) { memset(sv, 0, sizeof(*sv)); }
-void gpr_strvec_destroy(gpr_strvec *sv) {
+void gpr_strvec_destroy(gpr_strvec* sv) {
size_t i;
for (i = 0; i < sv->count; i++) {
gpr_free(sv->strs[i]);
@@ -239,19 +239,19 @@ void gpr_strvec_destroy(gpr_strvec *sv) {
gpr_free(sv->strs);
}
-void gpr_strvec_add(gpr_strvec *sv, char *str) {
+void gpr_strvec_add(gpr_strvec* sv, char* str) {
if (sv->count == sv->capacity) {
sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2);
- sv->strs = (char **)gpr_realloc(sv->strs, sizeof(char *) * sv->capacity);
+ sv->strs = (char**)gpr_realloc(sv->strs, sizeof(char*) * sv->capacity);
}
sv->strs[sv->count++] = str;
}
-char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) {
- return gpr_strjoin((const char **)sv->strs, sv->count, final_length);
+char* gpr_strvec_flatten(gpr_strvec* sv, size_t* final_length) {
+ return gpr_strjoin((const char**)sv->strs, sv->count, final_length);
}
-int gpr_stricmp(const char *a, const char *b) {
+int gpr_stricmp(const char* a, const char* b) {
int ca, cb;
do {
ca = tolower(*a);
@@ -262,22 +262,22 @@ int gpr_stricmp(const char *a, const char *b) {
return ca - cb;
}
-static void add_string_to_split(const char *beg, const char *end, char ***strs,
- size_t *nstrs, size_t *capstrs) {
- char *out = (char *)gpr_malloc((size_t)(end - beg) + 1);
+static void add_string_to_split(const char* beg, const char* end, char*** strs,
+ size_t* nstrs, size_t* capstrs) {
+ char* out = (char*)gpr_malloc((size_t)(end - beg) + 1);
memcpy(out, beg, (size_t)(end - beg));
out[end - beg] = 0;
if (*nstrs == *capstrs) {
*capstrs = GPR_MAX(8, 2 * *capstrs);
- *strs = (char **)gpr_realloc(*strs, sizeof(*strs) * *capstrs);
+ *strs = (char**)gpr_realloc(*strs, sizeof(*strs) * *capstrs);
}
(*strs)[*nstrs] = out;
++*nstrs;
}
-void gpr_string_split(const char *input, const char *sep, char ***strs,
- size_t *nstrs) {
- const char *next;
+void gpr_string_split(const char* input, const char* sep, char*** strs,
+ size_t* nstrs) {
+ const char* next;
*strs = NULL;
*nstrs = 0;
size_t capstrs = 0;
@@ -288,9 +288,9 @@ void gpr_string_split(const char *input, const char *sep, char ***strs,
add_string_to_split(input, input + strlen(input), strs, nstrs, &capstrs);
}
-void *gpr_memrchr(const void *s, int c, size_t n) {
+void* gpr_memrchr(const void* s, int c, size_t n) {
if (s == NULL) return NULL;
- char *b = (char *)s;
+ char* b = (char*)s;
size_t i;
for (i = 0; i < n; i++) {
if (b[n - i - 1] == c) {
@@ -300,12 +300,12 @@ void *gpr_memrchr(const void *s, int c, size_t n) {
return NULL;
}
-bool gpr_is_true(const char *s) {
+bool gpr_is_true(const char* s) {
size_t i;
if (s == NULL) {
return false;
}
- static const char *truthy[] = {"yes", "true", "1"};
+ static const char* truthy[] = {"yes", "true", "1"};
for (i = 0; i < GPR_ARRAY_SIZE(truthy); i++) {
if (0 == gpr_stricmp(s, truthy[i])) {
return true;
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index 5a56fa3a0a..0b18ffcec1 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -36,12 +36,12 @@ extern "C" {
/* Converts array buf, of length len, into a C string according to the flags.
Result should be freed with gpr_free() */
-char *gpr_dump(const char *buf, size_t len, uint32_t flags);
+char* gpr_dump(const char* buf, size_t len, uint32_t flags);
/* Parses an array of bytes into an integer (base 10). Returns 1 on success,
0 on failure. */
-int gpr_parse_bytes_to_uint32(const char *data, size_t length,
- uint32_t *result);
+int gpr_parse_bytes_to_uint32(const char* data, size_t length,
+ uint32_t* result);
/* Minimum buffer size for calling ltoa */
#define GPR_LTOA_MIN_BUFSIZE (3 * sizeof(long))
@@ -49,7 +49,7 @@ int gpr_parse_bytes_to_uint32(const char *data, size_t length,
/* Convert a long to a string in base 10; returns the length of the
output string (or 0 on failure).
output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */
-int gpr_ltoa(long value, char *output);
+int gpr_ltoa(long value, char* output);
/* Minimum buffer size for calling int64toa */
#define GPR_INT64TOA_MIN_BUFSIZE (3 * sizeof(int64_t))
@@ -59,56 +59,56 @@ output string (or 0 on failure).
output must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long.
NOTE: This function ensures sufficient bit width even on Win x64,
 where long is 32 bits in size. */

-int int64_ttoa(int64_t value, char *output);
+int int64_ttoa(int64_t value, char* output);
// Parses a non-negative number from a value string. Returns -1 on error.
-int gpr_parse_nonnegative_int(const char *value);
+int gpr_parse_nonnegative_int(const char* value);
/* Reverse a run of bytes */
-void gpr_reverse_bytes(char *str, int len);
+void gpr_reverse_bytes(char* str, int len);
/* Pad a string with flag characters. The given length specifies the minimum
field width. The input string is never truncated. */
-char *gpr_leftpad(const char *str, char flag, size_t length);
+char* gpr_leftpad(const char* str, char flag, size_t length);
/* Join a set of strings, returning the resulting string.
Total combined length (excluding null terminator) is returned in total_length
if it is non-null. */
-char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length);
+char* gpr_strjoin(const char** strs, size_t nstrs, size_t* total_length);
/* Join a set of strings using a separator, returning the resulting string.
Total combined length (excluding null terminator) is returned in total_length
if it is non-null. */
-char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
- size_t *total_length);
+char* gpr_strjoin_sep(const char** strs, size_t nstrs, const char* sep,
+ size_t* total_length);
-void gpr_string_split(const char *input, const char *sep, char ***strs,
- size_t *nstrs);
+void gpr_string_split(const char* input, const char* sep, char*** strs,
+ size_t* nstrs);
/* A vector of strings... for building up a final string one piece at a time */
typedef struct {
- char **strs;
+ char** strs;
size_t count;
size_t capacity;
} gpr_strvec;
/* Initialize/destroy */
-void gpr_strvec_init(gpr_strvec *strs);
-void gpr_strvec_destroy(gpr_strvec *strs);
+void gpr_strvec_init(gpr_strvec* strs);
+void gpr_strvec_destroy(gpr_strvec* strs);
/* Add a string to a strvec, takes ownership of the string */
-void gpr_strvec_add(gpr_strvec *strs, char *add);
+void gpr_strvec_add(gpr_strvec* strs, char* add);
/* Return a joined string with all added substrings, optionally setting
total_length as per gpr_strjoin */
-char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length);
+char* gpr_strvec_flatten(gpr_strvec* strs, size_t* total_length);
/** Case insensitive string comparison... return <0 if lower(a)<lower(b), ==0 if
lower(a)==lower(b), >0 if lower(a)>lower(b) */
-int gpr_stricmp(const char *a, const char *b);
+int gpr_stricmp(const char* a, const char* b);
-void *gpr_memrchr(const void *s, int c, size_t n);
+void* gpr_memrchr(const void* s, int c, size_t n);
/** Return true if lower(s) equals "true", "yes" or "1", otherwise false. */
-bool gpr_is_true(const char *s);
+bool gpr_is_true(const char* s);
#ifdef __cplusplus
}
#endif
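gpr_strvec pairs naturally with gpr_strjoin/gpr_strjoin_sep: pieces are accumulated (ownership transfers on add) and flattened once at the end. A sketch:

    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>
    #include "src/core/lib/support/string.h"

    char* build_message(void) {
      gpr_strvec v;
      gpr_strvec_init(&v);
      gpr_strvec_add(&v, gpr_strdup("status=")); /* the strvec now owns the copy */
      gpr_strvec_add(&v, gpr_strdup("OK"));
      size_t len;
      char* joined = gpr_strvec_flatten(&v, &len); /* "status=OK"; caller frees */
      gpr_strvec_destroy(&v);
      return joined;
    }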
diff --git a/src/core/lib/support/string_posix.cc b/src/core/lib/support/string_posix.cc
index 92de21a6e1..79c81656cc 100644
--- a/src/core/lib/support/string_posix.cc
+++ b/src/core/lib/support/string_posix.cc
@@ -27,7 +27,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
-int gpr_asprintf(char **strp, const char *format, ...) {
+int gpr_asprintf(char** strp, const char* format, ...) {
va_list args;
int ret;
char buf[64];
@@ -44,7 +44,7 @@ int gpr_asprintf(char **strp, const char *format, ...) {
/* Allocate a new buffer, with space for the NUL terminator. */
strp_buflen = (size_t)ret + 1;
- if ((*strp = (char *)gpr_malloc(strp_buflen)) == NULL) {
+ if ((*strp = (char*)gpr_malloc(strp_buflen)) == NULL) {
/* This shouldn't happen, because gpr_malloc() calls abort(). */
return -1;
}
diff --git a/src/core/lib/support/string_util_windows.cc b/src/core/lib/support/string_util_windows.cc
index b365512ee3..e2b386be55 100644
--- a/src/core/lib/support/string_util_windows.cc
+++ b/src/core/lib/support/string_util_windows.cc
@@ -65,9 +65,9 @@ LPSTR gpr_tchar_to_char(LPCTSTR input) { return (LPSTR)gpr_strdup(input); }
LPTSTR gpr_char_to_tchar(LPCTSTR input) { return (LPTSTR)gpr_strdup(input); }
#endif
-char *gpr_format_message(int messageid) {
+char* gpr_format_message(int messageid) {
LPTSTR tmessage;
- char *message;
+ char* message;
DWORD status = FormatMessage(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
diff --git a/src/core/lib/support/string_windows.cc b/src/core/lib/support/string_windows.cc
index d37863c066..ceb78f0054 100644
--- a/src/core/lib/support/string_windows.cc
+++ b/src/core/lib/support/string_windows.cc
@@ -31,7 +31,7 @@
#include "src/core/lib/support/string.h"
-int gpr_asprintf(char **strp, const char *format, ...) {
+int gpr_asprintf(char** strp, const char* format, ...) {
va_list args;
int ret;
size_t strp_buflen;
@@ -47,7 +47,7 @@ int gpr_asprintf(char **strp, const char *format, ...) {
/* Allocate a new buffer, with space for the NUL terminator. */
strp_buflen = (size_t)ret + 1;
- if ((*strp = (char *)gpr_malloc(strp_buflen)) == NULL) {
+ if ((*strp = (char*)gpr_malloc(strp_buflen)) == NULL) {
/* This shouldn't happen, because gpr_malloc() calls abort(). */
return -1;
}
diff --git a/src/core/lib/support/subprocess_posix.cc b/src/core/lib/support/subprocess_posix.cc
index af75162ee9..4d6972a0c4 100644
--- a/src/core/lib/support/subprocess_posix.cc
+++ b/src/core/lib/support/subprocess_posix.cc
@@ -41,19 +41,19 @@ struct gpr_subprocess {
bool joined;
};
-const char *gpr_subprocess_binary_extension() { return ""; }
+const char* gpr_subprocess_binary_extension() { return ""; }
-gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
- gpr_subprocess *r;
+gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
+ gpr_subprocess* r;
int pid;
- char **exec_args;
+ char** exec_args;
pid = fork();
if (pid == -1) {
return NULL;
} else if (pid == 0) {
- exec_args = (char **)gpr_malloc(((size_t)argc + 1) * sizeof(char *));
- memcpy(exec_args, argv, (size_t)argc * sizeof(char *));
+ exec_args = (char**)gpr_malloc(((size_t)argc + 1) * sizeof(char*));
+ memcpy(exec_args, argv, (size_t)argc * sizeof(char*));
exec_args[argc] = NULL;
execv(exec_args[0], exec_args);
/* if we reach here, an error has occurred */
@@ -61,13 +61,13 @@ gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
_exit(1);
return NULL;
} else {
- r = (gpr_subprocess *)gpr_zalloc(sizeof(gpr_subprocess));
+ r = (gpr_subprocess*)gpr_zalloc(sizeof(gpr_subprocess));
r->pid = pid;
return r;
}
}
-void gpr_subprocess_destroy(gpr_subprocess *p) {
+void gpr_subprocess_destroy(gpr_subprocess* p) {
if (!p->joined) {
kill(p->pid, SIGKILL);
gpr_subprocess_join(p);
@@ -75,7 +75,7 @@ void gpr_subprocess_destroy(gpr_subprocess *p) {
gpr_free(p);
}
-int gpr_subprocess_join(gpr_subprocess *p) {
+int gpr_subprocess_join(gpr_subprocess* p) {
int status;
retry:
if (waitpid(p->pid, &status, 0) == -1) {
@@ -90,7 +90,7 @@ retry:
return status;
}
-void gpr_subprocess_interrupt(gpr_subprocess *p) {
+void gpr_subprocess_interrupt(gpr_subprocess* p) {
if (!p->joined) {
kill(p->pid, SIGINT);
}
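The POSIX subprocess wrapper is a thin fork/execv pair: create returns NULL if the fork fails, join blocks in waitpid, and destroy SIGKILLs and joins anything not yet joined. A sketch, assuming the matching subprocess.h header in this directory; note that execv does no PATH search, so argv[0] must be a full path:

    #include <grpc/support/log.h>
    #include "src/core/lib/support/subprocess.h"

    void run_echo(void) {
      const char* argv[] = {"/bin/echo", "hello"};
      gpr_subprocess* p = gpr_subprocess_create(2, argv);
      if (p == NULL) return;
      int status = gpr_subprocess_join(p);
      gpr_log(GPR_INFO, "child finished, waitpid status %d", status);
      gpr_subprocess_destroy(p);
    }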
diff --git a/src/core/lib/support/subprocess_windows.cc b/src/core/lib/support/subprocess_windows.cc
index 6769f1d3a4..dcdafb5a63 100644
--- a/src/core/lib/support/subprocess_windows.cc
+++ b/src/core/lib/support/subprocess_windows.cc
@@ -36,16 +36,16 @@ struct gpr_subprocess {
int interrupted;
};
-const char *gpr_subprocess_binary_extension() { return ".exe"; }
+const char* gpr_subprocess_binary_extension() { return ".exe"; }
-gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
- gpr_subprocess *r;
+gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
+ gpr_subprocess* r;
STARTUPINFO si;
PROCESS_INFORMATION pi;
- char *args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL);
- TCHAR *args_tchar;
+ char* args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL);
+ TCHAR* args_tchar;
args_tchar = gpr_char_to_tchar(args);
gpr_free(args);
@@ -61,13 +61,13 @@ gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
}
gpr_free(args_tchar);
- r = (gpr_subprocess *)gpr_malloc(sizeof(gpr_subprocess));
+ r = (gpr_subprocess*)gpr_malloc(sizeof(gpr_subprocess));
memset(r, 0, sizeof(*r));
r->pi = pi;
return r;
}
-void gpr_subprocess_destroy(gpr_subprocess *p) {
+void gpr_subprocess_destroy(gpr_subprocess* p) {
if (p) {
if (!p->joined) {
gpr_subprocess_interrupt(p);
@@ -83,7 +83,7 @@ void gpr_subprocess_destroy(gpr_subprocess *p) {
}
}
-int gpr_subprocess_join(gpr_subprocess *p) {
+int gpr_subprocess_join(gpr_subprocess* p) {
DWORD dwExitCode;
if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
if (dwExitCode == STILL_ACTIVE) {
@@ -110,7 +110,7 @@ getExitCode:
}
}
-void gpr_subprocess_interrupt(gpr_subprocess *p) {
+void gpr_subprocess_interrupt(gpr_subprocess* p) {
DWORD dwExitCode;
if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
if (dwExitCode == STILL_ACTIVE) {
diff --git a/src/core/lib/support/sync.cc b/src/core/lib/support/sync.cc
index 994dcb0e14..1c051a1ca4 100644
--- a/src/core/lib/support/sync.cc
+++ b/src/core/lib/support/sync.cc
@@ -45,17 +45,17 @@ static void event_initialize(void) {
}
/* Hash ev into an element of sync_array[]. */
-static struct sync_array_s *hash(gpr_event *ev) {
+static struct sync_array_s* hash(gpr_event* ev) {
return &sync_array[((uintptr_t)ev) % event_sync_partitions];
}
-void gpr_event_init(gpr_event *ev) {
+void gpr_event_init(gpr_event* ev) {
gpr_once_init(&event_once, &event_initialize);
ev->state = 0;
}
-void gpr_event_set(gpr_event *ev, void *value) {
- struct sync_array_s *s = hash(ev);
+void gpr_event_set(gpr_event* ev, void* value) {
+ struct sync_array_s* s = hash(ev);
gpr_mu_lock(&s->mu);
GPR_ASSERT(gpr_atm_acq_load(&ev->state) == 0);
gpr_atm_rel_store(&ev->state, (gpr_atm)value);
@@ -64,28 +64,28 @@ void gpr_event_set(gpr_event *ev, void *value) {
GPR_ASSERT(value != NULL);
}
-void *gpr_event_get(gpr_event *ev) {
- return (void *)gpr_atm_acq_load(&ev->state);
+void* gpr_event_get(gpr_event* ev) {
+ return (void*)gpr_atm_acq_load(&ev->state);
}
-void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline) {
- void *result = (void *)gpr_atm_acq_load(&ev->state);
+void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline) {
+ void* result = (void*)gpr_atm_acq_load(&ev->state);
if (result == NULL) {
- struct sync_array_s *s = hash(ev);
+ struct sync_array_s* s = hash(ev);
gpr_mu_lock(&s->mu);
do {
- result = (void *)gpr_atm_acq_load(&ev->state);
+ result = (void*)gpr_atm_acq_load(&ev->state);
} while (result == NULL && !gpr_cv_wait(&s->cv, &s->mu, abs_deadline));
gpr_mu_unlock(&s->mu);
}
return result;
}
-void gpr_ref_init(gpr_refcount *r, int n) { gpr_atm_rel_store(&r->count, n); }
+void gpr_ref_init(gpr_refcount* r, int n) { gpr_atm_rel_store(&r->count, n); }
-void gpr_ref(gpr_refcount *r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); }
+void gpr_ref(gpr_refcount* r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); }
-void gpr_ref_non_zero(gpr_refcount *r) {
+void gpr_ref_non_zero(gpr_refcount* r) {
#ifndef NDEBUG
gpr_atm prior = gpr_atm_no_barrier_fetch_add(&r->count, 1);
assert(prior > 0);
@@ -94,29 +94,29 @@ void gpr_ref_non_zero(gpr_refcount *r) {
#endif
}
-void gpr_refn(gpr_refcount *r, int n) {
+void gpr_refn(gpr_refcount* r, int n) {
gpr_atm_no_barrier_fetch_add(&r->count, n);
}
-int gpr_unref(gpr_refcount *r) {
+int gpr_unref(gpr_refcount* r) {
gpr_atm prior = gpr_atm_full_fetch_add(&r->count, -1);
GPR_ASSERT(prior > 0);
return prior == 1;
}
-int gpr_ref_is_unique(gpr_refcount *r) {
+int gpr_ref_is_unique(gpr_refcount* r) {
return gpr_atm_acq_load(&r->count) == 1;
}
-void gpr_stats_init(gpr_stats_counter *c, intptr_t n) {
+void gpr_stats_init(gpr_stats_counter* c, intptr_t n) {
gpr_atm_rel_store(&c->value, n);
}
-void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc) {
+void gpr_stats_inc(gpr_stats_counter* c, intptr_t inc) {
gpr_atm_no_barrier_fetch_add(&c->value, inc);
}
-intptr_t gpr_stats_read(const gpr_stats_counter *c) {
+intptr_t gpr_stats_read(const gpr_stats_counter* c) {
/* don't need acquire-load, but we have no no-barrier load yet */
return gpr_atm_acq_load(&c->value);
}
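gpr_event is a one-shot, set-once cell: set publishes a non-NULL value (asserting it is only set once) and wakes any waiters, get polls, and wait blocks until the value appears or the absolute deadline passes, returning NULL on timeout. A handoff sketch:

    #include <grpc/support/sync.h>
    #include <grpc/support/time.h>

    static gpr_event g_done;

    void handoff_setup(void) { gpr_event_init(&g_done); } /* before threads start */

    void handoff_publish(void* result) {
      gpr_event_set(&g_done, result); /* result must be non-NULL; set at most once */
    }

    void* handoff_wait_5s(void) {
      return gpr_event_wait(&g_done,
                            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                         gpr_time_from_seconds(5, GPR_TIMESPAN)));
    }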
diff --git a/src/core/lib/support/sync_windows.cc b/src/core/lib/support/sync_windows.cc
index 62fdd40af7..7cd41633d5 100644
--- a/src/core/lib/support/sync_windows.cc
+++ b/src/core/lib/support/sync_windows.cc
@@ -26,25 +26,25 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
-void gpr_mu_init(gpr_mu *mu) {
+void gpr_mu_init(gpr_mu* mu) {
InitializeCriticalSection(&mu->cs);
mu->locked = 0;
}
-void gpr_mu_destroy(gpr_mu *mu) { DeleteCriticalSection(&mu->cs); }
+void gpr_mu_destroy(gpr_mu* mu) { DeleteCriticalSection(&mu->cs); }
-void gpr_mu_lock(gpr_mu *mu) {
+void gpr_mu_lock(gpr_mu* mu) {
EnterCriticalSection(&mu->cs);
GPR_ASSERT(!mu->locked);
mu->locked = 1;
}
-void gpr_mu_unlock(gpr_mu *mu) {
+void gpr_mu_unlock(gpr_mu* mu) {
mu->locked = 0;
LeaveCriticalSection(&mu->cs);
}
-int gpr_mu_trylock(gpr_mu *mu) {
+int gpr_mu_trylock(gpr_mu* mu) {
int result = TryEnterCriticalSection(&mu->cs);
if (result) {
if (mu->locked) { /* This thread already holds the lock. */
@@ -58,13 +58,13 @@ int gpr_mu_trylock(gpr_mu *mu) {
/*----------------------------------------*/
-void gpr_cv_init(gpr_cv *cv) { InitializeConditionVariable(cv); }
+void gpr_cv_init(gpr_cv* cv) { InitializeConditionVariable(cv); }
-void gpr_cv_destroy(gpr_cv *cv) {
+void gpr_cv_destroy(gpr_cv* cv) {
/* Condition variables don't need destruction in Win32. */
}
-int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
+int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
int timeout = 0;
DWORD timeout_max_ms;
mu->locked = 0;
@@ -93,23 +93,23 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
return timeout;
}
-void gpr_cv_signal(gpr_cv *cv) { WakeConditionVariable(cv); }
+void gpr_cv_signal(gpr_cv* cv) { WakeConditionVariable(cv); }
-void gpr_cv_broadcast(gpr_cv *cv) { WakeAllConditionVariable(cv); }
+void gpr_cv_broadcast(gpr_cv* cv) { WakeAllConditionVariable(cv); }
/*----------------------------------------*/
-static void *dummy;
+static void* dummy;
struct run_once_func_arg {
void (*init_function)(void);
};
-static BOOL CALLBACK run_once_func(gpr_once *once, void *v, void **pv) {
- struct run_once_func_arg *arg = (struct run_once_func_arg *)v;
+static BOOL CALLBACK run_once_func(gpr_once* once, void* v, void** pv) {
+ struct run_once_func_arg* arg = (struct run_once_func_arg*)v;
(*arg->init_function)();
return 1;
}
-void gpr_once_init(gpr_once *once, void (*init_function)(void)) {
+void gpr_once_init(gpr_once* once, void (*init_function)(void)) {
struct run_once_func_arg arg;
arg.init_function = init_function;
InitOnceExecuteOnce(once, run_once_func, &arg, &dummy);
diff --git a/src/core/lib/support/thd_posix.cc b/src/core/lib/support/thd_posix.cc
index 98afd10df7..297714e659 100644
--- a/src/core/lib/support/thd_posix.cc
+++ b/src/core/lib/support/thd_posix.cc
@@ -31,26 +31,26 @@
#include <string.h>
struct thd_arg {
- void (*body)(void *arg); /* body of a thread */
- void *arg; /* argument to a thread */
+ void (*body)(void* arg); /* body of a thread */
+ void* arg; /* argument to a thread */
};
/* Body of every thread started via gpr_thd_new. */
-static void *thread_body(void *v) {
- struct thd_arg a = *(struct thd_arg *)v;
+static void* thread_body(void* v) {
+ struct thd_arg a = *(struct thd_arg*)v;
free(v);
(*a.body)(a.arg);
return NULL;
}
-int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
- const gpr_thd_options *options) {
+int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
+ const gpr_thd_options* options) {
int thread_started;
pthread_attr_t attr;
pthread_t p;
/* don't use gpr_malloc as we may cause an infinite recursion with
* the profiling code */
- struct thd_arg *a = (struct thd_arg *)malloc(sizeof(*a));
+ struct thd_arg* a = (struct thd_arg*)malloc(sizeof(*a));
GPR_ASSERT(a != NULL);
a->body = thd_body;
a->arg = arg;
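gpr_thd_new stores only the body and arg pointers, so whatever arg points at must outlive the thread. A start/join sketch, assuming gpr_thd_options_default and gpr_thd_options_set_joinable from <grpc/support/thd.h>:

    #include <grpc/support/log.h>
    #include <grpc/support/thd.h>

    static void worker(void* arg) {
      gpr_log(GPR_INFO, "worker received %d", *(int*)arg);
    }

    void start_and_join_worker(void) {
      int value = 42; /* must stay alive until the thread is done with it */
      gpr_thd_options options = gpr_thd_options_default();
      gpr_thd_options_set_joinable(&options);
      gpr_thd_id tid;
      if (gpr_thd_new(&tid, worker, &value, &options)) {
        gpr_thd_join(tid);
      }
    }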
diff --git a/src/core/lib/support/thd_windows.cc b/src/core/lib/support/thd_windows.cc
index 1a82805dd9..5bda7f440c 100644
--- a/src/core/lib/support/thd_windows.cc
+++ b/src/core/lib/support/thd_windows.cc
@@ -36,23 +36,23 @@
#endif
struct thd_info {
- void (*body)(void *arg); /* body of a thread */
- void *arg; /* argument to a thread */
+ void (*body)(void* arg); /* body of a thread */
+ void* arg; /* argument to a thread */
HANDLE join_event; /* if joinable, the join event */
int joinable; /* true if not detached */
};
-static thread_local struct thd_info *g_thd_info;
+static thread_local struct thd_info* g_thd_info;
/* Destroys a thread info */
-static void destroy_thread(struct thd_info *t) {
+static void destroy_thread(struct thd_info* t) {
if (t->joinable) CloseHandle(t->join_event);
gpr_free(t);
}
/* Body of every thread started via gpr_thd_new. */
-static DWORD WINAPI thread_body(void *v) {
- g_thd_info = (struct thd_info *)v;
+static DWORD WINAPI thread_body(void* v) {
+ g_thd_info = (struct thd_info*)v;
g_thd_info->body(g_thd_info->arg);
if (g_thd_info->joinable) {
BOOL ret = SetEvent(g_thd_info->join_event);
@@ -63,10 +63,10 @@ static DWORD WINAPI thread_body(void *v) {
return 0;
}
-int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
- const gpr_thd_options *options) {
+int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
+ const gpr_thd_options* options) {
HANDLE handle;
- struct thd_info *info = (struct thd_info *)gpr_malloc(sizeof(*info));
+ struct thd_info* info = (struct thd_info*)gpr_malloc(sizeof(*info));
info->body = thd_body;
info->arg = arg;
*t = 0;
@@ -93,7 +93,7 @@ int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; }
void gpr_thd_join(gpr_thd_id t) {
- struct thd_info *info = (struct thd_info *)t;
+ struct thd_info* info = (struct thd_info*)t;
DWORD ret = WaitForSingleObject(info->join_event, INFINITE);
GPR_ASSERT(ret == WAIT_OBJECT_0);
destroy_thread(info);
diff --git a/src/core/lib/support/time_posix.cc b/src/core/lib/support/time_posix.cc
index 3f8a9094fd..3674ef7184 100644
--- a/src/core/lib/support/time_posix.cc
+++ b/src/core/lib/support/time_posix.cc
@@ -81,7 +81,7 @@ static gpr_timespec now_impl(gpr_clock_type clock_type) {
}
}
#else
-/* For some reason Apple's OSes haven't implemented clock_gettime. */
+ /* For some reason Apple's OSes haven't implemented clock_gettime. */
#include <mach/mach.h>
#include <mach/mach_time.h>
diff --git a/src/core/lib/support/time_precise.cc b/src/core/lib/support/time_precise.cc
index 05ef7c59bc..b7372df1b8 100644
--- a/src/core/lib/support/time_precise.cc
+++ b/src/core/lib/support/time_precise.cc
@@ -24,7 +24,7 @@
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
-static void gpr_get_cycle_counter(int64_t int *clk) {
+static void gpr_get_cycle_counter(int64_t int* clk) {
int64_t int ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
@@ -32,7 +32,7 @@ static void gpr_get_cycle_counter(int64_t int *clk) {
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
-static void gpr_get_cycle_counter(int64_t *clk) {
+static void gpr_get_cycle_counter(int64_t* clk) {
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
*clk = (int64_t)(high << 32) | (int64_t)low;
@@ -56,7 +56,7 @@ void gpr_precise_clock_init(void) {
gpr_log(GPR_DEBUG, "... cycles_per_second = %f\n", cycles_per_second);
}
-void gpr_precise_clock_now(gpr_timespec *clk) {
+void gpr_precise_clock_now(gpr_timespec* clk) {
int64_t counter;
double secs;
gpr_get_cycle_counter(&counter);
@@ -69,7 +69,7 @@ void gpr_precise_clock_now(gpr_timespec *clk) {
#else /* GRPC_TIMERS_RDTSC */
void gpr_precise_clock_init(void) {}
-void gpr_precise_clock_now(gpr_timespec *clk) {
+void gpr_precise_clock_now(gpr_timespec* clk) {
*clk = gpr_now(GPR_CLOCK_REALTIME);
clk->clock_type = GPR_CLOCK_PRECISE;
}
diff --git a/src/core/lib/support/time_precise.h b/src/core/lib/support/time_precise.h
index cb15cdf919..3befda3d86 100644
--- a/src/core/lib/support/time_precise.h
+++ b/src/core/lib/support/time_precise.h
@@ -26,7 +26,7 @@ extern "C" {
#endif
void gpr_precise_clock_init(void);
-void gpr_precise_clock_now(gpr_timespec *clk);
+void gpr_precise_clock_now(gpr_timespec* clk);
#ifdef __cplusplus
}
diff --git a/src/core/lib/support/tls_pthread.cc b/src/core/lib/support/tls_pthread.cc
index 9ebee577fe..ebeef2a8c2 100644
--- a/src/core/lib/support/tls_pthread.cc
+++ b/src/core/lib/support/tls_pthread.cc
@@ -22,8 +22,8 @@
#include <grpc/support/tls.h>
-intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value) {
- GPR_ASSERT(0 == pthread_setspecific(tls->key, (void *)value));
+intptr_t gpr_tls_set(struct gpr_pthread_thread_local* tls, intptr_t value) {
+ GPR_ASSERT(0 == pthread_setspecific(tls->key, (void*)value));
return value;
}
diff --git a/src/core/lib/support/tmpfile.h b/src/core/lib/support/tmpfile.h
index caa1d0f4d2..437d871786 100644
--- a/src/core/lib/support/tmpfile.h
+++ b/src/core/lib/support/tmpfile.h
@@ -29,7 +29,7 @@ extern "C" {
If tmp_filename is not NULL, *tmp_filename is assigned the name of the
created file and it is the responsibility of the caller to gpr_free it
unless an error occurs in which case it will be set to NULL. */
-FILE *gpr_tmpfile(const char *prefix, char **tmp_filename);
+FILE* gpr_tmpfile(const char* prefix, char** tmp_filename);
#ifdef __cplusplus
}
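A short sketch of the ownership rules spelled out above: the stream is the caller's to fclose, and the name, when requested, is the caller's to gpr_free.

    #include <stdio.h>
    #include <grpc/support/alloc.h>
    #include "src/core/lib/support/tmpfile.h"

    void write_scratch_file(void) {
      char* path = NULL;
      FILE* f = gpr_tmpfile("grpc_scratch", &path);
      if (f == NULL) return; /* on failure, path has been set to NULL */
      fprintf(f, "hello\n");
      fclose(f);
      /* path names the file on disk; remove(path) here if it should not persist */
      gpr_free(path);
    }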
diff --git a/src/core/lib/support/tmpfile_msys.cc b/src/core/lib/support/tmpfile_msys.cc
index 614c0a4a18..430e866629 100644
--- a/src/core/lib/support/tmpfile_msys.cc
+++ b/src/core/lib/support/tmpfile_msys.cc
@@ -32,8 +32,8 @@
#include "src/core/lib/support/string_windows.h"
#include "src/core/lib/support/tmpfile.h"
-FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) {
- FILE *result = NULL;
+FILE* gpr_tmpfile(const char* prefix, char** tmp_filename_out) {
+ FILE* result = NULL;
char tmp_filename[MAX_PATH];
UINT success;
diff --git a/src/core/lib/support/tmpfile_posix.cc b/src/core/lib/support/tmpfile_posix.cc
index 7ad3af0a57..2e14d28598 100644
--- a/src/core/lib/support/tmpfile_posix.cc
+++ b/src/core/lib/support/tmpfile_posix.cc
@@ -33,9 +33,9 @@
#include "src/core/lib/support/string.h"
-FILE *gpr_tmpfile(const char *prefix, char **tmp_filename) {
- FILE *result = NULL;
- char *filename_template;
+FILE* gpr_tmpfile(const char* prefix, char** tmp_filename) {
+ FILE* result = NULL;
+ char* filename_template;
int fd;
if (tmp_filename != NULL) *tmp_filename = NULL;
diff --git a/src/core/lib/support/tmpfile_windows.cc b/src/core/lib/support/tmpfile_windows.cc
index 47b4510a72..2b10bcde43 100644
--- a/src/core/lib/support/tmpfile_windows.cc
+++ b/src/core/lib/support/tmpfile_windows.cc
@@ -32,8 +32,8 @@
#include "src/core/lib/support/string_windows.h"
#include "src/core/lib/support/tmpfile.h"
-FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) {
- FILE *result = NULL;
+FILE* gpr_tmpfile(const char* prefix, char** tmp_filename_out) {
+ FILE* result = NULL;
LPTSTR template_string = NULL;
TCHAR tmp_path[MAX_PATH];
TCHAR tmp_filename[MAX_PATH];
diff --git a/src/core/lib/support/wrap_memcpy.cc b/src/core/lib/support/wrap_memcpy.cc
index c2362bf5b8..9b8608e056 100644
--- a/src/core/lib/support/wrap_memcpy.cc
+++ b/src/core/lib/support/wrap_memcpy.cc
@@ -30,11 +30,11 @@ extern "C" {
#ifdef __linux__
#if defined(__x86_64__) && !defined(GPR_MUSL_LIBC_COMPAT)
__asm__(".symver memcpy,memcpy@GLIBC_2.2.5");
-void *__wrap_memcpy(void *destination, const void *source, size_t num) {
+void* __wrap_memcpy(void* destination, const void* source, size_t num) {
return memcpy(destination, source, num);
}
#else /* !__x86_64__ */
-void *__wrap_memcpy(void *destination, const void *source, size_t num) {
+void* __wrap_memcpy(void* destination, const void* source, size_t num) {
return memmove(destination, source, num);
}
#endif
diff --git a/src/core/lib/surface/alarm.cc b/src/core/lib/surface/alarm.cc
index 16a16bfd93..bc38692ec8 100644
--- a/src/core/lib/surface/alarm.cc
+++ b/src/core/lib/surface/alarm.cc
@@ -38,14 +38,14 @@ struct grpc_alarm {
grpc_closure on_alarm;
grpc_cq_completion completion;
/** completion queue where events about this alarm will be posted */
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
/** user supplied tag */
- void *tag;
+ void* tag;
};
-static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); }
+static void alarm_ref(grpc_alarm* alarm) { gpr_ref(&alarm->refs); }
-static void alarm_unref(grpc_alarm *alarm) {
+static void alarm_unref(grpc_alarm* alarm) {
if (gpr_unref(&alarm->refs)) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (alarm->cq != NULL) {
@@ -57,8 +57,8 @@ static void alarm_unref(grpc_alarm *alarm) {
}
#ifndef NDEBUG
-static void alarm_ref_dbg(grpc_alarm *alarm, const char *reason,
- const char *file, int line) {
+static void alarm_ref_dbg(grpc_alarm* alarm, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&alarm->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -69,8 +69,8 @@ static void alarm_ref_dbg(grpc_alarm *alarm, const char *reason,
alarm_ref(alarm);
}
-static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
- const char *file, int line) {
+static void alarm_unref_dbg(grpc_alarm* alarm, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&alarm->refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -82,25 +82,25 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
}
#endif
-static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_cq_completion *c) {
- grpc_alarm *alarm = (grpc_alarm *)arg;
+static void alarm_end_completion(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_cq_completion* c) {
+ grpc_alarm* alarm = (grpc_alarm*)arg;
GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
}
-static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- grpc_alarm *alarm = (grpc_alarm *)arg;
+static void alarm_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ grpc_alarm* alarm = (grpc_alarm*)arg;
  /* We are queuing an op on the completion queue. This means the alarm's
     structure cannot be destroyed until the op is dequeued. Adding an extra
     ref here and unref'ing when the op is dequeued will achieve this. */
GRPC_ALARM_REF(alarm, "queue-end-op");
grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, error, alarm_end_completion,
- (void *)alarm, &alarm->completion);
+ (void*)alarm, &alarm->completion);
}
-grpc_alarm *grpc_alarm_create(void *reserved) {
- grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));
+grpc_alarm* grpc_alarm_create(void* reserved) {
+ grpc_alarm* alarm = (grpc_alarm*)gpr_malloc(sizeof(grpc_alarm));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
@@ -116,8 +116,8 @@ grpc_alarm *grpc_alarm_create(void *reserved) {
return alarm;
}
-void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
- gpr_timespec deadline, void *tag, void *reserved) {
+void grpc_alarm_set(grpc_alarm* alarm, grpc_completion_queue* cq,
+ gpr_timespec deadline, void* tag, void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_CQ_INTERNAL_REF(cq, "alarm");
@@ -130,13 +130,13 @@ void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) {
+void grpc_alarm_cancel(grpc_alarm* alarm, void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_timer_cancel(&exec_ctx, &alarm->alarm);
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) {
+void grpc_alarm_destroy(grpc_alarm* alarm, void* reserved) {
grpc_alarm_cancel(alarm, reserved);
GRPC_ALARM_UNREF(alarm, "alarm_destroy");
}
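The alarm surface is completion-queue driven: create allocates, set arms the timer against a cq/tag pair, and the completion (firing or cancellation) comes back through grpc_completion_queue_next with that tag. A rough end-to-end sketch, with error handling and cq shutdown/drain elided and grpc_init/grpc_shutdown assumed around it:

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    void wait_for_alarm(void) {
      grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL);
      grpc_alarm* alarm = grpc_alarm_create(NULL);
      void* tag = (void*)0x1;
      gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                           gpr_time_from_seconds(2, GPR_TIMESPAN));
      grpc_alarm_set(alarm, cq, deadline, tag, NULL);
      grpc_event ev = grpc_completion_queue_next(
          cq, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
      /* ev.tag == tag; ev.success is zero if the alarm was cancelled first */
      grpc_alarm_destroy(alarm, NULL);
      grpc_completion_queue_destroy(cq);
      (void)ev;
    }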
diff --git a/src/core/lib/surface/byte_buffer.cc b/src/core/lib/surface/byte_buffer.cc
index 7ed550ef87..a640f782a7 100644
--- a/src/core/lib/surface/byte_buffer.cc
+++ b/src/core/lib/surface/byte_buffer.cc
@@ -22,18 +22,18 @@
#include "src/core/lib/slice/slice_internal.h"
-grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
+grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slices,
size_t nslices) {
return grpc_raw_compressed_byte_buffer_create(slices, nslices,
GRPC_COMPRESS_NONE);
}
-grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
- grpc_slice *slices, size_t nslices,
+grpc_byte_buffer* grpc_raw_compressed_byte_buffer_create(
+ grpc_slice* slices, size_t nslices,
grpc_compression_algorithm compression) {
size_t i;
- grpc_byte_buffer *bb =
- (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
+ grpc_byte_buffer* bb =
+ (grpc_byte_buffer*)gpr_malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
@@ -44,10 +44,10 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
return bb;
}
-grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
- grpc_byte_buffer_reader *reader) {
- grpc_byte_buffer *bb =
- (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer));
+grpc_byte_buffer* grpc_raw_byte_buffer_from_reader(
+ grpc_byte_buffer_reader* reader) {
+ grpc_byte_buffer* bb =
+ (grpc_byte_buffer*)gpr_malloc(sizeof(grpc_byte_buffer));
grpc_slice slice;
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
@@ -59,7 +59,7 @@ grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
return bb;
}
-grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
+grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) {
switch (bb->type) {
case GRPC_BB_RAW:
return grpc_raw_compressed_byte_buffer_create(
@@ -69,7 +69,7 @@ grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
GPR_UNREACHABLE_CODE(return NULL);
}
-void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
+void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
if (!bb) return;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
switch (bb->type) {
@@ -81,7 +81,7 @@ void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
grpc_exec_ctx_finish(&exec_ctx);
}
-size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
+size_t grpc_byte_buffer_length(grpc_byte_buffer* bb) {
switch (bb->type) {
case GRPC_BB_RAW:
return bb->data.raw.slice_buffer.length;
diff --git a/src/core/lib/surface/byte_buffer_reader.cc b/src/core/lib/surface/byte_buffer_reader.cc
index 87bd3239c0..001227a2aa 100644
--- a/src/core/lib/surface/byte_buffer_reader.cc
+++ b/src/core/lib/surface/byte_buffer_reader.cc
@@ -29,7 +29,7 @@
#include "src/core/lib/compression/message_compress.h"
#include "src/core/lib/slice/slice_internal.h"
-static int is_compressed(grpc_byte_buffer *buffer) {
+static int is_compressed(grpc_byte_buffer* buffer) {
switch (buffer->type) {
case GRPC_BB_RAW:
if (buffer->data.raw.compression == GRPC_COMPRESS_NONE) {
@@ -40,8 +40,8 @@ static int is_compressed(grpc_byte_buffer *buffer) {
return 1 /* GPR_TRUE */;
}
-int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
- grpc_byte_buffer *buffer) {
+int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+ grpc_byte_buffer* buffer) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_slice_buffer decompressed_slices_buffer;
reader->buffer_in = buffer;
@@ -76,7 +76,7 @@ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
return 1;
}
-void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
+void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader) {
switch (reader->buffer_in->type) {
case GRPC_BB_RAW:
/* keeping the same if-else structure as in the init function */
@@ -87,11 +87,11 @@ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
}
}
-int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
- grpc_slice *slice) {
+int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
+ grpc_slice* slice) {
switch (reader->buffer_in->type) {
case GRPC_BB_RAW: {
- grpc_slice_buffer *slice_buffer;
+ grpc_slice_buffer* slice_buffer;
slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
if (reader->current.index < slice_buffer->count) {
*slice = grpc_slice_ref_internal(
@@ -105,12 +105,12 @@ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
return 0;
}
-grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) {
+grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader* reader) {
grpc_slice in_slice;
size_t bytes_read = 0;
const size_t input_size = grpc_byte_buffer_length(reader->buffer_out);
grpc_slice out_slice = GRPC_SLICE_MALLOC(input_size);
- uint8_t *const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */
+ uint8_t* const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) {
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index 8216aa0ec8..9fd4fdbef9 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -86,7 +86,7 @@ typedef enum {
typedef struct {
bool is_set;
- grpc_error *error;
+ grpc_error* error;
} received_status;
static gpr_atm pack_received_status(received_status r) {
@@ -97,14 +97,14 @@ static received_status unpack_received_status(gpr_atm atm) {
if ((atm & 1) == 0) {
return {false, GRPC_ERROR_NONE};
} else {
- return {true, (grpc_error *)(atm & ~(gpr_atm)1)};
+ return {true, (grpc_error*)(atm & ~(gpr_atm)1)};
}
}
#define MAX_ERRORS_PER_BATCH 4
typedef struct batch_control {
- grpc_call *call;
+ grpc_call* call;
/* Share memory for cq_completion and notify_tag as they are never needed
simultaneously. Each byte used in this data structure count as six bytes
per call, so any savings we can make are worthwhile,
@@ -120,7 +120,7 @@ typedef struct batch_control {
\a is_closure is true, \a tag indicates a closure to be invoked;
otherwise, \a tag indicates the tag to be used in the notification to
be sent to the completion queue. */
- void *tag;
+ void* tag;
bool is_closure;
} notify_tag;
} completion_data;
@@ -128,7 +128,7 @@ typedef struct batch_control {
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
- grpc_error *errors[MAX_ERRORS_PER_BATCH];
+ grpc_error* errors[MAX_ERRORS_PER_BATCH];
gpr_atm num_errors;
grpc_transport_stream_op_batch op;
@@ -136,16 +136,16 @@ typedef struct batch_control {
typedef struct {
gpr_mu child_list_mu;
- grpc_call *first_child;
+ grpc_call* first_child;
} parent_call;
typedef struct {
- grpc_call *parent;
+ grpc_call* parent;
/** siblings: children of the same parent form a list, and this list is
protected under
parent->mu */
- grpc_call *sibling_next;
- grpc_call *sibling_prev;
+ grpc_call* sibling_next;
+ grpc_call* sibling_prev;
} child_call;
#define RECV_NONE ((gpr_atm)0)
@@ -153,14 +153,14 @@ typedef struct {
struct grpc_call {
gpr_refcount ext_ref;
- gpr_arena *arena;
+ gpr_arena* arena;
grpc_call_combiner call_combiner;
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
grpc_polling_entity pollent;
- grpc_channel *channel;
+ grpc_channel* channel;
gpr_timespec start_time;
/* parent_call* */ gpr_atm parent_call_atm;
- child_call *child;
+ child_call* child;
/* client or server call */
bool is_client;
@@ -178,7 +178,7 @@ struct grpc_call {
gpr_atm any_ops_sent_atm;
gpr_atm received_final_op_atm;
- batch_control *active_batches[MAX_CONCURRENT_BATCHES];
+ batch_control* active_batches[MAX_CONCURRENT_BATCHES];
grpc_transport_stream_op_batch_payload stream_op_payload;
/* first idx: is_receiving, second idx: is_trailing */
@@ -186,7 +186,7 @@ struct grpc_call {
/* Buffered read metadata waiting to be returned to the application.
Element 0 is initial metadata, element 1 is trailing metadata. */
- grpc_metadata_array *buffered_metadata[2];
+ grpc_metadata_array* buffered_metadata[2];
grpc_metadata compression_md;
@@ -220,8 +220,8 @@ struct grpc_call {
grpc_slice_buffer_stream sending_stream;
- grpc_byte_stream *receiving_stream;
- grpc_byte_buffer **receiving_buffer;
+ grpc_byte_stream* receiving_stream;
+ grpc_byte_buffer** receiving_buffer;
grpc_slice receiving_slice;
grpc_closure receiving_slice_ready;
grpc_closure receiving_stream_ready;
@@ -232,11 +232,11 @@ struct grpc_call {
union {
struct {
- grpc_status_code *status;
- grpc_slice *status_details;
+ grpc_status_code* status;
+ grpc_slice* status_details;
} client;
struct {
- int *cancelled;
+ int* cancelled;
} server;
} final_op;
@@ -264,78 +264,78 @@ grpc_tracer_flag grpc_call_error_trace =
grpc_tracer_flag grpc_compression_trace =
GRPC_TRACER_INITIALIZER(false, "compression");
-#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))
-#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call *)(call_stack)) - 1)
+#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack*)((call) + 1))
+#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call*)(call_stack)) - 1)
#define CALL_ELEM_FROM_CALL(call, idx) \
grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx)
#define CALL_FROM_TOP_ELEM(top_elem) \
CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
-static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *op,
- grpc_closure *start_batch_closure);
-static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+static void execute_batch(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_transport_stream_op_batch* op,
+ grpc_closure* start_batch_closure);
+static void cancel_with_status(grpc_exec_ctx* exec_ctx, grpc_call* c,
status_source source, grpc_status_code status,
- const char *description);
-static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
- status_source source, grpc_error *error);
-static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack,
- grpc_error *error);
-static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
- grpc_error *error);
-static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
+ const char* description);
+static void cancel_with_error(grpc_exec_ctx* exec_ctx, grpc_call* c,
+ status_source source, grpc_error* error);
+static void destroy_call(grpc_exec_ctx* exec_ctx, void* call_stack,
+ grpc_error* error);
+static void receiving_slice_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
+ grpc_error* error);
+static void get_final_status(grpc_exec_ctx* exec_ctx, grpc_call* call,
void (*set_value)(grpc_status_code code,
- void *user_data),
- void *set_value_user_data, grpc_slice *details);
-static void set_status_value_directly(grpc_status_code status, void *dest);
-static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
- status_source source, grpc_error *error);
-static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl);
-static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl);
-static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error, bool has_cancelled);
-
-static void add_init_error(grpc_error **composite, grpc_error *new_err) {
+ void* user_data),
+ void* set_value_user_data, grpc_slice* details);
+static void set_status_value_directly(grpc_status_code status, void* dest);
+static void set_status_from_error(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ status_source source, grpc_error* error);
+static void process_data_after_md(grpc_exec_ctx* exec_ctx, batch_control* bctl);
+static void post_batch_completion(grpc_exec_ctx* exec_ctx, batch_control* bctl);
+static void add_batch_error(grpc_exec_ctx* exec_ctx, batch_control* bctl,
+ grpc_error* error, bool has_cancelled);
+
+static void add_init_error(grpc_error** composite, grpc_error* new_err) {
if (new_err == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
*composite = grpc_error_add_child(*composite, new_err);
}
-void *grpc_call_arena_alloc(grpc_call *call, size_t size) {
+void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
return gpr_arena_alloc(call->arena, size);
}
-static parent_call *get_or_create_parent_call(grpc_call *call) {
- parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+static parent_call* get_or_create_parent_call(grpc_call* call) {
+ parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
if (p == NULL) {
- p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p));
+ p = (parent_call*)gpr_arena_alloc(call->arena, sizeof(*p));
gpr_mu_init(&p->child_list_mu);
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
gpr_mu_destroy(&p->child_list_mu);
- p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+ p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
}
}
return p;
}
-static parent_call *get_parent_call(grpc_call *call) {
- return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm);
+static parent_call* get_parent_call(grpc_call* call) {
+ return (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
}
-grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
- const grpc_call_create_args *args,
- grpc_call **out_call) {
+grpc_error* grpc_call_create(grpc_exec_ctx* exec_ctx,
+ const grpc_call_create_args* args,
+ grpc_call** out_call) {
size_t i, j;
- grpc_error *error = GRPC_ERROR_NONE;
- grpc_channel_stack *channel_stack =
+ grpc_error* error = GRPC_ERROR_NONE;
+ grpc_channel_stack* channel_stack =
grpc_channel_get_channel_stack(args->channel);
- grpc_call *call;
+ grpc_call* call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
- gpr_arena *arena = gpr_arena_create(initial_size);
- call = (grpc_call *)gpr_arena_alloc(
+ gpr_arena* arena = gpr_arena_create(initial_size);
+ call = (grpc_call*)gpr_arena_alloc(
arena, sizeof(grpc_call) + channel_stack->call_stack_size);
gpr_ref_init(&call->ext_ref, 1);
call->arena = arena;
@@ -380,15 +380,15 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
bool immediately_cancel = false;
if (args->parent != NULL) {
- child_call *cc = call->child =
- (child_call *)gpr_arena_alloc(arena, sizeof(child_call));
+ child_call* cc = call->child =
+ (child_call*)gpr_arena_alloc(arena, sizeof(child_call));
call->child->parent = args->parent;
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
GPR_ASSERT(!args->parent->is_client);
- parent_call *pc = get_or_create_parent_call(args->parent);
+ parent_call* pc = get_or_create_parent_call(args->parent);
gpr_mu_lock(&pc->child_list_mu);
@@ -478,8 +478,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
return error;
}
-void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_completion_queue *cq) {
+void grpc_call_set_completion_queue(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_completion_queue* cq) {
GPR_ASSERT(cq);
if (grpc_polling_entity_pollset_set(&call->pollent) != NULL) {
@@ -495,34 +495,34 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
#ifndef NDEBUG
#define REF_REASON reason
-#define REF_ARG , const char *reason
+#define REF_ARG , const char* reason
#else
#define REF_REASON ""
#define REF_ARG
#endif
-void grpc_call_internal_ref(grpc_call *c REF_ARG) {
+void grpc_call_internal_ref(grpc_call* c REF_ARG) {
GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
}
-void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
+void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* c REF_ARG) {
GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
-static void release_call(grpc_exec_ctx *exec_ctx, void *call,
- grpc_error *error) {
- grpc_call *c = (grpc_call *)call;
- grpc_channel *channel = c->channel;
+static void release_call(grpc_exec_ctx* exec_ctx, void* call,
+ grpc_error* error) {
+ grpc_call* c = (grpc_call*)call;
+ grpc_channel* channel = c->channel;
grpc_call_combiner_destroy(&c->call_combiner);
- gpr_free((char *)c->peer_string);
+ gpr_free((char*)c->peer_string);
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
}
-static void set_status_value_directly(grpc_status_code status, void *dest);
-static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
- grpc_error *error) {
+static void set_status_value_directly(grpc_status_code status, void* dest);
+static void destroy_call(grpc_exec_ctx* exec_ctx, void* call,
+ grpc_error* error) {
size_t i;
int ii;
- grpc_call *c = (grpc_call *)call;
+ grpc_call* c = (grpc_call*)call;
GPR_TIMER_BEGIN("destroy_call", 0);
for (i = 0; i < 2; i++) {
grpc_metadata_batch_destroy(
@@ -531,7 +531,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
}
- parent_call *pc = get_parent_call(c);
+ parent_call* pc = get_parent_call(c);
if (pc != NULL) {
gpr_mu_destroy(&pc->child_list_mu);
}
@@ -563,19 +563,19 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
GPR_TIMER_END("destroy_call", 0);
}
-void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); }
+void grpc_call_ref(grpc_call* c) { gpr_ref(&c->ext_ref); }
-void grpc_call_unref(grpc_call *c) {
+void grpc_call_unref(grpc_call* c) {
if (!gpr_unref(&c->ext_ref)) return;
- child_call *cc = c->child;
+ child_call* cc = c->child;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_unref", 0);
GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
if (cc) {
- parent_call *pc = get_parent_call(cc->parent);
+ parent_call* pc = get_parent_call(cc->parent);
gpr_mu_lock(&pc->child_list_mu);
if (c == pc->first_child) {
pc->first_child = cc->sibling_next;
@@ -608,7 +608,7 @@ void grpc_call_unref(grpc_call *c) {
GPR_TIMER_END("grpc_call_unref", 0);
}
-grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
+grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
GPR_ASSERT(!reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -620,12 +620,12 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
// This is called via the call combiner to start sending a batch down
// the filter stack.
-static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *ignored) {
- grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg;
- grpc_call *call = (grpc_call *)batch->handler_private.extra_arg;
+static void execute_batch_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* ignored) {
+ grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
+ grpc_call* call = (grpc_call*)batch->handler_private.extra_arg;
GPR_TIMER_BEGIN("execute_batch", 0);
- grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+ grpc_call_element* elem = CALL_ELEM_FROM_CALL(call, 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
GPR_TIMER_END("execute_batch", 0);
@@ -633,9 +633,9 @@ static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg,
// start_batch_closure points to a caller-allocated closure to be used
// for entering the call combiner.
-static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_transport_stream_op_batch *batch,
- grpc_closure *start_batch_closure) {
+static void execute_batch(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_transport_stream_op_batch* batch,
+ grpc_closure* start_batch_closure) {
batch->handler_private.extra_arg = call;
GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
grpc_schedule_on_exec_ctx);
@@ -643,15 +643,15 @@ static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call,
GRPC_ERROR_NONE, "executing batch");
}
-char *grpc_call_get_peer(grpc_call *call) {
- char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string);
+char* grpc_call_get_peer(grpc_call* call) {
+ char* peer_string = (char*)gpr_atm_acq_load(&call->peer_string);
if (peer_string != NULL) return gpr_strdup(peer_string);
peer_string = grpc_channel_get_target(call->channel);
if (peer_string != NULL) return peer_string;
return gpr_strdup("unknown");
}
-grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
+grpc_call* grpc_call_from_top_element(grpc_call_element* elem) {
return CALL_FROM_TOP_ELEM(elem);
}
@@ -659,10 +659,10 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
* CANCELLATION
*/
-grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
+grpc_call_error grpc_call_cancel_with_status(grpc_call* c,
grpc_status_code status,
- const char *description,
- void *reserved) {
+ const char* description,
+ void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE(
"grpc_call_cancel_with_status("
@@ -676,24 +676,24 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
}
typedef struct {
- grpc_call *call;
+ grpc_call* call;
grpc_closure start_batch;
grpc_closure finish_batch;
} cancel_state;
// The on_complete callback used when sending a cancel_stream batch down
// the filter stack. Yields the call combiner when the batch is done.
-static void done_termination(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- cancel_state *state = (cancel_state *)arg;
+static void done_termination(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ cancel_state* state = (cancel_state*)arg;
GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
"on_complete for cancel_stream op");
GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
gpr_free(state);
}
-static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
- status_source source, grpc_error *error) {
+static void cancel_with_error(grpc_exec_ctx* exec_ctx, grpc_call* c,
+ status_source source, grpc_error* error) {
GRPC_CALL_INTERNAL_REF(c, "termination");
// Inform the call combiner of the cancellation, so that it can cancel
// any in-flight asynchronous actions that may be holding the call
@@ -701,19 +701,19 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
// down the filter stack in a timely manner.
grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
- cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state));
+ cancel_state* state = (cancel_state*)gpr_malloc(sizeof(*state));
state->call = c;
GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
grpc_schedule_on_exec_ctx);
- grpc_transport_stream_op_batch *op =
+ grpc_transport_stream_op_batch* op =
grpc_make_transport_stream_op(&state->finish_batch);
op->cancel_stream = true;
op->payload->cancel_stream.cancel_error = error;
execute_batch(exec_ctx, c, op, &state->start_batch);
}
-static grpc_error *error_from_status(grpc_status_code status,
- const char *description) {
+static grpc_error* error_from_status(grpc_status_code status,
+ const char* description) {
// copying 'description' is needed to ensure the grpc_call_cancel_with_status
// guarantee that can be short-lived.
return grpc_error_set_int(
@@ -723,9 +723,9 @@ static grpc_error *error_from_status(grpc_status_code status,
GRPC_ERROR_INT_GRPC_STATUS, status);
}
-static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
+static void cancel_with_status(grpc_exec_ctx* exec_ctx, grpc_call* c,
status_source source, grpc_status_code status,
- const char *description) {
+ const char* description) {
cancel_with_error(exec_ctx, c, source,
error_from_status(status, description));
}
@@ -734,12 +734,12 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c,
* FINAL STATUS CODE MANIPULATION
*/
-static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_error *error, bool allow_ok_status,
+static bool get_final_status_from(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_error* error, bool allow_ok_status,
void (*set_value)(grpc_status_code code,
- void *user_data),
- void *set_value_user_data,
- grpc_slice *details) {
+ void* user_data),
+ void* set_value_user_data,
+ grpc_slice* details) {
grpc_status_code code;
grpc_slice slice = grpc_empty_slice();
grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice,
@@ -755,10 +755,10 @@ static bool get_final_status_from(grpc_exec_ctx *exec_ctx, grpc_call *call,
return true;
}
-static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
+static void get_final_status(grpc_exec_ctx* exec_ctx, grpc_call* call,
void (*set_value)(grpc_status_code code,
- void *user_data),
- void *set_value_user_data, grpc_slice *details) {
+ void* user_data),
+ void* set_value_user_data, grpc_slice* details) {
int i;
received_status status[STATUS_SOURCE_COUNT];
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
@@ -806,8 +806,8 @@ static void get_final_status(grpc_exec_ctx *exec_ctx, grpc_call *call,
}
}
-static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
- status_source source, grpc_error *error) {
+static void set_status_from_error(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ status_source source, grpc_error* error) {
if (!gpr_atm_rel_cas(&call->status[source],
pack_received_status({false, GRPC_ERROR_NONE}),
pack_received_status({true, error}))) {
@@ -820,52 +820,52 @@ static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call,
*/
static void set_incoming_compression_algorithm(
- grpc_call *call, grpc_compression_algorithm algo) {
+ grpc_call* call, grpc_compression_algorithm algo) {
GPR_ASSERT(algo < GRPC_COMPRESS_ALGORITHMS_COUNT);
call->incoming_compression_algorithm = algo;
}
static void set_incoming_stream_compression_algorithm(
- grpc_call *call, grpc_stream_compression_algorithm algo) {
+ grpc_call* call, grpc_stream_compression_algorithm algo) {
GPR_ASSERT(algo < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT);
call->incoming_stream_compression_algorithm = algo;
}
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
- grpc_call *call) {
+ grpc_call* call) {
grpc_compression_algorithm algorithm;
algorithm = call->incoming_compression_algorithm;
return algorithm;
}
static grpc_compression_algorithm compression_algorithm_for_level_locked(
- grpc_call *call, grpc_compression_level level) {
+ grpc_call* call, grpc_compression_level level) {
return grpc_compression_algorithm_for_level(level,
call->encodings_accepted_by_peer);
}
static grpc_stream_compression_algorithm
stream_compression_algorithm_for_level_locked(
- grpc_call *call, grpc_stream_compression_level level) {
+ grpc_call* call, grpc_stream_compression_level level) {
return grpc_stream_compression_algorithm_for_level(
level, call->stream_encodings_accepted_by_peer);
}
-uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) {
+uint32_t grpc_call_test_only_get_message_flags(grpc_call* call) {
uint32_t flags;
flags = call->test_only_last_message_flags;
return flags;
}
-static void destroy_encodings_accepted_by_peer(void *p) { return; }
+static void destroy_encodings_accepted_by_peer(void* p) { return; }
-static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
- grpc_call *call, grpc_mdelem mdel) {
+static void set_encodings_accepted_by_peer(grpc_exec_ctx* exec_ctx,
+ grpc_call* call, grpc_mdelem mdel) {
size_t i;
grpc_compression_algorithm algorithm;
grpc_slice_buffer accept_encoding_parts;
grpc_slice accept_encoding_slice;
- void *accepted_user_data;
+ void* accepted_user_data;
accepted_user_data =
grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
@@ -889,7 +889,7 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
&algorithm)) {
GPR_BITSET(&call->encodings_accepted_by_peer, algorithm);
} else {
- char *accept_encoding_entry_str =
+ char* accept_encoding_entry_str =
grpc_slice_to_c_string(accept_encoding_entry_slice);
gpr_log(GPR_ERROR,
"Invalid entry in accept encoding metadata: '%s'. Ignoring.",
@@ -902,17 +902,17 @@ static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
grpc_mdelem_set_user_data(
mdel, destroy_encodings_accepted_by_peer,
- (void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
+ (void*)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
}
-static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
- grpc_call *call,
+static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx* exec_ctx,
+ grpc_call* call,
grpc_mdelem mdel) {
size_t i;
grpc_stream_compression_algorithm algorithm;
grpc_slice_buffer accept_encoding_parts;
grpc_slice accept_encoding_slice;
- void *accepted_user_data;
+ void* accepted_user_data;
accepted_user_data =
grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
@@ -935,7 +935,7 @@ static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
&algorithm)) {
GPR_BITSET(&call->stream_encodings_accepted_by_peer, algorithm);
} else {
- char *accept_encoding_entry_str =
+ char* accept_encoding_entry_str =
grpc_slice_to_c_string(accept_encoding_entry_slice);
gpr_log(GPR_ERROR,
"Invalid entry in accept encoding metadata: '%s'. Ignoring.",
@@ -948,52 +948,52 @@ static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx,
grpc_mdelem_set_user_data(
mdel, destroy_encodings_accepted_by_peer,
- (void *)(((uintptr_t)call->stream_encodings_accepted_by_peer) + 1));
+ (void*)(((uintptr_t)call->stream_encodings_accepted_by_peer) + 1));
}
-uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call) {
uint32_t encodings_accepted_by_peer;
encodings_accepted_by_peer = call->encodings_accepted_by_peer;
return encodings_accepted_by_peer;
}
uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
- grpc_call *call) {
+ grpc_call* call) {
uint32_t stream_encodings_accepted_by_peer;
stream_encodings_accepted_by_peer = call->stream_encodings_accepted_by_peer;
return stream_encodings_accepted_by_peer;
}
grpc_stream_compression_algorithm
-grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call) {
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call) {
return call->incoming_stream_compression_algorithm;
}
-static grpc_linked_mdelem *linked_from_md(const grpc_metadata *md) {
- return (grpc_linked_mdelem *)&md->internal_data;
+static grpc_linked_mdelem* linked_from_md(const grpc_metadata* md) {
+ return (grpc_linked_mdelem*)&md->internal_data;
}
-static grpc_metadata *get_md_elem(grpc_metadata *metadata,
- grpc_metadata *additional_metadata, int i,
+static grpc_metadata* get_md_elem(grpc_metadata* metadata,
+ grpc_metadata* additional_metadata, int i,
int count) {
- grpc_metadata *res =
+ grpc_metadata* res =
i < count ? &metadata[i] : &additional_metadata[i - count];
GPR_ASSERT(res);
return res;
}
static int prepare_application_metadata(
- grpc_exec_ctx *exec_ctx, grpc_call *call, int count,
- grpc_metadata *metadata, int is_trailing, int prepend_extra_metadata,
- grpc_metadata *additional_metadata, int additional_metadata_count) {
+ grpc_exec_ctx* exec_ctx, grpc_call* call, int count,
+ grpc_metadata* metadata, int is_trailing, int prepend_extra_metadata,
+ grpc_metadata* additional_metadata, int additional_metadata_count) {
int total_count = count + additional_metadata_count;
int i;
- grpc_metadata_batch *batch =
+ grpc_metadata_batch* batch =
&call->metadata_batch[0 /* is_receiving */][is_trailing];
for (i = 0; i < total_count; i++) {
- const grpc_metadata *md =
+ const grpc_metadata* md =
get_md_elem(metadata, additional_metadata, i, count);
- grpc_linked_mdelem *l = linked_from_md(md);
+ grpc_linked_mdelem* l = linked_from_md(md);
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
if (!GRPC_LOG_IF_ERROR("validate_metadata",
grpc_validate_header_key_is_legal(md->key))) {
@@ -1004,13 +1004,13 @@ static int prepare_application_metadata(
grpc_validate_header_nonbin_value_is_legal(md->value))) {
break;
}
- l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata *)md);
+ l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata*)md);
}
if (i != total_count) {
for (int j = 0; j < i; j++) {
- const grpc_metadata *md =
+ const grpc_metadata* md =
get_md_elem(metadata, additional_metadata, j, count);
- grpc_linked_mdelem *l = linked_from_md(md);
+ grpc_linked_mdelem* l = linked_from_md(md);
GRPC_MDELEM_UNREF(exec_ctx, l->md);
}
return 0;
@@ -1027,9 +1027,9 @@ static int prepare_application_metadata(
}
}
for (i = 0; i < total_count; i++) {
- grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count);
- grpc_linked_mdelem *l = linked_from_md(md);
- grpc_error *error = grpc_metadata_batch_link_tail(exec_ctx, batch, l);
+ grpc_metadata* md = get_md_elem(metadata, additional_metadata, i, count);
+ grpc_linked_mdelem* l = linked_from_md(md);
+ grpc_error* error = grpc_metadata_batch_link_tail(exec_ctx, batch, l);
if (error != GRPC_ERROR_NONE) {
GRPC_MDELEM_UNREF(exec_ctx, l->md);
}
@@ -1044,11 +1044,11 @@ static int prepare_application_metadata(
as metadata cannot store a 0 value (which is used as OK for grpc_status_codes
*/
#define STATUS_OFFSET 1
-static void destroy_status(void *ignored) {}
+static void destroy_status(void* ignored) {}
static uint32_t decode_status(grpc_mdelem md) {
uint32_t status;
- void *user_data;
+ void* user_data;
if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) return 0;
if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) return 1;
if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) return 2;
@@ -1060,7 +1060,7 @@ static uint32_t decode_status(grpc_mdelem md) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
grpc_mdelem_set_user_data(md, destroy_status,
- (void *)(intptr_t)(status + STATUS_OFFSET));
+ (void*)(intptr_t)(status + STATUS_OFFSET));
}
return status;
}
@@ -1069,7 +1069,7 @@ static grpc_compression_algorithm decode_compression(grpc_mdelem md) {
grpc_compression_algorithm algorithm =
grpc_compression_algorithm_from_slice(GRPC_MDVALUE(md));
if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) {
- char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid incoming compression algorithm: '%s'. Interpreting "
"incoming data as uncompressed.",
@@ -1085,7 +1085,7 @@ static grpc_stream_compression_algorithm decode_stream_compression(
grpc_stream_compression_algorithm algorithm =
grpc_stream_compression_algorithm_from_slice(GRPC_MDVALUE(md));
if (algorithm == GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
- char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
+ char* md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR,
"Invalid incoming stream compression algorithm: '%s'. Interpreting "
"incoming data as uncompressed.",
@@ -1096,20 +1096,20 @@ static grpc_stream_compression_algorithm decode_stream_compression(
return algorithm;
}
-static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
+static void publish_app_metadata(grpc_call* call, grpc_metadata_batch* b,
int is_trailing) {
if (b->list.count == 0) return;
GPR_TIMER_BEGIN("publish_app_metadata", 0);
- grpc_metadata_array *dest;
- grpc_metadata *mdusr;
+ grpc_metadata_array* dest;
+ grpc_metadata* mdusr;
dest = call->buffered_metadata[is_trailing];
if (dest->count + b->list.count > dest->capacity) {
dest->capacity =
GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2);
- dest->metadata = (grpc_metadata *)gpr_realloc(
+ dest->metadata = (grpc_metadata*)gpr_realloc(
dest->metadata, sizeof(grpc_metadata) * dest->capacity);
}
- for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) {
+ for (grpc_linked_mdelem* l = b->list.head; l != NULL; l = l->next) {
mdusr = &dest->metadata[dest->count++];
/* we pass back borrowed slices that are valid whilst the call is valid */
mdusr->key = GRPC_MDKEY(l->md);
@@ -1118,8 +1118,8 @@ static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b,
GPR_TIMER_END("publish_app_metadata", 0);
}
-static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_metadata_batch *b) {
+static void recv_initial_filter(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_metadata_batch* b) {
if (b->idx.named.content_encoding != NULL) {
if (b->idx.named.grpc_encoding != NULL) {
gpr_log(GPR_ERROR,
@@ -1156,12 +1156,12 @@ static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
publish_app_metadata(call, b, false);
}
-static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
- grpc_metadata_batch *b) {
- grpc_call *call = (grpc_call *)args;
+static void recv_trailing_filter(grpc_exec_ctx* exec_ctx, void* args,
+ grpc_metadata_batch* b) {
+ grpc_call* call = (grpc_call*)args;
if (b->idx.named.grpc_status != NULL) {
uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
- grpc_error *error =
+ grpc_error* error =
status_code == GRPC_STATUS_OK
? GRPC_ERROR_NONE
: grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -1183,7 +1183,7 @@ static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args,
publish_app_metadata(call, b, true);
}
-grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
+grpc_call_stack* grpc_call_get_call_stack(grpc_call* call) {
return CALL_STACK_FROM_CALL(call);
}
@@ -1191,12 +1191,12 @@ grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
* BATCH API IMPLEMENTATION
*/
-static void set_status_value_directly(grpc_status_code status, void *dest) {
- *(grpc_status_code *)dest = status;
+static void set_status_value_directly(grpc_status_code status, void* dest) {
+ *(grpc_status_code*)dest = status;
}
-static void set_cancelled_value(grpc_status_code status, void *dest) {
- *(int *)dest = (status != GRPC_STATUS_OK);
+static void set_cancelled_value(grpc_status_code status, void* dest) {
+ *(int*)dest = (status != GRPC_STATUS_OK);
}
static bool are_write_flags_valid(uint32_t flags) {
@@ -1236,16 +1236,16 @@ static int batch_slot_for_op(grpc_op_type type) {
GPR_UNREACHABLE_CODE(return 123456789);
}
-static batch_control *allocate_batch_control(grpc_call *call,
- const grpc_op *ops,
+static batch_control* allocate_batch_control(grpc_call* call,
+ const grpc_op* ops,
size_t num_ops) {
int slot = batch_slot_for_op(ops[0].op);
- batch_control **pslot = &call->active_batches[slot];
+ batch_control** pslot = &call->active_batches[slot];
if (*pslot == NULL) {
*pslot =
- (batch_control *)gpr_arena_alloc(call->arena, sizeof(batch_control));
+ (batch_control*)gpr_arena_alloc(call->arena, sizeof(batch_control));
}
- batch_control *bctl = *pslot;
+ batch_control* bctl = *pslot;
if (bctl->call != NULL) {
return NULL;
}
@@ -1255,26 +1255,26 @@ static batch_control *allocate_batch_control(grpc_call *call,
return bctl;
}
-static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_cq_completion *storage) {
- batch_control *bctl = (batch_control *)user_data;
- grpc_call *call = bctl->call;
+static void finish_batch_completion(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_cq_completion* storage) {
+ batch_control* bctl = (batch_control*)user_data;
+ grpc_call* call = bctl->call;
bctl->call = NULL;
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
-static grpc_error *consolidate_batch_errors(batch_control *bctl) {
+static grpc_error* consolidate_batch_errors(batch_control* bctl) {
size_t n = (size_t)gpr_atm_acq_load(&bctl->num_errors);
if (n == 0) {
return GRPC_ERROR_NONE;
} else if (n == 1) {
/* Skip creating a composite error in the case that only one error was
logged */
- grpc_error *e = bctl->errors[0];
+ grpc_error* e = bctl->errors[0];
bctl->errors[0] = NULL;
return e;
} else {
- grpc_error *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ grpc_error* error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Call batch failed", bctl->errors, n);
for (size_t i = 0; i < n; i++) {
GRPC_ERROR_UNREF(bctl->errors[i]);
@@ -1284,11 +1284,11 @@ static grpc_error *consolidate_batch_errors(batch_control *bctl) {
}
}
-static void post_batch_completion(grpc_exec_ctx *exec_ctx,
- batch_control *bctl) {
- grpc_call *next_child_call;
- grpc_call *call = bctl->call;
- grpc_error *error = consolidate_batch_errors(bctl);
+static void post_batch_completion(grpc_exec_ctx* exec_ctx,
+ batch_control* bctl) {
+ grpc_call* next_child_call;
+ grpc_call* call = bctl->call;
+ grpc_error* error = consolidate_batch_errors(bctl);
if (bctl->op.send_initial_metadata) {
grpc_metadata_batch_destroy(
@@ -1304,15 +1304,15 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
&call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
}
if (bctl->op.recv_trailing_metadata) {
- grpc_metadata_batch *md =
+ grpc_metadata_batch* md =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
recv_trailing_filter(exec_ctx, call, md);
/* propagate cancellation to any interested children */
gpr_atm_rel_store(&call->received_final_op_atm, 1);
- parent_call *pc = get_parent_call(call);
+ parent_call* pc = get_parent_call(call);
if (pc != NULL) {
- grpc_call *child;
+ grpc_call* child;
gpr_mu_lock(&pc->child_list_mu);
child = pc->first_child;
if (child != NULL) {
@@ -1352,7 +1352,7 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
/* unrefs bctl->error */
bctl->call = NULL;
GRPC_CLOSURE_RUN(
- exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error);
+ exec_ctx, (grpc_closure*)bctl->completion_data.notify_tag.tag, error);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
} else {
/* unrefs bctl->error */
@@ -1362,16 +1362,16 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx,
}
}
-static void finish_batch_step(grpc_exec_ctx *exec_ctx, batch_control *bctl) {
+static void finish_batch_step(grpc_exec_ctx* exec_ctx, batch_control* bctl) {
if (gpr_unref(&bctl->steps_to_complete)) {
post_batch_completion(exec_ctx, bctl);
}
}
-static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
- batch_control *bctl) {
- grpc_error *error;
- grpc_call *call = bctl->call;
+static void continue_receiving_slices(grpc_exec_ctx* exec_ctx,
+ batch_control* bctl) {
+ grpc_error* error;
+ grpc_call* call = bctl->call;
for (;;) {
size_t remaining = call->receiving_stream->length -
(*call->receiving_buffer)->data.raw.slice_buffer.length;
@@ -1404,11 +1404,11 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx,
}
}
-static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
- grpc_error *error) {
- batch_control *bctl = (batch_control *)bctlp;
- grpc_call *call = bctl->call;
- grpc_byte_stream *bs = call->receiving_stream;
+static void receiving_slice_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
+ grpc_error* error) {
+ batch_control* bctl = (batch_control*)bctlp;
+ grpc_call* call = bctl->call;
+ grpc_byte_stream* bs = call->receiving_stream;
bool release_error = false;
if (error == GRPC_ERROR_NONE) {
@@ -1441,9 +1441,9 @@ static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
}
}
-static void process_data_after_md(grpc_exec_ctx *exec_ctx,
- batch_control *bctl) {
- grpc_call *call = bctl->call;
+static void process_data_after_md(grpc_exec_ctx* exec_ctx,
+ batch_control* bctl) {
+ grpc_call* call = bctl->call;
if (call->receiving_stream == NULL) {
*call->receiving_buffer = NULL;
call->receiving_message = 0;
@@ -1463,10 +1463,10 @@ static void process_data_after_md(grpc_exec_ctx *exec_ctx,
}
}
-static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
- grpc_error *error) {
- batch_control *bctl = (batch_control *)bctlp;
- grpc_call *call = bctl->call;
+static void receiving_stream_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
+ grpc_error* error) {
+ batch_control* bctl = (batch_control*)bctlp;
+ grpc_call* call = bctl->call;
if (error != GRPC_ERROR_NONE) {
if (call->receiving_stream != NULL) {
grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
@@ -1488,24 +1488,24 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp,
// The recv_message_ready callback used when sending a batch containing
// a recv_message op down the filter stack. Yields the call combiner
// before processing the received message.
-static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx,
- void *bctlp,
- grpc_error *error) {
- batch_control *bctl = (batch_control *)bctlp;
- grpc_call *call = bctl->call;
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx* exec_ctx,
+ void* bctlp,
+ grpc_error* error) {
+ batch_control* bctl = (batch_control*)bctlp;
+ grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
receiving_stream_ready(exec_ctx, bctlp, error);
}
-static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
- batch_control *bctl) {
- grpc_call *call = bctl->call;
+static void validate_filtered_metadata(grpc_exec_ctx* exec_ctx,
+ batch_control* bctl) {
+ grpc_call* call = bctl->call;
/* validate compression algorithms */
if (call->incoming_stream_compression_algorithm !=
GRPC_STREAM_COMPRESS_NONE) {
const grpc_stream_compression_algorithm algo =
call->incoming_stream_compression_algorithm;
- char *error_msg = NULL;
+ char* error_msg = NULL;
const grpc_compression_options compression_options =
grpc_channel_compression_options(call->channel);
if (algo >= GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) {
@@ -1517,7 +1517,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
grpc_stream_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
algo_name);
@@ -1531,7 +1531,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->stream_encodings_accepted_by_peer,
call->incoming_stream_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
grpc_stream_compression_algorithm_name(
call->incoming_stream_compression_algorithm, &algo_name);
gpr_log(
@@ -1545,7 +1545,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) {
const grpc_compression_algorithm algo =
call->incoming_compression_algorithm;
- char *error_msg = NULL;
+ char* error_msg = NULL;
const grpc_compression_options compression_options =
grpc_channel_compression_options(call->channel);
/* check if algorithm is known */
@@ -1558,7 +1558,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, algo) == 0) {
/* check if algorithm is supported by current channel config */
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
grpc_compression_algorithm_name(algo, &algo_name);
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
@@ -1574,7 +1574,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
if (!GPR_BITGET(call->encodings_accepted_by_peer,
call->incoming_compression_algorithm)) {
if (GRPC_TRACER_ON(grpc_compression_trace)) {
- const char *algo_name = NULL;
+ const char* algo_name = NULL;
grpc_compression_algorithm_name(call->incoming_compression_algorithm,
&algo_name);
gpr_log(GPR_ERROR,
@@ -1587,8 +1587,8 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx,
}
}
-static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
- grpc_error *error, bool has_cancelled) {
+static void add_batch_error(grpc_exec_ctx* exec_ctx, batch_control* bctl,
+ grpc_error* error, bool has_cancelled) {
if (error == GRPC_ERROR_NONE) return;
int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1);
if (idx == 0 && !has_cancelled) {
@@ -1598,17 +1598,17 @@ static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
bctl->errors[idx] = error;
}
-static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
- void *bctlp, grpc_error *error) {
- batch_control *bctl = (batch_control *)bctlp;
- grpc_call *call = bctl->call;
+static void receiving_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
+ void* bctlp, grpc_error* error) {
+ batch_control* bctl = (batch_control*)bctlp;
+ grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
"recv_initial_metadata_ready");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
- grpc_metadata_batch *md =
+ grpc_metadata_batch* md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
recv_initial_filter(exec_ctx, call, md);
@@ -1622,7 +1622,7 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
}
- grpc_closure *saved_rsr_closure = NULL;
+ grpc_closure* saved_rsr_closure = NULL;
while (true) {
gpr_atm rsr_bctlp = gpr_atm_acq_load(&call->recv_state);
/* Should only receive initial metadata once */
@@ -1639,9 +1639,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
} else {
/* Already received messages */
- saved_rsr_closure = GRPC_CLOSURE_CREATE(receiving_stream_ready,
- (batch_control *)rsr_bctlp,
- grpc_schedule_on_exec_ctx);
+ saved_rsr_closure =
+ GRPC_CLOSURE_CREATE(receiving_stream_ready, (batch_control*)rsr_bctlp,
+ grpc_schedule_on_exec_ctx);
/* No need to modify recv_state */
break;
}
@@ -1653,31 +1653,31 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
finish_batch_step(exec_ctx, bctl);
}
-static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
- grpc_error *error) {
- batch_control *bctl = (batch_control *)bctlp;
- grpc_call *call = bctl->call;
+static void finish_batch(grpc_exec_ctx* exec_ctx, void* bctlp,
+ grpc_error* error) {
+ batch_control* bctl = (batch_control*)bctlp;
+ grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
finish_batch_step(exec_ctx, bctl);
}
-static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p,
- grpc_cq_completion *completion) {
+static void free_no_op_completion(grpc_exec_ctx* exec_ctx, void* p,
+ grpc_cq_completion* completion) {
gpr_free(completion);
}
-static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
- grpc_call *call, const grpc_op *ops,
- size_t nops, void *notify_tag,
+static grpc_call_error call_start_batch(grpc_exec_ctx* exec_ctx,
+ grpc_call* call, const grpc_op* ops,
+ size_t nops, void* notify_tag,
int is_notify_tag_closure) {
size_t i;
- const grpc_op *op;
- batch_control *bctl;
+ const grpc_op* op;
+ batch_control* bctl;
int num_completion_callbacks_needed = 1;
grpc_call_error error = GRPC_CALL_OK;
- grpc_transport_stream_op_batch *stream_op;
- grpc_transport_stream_op_batch_payload *stream_op_payload;
+ grpc_transport_stream_op_batch* stream_op;
+ grpc_transport_stream_op_batch_payload* stream_op_payload;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
@@ -1688,9 +1688,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_cq_end_op(
exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
free_no_op_completion, NULL,
- (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
+ (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
} else {
- GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE);
+ GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)notify_tag, GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;
goto done;
@@ -1886,7 +1886,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
exec_ctx, call->channel, op->data.send_status_from_server.status);
{
- grpc_error *override_error = GRPC_ERROR_NONE;
+ grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Error from server send status");
@@ -1897,7 +1897,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
grpc_slice_ref_internal(
*op->data.send_status_from_server.status_details));
call->send_extra_metadata_count++;
- char *msg = grpc_slice_to_c_string(
+ char* msg = grpc_slice_to_c_string(
GRPC_MDVALUE(call->send_extra_metadata[1].md));
override_error =
grpc_error_set_str(override_error, GRPC_ERROR_STR_GRPC_MESSAGE,
@@ -2072,8 +2072,8 @@ done_with_error:
goto done;
}
-grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
- size_t nops, void *tag, void *reserved) {
+grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops,
+ size_t nops, void* tag, void* reserved) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call_error err;
@@ -2092,16 +2092,16 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
return err;
}
-grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
- grpc_call *call,
- const grpc_op *ops,
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx* exec_ctx,
+ grpc_call* call,
+ const grpc_op* ops,
size_t nops,
- grpc_closure *closure) {
+ grpc_closure* closure) {
return call_start_batch(exec_ctx, call, ops, nops, closure, 1);
}
-void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
- void *value, void (*destroy)(void *value)) {
+void grpc_call_context_set(grpc_call* call, grpc_context_index elem,
+ void* value, void (*destroy)(void* value)) {
if (call->context[elem].destroy) {
call->context[elem].destroy(call->context[elem].value);
}
@@ -2109,20 +2109,20 @@ void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
call->context[elem].destroy = destroy;
}
-void *grpc_call_context_get(grpc_call *call, grpc_context_index elem) {
+void* grpc_call_context_get(grpc_call* call, grpc_context_index elem) {
return call->context[elem].value;
}
-uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; }
+uint8_t grpc_call_is_client(grpc_call* call) { return call->is_client; }
grpc_compression_algorithm grpc_call_compression_for_level(
- grpc_call *call, grpc_compression_level level) {
+ grpc_call* call, grpc_compression_level level) {
grpc_compression_algorithm algo =
compression_algorithm_for_level_locked(call, level);
return algo;
}
-const char *grpc_call_error_to_string(grpc_call_error error) {
+const char* grpc_call_error_to_string(grpc_call_error error) {
switch (error) {
case GRPC_CALL_ERROR:
return "GRPC_CALL_ERROR";
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 27c2f5243c..d4e596f84b 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -30,23 +30,23 @@ extern "C" {
#include <grpc/grpc.h>
#include <grpc/impl/codegen/compression_types.h>
-typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx,
- grpc_call *call, int success,
- void *user_data);
+typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx* exec_ctx,
+ grpc_call* call, int success,
+ void* user_data);
typedef struct grpc_call_create_args {
- grpc_channel *channel;
+ grpc_channel* channel;
- grpc_call *parent;
+ grpc_call* parent;
uint32_t propagation_mask;
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
/* if not NULL, it'll be used in lieu of cq */
- grpc_pollset_set *pollset_set_alternative;
+ grpc_pollset_set* pollset_set_alternative;
- const void *server_transport_data;
+ const void* server_transport_data;
- grpc_mdelem *add_initial_metadata;
+ grpc_mdelem* add_initial_metadata;
size_t add_initial_metadata_count;
grpc_millis send_deadline;
@@ -55,62 +55,62 @@ typedef struct grpc_call_create_args {
/* Create a new call based on \a args.
Regardless of success or failure, always returns a valid new call into *call
*/
-grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
- const grpc_call_create_args *args,
- grpc_call **call);
+grpc_error* grpc_call_create(grpc_exec_ctx* exec_ctx,
+ const grpc_call_create_args* args,
+ grpc_call** call);
-void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
- grpc_completion_queue *cq);
+void grpc_call_set_completion_queue(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ grpc_completion_queue* cq);
#ifndef NDEBUG
-void grpc_call_internal_ref(grpc_call *call, const char *reason);
-void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call,
- const char *reason);
+void grpc_call_internal_ref(grpc_call* call, const char* reason);
+void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* call,
+ const char* reason);
#define GRPC_CALL_INTERNAL_REF(call, reason) \
grpc_call_internal_ref(call, reason)
#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
grpc_call_internal_unref(exec_ctx, call, reason)
#else
-void grpc_call_internal_ref(grpc_call *call);
-void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call);
+void grpc_call_internal_ref(grpc_call* call);
+void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* call);
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
grpc_call_internal_unref(exec_ctx, call)
#endif
-grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);
+grpc_call_stack* grpc_call_get_call_stack(grpc_call* call);
-grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx,
- grpc_call *call,
- const grpc_op *ops,
+grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx* exec_ctx,
+ grpc_call* call,
+ const grpc_op* ops,
size_t nops,
- grpc_closure *closure);
+ grpc_closure* closure);
/* Given the top call_element, get the call object. */
-grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);
+grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
-void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
- grpc_call *call, const grpc_op *ops, size_t nops,
- void *tag);
+void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity,
+ grpc_call* call, const grpc_op* ops, size_t nops,
+ void* tag);
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
/* TODO(#9731): add exec_ctx to destroy */
-void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
- void *value, void (*destroy)(void *value));
+void grpc_call_context_set(grpc_call* call, grpc_context_index elem,
+ void* value, void (*destroy)(void* value));
/* Get a context pointer. */
-void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
+void* grpc_call_context_get(grpc_call* call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (GRPC_TRACER_ON(grpc_api_trace)) \
grpc_call_log_batch(sev, call, ops, nops, tag)
-uint8_t grpc_call_is_client(grpc_call *call);
+uint8_t grpc_call_is_client(grpc_call* call);
/* Return an appropriate compression algorithm for the requested compression \a
* level in the context of \a call. */
grpc_compression_algorithm grpc_call_compression_for_level(
- grpc_call *call, grpc_compression_level level);
+ grpc_call* call, grpc_compression_level level);
extern grpc_tracer_flag grpc_call_error_trace;
extern grpc_tracer_flag grpc_compression_trace;
diff --git a/src/core/lib/surface/call_log_batch.cc b/src/core/lib/surface/call_log_batch.cc
index 5557927b7c..030964675d 100644
--- a/src/core/lib/surface/call_log_batch.cc
+++ b/src/core/lib/surface/call_log_batch.cc
@@ -25,7 +25,7 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
-static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
+static void add_metadata(gpr_strvec* b, const grpc_metadata* md, size_t count) {
size_t i;
if (md == NULL) {
gpr_strvec_add(b, gpr_strdup("(nil)"));
@@ -41,9 +41,9 @@ static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) {
}
}
-char *grpc_op_string(const grpc_op *op) {
- char *tmp;
- char *out;
+char* grpc_op_string(const grpc_op* op) {
+ char* tmp;
+ char* out;
gpr_strvec b;
gpr_strvec_init(&b);
@@ -105,10 +105,10 @@ char *grpc_op_string(const grpc_op *op) {
return out;
}
-void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity,
- grpc_call *call, const grpc_op *ops, size_t nops,
- void *tag) {
- char *tmp;
+void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity,
+ grpc_call* call, const grpc_op* ops, size_t nops,
+ void* tag) {
+ char* tmp;
size_t i;
for (i = 0; i < nops; i++) {
tmp = grpc_op_string(&ops[i]);
diff --git a/src/core/lib/surface/call_test_only.h b/src/core/lib/surface/call_test_only.h
index a5a01b3679..2ff4a487d5 100644
--- a/src/core/lib/surface/call_test_only.h
+++ b/src/core/lib/surface/call_test_only.h
@@ -29,30 +29,30 @@ extern "C" {
*
* \warning This function should \b only be used in test code. */
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
- grpc_call *call);
+ grpc_call* call);
/** Return the message flags from \a call.
*
* \warning This function should \b only be used in test code. */
-uint32_t grpc_call_test_only_get_message_flags(grpc_call *call);
+uint32_t grpc_call_test_only_get_message_flags(grpc_call* call);
/** Returns a bitset for the encodings (compression algorithms) supported by \a
* call's peer.
*
* To be indexed by grpc_compression_algorithm enum values. */
-uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
+uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call);
/** Returns a bitset for the stream encodings (stream compression algorithms)
* supported by \a call's peer.
*
* To be indexed by grpc_stream_compression_algorithm enum values. */
uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer(
- grpc_call *call);
+ grpc_call* call);
/** Returns the incoming stream compression algorithm (content-encoding header)
* received by a call. */
grpc_stream_compression_algorithm
-grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call);
+grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call);
#ifdef __cplusplus
}
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index 860dcc82db..832cc07858 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -47,7 +47,7 @@
typedef struct registered_call {
grpc_mdelem path;
grpc_mdelem authority;
- struct registered_call *next;
+ struct registered_call* next;
} registered_call;
struct grpc_channel {
@@ -58,35 +58,35 @@ struct grpc_channel {
gpr_atm call_size_estimate;
gpr_mu registered_call_mu;
- registered_call *registered_calls;
+ registered_call* registered_calls;
- char *target;
+ char* target;
};
-#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
+#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1))
#define CHANNEL_FROM_CHANNEL_STACK(channel_stack) \
- (((grpc_channel *)(channel_stack)) - 1)
+ (((grpc_channel*)(channel_stack)) - 1)
#define CHANNEL_FROM_TOP_ELEM(top_elem) \
CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem))
-static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error);
+static void destroy_channel(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error);
-grpc_channel *grpc_channel_create_with_builder(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+grpc_channel* grpc_channel_create_with_builder(
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
grpc_channel_stack_type channel_stack_type) {
- char *target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
- grpc_channel_args *args = grpc_channel_args_copy(
+ char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
+ grpc_channel_args* args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
- grpc_channel *channel;
+ grpc_channel* channel;
if (channel_stack_type == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
} else {
GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
}
- grpc_error *error = grpc_channel_stack_builder_finish(
+ grpc_error* error = grpc_channel_stack_builder_finish(
exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL,
- (void **)&channel);
+ (void**)&channel);
if (error != GRPC_ERROR_NONE) {
gpr_log(GPR_ERROR, "channel stack builder failed: %s",
grpc_error_string(error));
@@ -195,11 +195,11 @@ done:
return channel;
}
-grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *input_args,
+grpc_channel* grpc_channel_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* input_args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport *optional_transport) {
- grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_transport* optional_transport) {
+ grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
input_args);
grpc_channel_stack_builder_set_target(builder, target);
@@ -212,7 +212,7 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
channel_stack_type);
}
-size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) {
+size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
#define ROUND_UP_SIZE 256
/* We round up our current estimate to the NEXT value of ROUND_UP_SIZE.
This ensures:
@@ -225,7 +225,7 @@ size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) {
~(size_t)(ROUND_UP_SIZE - 1);
}
-void grpc_channel_update_call_size_estimate(grpc_channel *channel,
+void grpc_channel_update_call_size_estimate(grpc_channel* channel,
size_t size) {
size_t cur = (size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate);
if (cur < size) {
@@ -244,24 +244,24 @@ void grpc_channel_update_call_size_estimate(grpc_channel *channel,
}
}
-char *grpc_channel_get_target(grpc_channel *channel) {
+char* grpc_channel_get_target(grpc_channel* channel) {
GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
return gpr_strdup(channel->target);
}
-void grpc_channel_get_info(grpc_channel *channel,
- const grpc_channel_info *channel_info) {
+void grpc_channel_get_info(grpc_channel* channel,
+ const grpc_channel_info* channel_info) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_channel_element *elem =
+ grpc_channel_element* elem =
grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
elem->filter->get_channel_info(&exec_ctx, elem, channel_info);
grpc_exec_ctx_finish(&exec_ctx);
}
-static grpc_call *grpc_channel_create_call_internal(
- grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask, grpc_completion_queue *cq,
- grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem,
+static grpc_call* grpc_channel_create_call_internal(
+ grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
+ uint32_t propagation_mask, grpc_completion_queue* cq,
+ grpc_pollset_set* pollset_set_alternative, grpc_mdelem path_mdelem,
grpc_mdelem authority_mdelem, grpc_millis deadline) {
grpc_mdelem send_metadata[2];
size_t num_metadata = 0;
@@ -288,20 +288,20 @@ static grpc_call *grpc_channel_create_call_internal(
args.add_initial_metadata_count = num_metadata;
args.send_deadline = deadline;
- grpc_call *call;
+ grpc_call* call;
GRPC_LOG_IF_ERROR("call_create", grpc_call_create(exec_ctx, &args, &call));
return call;
}
-grpc_call *grpc_channel_create_call(grpc_channel *channel,
- grpc_call *parent_call,
+grpc_call* grpc_channel_create_call(grpc_channel* channel,
+ grpc_call* parent_call,
uint32_t propagation_mask,
- grpc_completion_queue *cq,
- grpc_slice method, const grpc_slice *host,
- gpr_timespec deadline, void *reserved) {
+ grpc_completion_queue* cq,
+ grpc_slice method, const grpc_slice* host,
+ gpr_timespec deadline, void* reserved) {
GPR_ASSERT(!reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_call *call = grpc_channel_create_call_internal(
+ grpc_call* call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, cq, NULL,
grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH,
grpc_slice_ref_internal(method)),
@@ -313,10 +313,10 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
return call;
}
-grpc_call *grpc_channel_create_pollset_set_call(
- grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, grpc_millis deadline, void *reserved) {
+grpc_call* grpc_channel_create_pollset_set_call(
+ grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
+ uint32_t propagation_mask, grpc_pollset_set* pollset_set, grpc_slice method,
+ const grpc_slice* host, grpc_millis deadline, void* reserved) {
GPR_ASSERT(!reserved);
return grpc_channel_create_call_internal(
exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set,
@@ -328,9 +328,9 @@ grpc_call *grpc_channel_create_pollset_set_call(
deadline);
}
-void *grpc_channel_register_call(grpc_channel *channel, const char *method,
- const char *host, void *reserved) {
- registered_call *rc = (registered_call *)gpr_malloc(sizeof(registered_call));
+void* grpc_channel_register_call(grpc_channel* channel, const char* method,
+ const char* host, void* reserved) {
+ registered_call* rc = (registered_call*)gpr_malloc(sizeof(registered_call));
GRPC_API_TRACE(
"grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
4, (channel, method, host, reserved));
@@ -353,11 +353,11 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
return rc;
}
-grpc_call *grpc_channel_create_registered_call(
- grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
- grpc_completion_queue *completion_queue, void *registered_call_handle,
- gpr_timespec deadline, void *reserved) {
- registered_call *rc = (registered_call *)registered_call_handle;
+grpc_call* grpc_channel_create_registered_call(
+ grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
+ grpc_completion_queue* completion_queue, void* registered_call_handle,
+ gpr_timespec deadline, void* reserved) {
+ registered_call* rc = (registered_call*)registered_call_handle;
GRPC_API_TRACE(
"grpc_channel_create_registered_call("
"channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
@@ -365,12 +365,13 @@ grpc_call *grpc_channel_create_registered_call(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
- registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
- (int)deadline.clock_type, reserved));
+ 9,
+ (channel, parent_call, (unsigned)propagation_mask, completion_queue,
+ registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
+ (int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_call *call = grpc_channel_create_call_internal(
+ grpc_call* call = grpc_channel_create_call_internal(
&exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL,
GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
grpc_timespec_to_millis_round_up(deadline));
@@ -380,26 +381,26 @@ grpc_call *grpc_channel_create_registered_call(
#ifndef NDEBUG
#define REF_REASON reason
-#define REF_ARG , const char *reason
+#define REF_ARG , const char* reason
#else
#define REF_REASON ""
#define REF_ARG
#endif
-void grpc_channel_internal_ref(grpc_channel *c REF_ARG) {
+void grpc_channel_internal_ref(grpc_channel* c REF_ARG) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
-void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
- grpc_channel *c REF_ARG) {
+void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx,
+ grpc_channel* c REF_ARG) {
GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
-static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_channel *channel = (grpc_channel *)arg;
+static void destroy_channel(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_channel* channel = (grpc_channel*)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
- registered_call *rc = channel->registered_calls;
+ registered_call* rc = channel->registered_calls;
channel->registered_calls = rc->next;
GRPC_MDELEM_UNREF(exec_ctx, rc->path);
GRPC_MDELEM_UNREF(exec_ctx, rc->authority);
@@ -411,9 +412,9 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(channel);
}
-void grpc_channel_destroy(grpc_channel *channel) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- grpc_channel_element *elem;
+void grpc_channel_destroy(grpc_channel* channel) {
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ grpc_channel_element* elem;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
op->disconnect_with_error =
@@ -426,17 +427,17 @@ void grpc_channel_destroy(grpc_channel *channel) {
grpc_exec_ctx_finish(&exec_ctx);
}
-grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) {
+grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel) {
return CHANNEL_STACK_FROM_CHANNEL(channel);
}
grpc_compression_options grpc_channel_compression_options(
- const grpc_channel *channel) {
+ const grpc_channel* channel) {
return channel->compression_options;
}
-grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel, int i) {
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel* channel, int i) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
switch (i) {
case 0:
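
  Usage sketch for the registered-call helpers reformatted above: a method/host
  pair is interned once via grpc_channel_register_call and the returned handle is
  then reused for every call created from it. This assumes an already-created
  channel and completion queue; the method, host, deadline, and propagation mask
  below are illustrative assumptions, not values taken from this change.

      void* handle = grpc_channel_register_call(channel, "/pkg.Service/Method",
                                                "example.host", NULL);
      grpc_call* call = grpc_channel_create_registered_call(
          channel, /*parent_call=*/NULL, GRPC_PROPAGATE_DEFAULTS, cq, handle,
          gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
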
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 4d1c7e369f..063e685f6b 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -27,13 +27,13 @@
extern "C" {
#endif
-grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *args,
+grpc_channel* grpc_channel_create(grpc_exec_ctx* exec_ctx, const char* target,
+ const grpc_channel_args* args,
grpc_channel_stack_type channel_stack_type,
- grpc_transport *optional_transport);
+ grpc_transport* optional_transport);
-grpc_channel *grpc_channel_create_with_builder(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+grpc_channel* grpc_channel_create_with_builder(
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
grpc_channel_stack_type channel_stack_type);
/** Create a call given a grpc_channel, in order to call \a method.
@@ -44,37 +44,37 @@ grpc_channel *grpc_channel_create_with_builder(
non-NULL, it must be a server-side call. It will be used to propagate
properties from the server call to this new client call, depending on the
value of \a propagation_mask (see propagation_bits.h for possible values) */
-grpc_call *grpc_channel_create_pollset_set_call(
- grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call,
- uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method,
- const grpc_slice *host, grpc_millis deadline, void *reserved);
+grpc_call* grpc_channel_create_pollset_set_call(
+ grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
+ uint32_t propagation_mask, grpc_pollset_set* pollset_set, grpc_slice method,
+ const grpc_slice* host, grpc_millis deadline, void* reserved);
/** Get a (borrowed) pointer to this channels underlying channel stack */
-grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
+grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel);
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
-grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel,
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel* channel,
int status_code);
-size_t grpc_channel_get_call_size_estimate(grpc_channel *channel);
-void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size);
+size_t grpc_channel_get_call_size_estimate(grpc_channel* channel);
+void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size);
#ifndef NDEBUG
-void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
-void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
- const char *reason);
+void grpc_channel_internal_ref(grpc_channel* channel, const char* reason);
+void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx, grpc_channel* channel,
+ const char* reason);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel, reason)
#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
grpc_channel_internal_unref(exec_ctx, channel, reason)
#else
-void grpc_channel_internal_ref(grpc_channel *channel);
-void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
- grpc_channel *channel);
+void grpc_channel_internal_ref(grpc_channel* channel);
+void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx,
+ grpc_channel* channel);
#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
grpc_channel_internal_ref(channel)
#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
@@ -83,7 +83,7 @@ void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
/** Return the channel's compression options. */
grpc_compression_options grpc_channel_compression_options(
- const grpc_channel *channel);
+ const grpc_channel* channel);
#ifdef __cplusplus
}
diff --git a/src/core/lib/surface/channel_init.cc b/src/core/lib/surface/channel_init.cc
index 33f444b89e..17d8caf56f 100644
--- a/src/core/lib/surface/channel_init.cc
+++ b/src/core/lib/surface/channel_init.cc
@@ -23,13 +23,13 @@
typedef struct stage_slot {
grpc_channel_init_stage fn;
- void *arg;
+ void* arg;
int priority;
size_t insertion_order;
} stage_slot;
typedef struct stage_slots {
- stage_slot *slots;
+ stage_slot* slots;
size_t num_slots;
size_t cap_slots;
} stage_slots;
@@ -49,24 +49,24 @@ void grpc_channel_init_init(void) {
void grpc_channel_init_register_stage(grpc_channel_stack_type type,
int priority,
grpc_channel_init_stage stage,
- void *stage_arg) {
+ void* stage_arg) {
GPR_ASSERT(!g_finalized);
if (g_slots[type].cap_slots == g_slots[type].num_slots) {
g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2);
- g_slots[type].slots = (stage_slot *)gpr_realloc(
+ g_slots[type].slots = (stage_slot*)gpr_realloc(
g_slots[type].slots,
g_slots[type].cap_slots * sizeof(*g_slots[type].slots));
}
- stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++];
+ stage_slot* s = &g_slots[type].slots[g_slots[type].num_slots++];
s->insertion_order = g_slots[type].num_slots;
s->priority = priority;
s->fn = stage;
s->arg = stage_arg;
}
-static int compare_slots(const void *a, const void *b) {
- const stage_slot *sa = (const stage_slot *)a;
- const stage_slot *sb = (const stage_slot *)b;
+static int compare_slots(const void* a, const void* b) {
+ const stage_slot* sa = (const stage_slot*)a;
+ const stage_slot* sb = (const stage_slot*)b;
int c = GPR_ICMP(sa->priority, sb->priority);
if (c != 0) return c;
@@ -85,12 +85,12 @@ void grpc_channel_init_finalize(void) {
void grpc_channel_init_shutdown(void) {
for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
gpr_free(g_slots[i].slots);
- g_slots[i].slots = (stage_slot *)(void *)(uintptr_t)0xdeadbeef;
+ g_slots[i].slots = (stage_slot*)(void*)(uintptr_t)0xdeadbeef;
}
}
-bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
+bool grpc_channel_init_create_stack(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
grpc_channel_stack_type type) {
GPR_ASSERT(g_finalized);
@@ -98,7 +98,7 @@ bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_type_string(type));
for (size_t i = 0; i < g_slots[type].num_slots; i++) {
- const stage_slot *slot = &g_slots[type].slots[i];
+ const stage_slot* slot = &g_slots[type].slots[i];
if (!slot->fn(exec_ctx, builder, slot->arg)) {
return false;
}
diff --git a/src/core/lib/surface/channel_init.h b/src/core/lib/surface/channel_init.h
index 5f109332ad..9932781081 100644
--- a/src/core/lib/surface/channel_init.h
+++ b/src/core/lib/surface/channel_init.h
@@ -36,9 +36,9 @@ extern "C" {
/// One stage of mutation: call functions against \a builder to influence the
/// finally constructed channel stack
-typedef bool (*grpc_channel_init_stage)(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
- void *arg);
+typedef bool (*grpc_channel_init_stage)(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
+ void* arg);
/// Global initialization of the system
void grpc_channel_init_init(void);
@@ -51,7 +51,7 @@ void grpc_channel_init_init(void);
void grpc_channel_init_register_stage(grpc_channel_stack_type type,
int priority,
grpc_channel_init_stage stage_fn,
- void *stage_arg);
+ void* stage_arg);
/// Finalize registration. No more calls to grpc_channel_init_register_stage are
/// allowed.
@@ -70,8 +70,8 @@ void grpc_channel_init_shutdown(void);
/// \a optional_transport is either NULL or a constructed transport object
/// Returns a pointer to the base of the memory allocated (the actual channel
/// stack object will be prefix_bytes past that pointer)
-bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder,
+bool grpc_channel_init_create_stack(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder,
grpc_channel_stack_type type);
#ifdef __cplusplus
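
  A minimal sketch of the stage-registration API reformatted above, using a
  hypothetical no-op stage that matches the grpc_channel_init_stage typedef; a
  real stage would mutate the builder (for example, append a filter) before
  returning. Returning false from a stage aborts stack construction, as
  grpc_channel_init_create_stack shows. The priority value is an assumption.

      static bool noop_stage(grpc_exec_ctx* exec_ctx,
                             grpc_channel_stack_builder* builder, void* arg) {
        (void)exec_ctx;
        (void)builder;
        (void)arg;
        return true; /* keep building the stack */
      }

      /* Must run after grpc_channel_init_init() and before
         grpc_channel_init_finalize(). */
      grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, 10, noop_stage, NULL);
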
diff --git a/src/core/lib/surface/channel_ping.cc b/src/core/lib/surface/channel_ping.cc
index f45b568958..5660bb812e 100644
--- a/src/core/lib/surface/channel_ping.cc
+++ b/src/core/lib/surface/channel_ping.cc
@@ -28,29 +28,29 @@
typedef struct {
grpc_closure closure;
- void *tag;
- grpc_completion_queue *cq;
+ void* tag;
+ grpc_completion_queue* cq;
grpc_cq_completion completion_storage;
} ping_result;
-static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_cq_completion *storage) {
+static void ping_destroy(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_cq_completion* storage) {
gpr_free(arg);
}
-static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
- ping_result *pr = (ping_result *)arg;
+static void ping_done(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+ ping_result* pr = (ping_result*)arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy,
pr, &pr->completion_storage);
}
-void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
- void *tag, void *reserved) {
+void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
+ void* tag, void* reserved) {
GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
(channel, cq, tag, reserved));
- grpc_transport_op *op = grpc_make_transport_op(NULL);
- ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr));
- grpc_channel_element *top_elem =
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
+ ping_result* pr = (ping_result*)gpr_malloc(sizeof(*pr));
+ grpc_channel_element* top_elem =
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(reserved == NULL);
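
  The ping path above ends with grpc_cq_end_op delivering the caller's tag, so a
  caller issues the ping and then polls the same completion queue. This assumes
  an existing channel and a next-style completion queue; the tag value and the
  infinite deadline are illustrative assumptions.

      void* tag = (void*)0x1;
      grpc_channel_ping(channel, cq, tag, NULL);
      grpc_event ev = grpc_completion_queue_next(
          cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      /* ev.type == GRPC_OP_COMPLETE with ev.tag == tag once ping_done has run;
         ev.success reflects the error passed to grpc_cq_end_op. */
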
diff --git a/src/core/lib/surface/channel_stack_type.cc b/src/core/lib/surface/channel_stack_type.cc
index 5f5c877727..366c452942 100644
--- a/src/core/lib/surface/channel_stack_type.cc
+++ b/src/core/lib/surface/channel_stack_type.cc
@@ -38,7 +38,7 @@ bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type) {
GPR_UNREACHABLE_CODE(return true;);
}
-const char *grpc_channel_stack_type_string(grpc_channel_stack_type type) {
+const char* grpc_channel_stack_type_string(grpc_channel_stack_type type) {
switch (type) {
case GRPC_CLIENT_CHANNEL:
return "CLIENT_CHANNEL";
diff --git a/src/core/lib/surface/channel_stack_type.h b/src/core/lib/surface/channel_stack_type.h
index c77848794c..feecd3aa44 100644
--- a/src/core/lib/surface/channel_stack_type.h
+++ b/src/core/lib/surface/channel_stack_type.h
@@ -44,7 +44,7 @@ typedef enum {
bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type);
-const char *grpc_channel_stack_type_string(grpc_channel_stack_type type);
+const char* grpc_channel_stack_type_string(grpc_channel_stack_type type);
#ifdef __cplusplus
}
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 5009f786e6..9dabe76510 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -58,62 +58,62 @@ GPR_TLS_DECL(g_cached_event);
GPR_TLS_DECL(g_cached_cq);
typedef struct {
- grpc_pollset_worker **worker;
- void *tag;
+ grpc_pollset_worker** worker;
+ void* tag;
} plucker;
typedef struct {
bool can_get_pollset;
bool can_listen;
size_t (*size)(void);
- void (*init)(grpc_pollset *pollset, gpr_mu **mu);
- grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker);
- grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker **worker, grpc_millis deadline);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_closure *closure);
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset);
+ void (*init)(grpc_pollset* pollset, gpr_mu** mu);
+ grpc_error* (*kick)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker);
+ grpc_error* (*work)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker** worker, grpc_millis deadline);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_closure* closure);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
} cq_poller_vtable;
typedef struct non_polling_worker {
gpr_cv cv;
bool kicked;
- struct non_polling_worker *next;
- struct non_polling_worker *prev;
+ struct non_polling_worker* next;
+ struct non_polling_worker* prev;
} non_polling_worker;
typedef struct {
gpr_mu mu;
- non_polling_worker *root;
- grpc_closure *shutdown;
+ non_polling_worker* root;
+ grpc_closure* shutdown;
} non_polling_poller;
static size_t non_polling_poller_size(void) {
return sizeof(non_polling_poller);
}
-static void non_polling_poller_init(grpc_pollset *pollset, gpr_mu **mu) {
- non_polling_poller *npp = (non_polling_poller *)pollset;
+static void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) {
+ non_polling_poller* npp = (non_polling_poller*)pollset;
gpr_mu_init(&npp->mu);
*mu = &npp->mu;
}
-static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset) {
- non_polling_poller *npp = (non_polling_poller *)pollset;
+static void non_polling_poller_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset) {
+ non_polling_poller* npp = (non_polling_poller*)pollset;
gpr_mu_destroy(&npp->mu);
}
-static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_pollset_worker **worker,
+static grpc_error* non_polling_poller_work(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
+ grpc_pollset_worker** worker,
grpc_millis deadline) {
- non_polling_poller *npp = (non_polling_poller *)pollset;
+ non_polling_poller* npp = (non_polling_poller*)pollset;
if (npp->shutdown) return GRPC_ERROR_NONE;
non_polling_worker w;
gpr_cv_init(&w.cv);
- if (worker != NULL) *worker = (grpc_pollset_worker *)&w;
+ if (worker != NULL) *worker = (grpc_pollset_worker*)&w;
if (npp->root == NULL) {
npp->root = w.next = w.prev = &w;
} else {
@@ -143,13 +143,13 @@ static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static grpc_error *non_polling_poller_kick(
- grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
- grpc_pollset_worker *specific_worker) {
- non_polling_poller *p = (non_polling_poller *)pollset;
- if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
+static grpc_error* non_polling_poller_kick(
+ grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+ grpc_pollset_worker* specific_worker) {
+ non_polling_poller* p = (non_polling_poller*)pollset;
+ if (specific_worker == NULL) specific_worker = (grpc_pollset_worker*)p->root;
if (specific_worker != NULL) {
- non_polling_worker *w = (non_polling_worker *)specific_worker;
+ non_polling_worker* w = (non_polling_worker*)specific_worker;
if (!w->kicked) {
w->kicked = true;
gpr_cv_signal(&w->cv);
@@ -158,16 +158,16 @@ static grpc_error *non_polling_poller_kick(
return GRPC_ERROR_NONE;
}
-static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_pollset *pollset,
- grpc_closure *closure) {
- non_polling_poller *p = (non_polling_poller *)pollset;
+static void non_polling_poller_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_pollset* pollset,
+ grpc_closure* closure) {
+ non_polling_poller* p = (non_polling_poller*)pollset;
GPR_ASSERT(closure != NULL);
p->shutdown = closure;
if (p->root == NULL) {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
} else {
- non_polling_worker *w = p->root;
+ non_polling_worker* w = p->root;
do {
gpr_cv_signal(&w->cv);
w = w->next;
@@ -191,19 +191,19 @@ static const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
typedef struct cq_vtable {
grpc_cq_completion_type cq_completion_type;
size_t data_size;
- void (*init)(void *data);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq);
- void (*destroy)(void *data);
- bool (*begin_op)(grpc_completion_queue *cq, void *tag);
- void (*end_op)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, void *tag,
- grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage);
- grpc_event (*next)(grpc_completion_queue *cq, gpr_timespec deadline,
- void *reserved);
- grpc_event (*pluck)(grpc_completion_queue *cq, void *tag,
- gpr_timespec deadline, void *reserved);
+ void (*init)(void* data);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq);
+ void (*destroy)(void* data);
+ bool (*begin_op)(grpc_completion_queue* cq, void* tag);
+ void (*end_op)(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq, void* tag,
+ grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage);
+ grpc_event (*next)(grpc_completion_queue* cq, gpr_timespec deadline,
+ void* reserved);
+ grpc_event (*pluck)(grpc_completion_queue* cq, void* tag,
+ gpr_timespec deadline, void* reserved);
} cq_vtable;
/* Queue that holds the cq_completion_events. Internally uses gpr_mpscq queue
@@ -240,7 +240,7 @@ typedef struct cq_next_data {
typedef struct cq_pluck_data {
/** Completed events for completion-queues of type GRPC_CQ_PLUCK */
grpc_cq_completion completed_head;
- grpc_cq_completion *completed_tail;
+ grpc_cq_completion* completed_tail;
/** Number of pending events (+1 if we're not shutdown) */
gpr_atm pending_events;
@@ -267,13 +267,13 @@ struct grpc_completion_queue {
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
- gpr_mu *mu;
+ gpr_mu* mu;
- const cq_vtable *vtable;
- const cq_poller_vtable *poller_vtable;
+ const cq_vtable* vtable;
+ const cq_poller_vtable* poller_vtable;
#ifndef NDEBUG
- void **outstanding_tags;
+ void** outstanding_tags;
size_t outstanding_tag_count;
size_t outstanding_tag_capacity;
#endif
@@ -283,44 +283,44 @@ struct grpc_completion_queue {
};
/* Forward declarations */
-static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq);
-static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq);
-static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq);
-static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq);
-
-static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag);
-static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag);
-
-static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq, void *tag,
- grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx,
- void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage);
-
-static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq, void *tag,
- grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx,
- void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage);
-
-static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
- void *reserved);
-
-static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
- gpr_timespec deadline, void *reserved);
-
-static void cq_init_next(void *data);
-static void cq_init_pluck(void *data);
-static void cq_destroy_next(void *data);
-static void cq_destroy_pluck(void *data);
+static void cq_finish_shutdown_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq);
+static void cq_finish_shutdown_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq);
+static void cq_shutdown_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq);
+static void cq_shutdown_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq);
+
+static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag);
+static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag);
+
+static void cq_end_op_for_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq, void* tag,
+ grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx,
+ void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage);
+
+static void cq_end_op_for_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq, void* tag,
+ grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx,
+ void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage);
+
+static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
+ void* reserved);
+
+static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
+ gpr_timespec deadline, void* reserved);
+
+static void cq_init_next(void* data);
+static void cq_init_pluck(void* data);
+static void cq_destroy_next(void* data);
+static void cq_destroy_pluck(void* data);
/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
@@ -333,9 +333,9 @@ static const cq_vtable g_cq_vtable[] = {
cq_pluck},
};
-#define DATA_FROM_CQ(cq) ((void *)(cq + 1))
+#define DATA_FROM_CQ(cq) ((void*)(cq + 1))
#define POLLSET_FROM_CQ(cq) \
- ((grpc_pollset *)(cq->vtable->data_size + (char *)DATA_FROM_CQ(cq)))
+ ((grpc_pollset*)(cq->vtable->data_size + (char*)DATA_FROM_CQ(cq)))
grpc_tracer_flag grpc_cq_pluck_trace =
GRPC_TRACER_INITIALIZER(true, "queue_pluck");
@@ -346,39 +346,39 @@ grpc_tracer_flag grpc_cq_event_timeout_trace =
if (GRPC_TRACER_ON(grpc_api_trace) && \
(GRPC_TRACER_ON(grpc_cq_pluck_trace) || \
(event)->type != GRPC_QUEUE_TIMEOUT)) { \
- char *_ev = grpc_event_string(event); \
+ char* _ev = grpc_event_string(event); \
gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \
gpr_free(_ev); \
}
-static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cq,
- grpc_error *error);
+static void on_pollset_shutdown_done(grpc_exec_ctx* exec_ctx, void* cq,
+ grpc_error* error);
void grpc_cq_global_init() {
gpr_tls_init(&g_cached_event);
gpr_tls_init(&g_cached_cq);
}
-void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue *cq) {
- if ((grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == nullptr) {
+void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
+ if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)0);
gpr_tls_set(&g_cached_cq, (intptr_t)cq);
}
}
-int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue *cq,
- void **tag, int *ok) {
- grpc_cq_completion *storage =
- (grpc_cq_completion *)gpr_tls_get(&g_cached_event);
+int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
+ void** tag, int* ok) {
+ grpc_cq_completion* storage =
+ (grpc_cq_completion*)gpr_tls_get(&g_cached_event);
int ret = 0;
if (storage != NULL &&
- (grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == cq) {
+ (grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
*tag = storage->tag;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
storage->done(&exec_ctx, storage->done_arg, storage);
*ok = (storage->next & (uintptr_t)(1)) == 1;
ret = 1;
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
gpr_mu_lock(cq->mu);
@@ -394,30 +394,30 @@ int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue *cq,
return ret;
}
-static void cq_event_queue_init(grpc_cq_event_queue *q) {
+static void cq_event_queue_init(grpc_cq_event_queue* q) {
gpr_mpscq_init(&q->queue);
q->queue_lock = GPR_SPINLOCK_INITIALIZER;
gpr_atm_no_barrier_store(&q->num_queue_items, 0);
}
-static void cq_event_queue_destroy(grpc_cq_event_queue *q) {
+static void cq_event_queue_destroy(grpc_cq_event_queue* q) {
gpr_mpscq_destroy(&q->queue);
}
-static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) {
- gpr_mpscq_push(&q->queue, (gpr_mpscq_node *)c);
+static bool cq_event_queue_push(grpc_cq_event_queue* q, grpc_cq_completion* c) {
+ gpr_mpscq_push(&q->queue, (gpr_mpscq_node*)c);
return gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1) == 0;
}
-static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) {
- grpc_cq_completion *c = NULL;
+static grpc_cq_completion* cq_event_queue_pop(grpc_cq_event_queue* q) {
+ grpc_cq_completion* c = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (gpr_spinlock_trylock(&q->queue_lock)) {
GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(&exec_ctx);
bool is_empty = false;
- c = (grpc_cq_completion *)gpr_mpscq_pop_and_check_end(&q->queue, &is_empty);
+ c = (grpc_cq_completion*)gpr_mpscq_pop_and_check_end(&q->queue, &is_empty);
gpr_spinlock_unlock(&q->queue_lock);
if (c == NULL && !is_empty) {
@@ -438,14 +438,14 @@ static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) {
/* Note: The counter is not incremented/decremented atomically with push/pop.
* The count is only eventually consistent */
-static long cq_event_queue_num_items(grpc_cq_event_queue *q) {
+static long cq_event_queue_num_items(grpc_cq_event_queue* q) {
return (long)gpr_atm_no_barrier_load(&q->num_queue_items);
}
-grpc_completion_queue *grpc_completion_queue_create_internal(
+grpc_completion_queue* grpc_completion_queue_create_internal(
grpc_cq_completion_type completion_type,
grpc_cq_polling_type polling_type) {
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
GPR_TIMER_BEGIN("grpc_completion_queue_create_internal", 0);
@@ -454,17 +454,17 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
"polling_type=%d)",
2, (completion_type, polling_type));
- const cq_vtable *vtable = &g_cq_vtable[completion_type];
- const cq_poller_vtable *poller_vtable =
+ const cq_vtable* vtable = &g_cq_vtable[completion_type];
+ const cq_poller_vtable* poller_vtable =
&g_poller_vtable_by_poller_type[polling_type];
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
grpc_exec_ctx_finish(&exec_ctx);
- cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) +
- vtable->data_size +
- poller_vtable->size());
+ cq = (grpc_completion_queue*)gpr_zalloc(sizeof(grpc_completion_queue) +
+ vtable->data_size +
+ poller_vtable->size());
cq->vtable = vtable;
cq->poller_vtable = poller_vtable;
@@ -483,8 +483,8 @@ grpc_completion_queue *grpc_completion_queue_create_internal(
return cq;
}
-static void cq_init_next(void *ptr) {
- cq_next_data *cqd = (cq_next_data *)ptr;
+static void cq_init_next(void* ptr) {
+ cq_next_data* cqd = (cq_next_data*)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->shutdown_called = false;
@@ -492,14 +492,14 @@ static void cq_init_next(void *ptr) {
cq_event_queue_init(&cqd->queue);
}
-static void cq_destroy_next(void *ptr) {
- cq_next_data *cqd = (cq_next_data *)ptr;
+static void cq_destroy_next(void* ptr) {
+ cq_next_data* cqd = (cq_next_data*)ptr;
GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0);
cq_event_queue_destroy(&cqd->queue);
}
-static void cq_init_pluck(void *ptr) {
- cq_pluck_data *cqd = (cq_pluck_data *)ptr;
+static void cq_init_pluck(void* ptr) {
+ cq_pluck_data* cqd = (cq_pluck_data*)ptr;
/* Initial count is dropped by grpc_completion_queue_shutdown */
gpr_atm_no_barrier_store(&cqd->pending_events, 1);
cqd->completed_tail = &cqd->completed_head;
@@ -510,16 +510,16 @@ static void cq_init_pluck(void *ptr) {
gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0);
}
-static void cq_destroy_pluck(void *ptr) {
- cq_pluck_data *cqd = (cq_pluck_data *)ptr;
+static void cq_destroy_pluck(void* ptr) {
+ cq_pluck_data* cqd = (cq_pluck_data*)ptr;
GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head);
}
-grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue *cq) {
+grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
return cq->vtable->cq_completion_type;
}
-int grpc_get_cq_poll_num(grpc_completion_queue *cq) {
+int grpc_get_cq_poll_num(grpc_completion_queue* cq) {
int cur_num_polls;
gpr_mu_lock(cq->mu);
cur_num_polls = cq->num_polls;
@@ -528,8 +528,8 @@ int grpc_get_cq_poll_num(grpc_completion_queue *cq) {
}
#ifndef NDEBUG
-void grpc_cq_internal_ref(grpc_completion_queue *cq, const char *reason,
- const char *file, int line) {
+void grpc_cq_internal_ref(grpc_completion_queue* cq, const char* reason,
+ const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -537,20 +537,20 @@ void grpc_cq_internal_ref(grpc_completion_queue *cq, const char *reason,
reason);
}
#else
-void grpc_cq_internal_ref(grpc_completion_queue *cq) {
+void grpc_cq_internal_ref(grpc_completion_queue* cq) {
#endif
gpr_ref(&cq->owning_refs);
}
-static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_completion_queue *cq = (grpc_completion_queue *)arg;
+static void on_pollset_shutdown_done(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_completion_queue* cq = (grpc_completion_queue*)arg;
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy");
}
#ifndef NDEBUG
-void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
- const char *reason, const char *file, int line) {
+void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq,
+ const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -558,8 +558,8 @@ void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
reason);
}
#else
-void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq) {
+void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq) {
#endif
if (gpr_unref(&cq->owning_refs)) {
cq->vtable->destroy(DATA_FROM_CQ(cq));
@@ -572,7 +572,7 @@ void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx,
}
#ifndef NDEBUG
-static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {
+static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {
int found = 0;
if (lock_cq) {
gpr_mu_lock(cq->mu);
@@ -581,7 +581,7 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {
for (int i = 0; i < (int)cq->outstanding_tag_count; i++) {
if (cq->outstanding_tags[i] == tag) {
cq->outstanding_tag_count--;
- GPR_SWAP(void *, cq->outstanding_tags[i],
+ GPR_SWAP(void*, cq->outstanding_tags[i],
cq->outstanding_tags[cq->outstanding_tag_count]);
found = 1;
break;
@@ -595,12 +595,12 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {
GPR_ASSERT(found);
}
#else
-static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {}
+static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {}
#endif
/* Atomically increments a counter only if the counter is not zero. Returns
* true if the increment was successful; false if the counter is zero */
-static bool atm_inc_if_nonzero(gpr_atm *counter) {
+static bool atm_inc_if_nonzero(gpr_atm* counter) {
while (true) {
gpr_atm count = gpr_atm_acq_load(counter);
/* If zero, we are done. If not, we must to a CAS (instead of an atomic
@@ -616,22 +616,22 @@ static bool atm_inc_if_nonzero(gpr_atm *counter) {
return true;
}
-static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) {
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag) {
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
-static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
return atm_inc_if_nonzero(&cqd->pending_events);
}
-bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) {
+bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
#ifndef NDEBUG
gpr_mu_lock(cq->mu);
if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
- cq->outstanding_tags = (void **)gpr_realloc(
+ cq->outstanding_tags = (void**)gpr_realloc(
cq->outstanding_tags,
sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity);
}
@@ -644,19 +644,19 @@ bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) {
/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
* completion
* type of GRPC_CQ_NEXT) */
-static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq, void *tag,
- grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx,
- void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage) {
+static void cq_end_op_for_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq, void* tag,
+ grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx,
+ void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage) {
GPR_TIMER_BEGIN("cq_end_op_for_next", 0);
if (GRPC_TRACER_ON(grpc_api_trace) ||
(GRPC_TRACER_ON(grpc_trace_operation_failures) &&
error != GRPC_ERROR_NONE)) {
- const char *errmsg = grpc_error_string(error);
+ const char* errmsg = grpc_error_string(error);
GRPC_API_TRACE(
"cq_end_op_for_next(exec_ctx=%p, cq=%p, tag=%p, error=%s, "
"done=%p, done_arg=%p, storage=%p)",
@@ -666,7 +666,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
}
}
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
storage->tag = tag;
@@ -676,8 +676,8 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
cq_check_tag(cq, tag, true); /* Used in debug builds only */
- if ((grpc_completion_queue *)gpr_tls_get(&g_cached_cq) == cq &&
- (grpc_cq_completion *)gpr_tls_get(&g_cached_event) == nullptr) {
+ if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq &&
+ (grpc_cq_completion*)gpr_tls_get(&g_cached_event) == nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)storage);
} else {
/* Add the completion to the queue */
@@ -695,12 +695,12 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Only kick if this is the first item queued */
if (is_first) {
gpr_mu_lock(cq->mu);
- grpc_error *kick_error =
+ grpc_error* kick_error =
cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(kick_error);
+ const char* msg = grpc_error_string(kick_error);
gpr_log(GPR_ERROR, "Kick failed: %s", msg);
GRPC_ERROR_UNREF(kick_error);
}
@@ -730,14 +730,14 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx,
/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
* completion
* type of GRPC_CQ_PLUCK) */
-static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq, void *tag,
- grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx,
- void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static void cq_end_op_for_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq, void* tag,
+ grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx,
+ void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
int is_success = (error == GRPC_ERROR_NONE);
GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0);
@@ -745,7 +745,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
if (GRPC_TRACER_ON(grpc_api_trace) ||
(GRPC_TRACER_ON(grpc_trace_operation_failures) &&
error != GRPC_ERROR_NONE)) {
- const char *errmsg = grpc_error_string(error);
+ const char* errmsg = grpc_error_string(error);
GRPC_API_TRACE(
"cq_end_op_for_pluck(exec_ctx=%p, cq=%p, tag=%p, error=%s, "
"done=%p, done_arg=%p, storage=%p)",
@@ -774,7 +774,7 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
cq_finish_shutdown_pluck(exec_ctx, cq);
gpr_mu_unlock(cq->mu);
} else {
- grpc_pollset_worker *pluck_worker = NULL;
+ grpc_pollset_worker* pluck_worker = NULL;
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag) {
pluck_worker = *cqd->pluckers[i].worker;
@@ -782,13 +782,13 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
}
}
- grpc_error *kick_error =
+ grpc_error* kick_error =
cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
gpr_mu_unlock(cq->mu);
if (kick_error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(kick_error);
+ const char* msg = grpc_error_string(kick_error);
gpr_log(GPR_ERROR, "Kick failed: %s", msg);
GRPC_ERROR_UNREF(kick_error);
@@ -800,27 +800,27 @@ static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq,
- void *tag, grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage) {
+void grpc_cq_end_op(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq,
+ void* tag, grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage) {
cq->vtable->end_op(exec_ctx, cq, tag, error, done, done_arg, storage);
}
typedef struct {
gpr_atm last_seen_things_queued_ever;
- grpc_completion_queue *cq;
+ grpc_completion_queue* cq;
grpc_millis deadline;
- grpc_cq_completion *stolen_completion;
- void *tag; /* for pluck */
+ grpc_cq_completion* stolen_completion;
+ void* tag; /* for pluck */
bool first_loop;
} cq_is_finished_arg;
-static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
- grpc_completion_queue *cq = a->cq;
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+static bool cq_is_next_finished(grpc_exec_ctx* exec_ctx, void* arg) {
+ cq_is_finished_arg* a = (cq_is_finished_arg*)arg;
+ grpc_completion_queue* cq = a->cq;
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -844,7 +844,7 @@ static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) {
}
#ifndef NDEBUG
-static void dump_pending_tags(grpc_completion_queue *cq) {
+static void dump_pending_tags(grpc_completion_queue* cq) {
if (!GRPC_TRACER_ON(grpc_trace_pending_tags)) return;
gpr_strvec v;
@@ -852,24 +852,24 @@ static void dump_pending_tags(grpc_completion_queue *cq) {
gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:"));
gpr_mu_lock(cq->mu);
for (size_t i = 0; i < cq->outstanding_tag_count; i++) {
- char *s;
+ char* s;
gpr_asprintf(&s, " %p", cq->outstanding_tags[i]);
gpr_strvec_add(&v, s);
}
gpr_mu_unlock(cq->mu);
- char *out = gpr_strvec_flatten(&v, NULL);
+ char* out = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
gpr_log(GPR_DEBUG, "%s", out);
gpr_free(out);
}
#else
-static void dump_pending_tags(grpc_completion_queue *cq) {}
+static void dump_pending_tags(grpc_completion_queue* cq) {}
#endif
-static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
- void *reserved) {
+static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
+ void* reserved) {
grpc_event ret;
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
@@ -879,8 +879,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 5, (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
- reserved));
+ 5,
+ (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+ reserved));
GPR_ASSERT(!reserved);
dump_pending_tags(cq);
@@ -901,7 +902,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
grpc_millis iteration_deadline = deadline_millis;
if (is_finished_arg.stolen_completion != NULL) {
- grpc_cq_completion *c = is_finished_arg.stolen_completion;
+ grpc_cq_completion* c = is_finished_arg.stolen_completion;
is_finished_arg.stolen_completion = NULL;
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
@@ -910,7 +911,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
break;
}
- grpc_cq_completion *c = cq_event_queue_pop(&cqd->queue);
+ grpc_cq_completion* c = cq_event_queue_pop(&cqd->queue);
if (c != NULL) {
ret.type = GRPC_OP_COMPLETE;
@@ -957,12 +958,12 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
/* The main polling work happens in grpc_pollset_work */
gpr_mu_lock(cq->mu);
cq->num_polls++;
- grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
+ grpc_error* err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
NULL, iteration_deadline);
gpr_mu_unlock(cq->mu);
if (err != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(err);
+ const char* msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);
GRPC_ERROR_UNREF(err);
@@ -997,9 +998,9 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
- Must be called only once in completion queue's lifetime
- grpc_completion_queue_shutdown() MUST have been called before calling
this function */
-static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq) {
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+static void cq_finish_shutdown_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq) {
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
@@ -1008,9 +1009,9 @@ static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx,
&cq->pollset_shutdown_done);
}
-static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq) {
- cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq);
+static void cq_shutdown_next(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq) {
+ cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_next() below, that would call pollset shutdown.
@@ -1036,14 +1037,14 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx,
GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
}
-grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
- gpr_timespec deadline, void *reserved) {
+grpc_event grpc_completion_queue_next(grpc_completion_queue* cq,
+ gpr_timespec deadline, void* reserved) {
return cq->vtable->next(cq, deadline, reserved);
}
-static int add_plucker(grpc_completion_queue *cq, void *tag,
- grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static int add_plucker(grpc_completion_queue* cq, void* tag,
+ grpc_pollset_worker** worker) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
return 0;
}
@@ -1053,9 +1054,9 @@ static int add_plucker(grpc_completion_queue *cq, void *tag,
return 1;
}
-static void del_plucker(grpc_completion_queue *cq, void *tag,
- grpc_pollset_worker **worker) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static void del_plucker(grpc_completion_queue* cq, void* tag,
+ grpc_pollset_worker** worker) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
for (int i = 0; i < cqd->num_pluckers; i++) {
if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
cqd->num_pluckers--;
@@ -1066,10 +1067,10 @@ static void del_plucker(grpc_completion_queue *cq, void *tag,
GPR_UNREACHABLE_CODE(return );
}
-static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
- cq_is_finished_arg *a = (cq_is_finished_arg *)arg;
- grpc_completion_queue *cq = a->cq;
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static bool cq_is_pluck_finished(grpc_exec_ctx* exec_ctx, void* arg) {
+ cq_is_finished_arg* a = (cq_is_finished_arg*)arg;
+ grpc_completion_queue* cq = a->cq;
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
GPR_ASSERT(a->stolen_completion == NULL);
gpr_atm current_last_seen_things_queued_ever =
@@ -1078,9 +1079,9 @@ static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
gpr_mu_lock(cq->mu);
a->last_seen_things_queued_ever =
gpr_atm_no_barrier_load(&cqd->things_queued_ever);
- grpc_cq_completion *c;
- grpc_cq_completion *prev = &cqd->completed_head;
- while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ grpc_cq_completion* c;
+ grpc_cq_completion* prev = &cqd->completed_head;
+ while ((c = (grpc_cq_completion*)(prev->next & ~(uintptr_t)1)) !=
&cqd->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
@@ -1098,13 +1099,13 @@ static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) {
return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
}
-static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
- gpr_timespec deadline, void *reserved) {
+static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
+ gpr_timespec deadline, void* reserved) {
grpc_event ret;
- grpc_cq_completion *c;
- grpc_cq_completion *prev;
- grpc_pollset_worker *worker = NULL;
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+ grpc_cq_completion* c;
+ grpc_cq_completion* prev;
+ grpc_pollset_worker* worker = NULL;
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
@@ -1115,8 +1116,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
- 6, (cq, tag, deadline.tv_sec, deadline.tv_nsec,
- (int)deadline.clock_type, reserved));
+ 6,
+ (cq, tag, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+ reserved));
}
GPR_ASSERT(!reserved);
@@ -1146,7 +1148,7 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
break;
}
prev = &cqd->completed_head;
- while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
+ while ((c = (grpc_cq_completion*)(prev->next & ~(uintptr_t)1)) !=
&cqd->completed_head) {
if (c->tag == tag) {
prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
@@ -1190,12 +1192,12 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
break;
}
cq->num_polls++;
- grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
+ grpc_error* err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
&worker, deadline_millis);
if (err != GRPC_ERROR_NONE) {
del_plucker(cq, tag, &worker);
gpr_mu_unlock(cq->mu);
- const char *msg = grpc_error_string(err);
+ const char* msg = grpc_error_string(err);
gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg);
GRPC_ERROR_UNREF(err);
@@ -1218,14 +1220,14 @@ done:
return ret;
}
-grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
- gpr_timespec deadline, void *reserved) {
+grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
+ gpr_timespec deadline, void* reserved) {
return cq->vtable->pluck(cq, tag, deadline, reserved);
}
-static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static void cq_finish_shutdown_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
GPR_ASSERT(cqd->shutdown_called);
GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
@@ -1237,9 +1239,9 @@ static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx,
/* NOTE: This function is almost exactly identical to cq_shutdown_next() but
* merging them is a bit tricky and probably not worth it */
-static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
- grpc_completion_queue *cq) {
- cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq);
+static void cq_shutdown_pluck(grpc_exec_ctx* exec_ctx,
+ grpc_completion_queue* cq) {
+ cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
/* Need an extra ref for cq here because:
* We call cq_finish_shutdown_pluck() below, that would call pollset shutdown.
@@ -1264,7 +1266,7 @@ static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx,
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
to zero here, then enter shutdown mode and wake up any waiters */
-void grpc_completion_queue_shutdown(grpc_completion_queue *cq) {
+void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
@@ -1273,7 +1275,7 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cq) {
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
-void grpc_completion_queue_destroy(grpc_completion_queue *cq) {
+void grpc_completion_queue_destroy(grpc_completion_queue* cq) {
GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq));
GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
grpc_completion_queue_shutdown(cq);
@@ -1284,10 +1286,10 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cq) {
GPR_TIMER_END("grpc_completion_queue_destroy", 0);
}
-grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cq) {
+grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cq) {
return cq->poller_vtable->can_get_pollset ? POLLSET_FROM_CQ(cq) : NULL;
}
-bool grpc_cq_can_listen(grpc_completion_queue *cq) {
+bool grpc_cq_can_listen(grpc_completion_queue* cq) {
return cq->poller_vtable->can_listen;
}
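
For orientation, the public entry points reformatted above (grpc_completion_queue_next, grpc_completion_queue_shutdown, grpc_completion_queue_destroy) are normally driven from application code along the lines of the sketch below; the helper name drain_cq and its structure are illustrative only and are not part of this change.

#include <grpc/grpc.h>

/* Illustrative sketch: drain a NEXT-style completion queue until shutdown
   completes. Not part of the patch above. */
static void drain_cq(grpc_completion_queue* cq) {
  grpc_completion_queue_shutdown(cq);
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
    /* GRPC_OP_COMPLETE events carry ev.tag and ev.success. */
  }
  grpc_completion_queue_destroy(cq);
}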
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index c02bc5da07..0ed9875f58 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -44,28 +44,28 @@ typedef struct grpc_cq_completion {
gpr_mpscq_node node;
/** user supplied tag */
- void *tag;
+ void* tag;
/** done callback - called when this queue element is no longer
needed by the completion queue */
- void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
- struct grpc_cq_completion *c);
- void *done_arg;
+ void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
+ struct grpc_cq_completion* c);
+ void* done_arg;
/** next pointer; low bit is used to indicate success or not */
uintptr_t next;
} grpc_cq_completion;
#ifndef NDEBUG
-void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason,
- const char *file, int line);
-void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
- const char *reason, const char *file, int line);
+void grpc_cq_internal_ref(grpc_completion_queue* cc, const char* reason,
+ const char* file, int line);
+void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc,
+ const char* reason, const char* file, int line);
#define GRPC_CQ_INTERNAL_REF(cc, reason) \
grpc_cq_internal_ref(cc, reason, __FILE__, __LINE__)
#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) \
grpc_cq_internal_unref(ec, cc, reason, __FILE__, __LINE__)
#else
-void grpc_cq_internal_ref(grpc_completion_queue *cc);
-void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc);
+void grpc_cq_internal_ref(grpc_completion_queue* cc);
+void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc);
#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc)
#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) grpc_cq_internal_unref(ec, cc)
#endif
@@ -77,25 +77,25 @@ void grpc_cq_global_init();
   shutdown until a corresponding grpc_cq_end_* call is made.
\a tag is currently used only in debug builds. Return true on success, and
false if completion_queue has been shutdown. */
-bool grpc_cq_begin_op(grpc_completion_queue *cc, void *tag);
+bool grpc_cq_begin_op(grpc_completion_queue* cc, void* tag);
/* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to
grpc_cq_begin_op */
-void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
- void *tag, grpc_error *error,
- void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg,
- grpc_cq_completion *storage),
- void *done_arg, grpc_cq_completion *storage);
+void grpc_cq_end_op(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc,
+ void* tag, grpc_error* error,
+ void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
+ grpc_cq_completion* storage),
+ void* done_arg, grpc_cq_completion* storage);
-grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);
+grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cc);
-bool grpc_cq_can_listen(grpc_completion_queue *cc);
+bool grpc_cq_can_listen(grpc_completion_queue* cc);
-grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue *cc);
+grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cc);
-int grpc_get_cq_poll_num(grpc_completion_queue *cc);
+int grpc_get_cq_poll_num(grpc_completion_queue* cc);
-grpc_completion_queue *grpc_completion_queue_create_internal(
+grpc_completion_queue* grpc_completion_queue_create_internal(
grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type);
#ifdef __cplusplus
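
The begin/end pair declared above carries a simple contract: grpc_cq_begin_op must succeed before an operation is started, and grpc_cq_end_op later publishes the result, invoking the supplied done callback once the completion storage is no longer needed by the queue. A minimal sketch of a caller honoring that contract, mirroring the pattern used in server.cc below; the names on_my_op_done and complete_my_op are hypothetical.

/* Sketch only; not part of this patch. */
static void on_my_op_done(grpc_exec_ctx* exec_ctx, void* done_arg,
                          grpc_cq_completion* storage) {
  gpr_free(storage); /* storage was gpr_malloc'd by complete_my_op below */
}

static void complete_my_op(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq,
                           void* tag) {
  /* Assumes grpc_cq_begin_op(cq, tag) returned true before the op started. */
  grpc_cq_end_op(exec_ctx, cq, tag, GRPC_ERROR_NONE, on_my_op_done, NULL,
                 (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
}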
diff --git a/src/core/lib/surface/event_string.cc b/src/core/lib/surface/event_string.cc
index f236272e2a..b92ee6ad07 100644
--- a/src/core/lib/surface/event_string.cc
+++ b/src/core/lib/surface/event_string.cc
@@ -24,22 +24,22 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/support/string.h"
-static void addhdr(gpr_strvec *buf, grpc_event *ev) {
- char *tmp;
+static void addhdr(gpr_strvec* buf, grpc_event* ev) {
+ char* tmp;
gpr_asprintf(&tmp, "tag:%p", ev->tag);
gpr_strvec_add(buf, tmp);
}
-static const char *errstr(int success) { return success ? "OK" : "ERROR"; }
+static const char* errstr(int success) { return success ? "OK" : "ERROR"; }
-static void adderr(gpr_strvec *buf, int success) {
- char *tmp;
+static void adderr(gpr_strvec* buf, int success) {
+ char* tmp;
gpr_asprintf(&tmp, " %s", errstr(success));
gpr_strvec_add(buf, tmp);
}
-char *grpc_event_string(grpc_event *ev) {
- char *out;
+char* grpc_event_string(grpc_event* ev) {
+ char* out;
gpr_strvec buf;
if (ev == NULL) return gpr_strdup("null");
diff --git a/src/core/lib/surface/event_string.h b/src/core/lib/surface/event_string.h
index 2d53cf0fac..4bdb11f35e 100644
--- a/src/core/lib/surface/event_string.h
+++ b/src/core/lib/surface/event_string.h
@@ -26,7 +26,7 @@ extern "C" {
#endif
/* Returns a string describing an event. Must be later freed with gpr_free() */
-char *grpc_event_string(grpc_event *ev);
+char* grpc_event_string(grpc_event* ev);
#ifdef __cplusplus
}
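
Per the comment above, grpc_event_string hands ownership of the returned buffer to the caller; a minimal sketch of the expected usage follows (hypothetical helper, not part of this change).

static void log_next_event(grpc_completion_queue* cq, gpr_timespec deadline) {
  grpc_event ev = grpc_completion_queue_next(cq, deadline, NULL);
  char* s = grpc_event_string(&ev); /* heap-allocated description */
  gpr_log(GPR_DEBUG, "event: %s", s);
  gpr_free(s); /* caller must free */
}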
diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc
index 058e88f804..233bd7a6b8 100644
--- a/src/core/lib/surface/init.cc
+++ b/src/core/lib/surface/init.cc
@@ -68,16 +68,16 @@ static void do_basic_init(void) {
g_initializations = 0;
}
-static bool append_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder, void *arg) {
+static bool append_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_append_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
-static bool prepend_filter(grpc_exec_ctx *exec_ctx,
- grpc_channel_stack_builder *builder, void *arg) {
+static bool prepend_filter(grpc_exec_ctx* exec_ctx,
+ grpc_channel_stack_builder* builder, void* arg) {
return grpc_channel_stack_builder_prepend_filter(
- builder, (const grpc_channel_filter *)arg, NULL, NULL);
+ builder, (const grpc_channel_filter*)arg, NULL, NULL);
}
static void register_builtin_channel_init() {
@@ -92,9 +92,9 @@ static void register_builtin_channel_init() {
grpc_add_connected_filter, NULL);
grpc_channel_init_register_stage(GRPC_CLIENT_LAME_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
- append_filter, (void *)&grpc_lame_filter);
+ append_filter, (void*)&grpc_lame_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
- (void *)&grpc_server_top_filter);
+ (void*)&grpc_server_top_filter);
}
typedef struct grpc_plugin {
@@ -107,7 +107,7 @@ static int g_number_of_plugins = 0;
void grpc_register_plugin(void (*init)(void), void (*destroy)(void)) {
GRPC_API_TRACE("grpc_register_plugin(init=%p, destroy=%p)", 2,
- ((void *)(intptr_t)init, (void *)(intptr_t)destroy));
+ ((void*)(intptr_t)init, (void*)(intptr_t)destroy));
GPR_ASSERT(g_number_of_plugins != MAX_PLUGINS);
g_all_of_the_plugins[g_number_of_plugins].init = init;
g_all_of_the_plugins[g_number_of_plugins].destroy = destroy;
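
The registration pattern above is the same one a third-party plugin would use to splice its own filter into a channel stack; a rough sketch under that assumption, where my_filter, append_my_filter, my_plugin_init and my_plugin_destroy are hypothetical names.

extern const grpc_channel_filter my_filter;

static bool append_my_filter(grpc_exec_ctx* exec_ctx,
                             grpc_channel_stack_builder* builder, void* arg) {
  return grpc_channel_stack_builder_append_filter(
      builder, (const grpc_channel_filter*)arg, NULL, NULL);
}

static void my_plugin_init(void) {
  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
                                   append_my_filter, (void*)&my_filter);
}
static void my_plugin_destroy(void) {}
/* Registered before grpc_init():
     grpc_register_plugin(my_plugin_init, my_plugin_destroy); */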
diff --git a/src/core/lib/surface/init_secure.cc b/src/core/lib/surface/init_secure.cc
index 8fbde3d1b4..bcb91d7df8 100644
--- a/src/core/lib/surface/init_secure.cc
+++ b/src/core/lib/surface/init_secure.cc
@@ -47,8 +47,8 @@ void grpc_security_pre_init(void) {
}
static bool maybe_prepend_client_auth_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (args) {
for (size_t i = 0; i < args->num_args; i++) {
@@ -62,8 +62,8 @@ static bool maybe_prepend_client_auth_filter(
}
static bool maybe_prepend_server_auth_filter(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
- const grpc_channel_args *args =
+ grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+ const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (args) {
for (size_t i = 0; i < args->num_args; i++) {
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index 88e26cbeb7..add7be2e08 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,7 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
- grpc_call_combiner *call_combiner;
+ grpc_call_combiner* call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -48,19 +48,19 @@ struct CallData {
struct ChannelData {
grpc_status_code error_code;
- const char *error_message;
+ const char* error_message;
};
-static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_metadata_batch *mdb) {
- CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+static void fill_metadata(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_metadata_batch* mdb) {
+ CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
+ ChannelData* chand = reinterpret_cast<ChannelData*>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -78,9 +78,9 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
static void lame_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -93,13 +93,13 @@ static void lame_start_transport_stream_op_batch(
calld->call_combiner);
}
-static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- const grpc_channel_info *channel_info) {}
+static void lame_get_channel_info(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ const grpc_channel_info* channel_info) {}
-static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_transport_op *op) {
+static void lame_start_transport_op(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_transport_op* op) {
if (op->on_connectivity_state_change) {
GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
*op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
@@ -117,30 +117,30 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
}
}
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *then_schedule_closure) {
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* then_schedule_closure) {
GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
GPR_ASSERT(args->is_first);
GPR_ASSERT(args->is_last);
return GRPC_ERROR_NONE;
}
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {}
} // namespace
@@ -160,14 +160,14 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
"lame-client",
};
-#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
+#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1))
-grpc_channel *grpc_lame_client_channel_create(const char *target,
+grpc_channel* grpc_lame_client_channel_create(const char* target,
grpc_status_code error_code,
- const char *error_message) {
+ const char* error_message) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_channel_element *elem;
- grpc_channel *channel = grpc_channel_create(&exec_ctx, target, NULL,
+ grpc_channel_element* elem;
+ grpc_channel* channel = grpc_channel_create(&exec_ctx, target, NULL,
GRPC_CLIENT_LAME_CHANNEL, NULL);
elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
GRPC_API_TRACE(
@@ -175,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = reinterpret_cast<grpc_core::ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<grpc_core::ChannelData*>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
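
grpc_lame_client_channel_create, whose signature is reformatted above, returns a channel on which every call fails with the supplied status and message instead of attempting a connection; a brief sketch of a hypothetical caller follows (target string and status chosen for illustration).

static grpc_channel* make_placeholder_channel(void) {
  /* Every call on this channel completes with GRPC_STATUS_UNAVAILABLE. */
  return grpc_lame_client_channel_create("placeholder-target",
                                         GRPC_STATUS_UNAVAILABLE,
                                         "backend not configured");
}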
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index dd09cb91de..eb7a4e2d30 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -44,12 +44,12 @@
#include "src/core/lib/transport/static_metadata.h"
typedef struct listener {
- void *arg;
- void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_pollset **pollsets, size_t pollset_count);
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_closure *closure);
- struct listener *next;
+ void* arg;
+ void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_pollset** pollsets, size_t pollset_count);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_closure* closure);
+ struct listener* next;
grpc_closure destroy_done;
} listener;
@@ -65,26 +65,26 @@ grpc_tracer_flag grpc_server_channel_trace =
typedef struct requested_call {
requested_call_type type;
size_t cq_idx;
- void *tag;
- grpc_server *server;
- grpc_completion_queue *cq_bound_to_call;
- grpc_call **call;
+ void* tag;
+ grpc_server* server;
+ grpc_completion_queue* cq_bound_to_call;
+ grpc_call** call;
grpc_cq_completion completion;
- grpc_metadata_array *initial_metadata;
+ grpc_metadata_array* initial_metadata;
union {
struct {
- grpc_call_details *details;
+ grpc_call_details* details;
} batch;
struct {
- registered_method *method;
- gpr_timespec *deadline;
- grpc_byte_buffer **optional_payload;
+ registered_method* method;
+ gpr_timespec* deadline;
+ grpc_byte_buffer** optional_payload;
} registered;
} data;
} requested_call;
typedef struct channel_registered_method {
- registered_method *server_registered_method;
+ registered_method* server_registered_method;
uint32_t flags;
bool has_host;
grpc_slice method;
@@ -92,14 +92,14 @@ typedef struct channel_registered_method {
} channel_registered_method;
struct channel_data {
- grpc_server *server;
+ grpc_server* server;
grpc_connectivity_state connectivity_state;
- grpc_channel *channel;
+ grpc_channel* channel;
size_t cq_idx;
/* linked list of all channels on a server */
- channel_data *next;
- channel_data *prev;
- channel_registered_method *registered_methods;
+ channel_data* next;
+ channel_data* prev;
+ channel_registered_method* registered_methods;
uint32_t registered_method_slots;
uint32_t registered_method_max_probes;
grpc_closure finish_destroy_channel_closure;
@@ -107,8 +107,8 @@ struct channel_data {
};
typedef struct shutdown_tag {
- void *tag;
- grpc_completion_queue *cq;
+ void* tag;
+ grpc_completion_queue* cq;
grpc_cq_completion completion;
} shutdown_tag;
@@ -126,7 +126,7 @@ typedef enum {
typedef struct request_matcher request_matcher;
struct call_data {
- grpc_call *call;
+ grpc_call* call;
/** protects state */
gpr_mu mu_state;
@@ -139,52 +139,52 @@ struct call_data {
grpc_slice host;
grpc_millis deadline;
- grpc_completion_queue *cq_new;
+ grpc_completion_queue* cq_new;
- grpc_metadata_batch *recv_initial_metadata;
+ grpc_metadata_batch* recv_initial_metadata;
uint32_t recv_initial_metadata_flags;
grpc_metadata_array initial_metadata;
- request_matcher *matcher;
- grpc_byte_buffer *payload;
+ request_matcher* matcher;
+ grpc_byte_buffer* payload;
grpc_closure got_initial_metadata;
grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
- grpc_closure *on_done_recv_initial_metadata;
+ grpc_closure* on_done_recv_initial_metadata;
grpc_closure publish;
- call_data *pending_next;
+ call_data* pending_next;
};
struct request_matcher {
- grpc_server *server;
- call_data *pending_head;
- call_data *pending_tail;
- gpr_stack_lockfree **requests_per_cq;
+ grpc_server* server;
+ call_data* pending_head;
+ call_data* pending_tail;
+ gpr_stack_lockfree** requests_per_cq;
};
struct registered_method {
- char *method;
- char *host;
+ char* method;
+ char* host;
grpc_server_register_method_payload_handling payload_handling;
uint32_t flags;
/* one request matcher per method */
request_matcher matcher;
- registered_method *next;
+ registered_method* next;
};
typedef struct {
- grpc_channel **channels;
+ grpc_channel** channels;
size_t num_channels;
} channel_broadcaster;
struct grpc_server {
- grpc_channel_args *channel_args;
+ grpc_channel_args* channel_args;
- grpc_completion_queue **cqs;
- grpc_pollset **pollsets;
+ grpc_completion_queue** cqs;
+ grpc_pollset** pollsets;
size_t cq_count;
size_t pollset_count;
bool started;
@@ -204,23 +204,23 @@ struct grpc_server {
bool starting;
gpr_cv starting_cv;
- registered_method *registered_methods;
+ registered_method* registered_methods;
/** one request matcher for unregistered methods */
request_matcher unregistered_request_matcher;
/** free list of available requested_calls_per_cq indices */
- gpr_stack_lockfree **request_freelist_per_cq;
+ gpr_stack_lockfree** request_freelist_per_cq;
/** requested call backing data */
- requested_call **requested_calls_per_cq;
+ requested_call** requested_calls_per_cq;
int max_requested_calls_per_cq;
gpr_atm shutdown_flag;
uint8_t shutdown_published;
size_t num_shutdown_tags;
- shutdown_tag *shutdown_tags;
+ shutdown_tag* shutdown_tags;
channel_data root_channel_data;
- listener *listeners;
+ listener* listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
@@ -229,30 +229,30 @@ struct grpc_server {
};
#define SERVER_FROM_CALL_ELEM(elem) \
- (((channel_data *)(elem)->channel_data)->server)
+ (((channel_data*)(elem)->channel_data)->server)
-static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *calld,
- grpc_error *error);
-static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
- size_t cq_idx, requested_call *rc, grpc_error *error);
+static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* calld,
+ grpc_error* error);
+static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
+ size_t cq_idx, requested_call* rc, grpc_error* error);
/* Before calling maybe_finish_shutdown, we must hold mu_global and not
hold mu_call */
-static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server);
+static void maybe_finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_server* server);
/*
* channel broadcaster
*/
/* assumes server locked */
-static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
- channel_data *c;
+static void channel_broadcaster_init(grpc_server* s, channel_broadcaster* cb) {
+ channel_data* c;
size_t count = 0;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
count++;
}
cb->num_channels = count;
cb->channels =
- (grpc_channel **)gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
+ (grpc_channel**)gpr_malloc(sizeof(*cb->channels) * cb->num_channels);
count = 0;
for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
cb->channels[count++] = c->channel;
@@ -265,21 +265,21 @@ struct shutdown_cleanup_args {
grpc_slice slice;
};
-static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg;
+static void shutdown_cleanup(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ struct shutdown_cleanup_args* a = (struct shutdown_cleanup_args*)arg;
grpc_slice_unref_internal(exec_ctx, a->slice);
gpr_free(a);
}
-static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
- bool send_goaway, grpc_error *send_disconnect) {
- struct shutdown_cleanup_args *sc =
- (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc));
+static void send_shutdown(grpc_exec_ctx* exec_ctx, grpc_channel* channel,
+ bool send_goaway, grpc_error* send_disconnect) {
+ struct shutdown_cleanup_args* sc =
+ (struct shutdown_cleanup_args*)gpr_malloc(sizeof(*sc));
GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
grpc_schedule_on_exec_ctx);
- grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
- grpc_channel_element *elem;
+ grpc_transport_op* op = grpc_make_transport_op(&sc->closure);
+ grpc_channel_element* elem;
op->goaway_error =
send_goaway ? grpc_error_set_int(
@@ -294,10 +294,10 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
elem->filter->start_transport_op(exec_ctx, elem, op);
}
-static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx,
- channel_broadcaster *cb,
+static void channel_broadcaster_shutdown(grpc_exec_ctx* exec_ctx,
+ channel_broadcaster* cb,
bool send_goaway,
- grpc_error *force_disconnect) {
+ grpc_error* force_disconnect) {
size_t i;
for (i = 0; i < cb->num_channels; i++) {
@@ -313,18 +313,18 @@ static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx,
* request_matcher
*/
-static void request_matcher_init(request_matcher *rm, size_t entries,
- grpc_server *server) {
+static void request_matcher_init(request_matcher* rm, size_t entries,
+ grpc_server* server) {
memset(rm, 0, sizeof(*rm));
rm->server = server;
- rm->requests_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ rm->requests_per_cq = (gpr_stack_lockfree**)gpr_malloc(
sizeof(*rm->requests_per_cq) * server->cq_count);
for (size_t i = 0; i < server->cq_count; i++) {
rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries);
}
}
-static void request_matcher_destroy(request_matcher *rm) {
+static void request_matcher_destroy(request_matcher* rm) {
for (size_t i = 0; i < rm->server->cq_count; i++) {
GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests_per_cq[i]) == -1);
gpr_stack_lockfree_destroy(rm->requests_per_cq[i]);
@@ -332,15 +332,15 @@ static void request_matcher_destroy(request_matcher *rm) {
gpr_free(rm->requests_per_cq);
}
-static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem,
- grpc_error *error) {
- grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem));
+static void kill_zombie(grpc_exec_ctx* exec_ctx, void* elem,
+ grpc_error* error) {
+ grpc_call_unref(grpc_call_from_top_element((grpc_call_element*)elem));
}
-static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
- request_matcher *rm) {
+static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx* exec_ctx,
+ request_matcher* rm) {
while (rm->pending_head) {
- call_data *calld = rm->pending_head;
+ call_data* calld = rm->pending_head;
rm->pending_head = calld->pending_next;
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
@@ -353,10 +353,10 @@ static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx,
}
}
-static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
- grpc_server *server,
- request_matcher *rm,
- grpc_error *error) {
+static void request_matcher_kill_requests(grpc_exec_ctx* exec_ctx,
+ grpc_server* server,
+ request_matcher* rm,
+ grpc_error* error) {
int request_id;
for (size_t i = 0; i < server->cq_count; i++) {
while ((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) !=
@@ -373,12 +373,12 @@ static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx,
* server proper
*/
-static void server_ref(grpc_server *server) {
+static void server_ref(grpc_server* server) {
gpr_ref(&server->internal_refcount);
}
-static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
- registered_method *rm;
+static void server_delete(grpc_exec_ctx* exec_ctx, grpc_server* server) {
+ registered_method* rm;
size_t i;
grpc_channel_args_destroy(exec_ctx, server->channel_args);
gpr_mu_destroy(&server->mu_global);
@@ -411,32 +411,32 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) {
gpr_free(server);
}
-static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) {
+static void server_unref(grpc_exec_ctx* exec_ctx, grpc_server* server) {
if (gpr_unref(&server->internal_refcount)) {
server_delete(exec_ctx, server);
}
}
-static int is_channel_orphaned(channel_data *chand) {
+static int is_channel_orphaned(channel_data* chand) {
return chand->next == chand;
}
-static void orphan_channel(channel_data *chand) {
+static void orphan_channel(channel_data* chand) {
chand->next->prev = chand->prev;
chand->prev->next = chand->next;
chand->next = chand->prev = chand;
}
-static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd,
- grpc_error *error) {
- channel_data *chand = (channel_data *)cd;
- grpc_server *server = chand->server;
+static void finish_destroy_channel(grpc_exec_ctx* exec_ctx, void* cd,
+ grpc_error* error) {
+ channel_data* chand = (channel_data*)cd;
+ grpc_server* server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
server_unref(exec_ctx, server);
}
-static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
- grpc_error *error) {
+static void destroy_channel(grpc_exec_ctx* exec_ctx, channel_data* chand,
+ grpc_error* error) {
if (is_channel_orphaned(chand)) return;
GPR_ASSERT(chand->server != NULL);
orphan_channel(chand);
@@ -446,12 +446,12 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
if (GRPC_TRACER_ON(grpc_server_channel_trace) && error != GRPC_ERROR_NONE) {
- const char *msg = grpc_error_string(error);
+ const char* msg = grpc_error_string(error);
gpr_log(GPR_INFO, "Disconnected client: %s", msg);
}
GRPC_ERROR_UNREF(error);
- grpc_transport_op *op =
+ grpc_transport_op* op =
grpc_make_transport_op(&chand->finish_destroy_channel_closure);
op->set_accept_stream = true;
grpc_channel_next_op(exec_ctx,
@@ -460,10 +460,10 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand,
op);
}
-static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
- grpc_cq_completion *c) {
- requested_call *rc = (requested_call *)req;
- grpc_server *server = rc->server;
+static void done_request_event(grpc_exec_ctx* exec_ctx, void* req,
+ grpc_cq_completion* c) {
+ requested_call* rc = (requested_call*)req;
+ grpc_server* server = rc->server;
if (rc >= server->requested_calls_per_cq[rc->cq_idx] &&
rc < server->requested_calls_per_cq[rc->cq_idx] +
@@ -479,10 +479,10 @@ static void done_request_event(grpc_exec_ctx *exec_ctx, void *req,
server_unref(exec_ctx, server);
}
-static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
- call_data *calld, size_t cq_idx, requested_call *rc) {
+static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
+ call_data* calld, size_t cq_idx, requested_call* rc) {
grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
- grpc_call *call = calld->call;
+ grpc_call* call = calld->call;
*rc->call = call;
calld->cq_new = server->cqs[cq_idx];
GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
@@ -508,21 +508,21 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
GPR_UNREACHABLE_CODE(return );
}
- grpc_call_element *elem =
+ grpc_call_element* elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
- channel_data *chand = (channel_data *)elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
server_ref(chand->server);
grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
done_request_event, rc, &rc->completion);
}
-static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- grpc_call_element *call_elem = (grpc_call_element *)arg;
- call_data *calld = (call_data *)call_elem->call_data;
- channel_data *chand = (channel_data *)call_elem->channel_data;
- request_matcher *rm = calld->matcher;
- grpc_server *server = rm->server;
+static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ grpc_call_element* call_elem = (grpc_call_element*)arg;
+ call_data* calld = (call_data*)call_elem->call_data;
+ channel_data* chand = (channel_data*)call_elem->channel_data;
+ request_matcher* rm = calld->matcher;
+ grpc_server* server = rm->server;
if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_lock(&calld->mu_state);
@@ -570,10 +570,10 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
}
static void finish_start_new_rpc(
- grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem,
- request_matcher *rm,
+ grpc_exec_ctx* exec_ctx, grpc_server* server, grpc_call_element* elem,
+ request_matcher* rm,
grpc_server_register_method_payload_handling payload_handling) {
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_lock(&calld->mu_state);
@@ -605,13 +605,13 @@ static void finish_start_new_rpc(
}
}
-static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
- grpc_server *server = chand->server;
+static void start_new_rpc(grpc_exec_ctx* exec_ctx, grpc_call_element* elem) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
+ grpc_server* server = chand->server;
uint32_t i;
uint32_t hash;
- channel_registered_method *rm;
+ channel_registered_method* rm;
if (chand->registered_methods && calld->path_set && calld->host_set) {
/* TODO(ctiller): unify these two searches */
@@ -659,8 +659,8 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
GRPC_SRM_PAYLOAD_NONE);
}
-static int num_listeners(grpc_server *server) {
- listener *l;
+static int num_listeners(grpc_server* server) {
+ listener* l;
int n = 0;
for (l = server->listeners; l; l = l->next) {
n++;
@@ -668,13 +668,13 @@ static int num_listeners(grpc_server *server) {
return n;
}
-static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server,
- grpc_cq_completion *completion) {
- server_unref(exec_ctx, (grpc_server *)server);
+static void done_shutdown_event(grpc_exec_ctx* exec_ctx, void* server,
+ grpc_cq_completion* completion) {
+ server_unref(exec_ctx, (grpc_server*)server);
}
-static int num_channels(grpc_server *server) {
- channel_data *chand;
+static int num_channels(grpc_server* server) {
+ channel_data* chand;
int n = 0;
for (chand = server->root_channel_data.next;
chand != &server->root_channel_data; chand = chand->next) {
@@ -683,15 +683,15 @@ static int num_channels(grpc_server *server) {
return n;
}
-static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
- grpc_server *server, grpc_error *error) {
+static void kill_pending_work_locked(grpc_exec_ctx* exec_ctx,
+ grpc_server* server, grpc_error* error) {
if (server->started) {
request_matcher_kill_requests(exec_ctx, server,
&server->unregistered_request_matcher,
GRPC_ERROR_REF(error));
request_matcher_zombify_all_pending_calls(
exec_ctx, &server->unregistered_request_matcher);
- for (registered_method *rm = server->registered_methods; rm;
+ for (registered_method* rm = server->registered_methods; rm;
rm = rm->next) {
request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
GRPC_ERROR_REF(error));
@@ -701,8 +701,8 @@ static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(error);
}
-static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_server *server) {
+static void maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_server* server) {
size_t i;
if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
return;
@@ -737,10 +737,10 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
}
}
-static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)ptr;
- call_data *calld = (call_data *)elem->call_data;
+static void server_on_recv_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)ptr;
+ call_data* calld = (call_data*)elem->call_data;
grpc_millis op_deadline;
if (error == GRPC_ERROR_NONE) {
@@ -767,7 +767,7 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
if (calld->host_set && calld->path_set) {
/* do nothing */
} else {
- grpc_error *src_error = error;
+ grpc_error* src_error = error;
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Missing :authority or :path", &error, 1);
GRPC_ERROR_UNREF(src_error);
@@ -776,9 +776,9 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
}
-static void server_mutate_op(grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- call_data *calld = (call_data *)elem->call_data;
+static void server_mutate_op(grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ call_data* calld = (call_data*)elem->call_data;
if (op->recv_initial_metadata) {
GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL);
@@ -794,16 +794,16 @@ static void server_mutate_op(grpc_call_element *elem,
}
static void server_start_transport_stream_op_batch(
- grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
+ grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
-static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
- grpc_error *error) {
- grpc_call_element *elem = (grpc_call_element *)ptr;
- call_data *calld = (call_data *)elem->call_data;
+static void got_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
+ grpc_error* error) {
+ grpc_call_element* elem = (grpc_call_element*)ptr;
+ call_data* calld = (call_data*)elem->call_data;
if (error == GRPC_ERROR_NONE) {
start_new_rpc(exec_ctx, elem);
} else {
@@ -826,26 +826,26 @@ static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
}
}
-static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
- grpc_transport *transport,
- const void *transport_server_data) {
- channel_data *chand = (channel_data *)cd;
+static void accept_stream(grpc_exec_ctx* exec_ctx, void* cd,
+ grpc_transport* transport,
+ const void* transport_server_data) {
+ channel_data* chand = (channel_data*)cd;
/* create a call */
grpc_call_create_args args;
memset(&args, 0, sizeof(args));
args.channel = chand->channel;
args.server_transport_data = transport_server_data;
args.send_deadline = GRPC_MILLIS_INF_FUTURE;
- grpc_call *call;
- grpc_error *error = grpc_call_create(exec_ctx, &args, &call);
- grpc_call_element *elem =
+ grpc_call* call;
+ grpc_error* error = grpc_call_create(exec_ctx, &args, &call);
+ grpc_call_element* elem =
grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
if (error != GRPC_ERROR_NONE) {
got_initial_metadata(exec_ctx, elem, error);
GRPC_ERROR_UNREF(error);
return;
}
- call_data *calld = (call_data *)elem->call_data;
+ call_data* calld = (call_data*)elem->call_data;
grpc_op op;
memset(&op, 0, sizeof(op));
op.op = GRPC_OP_RECV_INITIAL_METADATA;
@@ -857,12 +857,12 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd,
&calld->got_initial_metadata);
}
-static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
- grpc_error *error) {
- channel_data *chand = (channel_data *)cd;
- grpc_server *server = chand->server;
+static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* cd,
+ grpc_error* error) {
+ channel_data* chand = (channel_data*)cd;
+ grpc_server* server = chand->server;
if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
- grpc_transport_op *op = grpc_make_transport_op(NULL);
+ grpc_transport_op* op = grpc_make_transport_op(NULL);
op->on_connectivity_state_change = &chand->channel_connectivity_changed,
op->connectivity_state = &chand->connectivity_state;
grpc_channel_next_op(exec_ctx,
@@ -877,11 +877,11 @@ static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd,
}
}
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
- grpc_call_element *elem,
- const grpc_call_element_args *args) {
- call_data *calld = (call_data *)elem->call_data;
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+ grpc_call_element* elem,
+ const grpc_call_element_args* args) {
+ call_data* calld = (call_data*)elem->call_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
memset(calld, 0, sizeof(call_data));
calld->deadline = GRPC_MILLIS_INF_FUTURE;
calld->call = grpc_call_from_top_element(elem);
@@ -895,11 +895,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
- const grpc_call_final_info *final_info,
- grpc_closure *ignored) {
- channel_data *chand = (channel_data *)elem->channel_data;
- call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ const grpc_call_final_info* final_info,
+ grpc_closure* ignored) {
+ channel_data* chand = (channel_data*)elem->channel_data;
+ call_data* calld = (call_data*)elem->call_data;
GPR_ASSERT(calld->state != PENDING);
@@ -917,10 +917,10 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
server_unref(exec_ctx, chand->server);
}
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem,
- grpc_channel_element_args *args) {
- channel_data *chand = (channel_data *)elem->channel_data;
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem,
+ grpc_channel_element_args* args) {
+ channel_data* chand = (channel_data*)elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(!args->is_last);
chand->server = NULL;
@@ -934,10 +934,10 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
- grpc_channel_element *elem) {
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+ grpc_channel_element* elem) {
size_t i;
- channel_data *chand = (channel_data *)elem->channel_data;
+ channel_data* chand = (channel_data*)elem->channel_data;
if (chand->registered_methods) {
for (i = 0; i < chand->registered_method_slots; i++) {
grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
@@ -972,9 +972,9 @@ const grpc_channel_filter grpc_server_top_filter = {
"server",
};
-static void register_completion_queue(grpc_server *server,
- grpc_completion_queue *cq,
- void *reserved) {
+static void register_completion_queue(grpc_server* server,
+ grpc_completion_queue* cq,
+ void* reserved) {
size_t i, n;
GPR_ASSERT(!reserved);
for (i = 0; i < server->cq_count; i++) {
@@ -983,14 +983,14 @@ static void register_completion_queue(grpc_server *server,
GRPC_CQ_INTERNAL_REF(cq, "server");
n = server->cq_count++;
- server->cqs = (grpc_completion_queue **)gpr_realloc(
- server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
+ server->cqs = (grpc_completion_queue**)gpr_realloc(
+ server->cqs, server->cq_count * sizeof(grpc_completion_queue*));
server->cqs[n] = cq;
}
-void grpc_server_register_completion_queue(grpc_server *server,
- grpc_completion_queue *cq,
- void *reserved) {
+void grpc_server_register_completion_queue(grpc_server* server,
+ grpc_completion_queue* cq,
+ void* reserved) {
GRPC_API_TRACE(
"grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
(server, cq, reserved));
@@ -1006,10 +1006,10 @@ void grpc_server_register_completion_queue(grpc_server *server,
register_completion_queue(server, cq, reserved);
}
-grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
+grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
- grpc_server *server = (grpc_server *)gpr_zalloc(sizeof(grpc_server));
+ grpc_server* server = (grpc_server*)gpr_zalloc(sizeof(grpc_server));
gpr_mu_init(&server->mu_global);
gpr_mu_init(&server->mu_call);
@@ -1027,18 +1027,18 @@ grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
return server;
}
-static int streq(const char *a, const char *b) {
+static int streq(const char* a, const char* b) {
if (a == NULL && b == NULL) return 1;
if (a == NULL) return 0;
if (b == NULL) return 0;
return 0 == strcmp(a, b);
}
-void *grpc_server_register_method(
- grpc_server *server, const char *method, const char *host,
+void* grpc_server_register_method(
+ grpc_server* server, const char* method, const char* host,
grpc_server_register_method_payload_handling payload_handling,
uint32_t flags) {
- registered_method *m;
+ registered_method* m;
GRPC_API_TRACE(
"grpc_server_register_method(server=%p, method=%s, host=%s, "
"flags=0x%08x)",
@@ -1060,7 +1060,7 @@ void *grpc_server_register_method(
flags);
return NULL;
}
- m = (registered_method *)gpr_zalloc(sizeof(registered_method));
+ m = (registered_method*)gpr_zalloc(sizeof(registered_method));
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@@ -1070,10 +1070,10 @@ void *grpc_server_register_method(
return m;
}
-static void start_listeners(grpc_exec_ctx *exec_ctx, void *s,
- grpc_error *error) {
- grpc_server *server = (grpc_server *)s;
- for (listener *l = server->listeners; l; l = l->next) {
+static void start_listeners(grpc_exec_ctx* exec_ctx, void* s,
+ grpc_error* error) {
+ grpc_server* server = (grpc_server*)s;
+ for (listener* l = server->listeners; l; l = l->next) {
l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count);
}
@@ -1085,7 +1085,7 @@ static void start_listeners(grpc_exec_ctx *exec_ctx, void *s,
server_unref(exec_ctx, server);
}
-void grpc_server_start(grpc_server *server) {
+void grpc_server_start(grpc_server* server) {
size_t i;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -1094,10 +1094,10 @@ void grpc_server_start(grpc_server *server) {
server->started = true;
server->pollset_count = 0;
server->pollsets =
- (grpc_pollset **)gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
- server->request_freelist_per_cq = (gpr_stack_lockfree **)gpr_malloc(
+ (grpc_pollset**)gpr_malloc(sizeof(grpc_pollset*) * server->cq_count);
+ server->request_freelist_per_cq = (gpr_stack_lockfree**)gpr_malloc(
sizeof(*server->request_freelist_per_cq) * server->cq_count);
- server->requested_calls_per_cq = (requested_call **)gpr_malloc(
+ server->requested_calls_per_cq = (requested_call**)gpr_malloc(
sizeof(*server->requested_calls_per_cq) * server->cq_count);
for (i = 0; i < server->cq_count; i++) {
if (grpc_cq_can_listen(server->cqs[i])) {
@@ -1109,13 +1109,13 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
- server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
- (size_t)server->max_requested_calls_per_cq *
- sizeof(*server->requested_calls_per_cq[i]));
+ server->requested_calls_per_cq[i] =
+ (requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
+ sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
(size_t)server->max_requested_calls_per_cq, server);
- for (registered_method *rm = server->registered_methods; rm; rm = rm->next) {
+ for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
request_matcher_init(&rm->matcher,
(size_t)server->max_requested_calls_per_cq, server);
}
@@ -1131,31 +1131,31 @@ void grpc_server_start(grpc_server *server) {
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets,
- size_t *pollset_count) {
+void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
+ size_t* pollset_count) {
*pollset_count = server->pollset_count;
*pollsets = server->pollsets;
}
-void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
- grpc_transport *transport,
- grpc_pollset *accepting_pollset,
- const grpc_channel_args *args) {
+void grpc_server_setup_transport(grpc_exec_ctx* exec_ctx, grpc_server* s,
+ grpc_transport* transport,
+ grpc_pollset* accepting_pollset,
+ const grpc_channel_args* args) {
size_t num_registered_methods;
size_t alloc;
- registered_method *rm;
- channel_registered_method *crm;
- grpc_channel *channel;
- channel_data *chand;
+ registered_method* rm;
+ channel_registered_method* crm;
+ grpc_channel* channel;
+ channel_data* chand;
uint32_t hash;
size_t slots;
uint32_t probes;
uint32_t max_probes = 0;
- grpc_transport_op *op = NULL;
+ grpc_transport_op* op = NULL;
channel =
grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport);
- chand = (channel_data *)grpc_channel_stack_element(
+ chand = (channel_data*)grpc_channel_stack_element(
grpc_channel_get_channel_stack(channel), 0)
->channel_data;
chand->server = s;
@@ -1181,7 +1181,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
if (num_registered_methods > 0) {
slots = 2 * num_registered_methods;
alloc = sizeof(channel_registered_method) * slots;
- chand->registered_methods = (channel_registered_method *)gpr_zalloc(alloc);
+ chand->registered_methods = (channel_registered_method*)gpr_zalloc(alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
grpc_slice host;
bool has_host;
@@ -1234,25 +1234,25 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
grpc_transport_perform_op(exec_ctx, transport, op);
}
-void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg,
- grpc_cq_completion *storage) {
+void done_published_shutdown(grpc_exec_ctx* exec_ctx, void* done_arg,
+ grpc_cq_completion* storage) {
(void)done_arg;
gpr_free(storage);
}
-static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s,
- grpc_error *error) {
- grpc_server *server = (grpc_server *)s;
+static void listener_destroy_done(grpc_exec_ctx* exec_ctx, void* s,
+ grpc_error* error) {
+ grpc_server* server = (grpc_server*)s;
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;
maybe_finish_shutdown(exec_ctx, server);
gpr_mu_unlock(&server->mu_global);
}
-void grpc_server_shutdown_and_notify(grpc_server *server,
- grpc_completion_queue *cq, void *tag) {
- listener *l;
- shutdown_tag *sdt;
+void grpc_server_shutdown_and_notify(grpc_server* server,
+ grpc_completion_queue* cq, void* tag) {
+ listener* l;
+ shutdown_tag* sdt;
channel_broadcaster broadcaster;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -1269,13 +1269,13 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
- grpc_cq_end_op(
- &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
- (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion)));
+ grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
+ NULL,
+ (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
}
- server->shutdown_tags = (shutdown_tag *)gpr_realloc(
+ server->shutdown_tags = (shutdown_tag*)gpr_realloc(
server->shutdown_tags,
sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
@@ -1316,7 +1316,7 @@ done:
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_cancel_all_calls(grpc_server *server) {
+void grpc_server_cancel_all_calls(grpc_server* server) {
channel_broadcaster broadcaster;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -1332,8 +1332,8 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
grpc_exec_ctx_finish(&exec_ctx);
}
-void grpc_server_destroy(grpc_server *server) {
- listener *l;
+void grpc_server_destroy(grpc_server* server) {
+ listener* l;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
@@ -1355,12 +1355,12 @@ void grpc_server_destroy(grpc_server *server) {
}
void grpc_server_add_listener(
- grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_pollset **pollsets, size_t pollset_count),
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_closure *on_done)) {
- listener *l = (listener *)gpr_malloc(sizeof(listener));
+ grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_pollset** pollsets, size_t pollset_count),
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_closure* on_done)) {
+ listener* l = (listener*)gpr_malloc(sizeof(listener));
l->arg = arg;
l->start = start;
l->destroy = destroy;
@@ -1368,11 +1368,11 @@ void grpc_server_add_listener(
server->listeners = l;
}
-static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
- grpc_server *server, size_t cq_idx,
- requested_call *rc) {
- call_data *calld = NULL;
- request_matcher *rm = NULL;
+static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
+ grpc_server* server, size_t cq_idx,
+ requested_call* rc) {
+ call_data* calld = NULL;
+ request_matcher* rm = NULL;
int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(exec_ctx, server, cq_idx, rc,
@@ -1431,20 +1431,21 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
}
grpc_call_error grpc_server_request_call(
- grpc_server *server, grpc_call **call, grpc_call_details *details,
- grpc_metadata_array *initial_metadata,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_server* server, grpc_call** call, grpc_call_details* details,
+ grpc_metadata_array* initial_metadata,
+ grpc_completion_queue* cq_bound_to_call,
+ grpc_completion_queue* cq_for_notification, void* tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+ requested_call* rc = (requested_call*)gpr_malloc(sizeof(*rc));
GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
"cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
- 7, (server, call, details, initial_metadata, cq_bound_to_call,
- cq_for_notification, tag));
+ 7,
+ (server, call, details, initial_metadata, cq_bound_to_call,
+ cq_for_notification, tag));
size_t cq_idx;
for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
if (server->cqs[cq_idx] == cq_for_notification) {
@@ -1477,22 +1478,23 @@ done:
}
grpc_call_error grpc_server_request_registered_call(
- grpc_server *server, void *rmp, grpc_call **call, gpr_timespec *deadline,
- grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload,
- grpc_completion_queue *cq_bound_to_call,
- grpc_completion_queue *cq_for_notification, void *tag) {
+ grpc_server* server, void* rmp, grpc_call** call, gpr_timespec* deadline,
+ grpc_metadata_array* initial_metadata, grpc_byte_buffer** optional_payload,
+ grpc_completion_queue* cq_bound_to_call,
+ grpc_completion_queue* cq_for_notification, void* tag) {
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
- registered_method *rm = (registered_method *)rmp;
+ requested_call* rc = (requested_call*)gpr_malloc(sizeof(*rc));
+ registered_method* rm = (registered_method*)rmp;
GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
GRPC_API_TRACE(
"grpc_server_request_registered_call("
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
"optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)",
- 9, (server, rmp, call, deadline, initial_metadata, optional_payload,
- cq_bound_to_call, cq_for_notification, tag));
+ 9,
+ (server, rmp, call, deadline, initial_metadata, optional_payload,
+ cq_bound_to_call, cq_for_notification, tag));
size_t cq_idx;
for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
@@ -1532,8 +1534,8 @@ done:
return error;
}
-static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
- size_t cq_idx, requested_call *rc, grpc_error *error) {
+static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
+ size_t cq_idx, requested_call* rc, grpc_error* error) {
*rc->call = NULL;
rc->initial_metadata->count = 0;
GPR_ASSERT(error != GRPC_ERROR_NONE);
@@ -1543,11 +1545,11 @@ static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server,
done_request_event, rc, &rc->completion);
}
-const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
+const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
return server->channel_args;
}
-int grpc_server_has_open_connections(grpc_server *server) {
+int grpc_server_has_open_connections(grpc_server* server) {
int r;
gpr_mu_lock(&server->mu_global);
r = server->root_channel_data.next != &server->root_channel_data;
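
A minimal usage sketch of grpc_server_request_call as reformatted above (illustration only, not part of this change; the server, completion queue, and tag are assumed to exist elsewhere, with the queue already registered on the server via grpc_server_register_completion_queue before grpc_server_start):

#include <grpc/grpc.h>
#include <grpc/support/log.h>

// Illustration only: ask the server for the next incoming call. `tag`
// completes on `cq` once a call arrives, at which point *call, *details and
// *request_metadata have been filled in.
static void request_one_call(grpc_server* server, grpc_completion_queue* cq,
                             void* tag, grpc_call** call,
                             grpc_call_details* details,
                             grpc_metadata_array* request_metadata) {
  grpc_call_details_init(details);
  grpc_metadata_array_init(request_metadata);
  GPR_ASSERT(grpc_server_request_call(server, call, details, request_metadata,
                                      cq, cq, tag) == GRPC_CALL_OK);
}
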
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index 375eab4a04..e3c43f957d 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -36,27 +36,27 @@ extern grpc_tracer_flag grpc_server_channel_trace;
/* Add a listener to the server: when the server starts, it will call start,
and when it shuts down, it will call destroy */
void grpc_server_add_listener(
- grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener,
- void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_pollset **pollsets, size_t npollsets),
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg,
- grpc_closure *on_done));
+ grpc_exec_ctx* exec_ctx, grpc_server* server, void* listener,
+ void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_pollset** pollsets, size_t npollsets),
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
+ grpc_closure* on_done));
/* Setup a transport - creates a channel stack, binds the transport to the
server */
-void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server,
- grpc_transport *transport,
- grpc_pollset *accepting_pollset,
- const grpc_channel_args *args);
+void grpc_server_setup_transport(grpc_exec_ctx* exec_ctx, grpc_server* server,
+ grpc_transport* transport,
+ grpc_pollset* accepting_pollset,
+ const grpc_channel_args* args);
-const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server);
+const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server);
-int grpc_server_has_open_connections(grpc_server *server);
+int grpc_server_has_open_connections(grpc_server* server);
/* Do not call this before grpc_server_start. Returns the pollsets and the
* number of pollsets via 'pollsets' and 'pollset_count'. */
-void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets,
- size_t *pollset_count);
+void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
+ size_t* pollset_count);
#ifdef __cplusplus
}
diff --git a/src/core/lib/surface/validate_metadata.cc b/src/core/lib/surface/validate_metadata.cc
index 81d07fae44..fc94ea7dbe 100644
--- a/src/core/lib/surface/validate_metadata.cc
+++ b/src/core/lib/surface/validate_metadata.cc
@@ -28,17 +28,17 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/validate_metadata.h"
-static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
- const char *err_desc) {
- const uint8_t *p = GRPC_SLICE_START_PTR(slice);
- const uint8_t *e = GRPC_SLICE_END_PTR(slice);
+static grpc_error* conforms_to(grpc_slice slice, const uint8_t* legal_bits,
+ const char* err_desc) {
+ const uint8_t* p = GRPC_SLICE_START_PTR(slice);
+ const uint8_t* e = GRPC_SLICE_END_PTR(slice);
for (; p != e; p++) {
int idx = *p;
int byte = idx / 8;
int bit = idx % 8;
if ((legal_bits[byte] & (1 << bit)) == 0) {
- char *dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- grpc_error *error = grpc_error_set_str(
+ char* dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
+ grpc_error* error = grpc_error_set_str(
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_desc),
GRPC_ERROR_INT_OFFSET,
p - GRPC_SLICE_START_PTR(slice)),
@@ -50,13 +50,13 @@ static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
return GRPC_ERROR_NONE;
}
-static int error2int(grpc_error *error) {
+static int error2int(grpc_error* error) {
int r = (error == GRPC_ERROR_NONE);
GRPC_ERROR_UNREF(error);
return r;
}
-grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice) {
+grpc_error* grpc_validate_header_key_is_legal(grpc_slice slice) {
static const uint8_t legal_header_bits[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00,
0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -76,7 +76,7 @@ int grpc_header_key_is_legal(grpc_slice slice) {
return error2int(grpc_validate_header_key_is_legal(slice));
}
-grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice) {
+grpc_error* grpc_validate_header_nonbin_value_is_legal(grpc_slice slice) {
static const uint8_t legal_header_bits[256 / 8] = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/src/core/lib/surface/validate_metadata.h b/src/core/lib/surface/validate_metadata.h
index afc8be6dfd..9ca20692b5 100644
--- a/src/core/lib/surface/validate_metadata.h
+++ b/src/core/lib/surface/validate_metadata.h
@@ -26,8 +26,8 @@
extern "C" {
#endif
-grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice);
-grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice);
+grpc_error* grpc_validate_header_key_is_legal(grpc_slice slice);
+grpc_error* grpc_validate_header_nonbin_value_is_legal(grpc_slice slice);
#ifdef __cplusplus
}
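
A small sketch of calling the validator declared above (illustration only; the bool wrapper and the logging are assumptions, not part of this change):

#include <grpc/slice.h>
#include <grpc/support/log.h>

#include "src/core/lib/surface/validate_metadata.h"

// Illustration only: reject an illegal metadata key before it reaches the
// wire. On failure the returned error carries GRPC_ERROR_INT_OFFSET with the
// position of the offending byte.
static bool key_is_ok(grpc_slice key) {
  grpc_error* err = grpc_validate_header_key_is_legal(key);
  if (err == GRPC_ERROR_NONE) return true;
  gpr_log(GPR_ERROR, "bad metadata key: %s", grpc_error_string(err));
  GRPC_ERROR_UNREF(err);
  return false;
}
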
diff --git a/src/core/lib/surface/version.cc b/src/core/lib/surface/version.cc
index 6cb8e7e1a0..f4feadc640 100644
--- a/src/core/lib/surface/version.cc
+++ b/src/core/lib/surface/version.cc
@@ -21,6 +21,6 @@
#include <grpc/grpc.h>
-const char *grpc_version_string(void) { return "5.0.0-dev"; }
+const char* grpc_version_string(void) { return "5.0.0-dev"; }
-const char *grpc_g_stands_for(void) { return "generous"; }
+const char* grpc_g_stands_for(void) { return "generous"; }
diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc
index f1597014b1..e09ae8e6a6 100644
--- a/src/core/lib/transport/bdp_estimator.cc
+++ b/src/core/lib/transport/bdp_estimator.cc
@@ -28,7 +28,7 @@ grpc_tracer_flag grpc_bdp_estimator_trace =
namespace grpc_core {
-BdpEstimator::BdpEstimator(const char *name)
+BdpEstimator::BdpEstimator(const char* name)
: ping_state_(PingState::UNSCHEDULED),
accumulator_(0),
estimate_(65536),
@@ -38,15 +38,16 @@ BdpEstimator::BdpEstimator(const char *name)
bw_est_(0),
name_(name) {}
-grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) {
+grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx* exec_ctx) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_);
double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
double bw = dt > 0 ? ((double)accumulator_ / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
- gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
- " dt=%lf bw=%lfMbs bw_est=%lfMbs",
+ gpr_log(GPR_DEBUG,
+ "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
+ " dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
bw_est_ / 125000.0);
}
diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h
index 750da39599..f7b94a81d3 100644
--- a/src/core/lib/transport/bdp_estimator.h
+++ b/src/core/lib/transport/bdp_estimator.h
@@ -37,7 +37,7 @@ namespace grpc_core {
class BdpEstimator {
public:
- explicit BdpEstimator(const char *name);
+ explicit BdpEstimator(const char* name);
~BdpEstimator() {}
int64_t EstimateBdp() const { return estimate_; }
@@ -73,7 +73,7 @@ class BdpEstimator {
}
// Completes a previously started ping, returns when to schedule the next one
- grpc_millis CompletePing(grpc_exec_ctx *exec_ctx);
+ grpc_millis CompletePing(grpc_exec_ctx* exec_ctx);
private:
enum class PingState { UNSCHEDULED, SCHEDULED, STARTED };
@@ -86,7 +86,7 @@ class BdpEstimator {
int inter_ping_delay_;
int stable_estimate_count_;
double bw_est_;
- const char *name_;
+ const char* name_;
};
} // namespace grpc_core
diff --git a/src/core/lib/transport/byte_stream.cc b/src/core/lib/transport/byte_stream.cc
index 08f61629a9..b8720250e7 100644
--- a/src/core/lib/transport/byte_stream.cc
+++ b/src/core/lib/transport/byte_stream.cc
@@ -25,45 +25,45 @@
#include "src/core/lib/slice/slice_internal.h"
-bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream, size_t max_size_hint,
- grpc_closure *on_complete) {
+bool grpc_byte_stream_next(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream, size_t max_size_hint,
+ grpc_closure* on_complete) {
return byte_stream->vtable->next(exec_ctx, byte_stream, max_size_hint,
on_complete);
}
-grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_slice *slice) {
+grpc_error* grpc_byte_stream_pull(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_slice* slice) {
return byte_stream->vtable->pull(exec_ctx, byte_stream, slice);
}
-void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_error *error) {
+void grpc_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_error* error) {
byte_stream->vtable->shutdown(exec_ctx, byte_stream, error);
}
-void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream) {
+void grpc_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream) {
byte_stream->vtable->destroy(exec_ctx, byte_stream);
}
// grpc_slice_buffer_stream
-static bool slice_buffer_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
+static bool slice_buffer_stream_next(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
size_t max_size_hint,
- grpc_closure *on_complete) {
- grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+ grpc_closure* on_complete) {
+ grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
GPR_ASSERT(stream->cursor < stream->backing_buffer->count);
return true;
}
-static grpc_error *slice_buffer_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_slice *slice) {
- grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+static grpc_error* slice_buffer_stream_pull(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_slice* slice) {
+ grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
if (stream->shutdown_error != GRPC_ERROR_NONE) {
return GRPC_ERROR_REF(stream->shutdown_error);
}
@@ -74,17 +74,17 @@ static grpc_error *slice_buffer_stream_pull(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void slice_buffer_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_error *error) {
- grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+static void slice_buffer_stream_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_error* error) {
+ grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
GRPC_ERROR_UNREF(stream->shutdown_error);
stream->shutdown_error = error;
}
-static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream) {
- grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream;
+static void slice_buffer_stream_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream) {
+ grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer);
GRPC_ERROR_UNREF(stream->shutdown_error);
}
@@ -93,8 +93,8 @@ static const grpc_byte_stream_vtable slice_buffer_stream_vtable = {
slice_buffer_stream_next, slice_buffer_stream_pull,
slice_buffer_stream_shutdown, slice_buffer_stream_destroy};
-void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
- grpc_slice_buffer *slice_buffer,
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream* stream,
+ grpc_slice_buffer* slice_buffer,
uint32_t flags) {
GPR_ASSERT(slice_buffer->length <= UINT32_MAX);
stream->base.length = (uint32_t)slice_buffer->length;
@@ -107,33 +107,33 @@ void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
// grpc_caching_byte_stream
-void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache,
- grpc_byte_stream *underlying_stream) {
+void grpc_byte_stream_cache_init(grpc_byte_stream_cache* cache,
+ grpc_byte_stream* underlying_stream) {
cache->underlying_stream = underlying_stream;
grpc_slice_buffer_init(&cache->cache_buffer);
}
-void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream_cache *cache) {
+void grpc_byte_stream_cache_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream_cache* cache) {
grpc_byte_stream_destroy(exec_ctx, cache->underlying_stream);
grpc_slice_buffer_destroy_internal(exec_ctx, &cache->cache_buffer);
}
-static bool caching_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
+static bool caching_byte_stream_next(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
size_t max_size_hint,
- grpc_closure *on_complete) {
- grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream;
+ grpc_closure* on_complete) {
+ grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
if (stream->shutdown_error != GRPC_ERROR_NONE) return true;
if (stream->cursor < stream->cache->cache_buffer.count) return true;
return grpc_byte_stream_next(exec_ctx, stream->cache->underlying_stream,
max_size_hint, on_complete);
}
-static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_slice *slice) {
- grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream;
+static grpc_error* caching_byte_stream_pull(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_slice* slice) {
+ grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
if (stream->shutdown_error != GRPC_ERROR_NONE) {
return GRPC_ERROR_REF(stream->shutdown_error);
}
@@ -143,7 +143,7 @@ static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx,
++stream->cursor;
return GRPC_ERROR_NONE;
}
- grpc_error *error =
+ grpc_error* error =
grpc_byte_stream_pull(exec_ctx, stream->cache->underlying_stream, slice);
if (error == GRPC_ERROR_NONE) {
++stream->cursor;
@@ -153,18 +153,18 @@ static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx,
return error;
}
-static void caching_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_error *error) {
- grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream;
+static void caching_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_error* error) {
+ grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
GRPC_ERROR_UNREF(stream->shutdown_error);
stream->shutdown_error = GRPC_ERROR_REF(error);
grpc_byte_stream_shutdown(exec_ctx, stream->cache->underlying_stream, error);
}
-static void caching_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream) {
- grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream;
+static void caching_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream) {
+ grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
GRPC_ERROR_UNREF(stream->shutdown_error);
}
@@ -172,8 +172,8 @@ static const grpc_byte_stream_vtable caching_byte_stream_vtable = {
caching_byte_stream_next, caching_byte_stream_pull,
caching_byte_stream_shutdown, caching_byte_stream_destroy};
-void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream,
- grpc_byte_stream_cache *cache) {
+void grpc_caching_byte_stream_init(grpc_caching_byte_stream* stream,
+ grpc_byte_stream_cache* cache) {
memset(stream, 0, sizeof(*stream));
stream->base.length = cache->underlying_stream->length;
stream->base.flags = cache->underlying_stream->flags;
@@ -182,6 +182,6 @@ void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream,
stream->shutdown_error = GRPC_ERROR_NONE;
}
-void grpc_caching_byte_stream_reset(grpc_caching_byte_stream *stream) {
+void grpc_caching_byte_stream_reset(grpc_caching_byte_stream* stream) {
stream->cursor = 0;
}
diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h
index c1d8ee543f..54ad4b9796 100644
--- a/src/core/lib/transport/byte_stream.h
+++ b/src/core/lib/transport/byte_stream.h
@@ -35,19 +35,19 @@ extern "C" {
typedef struct grpc_byte_stream grpc_byte_stream;
typedef struct {
- bool (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
- size_t max_size_hint, grpc_closure *on_complete);
- grpc_error *(*pull)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
- grpc_slice *slice);
- void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream,
- grpc_error *error);
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream);
+ bool (*next)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
+ size_t max_size_hint, grpc_closure* on_complete);
+ grpc_error* (*pull)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
+ grpc_slice* slice);
+ void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
+ grpc_error* error);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream);
} grpc_byte_stream_vtable;
struct grpc_byte_stream {
uint32_t length;
uint32_t flags;
- const grpc_byte_stream_vtable *vtable;
+ const grpc_byte_stream_vtable* vtable;
};
// Returns true if the bytes are available immediately (in which case
@@ -56,18 +56,18 @@ struct grpc_byte_stream {
//
// max_size_hint can be set as a hint as to the maximum number
// of bytes that would be acceptable to read.
-bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream, size_t max_size_hint,
- grpc_closure *on_complete);
+bool grpc_byte_stream_next(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream, size_t max_size_hint,
+ grpc_closure* on_complete);
// Returns the next slice in the byte stream when it is ready (indicated by
// either grpc_byte_stream_next returning true or on_complete passed to
// grpc_byte_stream_next is called).
//
// Once a slice is returned into *slice, it is owned by the caller.
-grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_slice *slice);
+grpc_error* grpc_byte_stream_pull(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_slice* slice);
// Shuts down the byte stream.
//
@@ -76,12 +76,12 @@ grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx,
//
// The next call to grpc_byte_stream_pull() (if any) will return the error
// passed to grpc_byte_stream_shutdown().
-void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream,
- grpc_error *error);
+void grpc_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream,
+ grpc_error* error);
-void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream *byte_stream);
+void grpc_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream* byte_stream);
// grpc_slice_buffer_stream
//
@@ -91,13 +91,13 @@ void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
typedef struct grpc_slice_buffer_stream {
grpc_byte_stream base;
- grpc_slice_buffer *backing_buffer;
+ grpc_slice_buffer* backing_buffer;
size_t cursor;
- grpc_error *shutdown_error;
+ grpc_error* shutdown_error;
} grpc_slice_buffer_stream;
-void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
- grpc_slice_buffer *slice_buffer,
+void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream* stream,
+ grpc_slice_buffer* slice_buffer,
uint32_t flags);
// grpc_caching_byte_stream
@@ -114,30 +114,30 @@ void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream,
// grpc_byte_stream_cache at the same time.
typedef struct {
- grpc_byte_stream *underlying_stream;
+ grpc_byte_stream* underlying_stream;
grpc_slice_buffer cache_buffer;
} grpc_byte_stream_cache;
// Takes ownership of underlying_stream.
-void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache,
- grpc_byte_stream *underlying_stream);
+void grpc_byte_stream_cache_init(grpc_byte_stream_cache* cache,
+ grpc_byte_stream* underlying_stream);
// Must not be called while still in use by a grpc_caching_byte_stream.
-void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx,
- grpc_byte_stream_cache *cache);
+void grpc_byte_stream_cache_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_byte_stream_cache* cache);
typedef struct {
grpc_byte_stream base;
- grpc_byte_stream_cache *cache;
+ grpc_byte_stream_cache* cache;
size_t cursor;
- grpc_error *shutdown_error;
+ grpc_error* shutdown_error;
} grpc_caching_byte_stream;
-void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream,
- grpc_byte_stream_cache *cache);
+void grpc_caching_byte_stream_init(grpc_caching_byte_stream* stream,
+ grpc_byte_stream_cache* cache);
// Resets the byte stream to the start of the underlying stream.
-void grpc_caching_byte_stream_reset(grpc_caching_byte_stream *stream);
+void grpc_caching_byte_stream_reset(grpc_caching_byte_stream* stream);
#ifdef __cplusplus
}
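
A short sketch of driving the byte-stream interface above over a grpc_slice_buffer_stream (illustration only; the helper name and the caller-supplied buffer are assumptions):

#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>

#include "src/core/lib/transport/byte_stream.h"

// Illustration only: drain a slice-buffer-backed stream. Its bytes are
// always available, so next() returns true and on_complete is never used.
static void drain_slice_buffer_stream(grpc_exec_ctx* exec_ctx,
                                      grpc_slice_buffer* buf) {
  grpc_slice_buffer_stream stream;
  grpc_slice_buffer_stream_init(&stream, buf, /*flags=*/0);
  for (size_t i = 0; i < buf->count; i++) {
    GPR_ASSERT(grpc_byte_stream_next(exec_ctx, &stream.base, buf->length,
                                     /*on_complete=*/NULL));
    grpc_slice slice;
    GPR_ASSERT(grpc_byte_stream_pull(exec_ctx, &stream.base, &slice) ==
               GRPC_ERROR_NONE);
    grpc_slice_unref(slice);  // pulled slices are owned by the caller
  }
  // Destroying the stream resets and unrefs the backing buffer.
  grpc_byte_stream_destroy(exec_ctx, &stream.base);
}
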
diff --git a/src/core/lib/transport/connectivity_state.cc b/src/core/lib/transport/connectivity_state.cc
index 652c26cf0a..bdaf0243f9 100644
--- a/src/core/lib/transport/connectivity_state.cc
+++ b/src/core/lib/transport/connectivity_state.cc
@@ -27,7 +27,7 @@
grpc_tracer_flag grpc_connectivity_state_trace =
GRPC_TRACER_INITIALIZER(false, "connectivity_state");
-const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
+const char* grpc_connectivity_state_name(grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_IDLE:
return "IDLE";
@@ -43,19 +43,19 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) {
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
-void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
+void grpc_connectivity_state_init(grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state init_state,
- const char *name) {
+ const char* name) {
gpr_atm_no_barrier_store(&tracker->current_state_atm, init_state);
tracker->current_error = GRPC_ERROR_NONE;
tracker->watchers = NULL;
tracker->name = gpr_strdup(name);
}
-void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
- grpc_connectivity_state_tracker *tracker) {
- grpc_error *error;
- grpc_connectivity_state_watcher *w;
+void grpc_connectivity_state_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_connectivity_state_tracker* tracker) {
+ grpc_error* error;
+ grpc_connectivity_state_watcher* w;
while ((w = tracker->watchers)) {
tracker->watchers = w->next;
@@ -74,7 +74,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
}
grpc_connectivity_state grpc_connectivity_state_check(
- grpc_connectivity_state_tracker *tracker) {
+ grpc_connectivity_state_tracker* tracker) {
grpc_connectivity_state cur =
(grpc_connectivity_state)gpr_atm_no_barrier_load(
&tracker->current_state_atm);
@@ -86,7 +86,7 @@ grpc_connectivity_state grpc_connectivity_state_check(
}
grpc_connectivity_state grpc_connectivity_state_get(
- grpc_connectivity_state_tracker *tracker, grpc_error **error) {
+ grpc_connectivity_state_tracker* tracker, grpc_error** error) {
grpc_connectivity_state cur =
(grpc_connectivity_state)gpr_atm_no_barrier_load(
&tracker->current_state_atm);
@@ -101,13 +101,13 @@ grpc_connectivity_state grpc_connectivity_state_get(
}
bool grpc_connectivity_state_has_watchers(
- grpc_connectivity_state_tracker *connectivity_state) {
+ grpc_connectivity_state_tracker* connectivity_state) {
return connectivity_state->watchers != NULL;
}
bool grpc_connectivity_state_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
- grpc_connectivity_state *current, grpc_closure *notify) {
+ grpc_exec_ctx* exec_ctx, grpc_connectivity_state_tracker* tracker,
+ grpc_connectivity_state* current, grpc_closure* notify) {
grpc_connectivity_state cur =
(grpc_connectivity_state)gpr_atm_no_barrier_load(
&tracker->current_state_atm);
@@ -122,7 +122,7 @@ bool grpc_connectivity_state_notify_on_state_change(
}
}
if (current == NULL) {
- grpc_connectivity_state_watcher *w = tracker->watchers;
+ grpc_connectivity_state_watcher* w = tracker->watchers;
if (w != NULL && w->notify == notify) {
GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
tracker->watchers = w->next;
@@ -130,7 +130,7 @@ bool grpc_connectivity_state_notify_on_state_change(
return false;
}
while (w != NULL) {
- grpc_connectivity_state_watcher *rm_candidate = w->next;
+ grpc_connectivity_state_watcher* rm_candidate = w->next;
if (rm_candidate != NULL && rm_candidate->notify == notify) {
GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
w->next = w->next->next;
@@ -146,8 +146,8 @@ bool grpc_connectivity_state_notify_on_state_change(
GRPC_CLOSURE_SCHED(exec_ctx, notify,
GRPC_ERROR_REF(tracker->current_error));
} else {
- grpc_connectivity_state_watcher *w =
- (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
+ grpc_connectivity_state_watcher* w =
+ (grpc_connectivity_state_watcher*)gpr_malloc(sizeof(*w));
w->current = current;
w->notify = notify;
w->next = tracker->watchers;
@@ -157,16 +157,16 @@ bool grpc_connectivity_state_notify_on_state_change(
}
}
-void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
- grpc_connectivity_state_tracker *tracker,
+void grpc_connectivity_state_set(grpc_exec_ctx* exec_ctx,
+ grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state state,
- grpc_error *error, const char *reason) {
+ grpc_error* error, const char* reason) {
grpc_connectivity_state cur =
(grpc_connectivity_state)gpr_atm_no_barrier_load(
&tracker->current_state_atm);
- grpc_connectivity_state_watcher *w;
+ grpc_connectivity_state_watcher* w;
if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
- const char *error_string = grpc_error_string(error);
+ const char* error_string = grpc_error_string(error);
gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
tracker->name, grpc_connectivity_state_name(cur),
grpc_connectivity_state_name(state), reason, error, error_string);
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index c0ba188148..792e27c43d 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -29,64 +29,64 @@ extern "C" {
typedef struct grpc_connectivity_state_watcher {
/** we keep watchers in a linked list */
- struct grpc_connectivity_state_watcher *next;
+ struct grpc_connectivity_state_watcher* next;
/** closure to notify on change */
- grpc_closure *notify;
+ grpc_closure* notify;
/** the current state as believed by the watcher */
- grpc_connectivity_state *current;
+ grpc_connectivity_state* current;
} grpc_connectivity_state_watcher;
typedef struct {
/** current grpc_connectivity_state */
gpr_atm current_state_atm;
/** error associated with state */
- grpc_error *current_error;
+ grpc_error* current_error;
/** all our watchers */
- grpc_connectivity_state_watcher *watchers;
+ grpc_connectivity_state_watcher* watchers;
/** a name to help debugging */
- char *name;
+ char* name;
} grpc_connectivity_state_tracker;
extern grpc_tracer_flag grpc_connectivity_state_trace;
/** enum --> string conversion */
-const char *grpc_connectivity_state_name(grpc_connectivity_state state);
+const char* grpc_connectivity_state_name(grpc_connectivity_state state);
-void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker,
+void grpc_connectivity_state_init(grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state init_state,
- const char *name);
-void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
- grpc_connectivity_state_tracker *tracker);
+ const char* name);
+void grpc_connectivity_state_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_connectivity_state_tracker* tracker);
/** Set connectivity state; not thread safe; access must be serialized with an
* external lock */
-void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
- grpc_connectivity_state_tracker *tracker,
+void grpc_connectivity_state_set(grpc_exec_ctx* exec_ctx,
+ grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state state,
- grpc_error *associated_error,
- const char *reason);
+ grpc_error* associated_error,
+ const char* reason);
/** Return true if this connectivity state has watchers.
Access must be serialized with an external lock. */
bool grpc_connectivity_state_has_watchers(
- grpc_connectivity_state_tracker *tracker);
+ grpc_connectivity_state_tracker* tracker);
/** Return the last seen connectivity state. No need to synchronize access. */
grpc_connectivity_state grpc_connectivity_state_check(
- grpc_connectivity_state_tracker *tracker);
+ grpc_connectivity_state_tracker* tracker);
/** Return the last seen connectivity state, and the associated error.
Access must be serialized with an external lock. */
grpc_connectivity_state grpc_connectivity_state_get(
- grpc_connectivity_state_tracker *tracker, grpc_error **error);
+ grpc_connectivity_state_tracker* tracker, grpc_error** error);
/** Return 1 if the channel should start connecting, 0 otherwise.
If current==NULL cancel notify if it is already queued (success==0 in that
case).
Access must be serialized with an external lock. */
bool grpc_connectivity_state_notify_on_state_change(
- grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
- grpc_connectivity_state *current, grpc_closure *notify);
+ grpc_exec_ctx* exec_ctx, grpc_connectivity_state_tracker* tracker,
+ grpc_connectivity_state* current, grpc_closure* notify);
#ifdef __cplusplus
}
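
A compact sketch of the tracker lifecycle declared above (illustration only; the tracker name and reason strings are made up):

#include <grpc/support/log.h>

#include "src/core/lib/transport/connectivity_state.h"

// Illustration only: init, transition, check, destroy. State changes must
// normally be serialized with an external lock; this single-threaded sketch
// skips that.
static void connectivity_demo(grpc_exec_ctx* exec_ctx) {
  grpc_connectivity_state_tracker tracker;
  grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "demo_tracker");
  grpc_connectivity_state_set(exec_ctx, &tracker, GRPC_CHANNEL_CONNECTING,
                              GRPC_ERROR_NONE, "start_connecting");
  GPR_ASSERT(grpc_connectivity_state_check(&tracker) ==
             GRPC_CHANNEL_CONNECTING);
  grpc_connectivity_state_destroy(exec_ctx, &tracker);
}
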
diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc
index 2e3b61b7ab..d968b04fd8 100644
--- a/src/core/lib/transport/error_utils.cc
+++ b/src/core/lib/transport/error_utils.cc
@@ -21,7 +21,7 @@
#include "src/core/lib/iomgr/error_internal.h"
#include "src/core/lib/transport/status_conversion.h"
-static grpc_error *recursively_find_error_with_field(grpc_error *error,
+static grpc_error* recursively_find_error_with_field(grpc_error* error,
grpc_error_ints which) {
// If the error itself has a status code, return it.
if (grpc_error_get_int(error, which, NULL)) {
@@ -31,21 +31,21 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error,
// Otherwise, search through its children.
uint8_t slot = error->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot);
- grpc_error *result = recursively_find_error_with_field(lerr->err, which);
+ grpc_linked_error* lerr = (grpc_linked_error*)(error->arena + slot);
+ grpc_error* result = recursively_find_error_with_field(lerr->err, which);
if (result) return result;
slot = lerr->next;
}
return NULL;
}
-void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
- grpc_millis deadline, grpc_status_code *code,
- grpc_slice *slice,
- grpc_http2_error_code *http_error) {
+void grpc_error_get_status(grpc_exec_ctx* exec_ctx, grpc_error* error,
+ grpc_millis deadline, grpc_status_code* code,
+ grpc_slice* slice,
+ grpc_http2_error_code* http_error) {
// Start with the parent error and recurse through the tree of children
// until we find the first one that has a status code.
- grpc_error *found_error =
+ grpc_error* found_error =
recursively_find_error_with_field(error, GRPC_ERROR_INT_GRPC_STATUS);
if (found_error == NULL) {
/// If no grpc-status exists, retry through the tree to find a http2 error
@@ -94,13 +94,13 @@ void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
if (found_error == NULL) found_error = error;
}
-bool grpc_error_has_clear_grpc_status(grpc_error *error) {
+bool grpc_error_has_clear_grpc_status(grpc_error* error) {
if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) {
return true;
}
uint8_t slot = error->first_err;
while (slot != UINT8_MAX) {
- grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot);
+ grpc_linked_error* lerr = (grpc_linked_error*)(error->arena + slot);
if (grpc_error_has_clear_grpc_status(lerr->err)) {
return true;
}
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
index b4f9df4bf1..690e42058a 100644
--- a/src/core/lib/transport/error_utils.h
+++ b/src/core/lib/transport/error_utils.h
@@ -33,16 +33,16 @@ extern "C" {
/// All attributes are pulled from the same child error. If any of the
/// attributes (code, msg, http_status) are unneeded, they can be passed as
/// NULL.
-void grpc_error_get_status(grpc_exec_ctx *exec_ctx, grpc_error *error,
- grpc_millis deadline, grpc_status_code *code,
- grpc_slice *slice,
- grpc_http2_error_code *http_status);
+void grpc_error_get_status(grpc_exec_ctx* exec_ctx, grpc_error* error,
+ grpc_millis deadline, grpc_status_code* code,
+ grpc_slice* slice,
+ grpc_http2_error_code* http_status);
/// A utility function to check whether there is a clear status code that
/// doesn't need to be guessed in \a error. This means that \a error or some
/// child has GRPC_ERROR_INT_GRPC_STATUS set, or that it is GRPC_ERROR_NONE or
/// GRPC_ERROR_CANCELLED
-bool grpc_error_has_clear_grpc_status(grpc_error *error);
+bool grpc_error_has_clear_grpc_status(grpc_error* error);
#ifdef __cplusplus
}
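
A brief sketch of grpc_error_get_status as declared above (illustration only; the error message and status value are arbitrary, and the unneeded outputs are passed as NULL as the header permits):

#include <grpc/status.h>
#include <grpc/support/log.h>

#include "src/core/lib/transport/error_utils.h"

// Illustration only: stamp a grpc status onto an error and read it back.
static void error_status_demo(grpc_exec_ctx* exec_ctx) {
  grpc_error* err = grpc_error_set_int(
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("backend unavailable"),
      GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
  grpc_status_code code;
  grpc_error_get_status(exec_ctx, err, GRPC_MILLIS_INF_FUTURE, &code,
                        /*slice=*/NULL, /*http_error=*/NULL);
  GPR_ASSERT(code == GRPC_STATUS_UNAVAILABLE);
  GRPC_ERROR_UNREF(err);
}
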
diff --git a/src/core/lib/transport/metadata.cc b/src/core/lib/transport/metadata.cc
index 2392f26c0b..ff11ddec13 100644
--- a/src/core/lib/transport/metadata.cc
+++ b/src/core/lib/transport/metadata.cc
@@ -67,7 +67,7 @@ grpc_tracer_flag grpc_trace_metadata =
#define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity))
#define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1))
-typedef void (*destroy_user_data_func)(void *user_data);
+typedef void (*destroy_user_data_func)(void* user_data);
/* Shadow structure for grpc_mdelem_data for interned elements */
typedef struct interned_metadata {
@@ -82,7 +82,7 @@ typedef struct interned_metadata {
gpr_atm destroy_user_data;
gpr_atm user_data;
- struct interned_metadata *bucket_next;
+ struct interned_metadata* bucket_next;
} interned_metadata;
/* Shadow structure for grpc_mdelem_data for allocated elements */
@@ -97,7 +97,7 @@ typedef struct allocated_metadata {
typedef struct mdtab_shard {
gpr_mu mu;
- interned_metadata **elems;
+ interned_metadata** elems;
size_t count;
size_t capacity;
/** Estimate of the number of unreferenced mdelems in the hash table.
@@ -108,24 +108,24 @@ typedef struct mdtab_shard {
static mdtab_shard g_shards[SHARD_COUNT];
-static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard);
+static void gc_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard);
void grpc_mdctx_global_init(void) {
/* initialize shards */
for (size_t i = 0; i < SHARD_COUNT; i++) {
- mdtab_shard *shard = &g_shards[i];
+ mdtab_shard* shard = &g_shards[i];
gpr_mu_init(&shard->mu);
shard->count = 0;
gpr_atm_no_barrier_store(&shard->free_estimate, 0);
shard->capacity = INITIAL_SHARD_CAPACITY;
- shard->elems = (interned_metadata **)gpr_zalloc(sizeof(*shard->elems) *
- shard->capacity);
+ shard->elems = (interned_metadata**)gpr_zalloc(sizeof(*shard->elems) *
+ shard->capacity);
}
}
-void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx) {
+void grpc_mdctx_global_shutdown(grpc_exec_ctx* exec_ctx) {
for (size_t i = 0; i < SHARD_COUNT; i++) {
- mdtab_shard *shard = &g_shards[i];
+ mdtab_shard* shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
gc_mdtab(exec_ctx, shard);
/* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
@@ -146,14 +146,14 @@ static int is_mdelem_static(grpc_mdelem e) {
&grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
}
-static void ref_md_locked(mdtab_shard *shard,
- interned_metadata *md DEBUG_ARGS) {
+static void ref_md_locked(mdtab_shard* shard,
+ interned_metadata* md DEBUG_ARGS) {
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
- "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void*)md,
gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
gpr_free(key_str);
@@ -165,9 +165,9 @@ static void ref_md_locked(mdtab_shard *shard,
}
}
-static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
+static void gc_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard) {
size_t i;
- interned_metadata **prev_next;
+ interned_metadata** prev_next;
interned_metadata *md, *next;
gpr_atm num_freed = 0;
@@ -175,7 +175,7 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
for (i = 0; i < shard->capacity; i++) {
prev_next = &shard->elems[i];
for (md = shard->elems[i]; md; md = next) {
- void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
+ void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
grpc_slice_unref_internal(exec_ctx, md->key);
@@ -197,17 +197,17 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
GPR_TIMER_END("gc_mdtab", 0);
}
-static void grow_mdtab(mdtab_shard *shard) {
+static void grow_mdtab(mdtab_shard* shard) {
size_t capacity = shard->capacity * 2;
size_t i;
- interned_metadata **mdtab;
+ interned_metadata** mdtab;
interned_metadata *md, *next;
uint32_t hash;
GPR_TIMER_BEGIN("grow_mdtab", 0);
mdtab =
- (interned_metadata **)gpr_zalloc(sizeof(interned_metadata *) * capacity);
+ (interned_metadata**)gpr_zalloc(sizeof(interned_metadata*) * capacity);
for (i = 0; i < shard->capacity; i++) {
for (md = shard->elems[i]; md; md = next) {
@@ -228,7 +228,7 @@ static void grow_mdtab(mdtab_shard *shard) {
GPR_TIMER_END("grow_mdtab", 0);
}
-static void rehash_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
+static void rehash_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard) {
if (gpr_atm_no_barrier_load(&shard->free_estimate) >
(gpr_atm)(shard->capacity / 4)) {
gc_mdtab(exec_ctx, shard);
@@ -238,25 +238,25 @@ static void rehash_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) {
}
grpc_mdelem grpc_mdelem_create(
- grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value,
- grpc_mdelem_data *compatible_external_backing_store) {
+ grpc_exec_ctx* exec_ctx, grpc_slice key, grpc_slice value,
+ grpc_mdelem_data* compatible_external_backing_store) {
if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) {
if (compatible_external_backing_store != NULL) {
return GRPC_MAKE_MDELEM(compatible_external_backing_store,
GRPC_MDELEM_STORAGE_EXTERNAL);
}
- allocated_metadata *allocated =
- (allocated_metadata *)gpr_malloc(sizeof(*allocated));
+ allocated_metadata* allocated =
+ (allocated_metadata*)gpr_malloc(sizeof(*allocated));
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(allocated->key);
- char *value_str = grpc_slice_to_c_string(allocated->value);
+ char* key_str = grpc_slice_to_c_string(allocated->key);
+ char* value_str = grpc_slice_to_c_string(allocated->value);
gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'",
- (void *)allocated, gpr_atm_no_barrier_load(&allocated->refcnt),
+ (void*)allocated, gpr_atm_no_barrier_load(&allocated->refcnt),
key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -276,8 +276,8 @@ grpc_mdelem grpc_mdelem_create(
uint32_t hash =
GRPC_MDSTR_KV_HASH(grpc_slice_hash(key), grpc_slice_hash(value));
- interned_metadata *md;
- mdtab_shard *shard = &g_shards[SHARD_IDX(hash)];
+ interned_metadata* md;
+ mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
size_t idx;
GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
@@ -296,7 +296,7 @@ grpc_mdelem grpc_mdelem_create(
}
/* not found: create a new pair */
- md = (interned_metadata *)gpr_malloc(sizeof(interned_metadata));
+ md = (interned_metadata*)gpr_malloc(sizeof(interned_metadata));
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
@@ -307,9 +307,9 @@ grpc_mdelem grpc_mdelem_create(
gpr_mu_init(&md->mu_user_data);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
- gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void *)md,
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
+ gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void*)md,
gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -328,7 +328,7 @@ grpc_mdelem grpc_mdelem_create(
return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
-grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
+grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx* exec_ctx, grpc_slice key,
grpc_slice value) {
grpc_mdelem out = grpc_mdelem_create(exec_ctx, key, value, NULL);
grpc_slice_unref_internal(exec_ctx, key);
@@ -336,15 +336,15 @@ grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
return out;
}
-grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx,
- grpc_metadata *metadata) {
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_metadata* metadata) {
bool changed = false;
grpc_slice key_slice =
grpc_slice_maybe_static_intern(metadata->key, &changed);
grpc_slice value_slice =
grpc_slice_maybe_static_intern(metadata->value, &changed);
return grpc_mdelem_create(exec_ctx, key_slice, value_slice,
- changed ? NULL : (grpc_mdelem_data *)metadata);
+ changed ? NULL : (grpc_mdelem_data*)metadata);
}
static size_t get_base64_encoded_size(size_t raw_length) {
@@ -371,14 +371,14 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
case GRPC_MDELEM_STORAGE_STATIC:
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
- interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
+ interned_metadata* md = (interned_metadata*)GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
- (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ (void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -393,14 +393,14 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
- allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
+ allocated_metadata* md = (allocated_metadata*)GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
- (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ (void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -417,20 +417,20 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) {
return gmd;
}
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
+void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
switch (GRPC_MDELEM_STORAGE(gmd)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_STATIC:
break;
case GRPC_MDELEM_STORAGE_INTERNED: {
- interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd);
+ interned_metadata* md = (interned_metadata*)GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
- (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ (void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -443,20 +443,20 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
if (1 == prev_refcount) {
/* once the refcount hits zero, some other thread can come along and
free md at any time: it's unsafe from this point on to access it */
- mdtab_shard *shard = &g_shards[SHARD_IDX(hash)];
+ mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
}
break;
}
case GRPC_MDELEM_STORAGE_ALLOCATED: {
- allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd);
+ allocated_metadata* md = (allocated_metadata*)GRPC_MDELEM_DATA(gmd);
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_metadata)) {
- char *key_str = grpc_slice_to_c_string(md->key);
- char *value_str = grpc_slice_to_c_string(md->value);
+ char* key_str = grpc_slice_to_c_string(md->key);
+ char* value_str = grpc_slice_to_c_string(md->value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'",
- (void *)md, gpr_atm_no_barrier_load(&md->refcnt),
+ (void*)md, gpr_atm_no_barrier_load(&md->refcnt),
gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@@ -474,19 +474,19 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
}
}
-void *grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void *)) {
+void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
return NULL;
case GRPC_MDELEM_STORAGE_STATIC:
- return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
- grpc_static_mdelem_table];
+ return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
+ grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_INTERNED: {
- interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md);
- void *result;
+ interned_metadata* im = (interned_metadata*)GRPC_MDELEM_DATA(md);
+ void* result;
if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
- return (void *)gpr_atm_no_barrier_load(&im->user_data);
+ return (void*)gpr_atm_no_barrier_load(&im->user_data);
} else {
return NULL;
}
@@ -496,8 +496,8 @@ void *grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void *)) {
GPR_UNREACHABLE_CODE(return NULL);
}
-void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
- void *user_data) {
+void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
+ void* user_data) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
@@ -505,10 +505,10 @@ void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
return NULL;
case GRPC_MDELEM_STORAGE_STATIC:
destroy_func(user_data);
- return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
- grpc_static_mdelem_table];
+ return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
+ grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_INTERNED: {
- interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md);
+ interned_metadata* im = (interned_metadata*)GRPC_MDELEM_DATA(md);
GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
gpr_mu_lock(&im->mu_user_data);
@@ -518,7 +518,7 @@ void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
if (destroy_func != NULL) {
destroy_func(user_data);
}
- return (void *)gpr_atm_no_barrier_load(&im->user_data);
+ return (void*)gpr_atm_no_barrier_load(&im->user_data);
}
gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 3f1032ab8a..7e7e7b4c14 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -98,8 +98,7 @@ struct grpc_mdelem {
uintptr_t payload;
};
-#define GRPC_MDELEM_DATA(md) \
- ((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3))
+#define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3))
#define GRPC_MDELEM_STORAGE(md) \
((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
#ifdef __cplusplus
@@ -114,21 +113,21 @@ struct grpc_mdelem {
(uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
/* Unrefs the slices. */
-grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key,
+grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx* exec_ctx, grpc_slice key,
grpc_slice value);
/* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
object as backing storage (so lifetimes should align) */
-grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx,
- grpc_metadata *metadata);
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx* exec_ctx,
+ grpc_metadata* metadata);
/* Does not unref the slices; if a new non-interned mdelem is needed, allocates
one if compatible_external_backing_store is NULL, or uses
compatible_external_backing_store if it is non-NULL (in which case it's the
users responsibility to ensure that it outlives usage) */
grpc_mdelem grpc_mdelem_create(
- grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value,
- grpc_mdelem_data *compatible_external_backing_store);
+ grpc_exec_ctx* exec_ctx, grpc_slice key, grpc_slice value,
+ grpc_mdelem_data* compatible_external_backing_store);
bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
@@ -137,23 +136,22 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem,
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */
-void *grpc_mdelem_get_user_data(grpc_mdelem md,
- void (*if_destroy_func)(void *));
-void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *),
- void *user_data);
+void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
+void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
+ void* user_data);
#ifndef NDEBUG
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
#define GRPC_MDELEM_UNREF(exec_ctx, s) \
grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__)
-grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char *file, int line);
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md,
- const char *file, int line);
+grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char* file, int line);
+void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem md,
+ const char* file, int line);
#else
#define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
#define GRPC_MDELEM_UNREF(exec_ctx, s) grpc_mdelem_unref((exec_ctx), (s))
grpc_mdelem grpc_mdelem_ref(grpc_mdelem md);
-void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md);
+void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem md);
#endif
#define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
@@ -170,7 +168,7 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md);
#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))
void grpc_mdctx_global_init(void);
-void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx);
+void grpc_mdctx_global_shutdown(grpc_exec_ctx* exec_ctx);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc
index 2df9c9189c..90e84cd1e2 100644
--- a/src/core/lib/transport/metadata_batch.cc
+++ b/src/core/lib/transport/metadata_batch.cc
@@ -28,9 +28,9 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
-static void assert_valid_list(grpc_mdelem_list *list) {
+static void assert_valid_list(grpc_mdelem_list* list) {
#ifndef NDEBUG
- grpc_linked_mdelem *l;
+ grpc_linked_mdelem* l;
GPR_ASSERT((list->head == NULL) == (list->tail == NULL));
if (!list->head) return;
@@ -51,10 +51,10 @@ static void assert_valid_list(grpc_mdelem_list *list) {
#endif /* NDEBUG */
}
-static void assert_valid_callouts(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch) {
+static void assert_valid_callouts(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch) {
#ifndef NDEBUG
- for (grpc_linked_mdelem *l = batch->list.head; l != NULL; l = l->next) {
+ for (grpc_linked_mdelem* l = batch->list.head; l != NULL; l = l->next) {
grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md));
grpc_metadata_batch_callouts_index callout_idx =
GRPC_BATCH_INDEX_OF(key_interned);
@@ -67,38 +67,38 @@ static void assert_valid_callouts(grpc_exec_ctx *exec_ctx,
}
#ifndef NDEBUG
-void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
+void grpc_metadata_batch_assert_ok(grpc_metadata_batch* batch) {
assert_valid_list(&batch->list);
}
#endif /* NDEBUG */
-void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
+void grpc_metadata_batch_init(grpc_metadata_batch* batch) {
memset(batch, 0, sizeof(*batch));
batch->deadline = GRPC_MILLIS_INF_FUTURE;
}
-void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch) {
- grpc_linked_mdelem *l;
+void grpc_metadata_batch_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch) {
+ grpc_linked_mdelem* l;
for (l = batch->list.head; l; l = l->next) {
GRPC_MDELEM_UNREF(exec_ctx, l->md);
}
}
-grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md) {
- grpc_error *out = grpc_error_set_str(
+grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md) {
+ grpc_error* out = grpc_error_set_str(
grpc_error_set_str(src, GRPC_ERROR_STR_KEY,
grpc_slice_ref_internal(GRPC_MDKEY(md))),
GRPC_ERROR_STR_VALUE, grpc_slice_ref_internal(GRPC_MDVALUE(md)));
return out;
}
-static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage)
+static grpc_error* maybe_link_callout(grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage)
GRPC_MUST_USE_RESULT;
-static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+static grpc_error* maybe_link_callout(grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) {
grpc_metadata_batch_callouts_index idx =
GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
@@ -114,8 +114,8 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
storage->md);
}
-static void maybe_unlink_callout(grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+static void maybe_unlink_callout(grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) {
grpc_metadata_batch_callouts_index idx =
GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
@@ -126,16 +126,16 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch,
batch->idx.array[idx] = NULL;
}
-grpc_error *grpc_metadata_batch_add_head(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
+grpc_error* grpc_metadata_batch_add_head(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage,
grpc_mdelem elem_to_add) {
GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
storage->md = elem_to_add;
return grpc_metadata_batch_link_head(exec_ctx, batch, storage);
}
-static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
+static void link_head(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
assert_valid_list(list);
GPR_ASSERT(!GRPC_MDISNULL(storage->md));
storage->prev = NULL;
@@ -150,11 +150,11 @@ static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
}
-grpc_error *grpc_metadata_batch_link_head(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+grpc_error* grpc_metadata_batch_link_head(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) {
assert_valid_callouts(exec_ctx, batch);
- grpc_error *err = maybe_link_callout(batch, storage);
+ grpc_error* err = maybe_link_callout(batch, storage);
if (err != GRPC_ERROR_NONE) {
assert_valid_callouts(exec_ctx, batch);
return err;
@@ -164,16 +164,16 @@ grpc_error *grpc_metadata_batch_link_head(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-grpc_error *grpc_metadata_batch_add_tail(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
+grpc_error* grpc_metadata_batch_add_tail(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage,
grpc_mdelem elem_to_add) {
GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
storage->md = elem_to_add;
return grpc_metadata_batch_link_tail(exec_ctx, batch, storage);
}
-static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
+static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
assert_valid_list(list);
GPR_ASSERT(!GRPC_MDISNULL(storage->md));
storage->prev = list->tail;
@@ -189,11 +189,11 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
assert_valid_list(list);
}
-grpc_error *grpc_metadata_batch_link_tail(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+grpc_error* grpc_metadata_batch_link_tail(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) {
assert_valid_callouts(exec_ctx, batch);
- grpc_error *err = maybe_link_callout(batch, storage);
+ grpc_error* err = maybe_link_callout(batch, storage);
if (err != GRPC_ERROR_NONE) {
assert_valid_callouts(exec_ctx, batch);
return err;
@@ -203,8 +203,8 @@ grpc_error *grpc_metadata_batch_link_tail(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void unlink_storage(grpc_mdelem_list *list,
- grpc_linked_mdelem *storage) {
+static void unlink_storage(grpc_mdelem_list* list,
+ grpc_linked_mdelem* storage) {
assert_valid_list(list);
if (storage->prev != NULL) {
storage->prev->next = storage->next;
@@ -220,9 +220,9 @@ static void unlink_storage(grpc_mdelem_list *list,
assert_valid_list(list);
}
-void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) {
+void grpc_metadata_batch_remove(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) {
assert_valid_callouts(exec_ctx, batch);
maybe_unlink_callout(batch, storage);
unlink_storage(&batch->list, storage);
@@ -230,8 +230,8 @@ void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
assert_valid_callouts(exec_ctx, batch);
}
-void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
- grpc_linked_mdelem *storage,
+void grpc_metadata_batch_set_value(grpc_exec_ctx* exec_ctx,
+ grpc_linked_mdelem* storage,
grpc_slice value) {
grpc_mdelem old_mdelem = storage->md;
grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
@@ -240,12 +240,12 @@ void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
}
-grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
+grpc_error* grpc_metadata_batch_substitute(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage,
grpc_mdelem new_mdelem) {
assert_valid_callouts(exec_ctx, batch);
- grpc_error *error = GRPC_ERROR_NONE;
+ grpc_error* error = GRPC_ERROR_NONE;
grpc_mdelem old_mdelem = storage->md;
if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
maybe_unlink_callout(batch, storage);
@@ -263,27 +263,27 @@ grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
return error;
}
-void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch) {
+void grpc_metadata_batch_clear(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch) {
grpc_metadata_batch_destroy(exec_ctx, batch);
grpc_metadata_batch_init(batch);
}
-bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) {
+bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch) {
return batch->list.head == NULL && batch->deadline == GRPC_MILLIS_INF_FUTURE;
}
-size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) {
+size_t grpc_metadata_batch_size(grpc_metadata_batch* batch) {
size_t size = 0;
- for (grpc_linked_mdelem *elem = batch->list.head; elem != NULL;
+ for (grpc_linked_mdelem* elem = batch->list.head; elem != NULL;
elem = elem->next) {
size += GRPC_MDELEM_LENGTH(elem->md);
}
return size;
}
-static void add_error(grpc_error **composite, grpc_error *error,
- const char *composite_error_string) {
+static void add_error(grpc_error** composite, grpc_error* error,
+ const char* composite_error_string) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(composite_error_string);
@@ -291,15 +291,15 @@ static void add_error(grpc_error **composite, grpc_error *error,
*composite = grpc_error_add_child(*composite, error);
}
-grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
+grpc_error* grpc_metadata_batch_filter(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
grpc_metadata_batch_filter_func func,
- void *user_data,
- const char *composite_error_string) {
- grpc_linked_mdelem *l = batch->list.head;
- grpc_error *error = GRPC_ERROR_NONE;
+ void* user_data,
+ const char* composite_error_string) {
+ grpc_linked_mdelem* l = batch->list.head;
+ grpc_error* error = GRPC_ERROR_NONE;
while (l) {
- grpc_linked_mdelem *next = l->next;
+ grpc_linked_mdelem* next = l->next;
grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
add_error(&error, new_mdelem.error, composite_error_string);
if (GRPC_MDISNULL(new_mdelem.md)) {
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index a2b4b92385..7d17393249 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -34,16 +34,16 @@ extern "C" {
typedef struct grpc_linked_mdelem {
grpc_mdelem md;
- struct grpc_linked_mdelem *next;
- struct grpc_linked_mdelem *prev;
- void *reserved;
+ struct grpc_linked_mdelem* next;
+ struct grpc_linked_mdelem* prev;
+ void* reserved;
} grpc_linked_mdelem;
typedef struct grpc_mdelem_list {
size_t count;
size_t default_count; // Number of default keys.
- grpc_linked_mdelem *head;
- grpc_linked_mdelem *tail;
+ grpc_linked_mdelem* head;
+ grpc_linked_mdelem* tail;
} grpc_mdelem_list;
typedef struct grpc_metadata_batch {
@@ -56,29 +56,29 @@ typedef struct grpc_metadata_batch {
grpc_millis deadline;
} grpc_metadata_batch;
-void grpc_metadata_batch_init(grpc_metadata_batch *batch);
-void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch);
-void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch);
-bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch);
+void grpc_metadata_batch_init(grpc_metadata_batch* batch);
+void grpc_metadata_batch_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch);
+void grpc_metadata_batch_clear(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch);
+bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch);
/* Returns the transport size of the batch. */
-size_t grpc_metadata_batch_size(grpc_metadata_batch *batch);
+size_t grpc_metadata_batch_size(grpc_metadata_batch* batch);
/** Remove \a storage from the batch, unreffing the mdelem contained */
-void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage);
+void grpc_metadata_batch_remove(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage);
/** Substitute a new mdelem for an old value */
-grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx,
- grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage,
+grpc_error* grpc_metadata_batch_substitute(grpc_exec_ctx* exec_ctx,
+ grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage,
grpc_mdelem new_value);
-void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
- grpc_linked_mdelem *storage,
+void grpc_metadata_batch_set_value(grpc_exec_ctx* exec_ctx,
+ grpc_linked_mdelem* storage,
grpc_slice value);
/** Add \a storage to the beginning of \a batch. storage->md is
@@ -86,17 +86,17 @@ void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx,
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
-grpc_error *grpc_metadata_batch_link_head(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_link_head(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) GRPC_MUST_USE_RESULT;
/** Add \a storage to the end of \a batch. storage->md is
assumed to be valid.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call. */
-grpc_error *grpc_metadata_batch_link_tail(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_link_tail(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage) GRPC_MUST_USE_RESULT;
/** Add \a elem_to_add as the first element in \a batch, using
\a storage as backing storage for the linked list element.
@@ -104,23 +104,23 @@ grpc_error *grpc_metadata_batch_link_tail(
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
-grpc_error *grpc_metadata_batch_add_head(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_add_head(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
/** Add \a elem_to_add as the last element in \a batch, using
\a storage as backing storage for the linked list element.
\a storage is owned by the caller and must survive for the
lifetime of batch. This usually means it should be around
for the lifetime of the call.
Takes ownership of \a elem_to_add */
-grpc_error *grpc_metadata_batch_add_tail(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
- grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_add_tail(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
+ grpc_linked_mdelem* storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
-grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md);
+grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md);
typedef struct {
- grpc_error *error;
+ grpc_error* error;
grpc_mdelem md;
} grpc_filtered_mdelem;
@@ -132,14 +132,14 @@ typedef struct {
{ GRPC_ERROR_NONE, GRPC_MDNULL }
typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)(
- grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem);
-grpc_error *grpc_metadata_batch_filter(
- grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
- grpc_metadata_batch_filter_func func, void *user_data,
- const char *composite_error_string) GRPC_MUST_USE_RESULT;
+ grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem elem);
+grpc_error* grpc_metadata_batch_filter(
+ grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
+ grpc_metadata_batch_filter_func func, void* user_data,
+ const char* composite_error_string) GRPC_MUST_USE_RESULT;
#ifndef NDEBUG
-void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
+void grpc_metadata_batch_assert_ok(grpc_metadata_batch* comd);
#else
#define grpc_metadata_batch_assert_ok(comd) \
do { \
diff --git a/src/core/lib/transport/pid_controller.cc b/src/core/lib/transport/pid_controller.cc
index 9f7750d693..e31cc85f76 100644
--- a/src/core/lib/transport/pid_controller.cc
+++ b/src/core/lib/transport/pid_controller.cc
@@ -21,7 +21,7 @@
namespace grpc_core {
-PidController::PidController(const Args &args)
+PidController::PidController(const Args& args)
: last_control_value_(args.initial_control_value()), args_(args) {}
double PidController::Update(double error, double dt) {
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
index 472cf888ea..844724cbeb 100644
--- a/src/core/lib/transport/static_metadata.cc
+++ b/src/core/lib/transport/static_metadata.cc
@@ -103,8 +103,8 @@ static uint8_t g_bytes[] = {
105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116,
101, 44, 103, 122, 105, 112};
-static void static_ref(void *unused) {}
-static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}
+static void static_ref(void* unused) {}
+static void static_unref(grpc_exec_ctx* exec_ctx, void* unused) {}
static const grpc_slice_refcount_vtable static_sub_vtable = {
static_ref, static_unref, grpc_slice_default_eq_impl,
grpc_slice_default_hash_impl};
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 299410f22c..8e73d5f278 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -541,30 +541,30 @@ typedef enum {
} grpc_metadata_batch_callouts_index;
typedef union {
- struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];
+ struct grpc_linked_mdelem* array[GRPC_BATCH_CALLOUTS_COUNT];
struct {
- struct grpc_linked_mdelem *path;
- struct grpc_linked_mdelem *method;
- struct grpc_linked_mdelem *status;
- struct grpc_linked_mdelem *authority;
- struct grpc_linked_mdelem *scheme;
- struct grpc_linked_mdelem *te;
- struct grpc_linked_mdelem *grpc_message;
- struct grpc_linked_mdelem *grpc_status;
- struct grpc_linked_mdelem *grpc_payload_bin;
- struct grpc_linked_mdelem *grpc_encoding;
- struct grpc_linked_mdelem *grpc_accept_encoding;
- struct grpc_linked_mdelem *grpc_server_stats_bin;
- struct grpc_linked_mdelem *grpc_tags_bin;
- struct grpc_linked_mdelem *grpc_trace_bin;
- struct grpc_linked_mdelem *content_type;
- struct grpc_linked_mdelem *content_encoding;
- struct grpc_linked_mdelem *accept_encoding;
- struct grpc_linked_mdelem *grpc_internal_encoding_request;
- struct grpc_linked_mdelem *grpc_internal_stream_encoding_request;
- struct grpc_linked_mdelem *user_agent;
- struct grpc_linked_mdelem *host;
- struct grpc_linked_mdelem *lb_token;
+ struct grpc_linked_mdelem* path;
+ struct grpc_linked_mdelem* method;
+ struct grpc_linked_mdelem* status;
+ struct grpc_linked_mdelem* authority;
+ struct grpc_linked_mdelem* scheme;
+ struct grpc_linked_mdelem* te;
+ struct grpc_linked_mdelem* grpc_message;
+ struct grpc_linked_mdelem* grpc_status;
+ struct grpc_linked_mdelem* grpc_payload_bin;
+ struct grpc_linked_mdelem* grpc_encoding;
+ struct grpc_linked_mdelem* grpc_accept_encoding;
+ struct grpc_linked_mdelem* grpc_server_stats_bin;
+ struct grpc_linked_mdelem* grpc_tags_bin;
+ struct grpc_linked_mdelem* grpc_trace_bin;
+ struct grpc_linked_mdelem* content_type;
+ struct grpc_linked_mdelem* content_encoding;
+ struct grpc_linked_mdelem* accept_encoding;
+ struct grpc_linked_mdelem* grpc_internal_encoding_request;
+ struct grpc_linked_mdelem* grpc_internal_stream_encoding_request;
+ struct grpc_linked_mdelem* user_agent;
+ struct grpc_linked_mdelem* host;
+ struct grpc_linked_mdelem* lb_token;
} named;
} grpc_metadata_batch_callouts;
diff --git a/src/core/lib/transport/status_conversion.cc b/src/core/lib/transport/status_conversion.cc
index 891c4427d7..a0a5f1ba4b 100644
--- a/src/core/lib/transport/status_conversion.cc
+++ b/src/core/lib/transport/status_conversion.cc
@@ -37,7 +37,7 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) {
}
}
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx* exec_ctx,
grpc_http2_error_code error,
grpc_millis deadline) {
switch (error) {
diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h
index 8ef91aecfe..b6fcebd4fa 100644
--- a/src/core/lib/transport/status_conversion.h
+++ b/src/core/lib/transport/status_conversion.h
@@ -29,7 +29,7 @@ extern "C" {
/* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */
grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status);
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx *exec_ctx,
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx* exec_ctx,
grpc_http2_error_code error,
grpc_millis deadline);
diff --git a/src/core/lib/transport/timeout_encoding.cc b/src/core/lib/transport/timeout_encoding.cc
index 23a9ef308f..86db6c8344 100644
--- a/src/core/lib/transport/timeout_encoding.cc
+++ b/src/core/lib/transport/timeout_encoding.cc
@@ -41,15 +41,15 @@ static int64_t round_up_to_three_sig_figs(int64_t x) {
}
/* encode our minimum viable timeout value */
-static void enc_tiny(char *buffer) { memcpy(buffer, "1n", 3); }
+static void enc_tiny(char* buffer) { memcpy(buffer, "1n", 3); }
-static void enc_ext(char *buffer, int64_t value, char ext) {
+static void enc_ext(char* buffer, int64_t value, char ext) {
int n = int64_ttoa(value, buffer);
buffer[n] = ext;
buffer[n + 1] = 0;
}
-static void enc_seconds(char *buffer, int64_t sec) {
+static void enc_seconds(char* buffer, int64_t sec) {
if (sec % 3600 == 0) {
enc_ext(buffer, sec / 3600, 'H');
} else if (sec % 60 == 0) {
@@ -59,7 +59,7 @@ static void enc_seconds(char *buffer, int64_t sec) {
}
}
-static void enc_millis(char *buffer, int64_t x) {
+static void enc_millis(char* buffer, int64_t x) {
x = round_up_to_three_sig_figs(x);
if (x < GPR_MS_PER_SEC) {
enc_ext(buffer, x, 'm');
@@ -72,7 +72,7 @@ static void enc_millis(char *buffer, int64_t x) {
}
}
-void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer) {
+void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer) {
if (timeout <= 0) {
enc_tiny(buffer);
} else if (timeout < 1000 * GPR_MS_PER_SEC) {
@@ -83,15 +83,15 @@ void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer) {
}
}
-static int is_all_whitespace(const char *p, const char *end) {
+static int is_all_whitespace(const char* p, const char* end) {
while (p != end && *p == ' ') p++;
return p == end;
}
-int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout) {
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout) {
grpc_millis x = 0;
- const uint8_t *p = GRPC_SLICE_START_PTR(text);
- const uint8_t *end = GRPC_SLICE_END_PTR(text);
+ const uint8_t* p = GRPC_SLICE_START_PTR(text);
+ const uint8_t* end = GRPC_SLICE_END_PTR(text);
int have_digit = 0;
/* skip whitespace */
for (; p != end && *p == ' '; p++)
@@ -138,5 +138,5 @@ int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout) {
return 0;
}
p++;
- return is_all_whitespace((const char *)p, (const char *)end);
+ return is_all_whitespace((const char*)p, (const char*)end);
}
diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h
index 91cdf0f728..9c3c4599c9 100644
--- a/src/core/lib/transport/timeout_encoding.h
+++ b/src/core/lib/transport/timeout_encoding.h
@@ -33,8 +33,8 @@ extern "C" {
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
-void grpc_http2_encode_timeout(grpc_millis timeout, char *buffer);
-int grpc_http2_decode_timeout(grpc_slice text, grpc_millis *timeout);
+void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer);
+int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index ab4f938e7b..021f1b799b 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -37,7 +37,7 @@ grpc_tracer_flag grpc_trace_stream_refcount =
#endif
#ifndef NDEBUG
-void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
+void grpc_stream_ref(grpc_stream_refcount* refcount, const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s",
@@ -45,14 +45,14 @@ void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
val + 1, reason);
}
#else
-void grpc_stream_ref(grpc_stream_refcount *refcount) {
+void grpc_stream_ref(grpc_stream_refcount* refcount) {
#endif
gpr_ref_non_zero(&refcount->refs);
}
#ifndef NDEBUG
-void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
- const char *reason) {
+void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount,
+ const char* reason) {
if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
@@ -60,8 +60,8 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
val - 1, reason);
}
#else
-void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
- grpc_stream_refcount *refcount) {
+void grpc_stream_unref(grpc_exec_ctx* exec_ctx,
+ grpc_stream_refcount* refcount) {
#endif
if (gpr_unref(&refcount->refs)) {
if (exec_ctx->flags & GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) {
@@ -79,11 +79,11 @@ void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
}
}
-#define STREAM_REF_FROM_SLICE_REF(p) \
- ((grpc_stream_refcount *)(((uint8_t *)p) - \
- offsetof(grpc_stream_refcount, slice_refcount)))
+#define STREAM_REF_FROM_SLICE_REF(p) \
+ ((grpc_stream_refcount*)(((uint8_t*)p) - \
+ offsetof(grpc_stream_refcount, slice_refcount)))
-static void slice_stream_ref(void *p) {
+static void slice_stream_ref(void* p) {
#ifndef NDEBUG
grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
@@ -91,7 +91,7 @@ static void slice_stream_ref(void *p) {
#endif
}
-static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
+static void slice_stream_unref(grpc_exec_ctx* exec_ctx, void* p) {
#ifndef NDEBUG
grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice");
#else
@@ -99,12 +99,12 @@ static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) {
#endif
}
-grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
- void *buffer, size_t length) {
+grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
+ void* buffer, size_t length) {
slice_stream_ref(&refcount->slice_refcount);
grpc_slice res;
res.refcount = &refcount->slice_refcount,
- res.data.refcounted.bytes = (uint8_t *)buffer;
+ res.data.refcounted.bytes = (uint8_t*)buffer;
res.data.refcounted.length = length;
return res;
}
@@ -117,13 +117,13 @@ static const grpc_slice_refcount_vtable stream_ref_slice_vtable = {
};
#ifndef NDEBUG
-void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
- grpc_iomgr_cb_func cb, void *cb_arg,
- const char *object_type) {
+void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void* cb_arg,
+ const char* object_type) {
refcount->object_type = object_type;
#else
-void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
- grpc_iomgr_cb_func cb, void *cb_arg) {
+void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void* cb_arg) {
#endif
gpr_ref_init(&refcount->refs, initial_refs);
GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
@@ -131,59 +131,59 @@ void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
refcount->slice_refcount.sub_refcount = &refcount->slice_refcount;
}
-static void move64(uint64_t *from, uint64_t *to) {
+static void move64(uint64_t* from, uint64_t* to) {
*to += *from;
*from = 0;
}
-void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats *from,
- grpc_transport_one_way_stats *to) {
+void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
+ grpc_transport_one_way_stats* to) {
move64(&from->framing_bytes, &to->framing_bytes);
move64(&from->data_bytes, &to->data_bytes);
move64(&from->header_bytes, &to->header_bytes);
}
-void grpc_transport_move_stats(grpc_transport_stream_stats *from,
- grpc_transport_stream_stats *to) {
+void grpc_transport_move_stats(grpc_transport_stream_stats* from,
+ grpc_transport_stream_stats* to) {
grpc_transport_move_one_way_stats(&from->incoming, &to->incoming);
grpc_transport_move_one_way_stats(&from->outgoing, &to->outgoing);
}
-size_t grpc_transport_stream_size(grpc_transport *transport) {
+size_t grpc_transport_stream_size(grpc_transport* transport) {
return transport->vtable->sizeof_stream;
}
-void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
+void grpc_transport_destroy(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport) {
transport->vtable->destroy(exec_ctx, transport);
}
-int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport, grpc_stream *stream,
- grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena) {
+int grpc_transport_init_stream(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport, grpc_stream* stream,
+ grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena) {
return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
server_data, arena);
}
-void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_stream *stream,
- grpc_transport_stream_op_batch *op) {
+void grpc_transport_perform_stream_op(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_stream* stream,
+ grpc_transport_stream_op_batch* op) {
transport->vtable->perform_stream_op(exec_ctx, transport, stream, op);
}
-void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_transport_op *op) {
+void grpc_transport_perform_op(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_transport_op* op) {
transport->vtable->perform_op(exec_ctx, transport, op);
}
-void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
- grpc_stream *stream,
- grpc_polling_entity *pollent) {
- grpc_pollset *pollset;
- grpc_pollset_set *pollset_set;
+void grpc_transport_set_pops(grpc_exec_ctx* exec_ctx, grpc_transport* transport,
+ grpc_stream* stream,
+ grpc_polling_entity* pollent) {
+ grpc_pollset* pollset;
+ grpc_pollset_set* pollset_set;
if ((pollset = grpc_polling_entity_pollset(pollent)) != NULL) {
transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
} else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) != NULL) {
@@ -194,16 +194,16 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
}
}
-void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_stream *stream,
- grpc_closure *then_schedule_closure) {
+void grpc_transport_destroy_stream(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_stream* stream,
+ grpc_closure* then_schedule_closure) {
transport->vtable->destroy_stream(exec_ctx, transport, stream,
then_schedule_closure);
}
-grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport) {
+grpc_endpoint* grpc_transport_get_endpoint(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport) {
return transport->vtable->get_endpoint(exec_ctx, transport);
}
@@ -215,8 +215,8 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
// though it lives in lib, it handles transport stream ops sure
// it's grpc_transport_stream_op_batch_finish_with_failure
void grpc_transport_stream_op_batch_finish_with_failure(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
- grpc_error *error, grpc_call_combiner *call_combiner) {
+ grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* batch,
+ grpc_error* error, grpc_call_combiner* call_combiner) {
if (batch->send_message) {
grpc_byte_stream_destroy(exec_ctx,
batch->payload->send_message.send_message);
@@ -241,19 +241,19 @@ void grpc_transport_stream_op_batch_finish_with_failure(
typedef struct {
grpc_closure outer_on_complete;
- grpc_closure *inner_on_complete;
+ grpc_closure* inner_on_complete;
grpc_transport_op op;
} made_transport_op;
-static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- made_transport_op *op = (made_transport_op *)arg;
+static void destroy_made_transport_op(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ made_transport_op* op = (made_transport_op*)arg;
GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
gpr_free(op);
}
-grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
- made_transport_op *op = (made_transport_op *)gpr_malloc(sizeof(*op));
+grpc_transport_op* grpc_make_transport_op(grpc_closure* on_complete) {
+ made_transport_op* op = (made_transport_op*)gpr_malloc(sizeof(*op));
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
grpc_schedule_on_exec_ctx);
op->inner_on_complete = on_complete;
@@ -264,23 +264,23 @@ grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
typedef struct {
grpc_closure outer_on_complete;
- grpc_closure *inner_on_complete;
+ grpc_closure* inner_on_complete;
grpc_transport_stream_op_batch op;
grpc_transport_stream_op_batch_payload payload;
} made_transport_stream_op;
-static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- made_transport_stream_op *op = (made_transport_stream_op *)arg;
- grpc_closure *c = op->inner_on_complete;
+static void destroy_made_transport_stream_op(grpc_exec_ctx* exec_ctx, void* arg,
+ grpc_error* error) {
+ made_transport_stream_op* op = (made_transport_stream_op*)arg;
+ grpc_closure* c = op->inner_on_complete;
gpr_free(op);
GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
}
-grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
- grpc_closure *on_complete) {
- made_transport_stream_op *op =
- (made_transport_stream_op *)gpr_zalloc(sizeof(*op));
+grpc_transport_stream_op_batch* grpc_make_transport_stream_op(
+ grpc_closure* on_complete) {
+ made_transport_stream_op* op =
+ (made_transport_stream_op*)gpr_zalloc(sizeof(*op));
op->op.payload = &op->payload;
GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
op, grpc_schedule_on_exec_ctx);
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index fbf5dcb8b5..973018e5a5 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -51,33 +51,33 @@ typedef struct grpc_stream_refcount {
gpr_refcount refs;
grpc_closure destroy;
#ifndef NDEBUG
- const char *object_type;
+ const char* object_type;
#endif
grpc_slice_refcount slice_refcount;
} grpc_stream_refcount;
#ifndef NDEBUG
-void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
- grpc_iomgr_cb_func cb, void *cb_arg,
- const char *object_type);
-void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason);
-void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
- const char *reason);
+void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void* cb_arg,
+ const char* object_type);
+void grpc_stream_ref(grpc_stream_refcount* refcount, const char* reason);
+void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount,
+ const char* reason);
#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype)
#else
-void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
- grpc_iomgr_cb_func cb, void *cb_arg);
-void grpc_stream_ref(grpc_stream_refcount *refcount);
-void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount);
+void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
+ grpc_iomgr_cb_func cb, void* cb_arg);
+void grpc_stream_ref(grpc_stream_refcount* refcount);
+void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount);
#define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
grpc_stream_ref_init(rc, ir, cb, cb_arg)
#endif
/* Wrap a buffer that is owned by some stream object into a slice that shares
the same refcount */
-grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount,
- void *buffer, size_t length);
+grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
+ void* buffer, size_t length);
typedef struct {
uint64_t framing_bytes;
@@ -90,14 +90,14 @@ typedef struct grpc_transport_stream_stats {
grpc_transport_one_way_stats outgoing;
} grpc_transport_stream_stats;
-void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats *from,
- grpc_transport_one_way_stats *to);
+void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
+ grpc_transport_one_way_stats* to);
-void grpc_transport_move_stats(grpc_transport_stream_stats *from,
- grpc_transport_stream_stats *to);
+void grpc_transport_move_stats(grpc_transport_stream_stats* from,
+ grpc_transport_stream_stats* to);
typedef struct {
- void *extra_arg;
+ void* extra_arg;
grpc_closure closure;
} grpc_handler_private_op_data;
@@ -110,10 +110,10 @@ typedef struct grpc_transport_stream_op_batch {
/** Should be enqueued when all requested operations (excluding recv_message
and recv_initial_metadata which have their own closures) in a given batch
have been completed. */
- grpc_closure *on_complete;
+ grpc_closure* on_complete;
/** Values for the stream op (fields set are determined by flags above) */
- grpc_transport_stream_op_batch_payload *payload;
+ grpc_transport_stream_op_batch_payload* payload;
/** Send initial metadata to the peer, from the provided metadata batch. */
bool send_initial_metadata : 1;
@@ -149,17 +149,17 @@ typedef struct grpc_transport_stream_op_batch {
struct grpc_transport_stream_op_batch_payload {
struct {
- grpc_metadata_batch *send_initial_metadata;
+ grpc_metadata_batch* send_initial_metadata;
/** Iff send_initial_metadata != NULL, flags associated with
send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
uint32_t send_initial_metadata_flags;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
- gpr_atm *peer_string;
+ gpr_atm* peer_string;
} send_initial_metadata;
struct {
- grpc_metadata_batch *send_trailing_metadata;
+ grpc_metadata_batch* send_trailing_metadata;
} send_trailing_metadata;
struct {
@@ -168,21 +168,21 @@ struct grpc_transport_stream_op_batch_payload {
// grpc_byte_stream_destroy() on this.
// The batch's on_complete will not be called until after the byte
// stream is destroyed.
- grpc_byte_stream *send_message;
+ grpc_byte_stream* send_message;
} send_message;
struct {
- grpc_metadata_batch *recv_initial_metadata;
- uint32_t *recv_flags;
+ grpc_metadata_batch* recv_initial_metadata;
+ uint32_t* recv_flags;
/** Should be enqueued when initial metadata is ready to be processed. */
- grpc_closure *recv_initial_metadata_ready;
+ grpc_closure* recv_initial_metadata_ready;
// If not NULL, will be set to true if trailing metadata is
// immediately available. This may be a signal that we received a
// Trailers-Only response.
- bool *trailing_metadata_available;
+ bool* trailing_metadata_available;
// If non-NULL, will be set by the transport to the peer string
// (a char*, which the caller takes ownership of).
- gpr_atm *peer_string;
+ gpr_atm* peer_string;
} recv_initial_metadata;
struct {
@@ -190,17 +190,17 @@ struct grpc_transport_stream_op_batch_payload {
// containing a received message.
// The caller is responsible for calling grpc_byte_stream_destroy()
// on this byte stream.
- grpc_byte_stream **recv_message;
+ grpc_byte_stream** recv_message;
/** Should be enqueued when one message is ready to be processed. */
- grpc_closure *recv_message_ready;
+ grpc_closure* recv_message_ready;
} recv_message;
struct {
- grpc_metadata_batch *recv_trailing_metadata;
+ grpc_metadata_batch* recv_trailing_metadata;
} recv_trailing_metadata;
struct {
- grpc_transport_stream_stats *collect_stats;
+ grpc_transport_stream_stats* collect_stats;
} collect_stats;
/** Forcefully close this stream.
@@ -216,43 +216,43 @@ struct grpc_transport_stream_op_batch_payload {
struct {
// Error contract: the transport that gets this op must cause cancel_error
// to be unref'ed after processing it
- grpc_error *cancel_error;
+ grpc_error* cancel_error;
} cancel_stream;
/* Indexes correspond to grpc_context_index enum values */
- grpc_call_context_element *context;
+ grpc_call_context_element* context;
};
/** Transport op: a set of operations to perform on a transport as a whole */
typedef struct grpc_transport_op {
/** Called when processing of this op is done. */
- grpc_closure *on_consumed;
+ grpc_closure* on_consumed;
/** connectivity monitoring - set connectivity_state to NULL to unsubscribe */
- grpc_closure *on_connectivity_state_change;
- grpc_connectivity_state *connectivity_state;
+ grpc_closure* on_connectivity_state_change;
+ grpc_connectivity_state* connectivity_state;
/** should the transport be disconnected
* Error contract: the transport that gets this op must cause
* disconnect_with_error to be unref'ed after processing it */
- grpc_error *disconnect_with_error;
+ grpc_error* disconnect_with_error;
/** what should the goaway contain?
* Error contract: the transport that gets this op must cause
* goaway_error to be unref'ed after processing it */
- grpc_error *goaway_error;
+ grpc_error* goaway_error;
/** set the callback for accepting new streams;
this is a permanent callback, unlike the other one-shot closures.
If true, the callback is set to set_accept_stream_fn, with its
user_data argument set to set_accept_stream_user_data */
bool set_accept_stream;
- void (*set_accept_stream_fn)(grpc_exec_ctx *exec_ctx, void *user_data,
- grpc_transport *transport,
- const void *server_data);
- void *set_accept_stream_user_data;
+ void (*set_accept_stream_fn)(grpc_exec_ctx* exec_ctx, void* user_data,
+ grpc_transport* transport,
+ const void* server_data);
+ void* set_accept_stream_user_data;
/** add this transport to a pollset */
- grpc_pollset *bind_pollset;
+ grpc_pollset* bind_pollset;
/** add this transport to a pollset_set */
- grpc_pollset_set *bind_pollset_set;
+ grpc_pollset_set* bind_pollset_set;
/** send a ping, call this back if not NULL */
- grpc_closure *send_ping;
+ grpc_closure* send_ping;
/***************************************************************************
* remaining fields are initialized and used at the discretion of the
@@ -263,7 +263,7 @@ typedef struct grpc_transport_op {
/* Returns the amount of memory required to store a grpc_stream for this
transport */
-size_t grpc_transport_stream_size(grpc_transport *transport);
+size_t grpc_transport_stream_size(grpc_transport* transport);
/* Initialize transport data for a stream.
@@ -275,13 +275,13 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
stream - a pointer to uninitialized memory to initialize
server_data - either NULL for a client initiated stream, or a pointer
supplied from the accept_stream callback function */
-int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport, grpc_stream *stream,
- grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena);
+int grpc_transport_init_stream(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport, grpc_stream* stream,
+ grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena);
-void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
- grpc_stream *stream, grpc_polling_entity *pollent);
+void grpc_transport_set_pops(grpc_exec_ctx* exec_ctx, grpc_transport* transport,
+ grpc_stream* stream, grpc_polling_entity* pollent);
/* Destroy transport data for a stream.
@@ -293,17 +293,17 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
transport - the transport on which to create this stream
stream - the grpc_stream to destroy (memory is still owned by the
caller, but any child memory must be cleaned up) */
-void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_stream *stream,
- grpc_closure *then_schedule_closure);
+void grpc_transport_destroy_stream(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_stream* stream,
+ grpc_closure* then_schedule_closure);
void grpc_transport_stream_op_batch_finish_with_failure(
- grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op,
- grpc_error *error, grpc_call_combiner *call_combiner);
+ grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* op,
+ grpc_error* error, grpc_call_combiner* call_combiner);
-char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op);
-char *grpc_transport_op_string(grpc_transport_op *op);
+char* grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch* op);
+char* grpc_transport_op_string(grpc_transport_op* op);
/* Send a batch of operations on a transport
@@ -315,42 +315,42 @@ char *grpc_transport_op_string(grpc_transport_op *op);
non-NULL and previously initialized by the same transport.
op - a grpc_transport_stream_op_batch specifying the op to perform
*/
-void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_stream *stream,
- grpc_transport_stream_op_batch *op);
+void grpc_transport_perform_stream_op(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_stream* stream,
+ grpc_transport_stream_op_batch* op);
-void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport,
- grpc_transport_op *op);
+void grpc_transport_perform_op(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport,
+ grpc_transport_op* op);
/* Send a ping on a transport
Calls cb with user data when a response is received. */
-void grpc_transport_ping(grpc_transport *transport, grpc_closure *cb);
+void grpc_transport_ping(grpc_transport* transport, grpc_closure* cb);
/* Advise peer of pending connection termination. */
-void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
+void grpc_transport_goaway(grpc_transport* transport, grpc_status_code status,
grpc_slice debug_data);
/* Close a transport. Aborts all open streams. */
-void grpc_transport_close(grpc_transport *transport);
+void grpc_transport_close(grpc_transport* transport);
/* Destroy the transport */
-void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport);
+void grpc_transport_destroy(grpc_exec_ctx* exec_ctx, grpc_transport* transport);
/* Get the endpoint used by \a transport */
-grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx,
- grpc_transport *transport);
+grpc_endpoint* grpc_transport_get_endpoint(grpc_exec_ctx* exec_ctx,
+ grpc_transport* transport);
/* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
\a on_consumed and then delete the returned transport op */
-grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed);
+grpc_transport_op* grpc_make_transport_op(grpc_closure* on_consumed);
/* Allocate a grpc_transport_stream_op_batch, and preconfigure the on_consumed
closure
to \a on_consumed and then delete the returned transport op */
-grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
- grpc_closure *on_consumed);
+grpc_transport_stream_op_batch* grpc_make_transport_stream_op(
+ grpc_closure* on_consumed);
#ifdef __cplusplus
}
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 445fb41ab1..22ad599e2e 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -31,46 +31,46 @@ typedef struct grpc_transport_vtable {
size_t sizeof_stream; /* = sizeof(transport stream) */
/* name of this transport implementation */
- const char *name;
+ const char* name;
/* implementation of grpc_transport_init_stream */
- int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_stream_refcount *refcount,
- const void *server_data, gpr_arena *arena);
+ int (*init_stream)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_stream_refcount* refcount,
+ const void* server_data, gpr_arena* arena);
/* implementation of grpc_transport_set_pollset */
- void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_pollset *pollset);
+ void (*set_pollset)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_pollset* pollset);
/* implementation of grpc_transport_set_pollset */
- void (*set_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, grpc_pollset_set *pollset_set);
+ void (*set_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream, grpc_pollset_set* pollset_set);
/* implementation of grpc_transport_perform_stream_op */
- void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream,
- grpc_transport_stream_op_batch *op);
+ void (*perform_stream_op)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream,
+ grpc_transport_stream_op_batch* op);
/* implementation of grpc_transport_perform_op */
- void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_transport_op *op);
+ void (*perform_op)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_transport_op* op);
/* implementation of grpc_transport_destroy_stream */
- void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream,
- grpc_closure *then_schedule_closure);
+ void (*destroy_stream)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
+ grpc_stream* stream,
+ grpc_closure* then_schedule_closure);
/* implementation of grpc_transport_destroy */
- void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_transport* self);
/* implementation of grpc_transport_get_endpoint */
- grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
+ grpc_endpoint* (*get_endpoint)(grpc_exec_ctx* exec_ctx, grpc_transport* self);
} grpc_transport_vtable;
/* an instance of a grpc transport */
struct grpc_transport {
/* pointer to a vtable defining operations on this transport */
- const grpc_transport_vtable *vtable;
+ const grpc_transport_vtable* vtable;
};
#ifdef __cplusplus
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index cc11b0cc49..24e74c10c5 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -35,7 +35,7 @@
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
-static void put_metadata(gpr_strvec *b, grpc_mdelem md) {
+static void put_metadata(gpr_strvec* b, grpc_mdelem md) {
gpr_strvec_add(b, gpr_strdup("key="));
gpr_strvec_add(
b, grpc_dump_slice(GRPC_MDKEY(md), GPR_DUMP_HEX | GPR_DUMP_ASCII));
@@ -45,23 +45,23 @@ static void put_metadata(gpr_strvec *b, grpc_mdelem md) {
b, grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII));
}
-static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
- grpc_linked_mdelem *m;
+static void put_metadata_list(gpr_strvec* b, grpc_metadata_batch md) {
+ grpc_linked_mdelem* m;
for (m = md.list.head; m != NULL; m = m->next) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
if (md.deadline != GRPC_MILLIS_INF_FUTURE) {
- char *tmp;
+ char* tmp;
gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline);
gpr_strvec_add(b, tmp);
}
}
-char *grpc_transport_stream_op_batch_string(
- grpc_transport_stream_op_batch *op) {
- char *tmp;
- char *out;
+char* grpc_transport_stream_op_batch_string(
+ grpc_transport_stream_op_batch* op) {
+ char* tmp;
+ char* out;
gpr_strvec b;
gpr_strvec_init(&b);
@@ -107,7 +107,7 @@ char *grpc_transport_stream_op_batch_string(
if (op->cancel_stream) {
gpr_strvec_add(&b, gpr_strdup(" "));
- const char *msg =
+ const char* msg =
grpc_error_string(op->payload->cancel_stream.cancel_error);
gpr_asprintf(&tmp, "CANCEL:%s", msg);
@@ -127,9 +127,9 @@ char *grpc_transport_stream_op_batch_string(
return out;
}
-char *grpc_transport_op_string(grpc_transport_op *op) {
- char *tmp;
- char *out;
+char* grpc_transport_op_string(grpc_transport_op* op) {
+ char* tmp;
+ char* out;
bool first = true;
gpr_strvec b;
@@ -153,7 +153,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
- const char *err = grpc_error_string(op->disconnect_with_error);
+ const char* err = grpc_error_string(op->disconnect_with_error);
gpr_asprintf(&tmp, "DISCONNECT:%s", err);
gpr_strvec_add(&b, tmp);
}
@@ -161,7 +161,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
if (op->goaway_error) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = false;
- const char *msg = grpc_error_string(op->goaway_error);
+ const char* msg = grpc_error_string(op->goaway_error);
gpr_asprintf(&tmp, "SEND_GOAWAY:%s", msg);
gpr_strvec_add(&b, tmp);
@@ -199,10 +199,10 @@ char *grpc_transport_op_string(grpc_transport_op *op) {
return out;
}
-void grpc_call_log_op(const char *file, int line, gpr_log_severity severity,
- grpc_call_element *elem,
- grpc_transport_stream_op_batch *op) {
- char *str = grpc_transport_stream_op_batch_string(op);
+void grpc_call_log_op(const char* file, int line, gpr_log_severity severity,
+ grpc_call_element* elem,
+ grpc_transport_stream_op_batch* op) {
+ char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(file, line, severity, "OP[%s:%p]: %s", elem->filter->name, elem, str);
gpr_free(str);
}
diff --git a/src/core/tsi/fake_transport_security.cc b/src/core/tsi/fake_transport_security.cc
index 349dcf5cb8..b12dde31fb 100644
--- a/src/core/tsi/fake_transport_security.cc
+++ b/src/core/tsi/fake_transport_security.cc
@@ -41,7 +41,7 @@
where the size field value is the size of the size field plus the size of
the data encoded in little endian on 4 bytes. */
typedef struct {
- unsigned char *data;
+ unsigned char* data;
size_t size;
size_t allocated_size;
size_t offset;
@@ -63,7 +63,7 @@ typedef struct {
int needs_incoming_message;
tsi_fake_frame incoming_frame;
tsi_fake_frame outgoing_frame;
- unsigned char *outgoing_bytes_buffer;
+ unsigned char* outgoing_bytes_buffer;
size_t outgoing_bytes_buffer_size;
tsi_result result;
} tsi_fake_handshaker;
@@ -85,10 +85,10 @@ typedef struct {
/* --- Utils. ---*/
-static const char *tsi_fake_handshake_message_strings[] = {
+static const char* tsi_fake_handshake_message_strings[] = {
"CLIENT_INIT", "SERVER_INIT", "CLIENT_FINISHED", "SERVER_FINISHED"};
-static const char *tsi_fake_handshake_message_to_string(int msg) {
+static const char* tsi_fake_handshake_message_to_string(int msg) {
if (msg < 0 || msg >= TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
gpr_log(GPR_ERROR, "Invalid message %d", msg);
return "UNKNOWN";
@@ -97,7 +97,7 @@ static const char *tsi_fake_handshake_message_to_string(int msg) {
}
static tsi_result tsi_fake_handshake_message_from_string(
- const char *msg_string, tsi_fake_handshake_message *msg) {
+ const char* msg_string, tsi_fake_handshake_message* msg) {
for (int i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) {
if (strncmp(msg_string, tsi_fake_handshake_message_strings[i],
strlen(tsi_fake_handshake_message_strings[i])) == 0) {
@@ -109,22 +109,22 @@ static tsi_result tsi_fake_handshake_message_from_string(
return TSI_DATA_CORRUPTED;
}
-static uint32_t load32_little_endian(const unsigned char *buf) {
+static uint32_t load32_little_endian(const unsigned char* buf) {
return ((uint32_t)(buf[0]) | (uint32_t)(buf[1] << 8) |
(uint32_t)(buf[2] << 16) | (uint32_t)(buf[3] << 24));
}
-static void store32_little_endian(uint32_t value, unsigned char *buf) {
+static void store32_little_endian(uint32_t value, unsigned char* buf) {
buf[3] = (unsigned char)((value >> 24) & 0xFF);
buf[2] = (unsigned char)((value >> 16) & 0xFF);
buf[1] = (unsigned char)((value >> 8) & 0xFF);
buf[0] = (unsigned char)((value)&0xFF);
}
-static uint32_t read_frame_size(const grpc_slice_buffer *sb) {
+static uint32_t read_frame_size(const grpc_slice_buffer* sb) {
GPR_ASSERT(sb != NULL && sb->length >= TSI_FAKE_FRAME_HEADER_SIZE);
uint8_t frame_size_buffer[TSI_FAKE_FRAME_HEADER_SIZE];
- uint8_t *buf = frame_size_buffer;
+ uint8_t* buf = frame_size_buffer;
/* Copies the first 4 bytes to a temporary buffer. */
size_t remaining = TSI_FAKE_FRAME_HEADER_SIZE;
for (size_t i = 0; i < sb->count; i++) {
@@ -143,7 +143,7 @@ static uint32_t read_frame_size(const grpc_slice_buffer *sb) {
return load32_little_endian(frame_size_buffer);
}
-static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
+static void tsi_fake_frame_reset(tsi_fake_frame* frame, int needs_draining) {
frame->offset = 0;
frame->needs_draining = needs_draining;
if (!needs_draining) frame->size = 0;
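
The comment near the top of fake_transport_security.cc describes the fake frame layout: a 4-byte little-endian size field whose value counts the size field itself plus the payload, which is exactly what the load32_little_endian/store32_little_endian helpers above read and write. A minimal standalone sketch of that framing in plain C follows; write_frame and the buffer sizes are illustrative and not part of the tree.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAME_HEADER_SIZE 4 /* same role as TSI_FAKE_FRAME_HEADER_SIZE */

/* Writes a fake-style frame: 4-byte little-endian size (header + payload),
   followed by the payload bytes. */
static void write_frame(const unsigned char* payload, uint32_t payload_size,
                        unsigned char* out) {
  uint32_t total = payload_size + FRAME_HEADER_SIZE; /* size counts itself */
  out[0] = (unsigned char)(total & 0xFF);
  out[1] = (unsigned char)((total >> 8) & 0xFF);
  out[2] = (unsigned char)((total >> 16) & 0xFF);
  out[3] = (unsigned char)((total >> 24) & 0xFF);
  memcpy(out + FRAME_HEADER_SIZE, payload, payload_size);
}

int main(void) {
  unsigned char buf[64];
  write_frame((const unsigned char*)"CLIENT_INIT", 11, buf);
  uint32_t size = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
                  ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
  printf("frame size = %u\n", size); /* prints 15: 4-byte header + 11 bytes */
  return 0;
}
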
@@ -151,13 +151,13 @@ static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {
/* Checks if the frame's allocated size is at least frame->size, and reallocs
* more memory if necessary. */
-static void tsi_fake_frame_ensure_size(tsi_fake_frame *frame) {
+static void tsi_fake_frame_ensure_size(tsi_fake_frame* frame) {
if (frame->data == NULL) {
frame->allocated_size = frame->size;
- frame->data = (unsigned char *)gpr_malloc(frame->allocated_size);
+ frame->data = (unsigned char*)gpr_malloc(frame->allocated_size);
} else if (frame->size > frame->allocated_size) {
- unsigned char *new_data =
- (unsigned char *)gpr_realloc(frame->data, frame->size);
+ unsigned char* new_data =
+ (unsigned char*)gpr_realloc(frame->data, frame->size);
frame->data = new_data;
frame->allocated_size = frame->size;
}
@@ -166,17 +166,17 @@ static void tsi_fake_frame_ensure_size(tsi_fake_frame *frame) {
/* Decodes the serialized fake frame contained in incoming_bytes, and fills
* frame with the contents of the decoded frame.
* This method should not be called if frame->needs_framing is not 0. */
-static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes,
- size_t *incoming_bytes_size,
- tsi_fake_frame *frame) {
+static tsi_result tsi_fake_frame_decode(const unsigned char* incoming_bytes,
+ size_t* incoming_bytes_size,
+ tsi_fake_frame* frame) {
size_t available_size = *incoming_bytes_size;
size_t to_read_size = 0;
- const unsigned char *bytes_cursor = incoming_bytes;
+ const unsigned char* bytes_cursor = incoming_bytes;
if (frame->needs_draining) return TSI_INTERNAL_ERROR;
if (frame->data == NULL) {
frame->allocated_size = TSI_FAKE_FRAME_INITIAL_ALLOCATED_SIZE;
- frame->data = (unsigned char *)gpr_malloc(frame->allocated_size);
+ frame->data = (unsigned char*)gpr_malloc(frame->allocated_size);
}
if (frame->offset < TSI_FAKE_FRAME_HEADER_SIZE) {
@@ -215,9 +215,9 @@ static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes,
/* Encodes a fake frame into its wire format and places the result in
* outgoing_bytes. outgoing_bytes_size indicates the size of the encoded frame.
* This method should not be called if frame->needs_framing is 0. */
-static tsi_result tsi_fake_frame_encode(unsigned char *outgoing_bytes,
- size_t *outgoing_bytes_size,
- tsi_fake_frame *frame) {
+static tsi_result tsi_fake_frame_encode(unsigned char* outgoing_bytes,
+ size_t* outgoing_bytes_size,
+ tsi_fake_frame* frame) {
size_t to_write_size = frame->size - frame->offset;
if (!frame->needs_draining) return TSI_INTERNAL_ERROR;
if (*outgoing_bytes_size < to_write_size) {
@@ -233,8 +233,8 @@ static tsi_result tsi_fake_frame_encode(unsigned char *outgoing_bytes,
/* Sets the payload of a fake frame to contain the given data blob, where
* data_size indicates the size of data. */
-static tsi_result tsi_fake_frame_set_data(unsigned char *data, size_t data_size,
- tsi_fake_frame *frame) {
+static tsi_result tsi_fake_frame_set_data(unsigned char* data, size_t data_size,
+ tsi_fake_frame* frame) {
frame->offset = 0;
frame->size = data_size + TSI_FAKE_FRAME_HEADER_SIZE;
tsi_fake_frame_ensure_size(frame);
@@ -245,24 +245,24 @@ static tsi_result tsi_fake_frame_set_data(unsigned char *data, size_t data_size,
}
/* Destroys the contents of a fake frame. */
-static void tsi_fake_frame_destruct(tsi_fake_frame *frame) {
+static void tsi_fake_frame_destruct(tsi_fake_frame* frame) {
if (frame->data != NULL) gpr_free(frame->data);
}
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result fake_protector_protect(tsi_frame_protector *self,
- const unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size) {
+static tsi_result fake_protector_protect(tsi_frame_protector* self,
+ const unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+ tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
unsigned char frame_header[TSI_FAKE_FRAME_HEADER_SIZE];
- tsi_fake_frame *frame = &impl->protect_frame;
+ tsi_fake_frame* frame = &impl->protect_frame;
size_t saved_output_size = *protected_output_frames_size;
size_t drained_size = 0;
- size_t *num_bytes_written = protected_output_frames_size;
+ size_t* num_bytes_written = protected_output_frames_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -313,11 +313,11 @@ static tsi_result fake_protector_protect(tsi_frame_protector *self,
}
static tsi_result fake_protector_protect_flush(
- tsi_frame_protector *self, unsigned char *protected_output_frames,
- size_t *protected_output_frames_size, size_t *still_pending_size) {
+ tsi_frame_protector* self, unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size, size_t* still_pending_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
- tsi_fake_frame *frame = &impl->protect_frame;
+ tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+ tsi_fake_frame* frame = &impl->protect_frame;
if (!frame->needs_draining) {
/* Create a short frame. */
frame->size = frame->offset;
@@ -334,15 +334,15 @@ static tsi_result fake_protector_protect_flush(
}
static tsi_result fake_protector_unprotect(
- tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
- size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size) {
+ tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
+ size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
- tsi_fake_frame *frame = &impl->unprotect_frame;
+ tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
+ tsi_fake_frame* frame = &impl->unprotect_frame;
size_t saved_output_size = *unprotected_bytes_size;
size_t drained_size = 0;
- size_t *num_bytes_written = unprotected_bytes_size;
+ size_t* num_bytes_written = unprotected_bytes_size;
*num_bytes_written = 0;
/* Try to drain first. */
@@ -382,29 +382,31 @@ static tsi_result fake_protector_unprotect(
return result;
}
-static void fake_protector_destroy(tsi_frame_protector *self) {
- tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self;
+static void fake_protector_destroy(tsi_frame_protector* self) {
+ tsi_fake_frame_protector* impl = (tsi_fake_frame_protector*)self;
tsi_fake_frame_destruct(&impl->protect_frame);
tsi_fake_frame_destruct(&impl->unprotect_frame);
gpr_free(self);
}
static const tsi_frame_protector_vtable frame_protector_vtable = {
- fake_protector_protect, fake_protector_protect_flush,
- fake_protector_unprotect, fake_protector_destroy,
+ fake_protector_protect,
+ fake_protector_protect_flush,
+ fake_protector_unprotect,
+ fake_protector_destroy,
};
/* --- tsi_zero_copy_grpc_protector methods implementation. ---*/
static tsi_result fake_zero_copy_grpc_protector_protect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *unprotected_slices,
- grpc_slice_buffer *protected_slices) {
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* unprotected_slices,
+ grpc_slice_buffer* protected_slices) {
if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
- tsi_fake_zero_copy_grpc_protector *impl =
- (tsi_fake_zero_copy_grpc_protector *)self;
+ tsi_fake_zero_copy_grpc_protector* impl =
+ (tsi_fake_zero_copy_grpc_protector*)self;
/* Protects each frame. */
while (unprotected_slices->length > 0) {
size_t frame_length =
@@ -421,14 +423,14 @@ static tsi_result fake_zero_copy_grpc_protector_protect(
}
static tsi_result fake_zero_copy_grpc_protector_unprotect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *protected_slices,
- grpc_slice_buffer *unprotected_slices) {
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* protected_slices,
+ grpc_slice_buffer* unprotected_slices) {
if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
}
- tsi_fake_zero_copy_grpc_protector *impl =
- (tsi_fake_zero_copy_grpc_protector *)self;
+ tsi_fake_zero_copy_grpc_protector* impl =
+ (tsi_fake_zero_copy_grpc_protector*)self;
grpc_slice_buffer_move_into(protected_slices, &impl->protected_sb);
/* Unprotect each frame, if we get a full frame. */
while (impl->protected_sb.length >= TSI_FAKE_FRAME_HEADER_SIZE) {
@@ -456,10 +458,10 @@ static tsi_result fake_zero_copy_grpc_protector_unprotect(
}
static void fake_zero_copy_grpc_protector_destroy(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self) {
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self) {
if (self == NULL) return;
- tsi_fake_zero_copy_grpc_protector *impl =
- (tsi_fake_zero_copy_grpc_protector *)self;
+ tsi_fake_zero_copy_grpc_protector* impl =
+ (tsi_fake_zero_copy_grpc_protector*)self;
grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb);
grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb);
gpr_free(impl);
@@ -476,12 +478,12 @@ static const tsi_zero_copy_grpc_protector_vtable
typedef struct {
tsi_handshaker_result base;
- unsigned char *unused_bytes;
+ unsigned char* unused_bytes;
size_t unused_bytes_size;
} fake_handshaker_result;
static tsi_result fake_handshaker_result_extract_peer(
- const tsi_handshaker_result *self, tsi_peer *peer) {
+ const tsi_handshaker_result* self, tsi_peer* peer) {
/* Construct a tsi_peer with 1 property: certificate type. */
tsi_result result = tsi_construct_peer(1, peer);
if (result != TSI_OK) return result;
@@ -493,32 +495,32 @@ static tsi_result fake_handshaker_result_extract_peer(
}
static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector(
- void *exec_ctx, const tsi_handshaker_result *self,
- size_t *max_output_protected_frame_size,
- tsi_zero_copy_grpc_protector **protector) {
+ void* exec_ctx, const tsi_handshaker_result* self,
+ size_t* max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector** protector) {
*protector =
tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size);
return TSI_OK;
}
static tsi_result fake_handshaker_result_create_frame_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector) {
+ const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector) {
*protector = tsi_create_fake_frame_protector(max_output_protected_frame_size);
return TSI_OK;
}
static tsi_result fake_handshaker_result_get_unused_bytes(
- const tsi_handshaker_result *self, const unsigned char **bytes,
- size_t *bytes_size) {
- fake_handshaker_result *result = (fake_handshaker_result *)self;
+ const tsi_handshaker_result* self, const unsigned char** bytes,
+ size_t* bytes_size) {
+ fake_handshaker_result* result = (fake_handshaker_result*)self;
*bytes_size = result->unused_bytes_size;
*bytes = result->unused_bytes;
return TSI_OK;
}
-static void fake_handshaker_result_destroy(tsi_handshaker_result *self) {
- fake_handshaker_result *result = (fake_handshaker_result *)self;
+static void fake_handshaker_result_destroy(tsi_handshaker_result* self) {
+ fake_handshaker_result* result = (fake_handshaker_result*)self;
gpr_free(result->unused_bytes);
gpr_free(self);
}
@@ -532,17 +534,17 @@ static const tsi_handshaker_result_vtable handshaker_result_vtable = {
};
static tsi_result fake_handshaker_result_create(
- const unsigned char *unused_bytes, size_t unused_bytes_size,
- tsi_handshaker_result **handshaker_result) {
+ const unsigned char* unused_bytes, size_t unused_bytes_size,
+ tsi_handshaker_result** handshaker_result) {
if ((unused_bytes_size > 0 && unused_bytes == NULL) ||
handshaker_result == NULL) {
return TSI_INVALID_ARGUMENT;
}
- fake_handshaker_result *result =
- (fake_handshaker_result *)gpr_zalloc(sizeof(*result));
+ fake_handshaker_result* result =
+ (fake_handshaker_result*)gpr_zalloc(sizeof(*result));
result->base.vtable = &handshaker_result_vtable;
if (unused_bytes_size > 0) {
- result->unused_bytes = (unsigned char *)gpr_malloc(unused_bytes_size);
+ result->unused_bytes = (unsigned char*)gpr_malloc(unused_bytes_size);
memcpy(result->unused_bytes, unused_bytes, unused_bytes_size);
}
result->unused_bytes_size = unused_bytes_size;
@@ -553,8 +555,8 @@ static tsi_result fake_handshaker_result_create(
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
- tsi_handshaker *self, unsigned char *bytes, size_t *bytes_size) {
- tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
+ tsi_handshaker* self, unsigned char* bytes, size_t* bytes_size) {
+ tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
tsi_result result = TSI_OK;
if (impl->needs_incoming_message || impl->result == TSI_OK) {
*bytes_size = 0;
@@ -563,9 +565,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (!impl->outgoing_frame.needs_draining) {
tsi_fake_handshake_message next_message_to_send =
(tsi_fake_handshake_message)(impl->next_message_to_send + 2);
- const char *msg_string =
+ const char* msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
- result = tsi_fake_frame_set_data((unsigned char *)msg_string,
+ result = tsi_fake_frame_set_data((unsigned char*)msg_string,
strlen(msg_string), &impl->outgoing_frame);
if (result != TSI_OK) return result;
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
@@ -594,9 +596,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
}
static tsi_result fake_handshaker_process_bytes_from_peer(
- tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
+ tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
tsi_result result = TSI_OK;
- tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
+ tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
tsi_fake_handshake_message expected_msg =
(tsi_fake_handshake_message)(impl->next_message_to_send - 1);
tsi_fake_handshake_message received_msg;
@@ -610,7 +612,7 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
/* We now have a complete frame. */
result = tsi_fake_handshake_message_from_string(
- (const char *)impl->incoming_frame.data + TSI_FAKE_FRAME_HEADER_SIZE,
+ (const char*)impl->incoming_frame.data + TSI_FAKE_FRAME_HEADER_SIZE,
&received_msg);
if (result != TSI_OK) {
impl->result = result;
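
The fake handshake is a fixed four-message ping-pong: the client starts with CLIENT_INIT, each side bumps its next message by 2 after sending (the impl->next_message_to_send + 2 advance in fake_handshaker_get_bytes_to_send_to_peer above), and expects the peer's incoming message to be exactly one step behind. A standalone sketch of that sequence follows, assuming the message enum values run 0..3 in the order of tsi_fake_handshake_message_strings; the names here are illustrative, not the real header.

#include <stdio.h>

typedef enum {
  FAKE_CLIENT_INIT = 0,
  FAKE_SERVER_INIT,
  FAKE_CLIENT_FINISHED,
  FAKE_SERVER_FINISHED,
  FAKE_HANDSHAKE_MESSAGE_MAX
} fake_handshake_message;

static const char* kMessageNames[] = {"CLIENT_INIT", "SERVER_INIT",
                                      "CLIENT_FINISHED", "SERVER_FINISHED"};

int main(void) {
  int client_next = FAKE_CLIENT_INIT; /* is_client == 1 */
  int server_next = FAKE_SERVER_INIT; /* is_client == 0 */
  for (int turn = 0; turn < FAKE_HANDSHAKE_MESSAGE_MAX; turn++) {
    int* next = (turn % 2 == 0) ? &client_next : &server_next;
    printf("%s sends %s\n", (turn % 2 == 0) ? "client" : "server",
           kMessageNames[*next]);
    *next += 2; /* same advance as impl->next_message_to_send + 2 */
  }
  return 0;
}
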
@@ -637,13 +639,13 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
return TSI_OK;
}
-static tsi_result fake_handshaker_get_result(tsi_handshaker *self) {
- tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
+static tsi_result fake_handshaker_get_result(tsi_handshaker* self) {
+ tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
return impl->result;
}
-static void fake_handshaker_destroy(tsi_handshaker *self) {
- tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self;
+static void fake_handshaker_destroy(tsi_handshaker* self) {
+ tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
tsi_fake_frame_destruct(&impl->incoming_frame);
tsi_fake_frame_destruct(&impl->outgoing_frame);
gpr_free(impl->outgoing_bytes_buffer);
@@ -651,17 +653,17 @@ static void fake_handshaker_destroy(tsi_handshaker *self) {
}
static tsi_result fake_handshaker_next(
- tsi_handshaker *self, const unsigned char *received_bytes,
- size_t received_bytes_size, const unsigned char **bytes_to_send,
- size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result,
- tsi_handshaker_on_next_done_cb cb, void *user_data) {
+ tsi_handshaker* self, const unsigned char* received_bytes,
+ size_t received_bytes_size, const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data) {
/* Sanity check the arguments. */
if ((received_bytes_size > 0 && received_bytes == NULL) ||
bytes_to_send == NULL || bytes_to_send_size == NULL ||
handshaker_result == NULL) {
return TSI_INVALID_ARGUMENT;
}
- tsi_fake_handshaker *handshaker = (tsi_fake_handshaker *)self;
+ tsi_fake_handshaker* handshaker = (tsi_fake_handshaker*)self;
tsi_result result = TSI_OK;
/* Decode and process a handshake frame from the peer. */
@@ -683,8 +685,8 @@ static tsi_result fake_handshaker_next(
if (result == TSI_INCOMPLETE_DATA) {
handshaker->outgoing_bytes_buffer_size *= 2;
handshaker->outgoing_bytes_buffer =
- (unsigned char *)gpr_realloc(handshaker->outgoing_bytes_buffer,
- handshaker->outgoing_bytes_buffer_size);
+ (unsigned char*)gpr_realloc(handshaker->outgoing_bytes_buffer,
+ handshaker->outgoing_bytes_buffer_size);
}
} while (result == TSI_INCOMPLETE_DATA);
if (result != TSI_OK) return result;
@@ -696,7 +698,7 @@ static tsi_result fake_handshaker_next(
*handshaker_result = NULL;
} else {
/* Calculate the unused bytes. */
- const unsigned char *unused_bytes = NULL;
+ const unsigned char* unused_bytes = NULL;
size_t unused_bytes_size = received_bytes_size - consumed_bytes_size;
if (unused_bytes_size > 0) {
unused_bytes = received_bytes + consumed_bytes_size;
@@ -724,15 +726,15 @@ static const tsi_handshaker_vtable handshaker_vtable = {
fake_handshaker_next,
};
-tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
- tsi_fake_handshaker *impl = (tsi_fake_handshaker *)gpr_zalloc(sizeof(*impl));
+tsi_handshaker* tsi_create_fake_handshaker(int is_client) {
+ tsi_fake_handshaker* impl = (tsi_fake_handshaker*)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &handshaker_vtable;
impl->is_client = is_client;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->outgoing_bytes_buffer_size =
TSI_FAKE_HANDSHAKER_OUTGOING_BUFFER_INITIAL_SIZE;
impl->outgoing_bytes_buffer =
- (unsigned char *)gpr_malloc(impl->outgoing_bytes_buffer_size);
+ (unsigned char*)gpr_malloc(impl->outgoing_bytes_buffer_size);
if (is_client) {
impl->needs_incoming_message = 0;
impl->next_message_to_send = TSI_FAKE_CLIENT_INIT;
@@ -743,10 +745,10 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client) {
return &impl->base;
}
-tsi_frame_protector *tsi_create_fake_frame_protector(
- size_t *max_protected_frame_size) {
- tsi_fake_frame_protector *impl =
- (tsi_fake_frame_protector *)gpr_zalloc(sizeof(*impl));
+tsi_frame_protector* tsi_create_fake_frame_protector(
+ size_t* max_protected_frame_size) {
+ tsi_fake_frame_protector* impl =
+ (tsi_fake_frame_protector*)gpr_zalloc(sizeof(*impl));
impl->max_frame_size = (max_protected_frame_size == NULL)
? TSI_FAKE_DEFAULT_FRAME_SIZE
: *max_protected_frame_size;
@@ -754,10 +756,10 @@ tsi_frame_protector *tsi_create_fake_frame_protector(
return &impl->base;
}
-tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
- size_t *max_protected_frame_size) {
- tsi_fake_zero_copy_grpc_protector *impl =
- (tsi_fake_zero_copy_grpc_protector *)gpr_zalloc(sizeof(*impl));
+tsi_zero_copy_grpc_protector* tsi_create_fake_zero_copy_grpc_protector(
+ size_t* max_protected_frame_size) {
+ tsi_fake_zero_copy_grpc_protector* impl =
+ (tsi_fake_zero_copy_grpc_protector*)gpr_zalloc(sizeof(*impl));
grpc_slice_buffer_init(&impl->header_sb);
grpc_slice_buffer_init(&impl->protected_sb);
impl->max_frame_size = (max_protected_frame_size == NULL)
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index 6159708a84..b90b9962f7 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -33,16 +33,16 @@ extern "C" {
No cryptography is performed in these objects. They just simulate handshake
messages going back and forth for the handshaker and do some framing on
cleartext data for the protector. */
-tsi_handshaker *tsi_create_fake_handshaker(int is_client);
+tsi_handshaker* tsi_create_fake_handshaker(int is_client);
/* Creates a protector directly without going through the handshake phase. */
-tsi_frame_protector *tsi_create_fake_frame_protector(
- size_t *max_protected_frame_size);
+tsi_frame_protector* tsi_create_fake_frame_protector(
+ size_t* max_protected_frame_size);
/* Creates a zero-copy protector directly without going through the handshake
* phase. */
-tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector(
- size_t *max_protected_frame_size);
+tsi_zero_copy_grpc_protector* tsi_create_fake_zero_copy_grpc_protector(
+ size_t* max_protected_frame_size);
#ifdef __cplusplus
}
diff --git a/src/core/tsi/gts_transport_security.cc b/src/core/tsi/gts_transport_security.cc
index d37f3bf8f6..1dfd8c4df0 100644
--- a/src/core/tsi/gts_transport_security.cc
+++ b/src/core/tsi/gts_transport_security.cc
@@ -22,7 +22,7 @@
static gts_shared_resource g_gts_resource;
-gts_shared_resource *gts_get_shared_resource(void) { return &g_gts_resource; }
+gts_shared_resource* gts_get_shared_resource(void) { return &g_gts_resource; }
extern "C" void grpc_tsi_gts_init() {
memset(&g_gts_resource, 0, sizeof(gts_shared_resource));
diff --git a/src/core/tsi/gts_transport_security.h b/src/core/tsi/gts_transport_security.h
index 9590038ed0..8bc2107270 100644
--- a/src/core/tsi/gts_transport_security.h
+++ b/src/core/tsi/gts_transport_security.h
@@ -29,14 +29,14 @@ extern "C" {
typedef struct gts_shared_resource {
gpr_thd_id thread_id;
- grpc_channel *channel;
- grpc_completion_queue *cq;
+ grpc_channel* channel;
+ grpc_completion_queue* cq;
gpr_mu mu;
} gts_shared_resource;
/* This method returns the address of gts_shared_resource object shared by all
* TSI handshakes. */
-gts_shared_resource *gts_get_shared_resource(void);
+gts_shared_resource* gts_get_shared_resource(void);
#ifdef __cplusplus
}
diff --git a/src/core/tsi/ssl_transport_security.cc b/src/core/tsi/ssl_transport_security.cc
index b1c69e9c7b..c1c2de6ca9 100644
--- a/src/core/tsi/ssl_transport_security.cc
+++ b/src/core/tsi/ssl_transport_security.cc
@@ -70,14 +70,14 @@ extern "C" {
/* --- Structure definitions. ---*/
struct tsi_ssl_handshaker_factory {
- const tsi_ssl_handshaker_factory_vtable *vtable;
+ const tsi_ssl_handshaker_factory_vtable* vtable;
gpr_refcount refcount;
};
struct tsi_ssl_client_handshaker_factory {
tsi_ssl_handshaker_factory base;
- SSL_CTX *ssl_context;
- unsigned char *alpn_protocol_list;
+ SSL_CTX* ssl_context;
+ unsigned char* alpn_protocol_list;
size_t alpn_protocol_list_length;
};
@@ -86,28 +86,28 @@ struct tsi_ssl_server_handshaker_factory {
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
tsi_ssl_handshaker_factory base;
- SSL_CTX **ssl_contexts;
- tsi_peer *ssl_context_x509_subject_names;
+ SSL_CTX** ssl_contexts;
+ tsi_peer* ssl_context_x509_subject_names;
size_t ssl_context_count;
- unsigned char *alpn_protocol_list;
+ unsigned char* alpn_protocol_list;
size_t alpn_protocol_list_length;
};
typedef struct {
tsi_handshaker base;
- SSL *ssl;
- BIO *into_ssl;
- BIO *from_ssl;
+ SSL* ssl;
+ BIO* into_ssl;
+ BIO* from_ssl;
tsi_result result;
- tsi_ssl_handshaker_factory *factory_ref;
+ tsi_ssl_handshaker_factory* factory_ref;
} tsi_ssl_handshaker;
typedef struct {
tsi_frame_protector base;
- SSL *ssl;
- BIO *into_ssl;
- BIO *from_ssl;
- unsigned char *buffer;
+ SSL* ssl;
+ BIO* into_ssl;
+ BIO* from_ssl;
+ unsigned char* buffer;
size_t buffer_size;
size_t buffer_offset;
} tsi_ssl_frame_protector;
@@ -115,9 +115,9 @@ typedef struct {
/* --- Library Initialization. ---*/
static gpr_once init_openssl_once = GPR_ONCE_INIT;
-static gpr_mu *openssl_mutexes = NULL;
+static gpr_mu* openssl_mutexes = NULL;
-static void openssl_locking_cb(int mode, int type, const char *file, int line) {
+static void openssl_locking_cb(int mode, int type, const char* file, int line) {
if (mode & CRYPTO_LOCK) {
gpr_mu_lock(&openssl_mutexes[type]);
} else {
@@ -137,7 +137,7 @@ static void init_openssl(void) {
OpenSSL_add_all_algorithms();
num_locks = CRYPTO_num_locks();
GPR_ASSERT(num_locks > 0);
- openssl_mutexes = (gpr_mu *)gpr_malloc((size_t)num_locks * sizeof(gpr_mu));
+ openssl_mutexes = (gpr_mu*)gpr_malloc((size_t)num_locks * sizeof(gpr_mu));
for (i = 0; i < CRYPTO_num_locks(); i++) {
gpr_mu_init(&openssl_mutexes[i]);
}
@@ -147,7 +147,7 @@ static void init_openssl(void) {
/* --- Ssl utils. ---*/
-static const char *ssl_error_string(int error) {
+static const char* ssl_error_string(int error) {
switch (error) {
case SSL_ERROR_NONE:
return "SSL_ERROR_NONE";
@@ -173,8 +173,8 @@ static const char *ssl_error_string(int error) {
}
/* TODO(jboeuf): Remove when we are past the debugging phase with this code. */
-static void ssl_log_where_info(const SSL *ssl, int where, int flag,
- const char *msg) {
+static void ssl_log_where_info(const SSL* ssl, int where, int flag,
+ const char* msg) {
if ((where & flag) && GRPC_TRACER_ON(tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "%20.20s - %30.30s - %5.10s", msg,
SSL_state_string_long(ssl), SSL_state_string(ssl));
@@ -182,7 +182,7 @@ static void ssl_log_where_info(const SSL *ssl, int where, int flag,
}
/* Used for debugging. TODO(jboeuf): Remove when code is mature enough. */
-static void ssl_info_callback(const SSL *ssl, int where, int ret) {
+static void ssl_info_callback(const SSL* ssl, int where, int ret) {
if (ret == 0) {
gpr_log(GPR_ERROR, "ssl_info_callback: error occured.\n");
return;
@@ -195,7 +195,7 @@ static void ssl_info_callback(const SSL *ssl, int where, int ret) {
/* Returns 1 if name looks like an IP address, 0 otherwise.
This is a very rough heuristic, and only handles IPv6 in hexadecimal form. */
-static int looks_like_ip_address(const char *name) {
+static int looks_like_ip_address(const char* name) {
size_t i;
size_t dot_count = 0;
size_t num_size = 0;
@@ -220,12 +220,12 @@ static int looks_like_ip_address(const char *name) {
}
/* Gets the subject CN from an X509 cert. */
-static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8,
- size_t *utf8_size) {
+static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8,
+ size_t* utf8_size) {
int common_name_index = -1;
- X509_NAME_ENTRY *common_name_entry = NULL;
- ASN1_STRING *common_name_asn1 = NULL;
- X509_NAME *subject_name = X509_get_subject_name(cert);
+ X509_NAME_ENTRY* common_name_entry = NULL;
+ ASN1_STRING* common_name_asn1 = NULL;
+ X509_NAME* subject_name = X509_get_subject_name(cert);
int utf8_returned_size = 0;
if (subject_name == NULL) {
gpr_log(GPR_ERROR, "Could not get subject name from certificate.");
@@ -260,8 +260,8 @@ static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8,
/* Gets the subject CN of an X509 cert as a tsi_peer_property. */
static tsi_result peer_property_from_x509_common_name(
- X509 *cert, tsi_peer_property *property) {
- unsigned char *common_name;
+ X509* cert, tsi_peer_property* property) {
+ unsigned char* common_name;
size_t common_name_size;
tsi_result result =
ssl_get_x509_common_name(cert, &common_name, &common_name_size);
@@ -275,35 +275,34 @@ static tsi_result peer_property_from_x509_common_name(
}
result = tsi_construct_string_peer_property(
TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY,
- common_name == NULL ? "" : (const char *)common_name, common_name_size,
+ common_name == NULL ? "" : (const char*)common_name, common_name_size,
property);
OPENSSL_free(common_name);
return result;
}
/* Gets the X509 cert in PEM format as a tsi_peer_property. */
-static tsi_result add_pem_certificate(X509 *cert, tsi_peer_property *property) {
- BIO *bio = BIO_new(BIO_s_mem());
+static tsi_result add_pem_certificate(X509* cert, tsi_peer_property* property) {
+ BIO* bio = BIO_new(BIO_s_mem());
if (!PEM_write_bio_X509(bio, cert)) {
BIO_free(bio);
return TSI_INTERNAL_ERROR;
}
- char *contents;
+ char* contents;
long len = BIO_get_mem_data(bio, &contents);
if (len <= 0) {
BIO_free(bio);
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
- TSI_X509_PEM_CERT_PROPERTY, (const char *)contents, (size_t)len,
- property);
+ TSI_X509_PEM_CERT_PROPERTY, (const char*)contents, (size_t)len, property);
BIO_free(bio);
return result;
}
/* Gets the subject SANs from an X509 cert as a tsi_peer_property. */
static tsi_result add_subject_alt_names_properties_to_peer(
- tsi_peer *peer, GENERAL_NAMES *subject_alt_names,
+ tsi_peer* peer, GENERAL_NAMES* subject_alt_names,
size_t subject_alt_name_count) {
size_t i;
tsi_result result = TSI_OK;
@@ -312,11 +311,11 @@ static tsi_result add_subject_alt_names_properties_to_peer(
peer->property_count -= subject_alt_name_count;
for (i = 0; i < subject_alt_name_count; i++) {
- GENERAL_NAME *subject_alt_name =
+ GENERAL_NAME* subject_alt_name =
sk_GENERAL_NAME_value(subject_alt_names, TSI_SIZE_AS_SIZE(i));
/* Filter out the non-dns entries names. */
if (subject_alt_name->type == GEN_DNS) {
- unsigned char *name = NULL;
+ unsigned char* name = NULL;
int name_size;
name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.dNSName);
if (name_size < 0) {
@@ -325,7 +324,7 @@ static tsi_result add_subject_alt_names_properties_to_peer(
break;
}
result = tsi_construct_string_peer_property(
- TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, (const char *)name,
+ TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, (const char*)name,
(size_t)name_size, &peer->properties[peer->property_count++]);
OPENSSL_free(name);
} else if (subject_alt_name->type == GEN_IPADD) {
@@ -341,7 +340,7 @@ static tsi_result add_subject_alt_names_properties_to_peer(
result = TSI_INTERNAL_ERROR;
break;
}
- const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data,
+ const char* name = inet_ntop(af, subject_alt_name->d.iPAddress->data,
ntop_buf, INET6_ADDRSTRLEN);
if (name == NULL) {
gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet.");
@@ -359,11 +358,11 @@ static tsi_result add_subject_alt_names_properties_to_peer(
}
/* Gets information about the peer's X509 cert as a tsi_peer object. */
-static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
- tsi_peer *peer) {
+static tsi_result peer_from_x509(X509* cert, int include_certificate_type,
+ tsi_peer* peer) {
/* TODO(jboeuf): Maybe add more properties. */
- GENERAL_NAMES *subject_alt_names =
- (GENERAL_NAMES *)X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
+ GENERAL_NAMES* subject_alt_names =
+ (GENERAL_NAMES*)X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
int subject_alt_name_count = (subject_alt_names != NULL)
? (int)sk_GENERAL_NAME_num(subject_alt_names)
: 0;
@@ -415,8 +414,8 @@ static void log_ssl_error_stack(void) {
}
/* Performs an SSL_read and handle errors. */
-static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size) {
+static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size) {
int read_from_ssl;
GPR_ASSERT(*unprotected_bytes_size <= INT_MAX);
read_from_ssl =
@@ -448,7 +447,7 @@ static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes,
}
/* Performs an SSL_write and handle errors. */
-static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes,
+static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes,
size_t unprotected_bytes_size) {
int ssl_write_result;
GPR_ASSERT(unprotected_bytes_size <= INT_MAX);
@@ -470,18 +469,18 @@ static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes,
}
/* Loads an in-memory PEM certificate chain into the SSL context. */
-static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context,
- const char *pem_cert_chain,
+static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX* context,
+ const char* pem_cert_chain,
size_t pem_cert_chain_size) {
tsi_result result = TSI_OK;
- X509 *certificate = NULL;
- BIO *pem;
+ X509* certificate = NULL;
+ BIO* pem;
GPR_ASSERT(pem_cert_chain_size <= INT_MAX);
- pem = BIO_new_mem_buf((void *)pem_cert_chain, (int)pem_cert_chain_size);
+ pem = BIO_new_mem_buf((void*)pem_cert_chain, (int)pem_cert_chain_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
- certificate = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void *)"");
+ certificate = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void*)"");
if (certificate == NULL) {
result = TSI_INVALID_ARGUMENT;
break;
@@ -491,8 +490,8 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context,
break;
}
while (1) {
- X509 *certificate_authority =
- PEM_read_bio_X509(pem, NULL, NULL, (void *)"");
+ X509* certificate_authority =
+ PEM_read_bio_X509(pem, NULL, NULL, (void*)"");
if (certificate_authority == NULL) {
ERR_clear_error();
break; /* Done reading. */
@@ -514,16 +513,16 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context,
}
/* Loads an in-memory PEM private key into the SSL context. */
-static tsi_result ssl_ctx_use_private_key(SSL_CTX *context, const char *pem_key,
+static tsi_result ssl_ctx_use_private_key(SSL_CTX* context, const char* pem_key,
size_t pem_key_size) {
tsi_result result = TSI_OK;
- EVP_PKEY *private_key = NULL;
- BIO *pem;
+ EVP_PKEY* private_key = NULL;
+ BIO* pem;
GPR_ASSERT(pem_key_size <= INT_MAX);
- pem = BIO_new_mem_buf((void *)pem_key, (int)pem_key_size);
+ pem = BIO_new_mem_buf((void*)pem_key, (int)pem_key_size);
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
do {
- private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, (void *)"");
+ private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, (void*)"");
if (private_key == NULL) {
result = TSI_INVALID_ARGUMENT;
break;
@@ -540,19 +539,19 @@ static tsi_result ssl_ctx_use_private_key(SSL_CTX *context, const char *pem_key,
/* Loads in-memory PEM verification certs into the SSL context and optionally
returns the verification cert names (root_names can be NULL). */
-static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context,
- const char *pem_roots,
+static tsi_result ssl_ctx_load_verification_certs(SSL_CTX* context,
+ const char* pem_roots,
size_t pem_roots_size,
STACK_OF(X509_NAME) *
*root_names) {
tsi_result result = TSI_OK;
size_t num_roots = 0;
- X509 *root = NULL;
- X509_NAME *root_name = NULL;
- BIO *pem;
- X509_STORE *root_store;
+ X509* root = NULL;
+ X509_NAME* root_name = NULL;
+ BIO* pem;
+ X509_STORE* root_store;
GPR_ASSERT(pem_roots_size <= INT_MAX);
- pem = BIO_new_mem_buf((void *)pem_roots, (int)pem_roots_size);
+ pem = BIO_new_mem_buf((void*)pem_roots, (int)pem_roots_size);
root_store = SSL_CTX_get_cert_store(context);
if (root_store == NULL) return TSI_INVALID_ARGUMENT;
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
@@ -562,7 +561,7 @@ static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context,
}
while (1) {
- root = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void *)"");
+ root = PEM_read_bio_X509_AUX(pem, NULL, NULL, (void*)"");
if (root == NULL) {
ERR_clear_error();
break; /* We're at the end of stream. */
@@ -611,8 +610,8 @@ static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context,
/* Populates the SSL context with a private key and a cert chain, and sets the
cipher list and the ephemeral ECDH key. */
static tsi_result populate_ssl_context(
- SSL_CTX *context, const tsi_ssl_pem_key_cert_pair *key_cert_pair,
- const char *cipher_list) {
+ SSL_CTX* context, const tsi_ssl_pem_key_cert_pair* key_cert_pair,
+ const char* cipher_list) {
tsi_result result = TSI_OK;
if (key_cert_pair != NULL) {
if (key_cert_pair->cert_chain != NULL) {
@@ -637,7 +636,7 @@ static tsi_result populate_ssl_context(
return TSI_INVALID_ARGUMENT;
}
{
- EC_KEY *ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+ EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
if (!SSL_CTX_set_tmp_ecdh(context, ecdh)) {
gpr_log(GPR_ERROR, "Could not set ephemeral ECDH key.");
EC_KEY_free(ecdh);
@@ -650,15 +649,15 @@ static tsi_result populate_ssl_context(
}
/* Extracts the CN and the SANs from an X509 cert as a peer object. */
-static tsi_result extract_x509_subject_names_from_pem_cert(const char *pem_cert,
- tsi_peer *peer) {
+static tsi_result extract_x509_subject_names_from_pem_cert(const char* pem_cert,
+ tsi_peer* peer) {
tsi_result result = TSI_OK;
- X509 *cert = NULL;
- BIO *pem;
- pem = BIO_new_mem_buf((void *)pem_cert, (int)strlen(pem_cert));
+ X509* cert = NULL;
+ BIO* pem;
+ pem = BIO_new_mem_buf((void*)pem_cert, (int)strlen(pem_cert));
if (pem == NULL) return TSI_OUT_OF_RESOURCES;
- cert = PEM_read_bio_X509(pem, NULL, NULL, (void *)"");
+ cert = PEM_read_bio_X509(pem, NULL, NULL, (void*)"");
if (cert == NULL) {
gpr_log(GPR_ERROR, "Invalid certificate");
result = TSI_INVALID_ARGUMENT;
@@ -672,10 +671,10 @@ static tsi_result extract_x509_subject_names_from_pem_cert(const char *pem_cert,
/* Builds the alpn protocol name list according to rfc 7301. */
static tsi_result build_alpn_protocol_name_list(
- const char **alpn_protocols, uint16_t num_alpn_protocols,
- unsigned char **protocol_name_list, size_t *protocol_name_list_length) {
+ const char** alpn_protocols, uint16_t num_alpn_protocols,
+ unsigned char** protocol_name_list, size_t* protocol_name_list_length) {
uint16_t i;
- unsigned char *current;
+ unsigned char* current;
*protocol_name_list = NULL;
*protocol_name_list_length = 0;
if (num_alpn_protocols == 0) return TSI_INVALID_ARGUMENT;
@@ -687,7 +686,7 @@ static tsi_result build_alpn_protocol_name_list(
}
*protocol_name_list_length += length + 1;
}
- *protocol_name_list = (unsigned char *)gpr_malloc(*protocol_name_list_length);
+ *protocol_name_list = (unsigned char*)gpr_malloc(*protocol_name_list_length);
if (*protocol_name_list == NULL) return TSI_OUT_OF_RESOURCES;
current = *protocol_name_list;
for (i = 0; i < num_alpn_protocols; i++) {
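
build_alpn_protocol_name_list above follows the RFC 7301 wire format: the list is a concatenation of entries, each a one-byte length prefix followed by the protocol name with no terminator, which is why every name must fit in 255 bytes. A standalone sketch of that encoding follows; build_alpn_list and its buffer sizes are illustrative only.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Returns the number of bytes written, or 0 on error (empty or oversized
   name, or insufficient output space). */
static size_t build_alpn_list(const char** protocols, size_t num_protocols,
                              unsigned char* out, size_t out_capacity) {
  size_t used = 0;
  for (size_t i = 0; i < num_protocols; i++) {
    size_t len = strlen(protocols[i]);
    if (len == 0 || len > 255 || used + 1 + len > out_capacity) return 0;
    out[used++] = (unsigned char)len;      /* one-byte length prefix */
    memcpy(out + used, protocols[i], len); /* protocol name bytes */
    used += len;
  }
  return used;
}

int main(void) {
  const char* protocols[] = {"h2", "http/1.1"};
  unsigned char wire[32];
  size_t n = build_alpn_list(protocols, 2, wire, sizeof(wire));
  printf("%zu bytes\n", n); /* prints 12: (1 + 2) + (1 + 8) */
  return 0;
}
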
@@ -709,18 +708,18 @@ static tsi_result build_alpn_protocol_name_list(
// the server's certificate, but we need to pull it anyway, in case a higher
// layer wants to look at it. In this case the verification may fail, but
// we don't really care.
-static int NullVerifyCallback(int preverify_ok, X509_STORE_CTX *ctx) {
+static int NullVerifyCallback(int preverify_ok, X509_STORE_CTX* ctx) {
return 1;
}
/* --- tsi_frame_protector methods implementation. ---*/
-static tsi_result ssl_protector_protect(tsi_frame_protector *self,
- const unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size) {
- tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
+static tsi_result ssl_protector_protect(tsi_frame_protector* self,
+ const unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size) {
+ tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
int read_from_ssl;
size_t available;
tsi_result result = TSI_OK;
@@ -771,10 +770,10 @@ static tsi_result ssl_protector_protect(tsi_frame_protector *self,
}
static tsi_result ssl_protector_protect_flush(
- tsi_frame_protector *self, unsigned char *protected_output_frames,
- size_t *protected_output_frames_size, size_t *still_pending_size) {
+ tsi_frame_protector* self, unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size, size_t* still_pending_size) {
tsi_result result = TSI_OK;
- tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
+ tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
int read_from_ssl = 0;
int pending;
@@ -804,14 +803,14 @@ static tsi_result ssl_protector_protect_flush(
}
static tsi_result ssl_protector_unprotect(
- tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
- size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size) {
+ tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
+ size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size) {
tsi_result result = TSI_OK;
int written_into_ssl = 0;
size_t output_bytes_size = *unprotected_bytes_size;
size_t output_bytes_offset = 0;
- tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
+ tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
/* First, try to read remaining data from ssl. */
result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size);
@@ -845,22 +844,24 @@ static tsi_result ssl_protector_unprotect(
return result;
}
-static void ssl_protector_destroy(tsi_frame_protector *self) {
- tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self;
+static void ssl_protector_destroy(tsi_frame_protector* self) {
+ tsi_ssl_frame_protector* impl = (tsi_ssl_frame_protector*)self;
if (impl->buffer != NULL) gpr_free(impl->buffer);
if (impl->ssl != NULL) SSL_free(impl->ssl);
gpr_free(self);
}
static const tsi_frame_protector_vtable frame_protector_vtable = {
- ssl_protector_protect, ssl_protector_protect_flush, ssl_protector_unprotect,
+ ssl_protector_protect,
+ ssl_protector_protect_flush,
+ ssl_protector_unprotect,
ssl_protector_destroy,
};
/* --- tsi_server_handshaker_factory methods implementation. --- */
static void tsi_ssl_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory *self) {
+ tsi_ssl_handshaker_factory* self) {
if (self == NULL) return;
if (self->vtable != NULL && self->vtable->destroy != NULL) {
@@ -871,14 +872,14 @@ static void tsi_ssl_handshaker_factory_destroy(
* any memory, it should be free'd here. */
}
-static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref(
- tsi_ssl_handshaker_factory *self) {
+static tsi_ssl_handshaker_factory* tsi_ssl_handshaker_factory_ref(
+ tsi_ssl_handshaker_factory* self) {
if (self == NULL) return NULL;
gpr_refn(&self->refcount, 1);
return self;
}
-static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) {
+static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory* self) {
if (self == NULL) return;
if (gpr_unref(&self->refcount)) {
@@ -891,7 +892,7 @@ static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL};
/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for
* allocating memory for the factory. */
static void tsi_ssl_handshaker_factory_init(
- tsi_ssl_handshaker_factory *factory) {
+ tsi_ssl_handshaker_factory* factory) {
GPR_ASSERT(factory != NULL);
factory->vtable = &handshaker_factory_vtable;
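
The factory code above pairs an intrusive refcount (gpr_refn/gpr_unref) with a per-type destroy hook in a vtable, so the shared unref path can release whichever concrete factory it happens to hold. A standalone sketch of that pattern follows, using C11 atomics in place of the gpr primitives; obj and obj_vtable are illustrative names.

#include <stdatomic.h>
#include <stddef.h>

typedef struct obj obj;
typedef struct {
  void (*destroy)(obj* self); /* concrete type frees its own memory */
} obj_vtable;

struct obj {
  const obj_vtable* vtable;
  atomic_int refcount;
};

static obj* obj_ref(obj* self) {
  if (self != NULL) atomic_fetch_add(&self->refcount, 1);
  return self;
}

static void obj_unref(obj* self) {
  if (self == NULL) return;
  if (atomic_fetch_sub(&self->refcount, 1) == 1) {
    /* Last reference dropped: dispatch to the concrete destructor. */
    if (self->vtable != NULL && self->vtable->destroy != NULL) {
      self->vtable->destroy(self);
    }
  }
}
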
@@ -900,10 +901,10 @@ static void tsi_ssl_handshaker_factory_init(
/* --- tsi_handshaker methods implementation. ---*/
-static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
- unsigned char *bytes,
- size_t *bytes_size) {
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
+ unsigned char* bytes,
+ size_t* bytes_size) {
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
int bytes_read_from_ssl = 0;
if (bytes == NULL || bytes_size == NULL || *bytes_size == 0 ||
*bytes_size > INT_MAX) {
@@ -924,8 +925,8 @@ static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
return BIO_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA;
}
-static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) {
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
if ((impl->result == TSI_HANDSHAKE_IN_PROGRESS) &&
SSL_is_init_finished(impl->ssl)) {
impl->result = TSI_OK;
@@ -934,8 +935,8 @@ static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) {
}
static tsi_result ssl_handshaker_process_bytes_from_peer(
- tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) {
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+ tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
int bytes_written_into_ssl_size = 0;
if (bytes == NULL || bytes_size == 0 || *bytes_size > INT_MAX) {
return TSI_INVALID_ARGUMENT;
@@ -979,13 +980,13 @@ static tsi_result ssl_handshaker_process_bytes_from_peer(
}
}
-static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
- tsi_peer *peer) {
+static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self,
+ tsi_peer* peer) {
tsi_result result = TSI_OK;
- const unsigned char *alpn_selected = NULL;
+ const unsigned char* alpn_selected = NULL;
unsigned int alpn_selected_len;
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
- X509 *peer_cert = SSL_get_peer_certificate(impl->ssl);
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+ X509* peer_cert = SSL_get_peer_certificate(impl->ssl);
if (peer_cert != NULL) {
result = peer_from_x509(peer_cert, 1, peer);
X509_free(peer_cert);
@@ -1001,13 +1002,13 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
}
if (alpn_selected != NULL) {
size_t i;
- tsi_peer_property *new_properties = (tsi_peer_property *)gpr_zalloc(
+ tsi_peer_property* new_properties = (tsi_peer_property*)gpr_zalloc(
sizeof(*new_properties) * (peer->property_count + 1));
for (i = 0; i < peer->property_count; i++) {
new_properties[i] = peer->properties[i];
}
result = tsi_construct_string_peer_property(
- TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char *)alpn_selected,
+ TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char*)alpn_selected,
alpn_selected_len, &new_properties[peer->property_count]);
if (result != TSI_OK) {
gpr_free(new_properties);
@@ -1021,13 +1022,13 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self,
}
static tsi_result ssl_handshaker_create_frame_protector(
- tsi_handshaker *self, size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector) {
+ tsi_handshaker* self, size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector) {
size_t actual_max_output_protected_frame_size =
TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND;
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
- tsi_ssl_frame_protector *protector_impl =
- (tsi_ssl_frame_protector *)gpr_zalloc(sizeof(*protector_impl));
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
+ tsi_ssl_frame_protector* protector_impl =
+ (tsi_ssl_frame_protector*)gpr_zalloc(sizeof(*protector_impl));
if (max_output_protected_frame_size != NULL) {
if (*max_output_protected_frame_size >
@@ -1044,7 +1045,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
protector_impl->buffer_size =
actual_max_output_protected_frame_size - TSI_SSL_MAX_PROTECTION_OVERHEAD;
protector_impl->buffer =
- (unsigned char *)gpr_malloc(protector_impl->buffer_size);
+ (unsigned char*)gpr_malloc(protector_impl->buffer_size);
if (protector_impl->buffer == NULL) {
gpr_log(GPR_ERROR,
"Could not allocated buffer for tsi_ssl_frame_protector.");
@@ -1064,8 +1065,8 @@ static tsi_result ssl_handshaker_create_frame_protector(
return TSI_OK;
}
-static void ssl_handshaker_destroy(tsi_handshaker *self) {
- tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
+static void ssl_handshaker_destroy(tsi_handshaker* self) {
+ tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
tsi_ssl_handshaker_factory_unref(impl->factory_ref);
gpr_free(impl);
@@ -1083,14 +1084,14 @@ static const tsi_handshaker_vtable handshaker_vtable = {
/* --- tsi_ssl_handshaker_factory common methods. --- */
-static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
- const char *server_name_indication,
- tsi_ssl_handshaker_factory *factory,
- tsi_handshaker **handshaker) {
- SSL *ssl = SSL_new(ctx);
- BIO *into_ssl = NULL;
- BIO *from_ssl = NULL;
- tsi_ssl_handshaker *impl = NULL;
+static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client,
+ const char* server_name_indication,
+ tsi_ssl_handshaker_factory* factory,
+ tsi_handshaker** handshaker) {
+ SSL* ssl = SSL_new(ctx);
+ BIO* into_ssl = NULL;
+ BIO* from_ssl = NULL;
+ tsi_ssl_handshaker* impl = NULL;
*handshaker = NULL;
if (ctx == NULL) {
gpr_log(GPR_ERROR, "SSL Context is null. Should never happen.");
@@ -1135,7 +1136,7 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
SSL_set_accept_state(ssl);
}
- impl = (tsi_ssl_handshaker *)gpr_zalloc(sizeof(*impl));
+ impl = (tsi_ssl_handshaker*)gpr_zalloc(sizeof(*impl));
impl->ssl = ssl;
impl->into_ssl = into_ssl;
impl->from_ssl = from_ssl;
@@ -1147,16 +1148,16 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
return TSI_OK;
}
-static int select_protocol_list(const unsigned char **out,
- unsigned char *outlen,
- const unsigned char *client_list,
+static int select_protocol_list(const unsigned char** out,
+ unsigned char* outlen,
+ const unsigned char* client_list,
size_t client_list_len,
- const unsigned char *server_list,
+ const unsigned char* server_list,
size_t server_list_len) {
- const unsigned char *client_current = client_list;
+ const unsigned char* client_current = client_list;
while ((unsigned int)(client_current - client_list) < client_list_len) {
unsigned char client_current_len = *(client_current++);
- const unsigned char *server_current = server_list;
+ const unsigned char* server_current = server_list;
while ((server_current >= server_list) &&
(uintptr_t)(server_current - server_list) < server_list_len) {
unsigned char server_current_len = *(server_current++);
@@ -1176,36 +1177,36 @@ static int select_protocol_list(const unsigned char **out,
/* --- tsi_ssl_client_handshaker_factory methods implementation. --- */
tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
- tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
- tsi_handshaker **handshaker) {
+ tsi_ssl_client_handshaker_factory* self, const char* server_name_indication,
+ tsi_handshaker** handshaker) {
return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication,
&self->base, handshaker);
}
void tsi_ssl_client_handshaker_factory_unref(
- tsi_ssl_client_handshaker_factory *self) {
+ tsi_ssl_client_handshaker_factory* self) {
if (self == NULL) return;
tsi_ssl_handshaker_factory_unref(&self->base);
}
static void tsi_ssl_client_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory *factory) {
+ tsi_ssl_handshaker_factory* factory) {
if (factory == NULL) return;
- tsi_ssl_client_handshaker_factory *self =
- (tsi_ssl_client_handshaker_factory *)factory;
+ tsi_ssl_client_handshaker_factory* self =
+ (tsi_ssl_client_handshaker_factory*)factory;
if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context);
if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list);
gpr_free(self);
}
-static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out,
- unsigned char *outlen,
- const unsigned char *in,
+static int client_handshaker_factory_npn_callback(SSL* ssl, unsigned char** out,
+ unsigned char* outlen,
+ const unsigned char* in,
unsigned int inlen,
- void *arg) {
- tsi_ssl_client_handshaker_factory *factory =
- (tsi_ssl_client_handshaker_factory *)arg;
- return select_protocol_list((const unsigned char **)out, outlen,
+ void* arg) {
+ tsi_ssl_client_handshaker_factory* factory =
+ (tsi_ssl_client_handshaker_factory*)arg;
+ return select_protocol_list((const unsigned char**)out, outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}
@@ -1213,7 +1214,7 @@ static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out,
/* --- tsi_ssl_server_handshaker_factory methods implementation. --- */
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
- tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker) {
+ tsi_ssl_server_handshaker_factory* self, tsi_handshaker** handshaker) {
if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT;
/* Create the handshaker with the first context. We will switch if needed
because of SNI in ssl_server_handshaker_factory_servername_callback. */
@@ -1222,16 +1223,16 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
}
void tsi_ssl_server_handshaker_factory_unref(
- tsi_ssl_server_handshaker_factory *self) {
+ tsi_ssl_server_handshaker_factory* self) {
if (self == NULL) return;
tsi_ssl_handshaker_factory_unref(&self->base);
}
static void tsi_ssl_server_handshaker_factory_destroy(
- tsi_ssl_handshaker_factory *factory) {
+ tsi_ssl_handshaker_factory* factory) {
if (factory == NULL) return;
- tsi_ssl_server_handshaker_factory *self =
- (tsi_ssl_server_handshaker_factory *)factory;
+ tsi_ssl_server_handshaker_factory* self =
+ (tsi_ssl_server_handshaker_factory*)factory;
size_t i;
for (i = 0; i < self->ssl_context_count; i++) {
if (self->ssl_contexts[i] != NULL) {
@@ -1247,10 +1248,10 @@ static void tsi_ssl_server_handshaker_factory_destroy(
gpr_free(self);
}
-static int does_entry_match_name(const char *entry, size_t entry_length,
- const char *name) {
- const char *dot;
- const char *name_subdomain = NULL;
+static int does_entry_match_name(const char* entry, size_t entry_length,
+ const char* name) {
+ const char* dot;
+ const char* name_subdomain = NULL;
size_t name_length = strlen(name);
size_t name_subdomain_length;
if (entry_length == 0) return 0;
@@ -1295,12 +1296,12 @@ static int does_entry_match_name(const char *entry, size_t entry_length,
strncmp(entry, name_subdomain, entry_length) == 0);
}
-static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap,
- void *arg) {
- tsi_ssl_server_handshaker_factory *impl =
- (tsi_ssl_server_handshaker_factory *)arg;
+static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap,
+ void* arg) {
+ tsi_ssl_server_handshaker_factory* impl =
+ (tsi_ssl_server_handshaker_factory*)arg;
size_t i = 0;
- const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ const char* servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (servername == NULL || strlen(servername) == 0) {
return SSL_TLSEXT_ERR_NOACK;
}
@@ -1318,10 +1319,10 @@ static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap,
#if TSI_OPENSSL_ALPN_SUPPORT
static int server_handshaker_factory_alpn_callback(
- SSL *ssl, const unsigned char **out, unsigned char *outlen,
- const unsigned char *in, unsigned int inlen, void *arg) {
- tsi_ssl_server_handshaker_factory *factory =
- (tsi_ssl_server_handshaker_factory *)arg;
+ SSL* ssl, const unsigned char** out, unsigned char* outlen,
+ const unsigned char* in, unsigned int inlen, void* arg) {
+ tsi_ssl_server_handshaker_factory* factory =
+ (tsi_ssl_server_handshaker_factory*)arg;
return select_protocol_list(out, outlen, in, inlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length);
@@ -1329,9 +1330,9 @@ static int server_handshaker_factory_alpn_callback(
#endif /* TSI_OPENSSL_ALPN_SUPPORT */
static int server_handshaker_factory_npn_advertised_callback(
- SSL *ssl, const unsigned char **out, unsigned int *outlen, void *arg) {
- tsi_ssl_server_handshaker_factory *factory =
- (tsi_ssl_server_handshaker_factory *)arg;
+ SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) {
+ tsi_ssl_server_handshaker_factory* factory =
+ (tsi_ssl_server_handshaker_factory*)arg;
*out = factory->alpn_protocol_list;
GPR_ASSERT(factory->alpn_protocol_list_length <= UINT_MAX);
*outlen = (unsigned int)factory->alpn_protocol_list_length;
@@ -1344,12 +1345,12 @@ static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = {
tsi_ssl_client_handshaker_factory_destroy};
tsi_result tsi_create_ssl_client_handshaker_factory(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
- const char *pem_root_certs, const char *cipher_suites,
- const char **alpn_protocols, uint16_t num_alpn_protocols,
- tsi_ssl_client_handshaker_factory **factory) {
- SSL_CTX *ssl_context = NULL;
- tsi_ssl_client_handshaker_factory *impl = NULL;
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pair,
+ const char* pem_root_certs, const char* cipher_suites,
+ const char** alpn_protocols, uint16_t num_alpn_protocols,
+ tsi_ssl_client_handshaker_factory** factory) {
+ SSL_CTX* ssl_context = NULL;
+ tsi_ssl_client_handshaker_factory* impl = NULL;
tsi_result result = TSI_OK;
gpr_once_init(&init_openssl_once, init_openssl);
@@ -1364,7 +1365,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_INVALID_ARGUMENT;
}
- impl = (tsi_ssl_client_handshaker_factory *)gpr_zalloc(sizeof(*impl));
+ impl = (tsi_ssl_client_handshaker_factory*)gpr_zalloc(sizeof(*impl));
tsi_ssl_handshaker_factory_init(&impl->base);
impl->base.vtable = &client_handshaker_factory_vtable;
@@ -1419,11 +1420,11 @@ static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = {
tsi_ssl_server_handshaker_factory_destroy};
tsi_result tsi_create_ssl_server_handshaker_factory(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, const char *pem_client_root_certs,
- int force_client_auth, const char *cipher_suites,
- const char **alpn_protocols, uint16_t num_alpn_protocols,
- tsi_ssl_server_handshaker_factory **factory) {
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs,
+ size_t num_key_cert_pairs, const char* pem_client_root_certs,
+ int force_client_auth, const char* cipher_suites,
+ const char** alpn_protocols, uint16_t num_alpn_protocols,
+ tsi_ssl_server_handshaker_factory** factory) {
return tsi_create_ssl_server_handshaker_factory_ex(
pem_key_cert_pairs, num_key_cert_pairs, pem_client_root_certs,
force_client_auth ? TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
@@ -1432,12 +1433,12 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
}
tsi_result tsi_create_ssl_server_handshaker_factory_ex(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, const char *pem_client_root_certs,
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs,
+ size_t num_key_cert_pairs, const char* pem_client_root_certs,
tsi_client_certificate_request_type client_certificate_request,
- const char *cipher_suites, const char **alpn_protocols,
- uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory **factory) {
- tsi_ssl_server_handshaker_factory *impl = NULL;
+ const char* cipher_suites, const char** alpn_protocols,
+ uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory** factory) {
+ tsi_ssl_server_handshaker_factory* impl = NULL;
tsi_result result = TSI_OK;
size_t i = 0;
@@ -1449,14 +1450,14 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
return TSI_INVALID_ARGUMENT;
}
- impl = (tsi_ssl_server_handshaker_factory *)gpr_zalloc(sizeof(*impl));
+ impl = (tsi_ssl_server_handshaker_factory*)gpr_zalloc(sizeof(*impl));
tsi_ssl_handshaker_factory_init(&impl->base);
impl->base.vtable = &server_handshaker_factory_vtable;
impl->ssl_contexts =
- (SSL_CTX **)gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
+ (SSL_CTX**)gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX*));
impl->ssl_context_x509_subject_names =
- (tsi_peer *)gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
+ (tsi_peer*)gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
tsi_ssl_handshaker_factory_unref(&impl->base);
@@ -1487,7 +1488,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
if (result != TSI_OK) break;
if (pem_client_root_certs != NULL) {
- STACK_OF(X509_NAME) *root_names = NULL;
+ STACK_OF(X509_NAME)* root_names = NULL;
result = ssl_ctx_load_verification_certs(
impl->ssl_contexts[i], pem_client_root_certs,
strlen(pem_client_root_certs), &root_names);
@@ -1552,15 +1553,15 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
/* --- tsi_ssl utils. --- */
-int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
+int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) {
size_t i = 0;
size_t san_count = 0;
- const tsi_peer_property *cn_property = NULL;
+ const tsi_peer_property* cn_property = NULL;
int like_ip = looks_like_ip_address(name);
/* Check the SAN first. */
for (i = 0; i < peer->property_count; i++) {
- const tsi_peer_property *property = &peer->properties[i];
+ const tsi_peer_property* property = &peer->properties[i];
if (property->name == NULL) continue;
if (strcmp(property->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
@@ -1594,13 +1595,13 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
}
/* --- Testing support. --- */
-const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
- tsi_ssl_handshaker_factory *factory,
- tsi_ssl_handshaker_factory_vtable *new_vtable) {
+const tsi_ssl_handshaker_factory_vtable* tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory* factory,
+ tsi_ssl_handshaker_factory_vtable* new_vtable) {
GPR_ASSERT(factory != NULL);
GPR_ASSERT(factory->vtable != NULL);
- const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable;
+ const tsi_ssl_handshaker_factory_vtable* orig_vtable = factory->vtable;
factory->vtable = new_vtable;
return orig_vtable;
}
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index 3abfdf5ed8..595c4ccaec 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -49,11 +49,11 @@ typedef struct tsi_ssl_client_handshaker_factory
typedef struct {
/* private_key is the NULL-terminated string containing the PEM encoding of
the client's private key. */
- const char *private_key;
+ const char* private_key;
/* cert_chain is the NULL-terminated string containing the PEM encoding of
the client's certificate chain. */
- const char *cert_chain;
+ const char* cert_chain;
} tsi_ssl_pem_key_cert_pair;
/* Creates a client handshaker factory.
@@ -78,10 +78,10 @@ typedef struct {
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_client_handshaker_factory(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
- const char *pem_root_certs, const char *cipher_suites,
- const char **alpn_protocols, uint16_t num_alpn_protocols,
- tsi_ssl_client_handshaker_factory **factory);
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pair,
+ const char* pem_root_certs, const char* cipher_suites,
+ const char** alpn_protocols, uint16_t num_alpn_protocols,
+ tsi_ssl_client_handshaker_factory** factory);
/* Creates a client handshaker.
- self is the factory from which the handshaker will be created.
@@ -93,13 +93,13 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
- tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
- tsi_handshaker **handshaker);
+ tsi_ssl_client_handshaker_factory* self, const char* server_name_indication,
+ tsi_handshaker** handshaker);
/* Decrements reference count of the handshaker factory. Handshaker factory will
* be destroyed once no references exist. */
void tsi_ssl_client_handshaker_factory_unref(
- tsi_ssl_client_handshaker_factory *factory);
+ tsi_ssl_client_handshaker_factory* factory);
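/* A minimal usage sketch for the client-side declarations above; `pem_pair`
   and `pem_roots` are placeholders for PEM material supplied by the
   application, and the SNI value is illustrative. */
void example_client_side(const tsi_ssl_pem_key_cert_pair* pem_pair,
                         const char* pem_roots) {
  tsi_ssl_client_handshaker_factory* factory = NULL;
  tsi_handshaker* handshaker = NULL;
  tsi_result result = tsi_create_ssl_client_handshaker_factory(
      pem_pair, pem_roots, NULL /* default cipher suites */,
      NULL /* no ALPN */, 0, &factory);
  if (result == TSI_OK) {
    result = tsi_ssl_client_handshaker_factory_create_handshaker(
        factory, "foo.test.example.com" /* SNI */, &handshaker);
  }
  /* ... drive the handshake with tsi_handshaker_next, then clean up ... */
  if (handshaker != NULL) tsi_handshaker_destroy(handshaker);
  if (factory != NULL) tsi_ssl_client_handshaker_factory_unref(factory);
}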
/* --- tsi_ssl_server_handshaker_factory object ---
@@ -130,11 +130,11 @@ typedef struct tsi_ssl_server_handshaker_factory
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_create_ssl_server_handshaker_factory(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, const char *pem_client_root_certs,
- int force_client_auth, const char *cipher_suites,
- const char **alpn_protocols, uint16_t num_alpn_protocols,
- tsi_ssl_server_handshaker_factory **factory);
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs,
+ size_t num_key_cert_pairs, const char* pem_client_root_certs,
+ int force_client_auth, const char* cipher_suites,
+ const char** alpn_protocols, uint16_t num_alpn_protocols,
+ tsi_ssl_server_handshaker_factory** factory);
/* Same as tsi_create_ssl_server_handshaker_factory method except uses
tsi_client_certificate_request_type to support more ways to handle client
@@ -143,11 +143,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory(
authenticate with an SSL cert. Note that this option is ignored if
pem_client_root_certs is NULL or pem_client_roots_certs_size is 0 */
tsi_result tsi_create_ssl_server_handshaker_factory_ex(
- const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
- size_t num_key_cert_pairs, const char *pem_client_root_certs,
+ const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs,
+ size_t num_key_cert_pairs, const char* pem_client_root_certs,
tsi_client_certificate_request_type client_certificate_request,
- const char *cipher_suites, const char **alpn_protocols,
- uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory **factory);
+ const char* cipher_suites, const char** alpn_protocols,
+ uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory** factory);
/* Creates a server handshaker.
- self is the factory from which the handshaker will be created.
@@ -156,19 +156,19 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
- This method returns TSI_OK on success or TSI_INVALID_ARGUMENT in the case
where a parameter is invalid. */
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
- tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker);
+ tsi_ssl_server_handshaker_factory* self, tsi_handshaker** handshaker);
/* Decrements reference count of the handshaker factory. Handshaker factory will
* be destroyed once no references exist. */
void tsi_ssl_server_handshaker_factory_unref(
- tsi_ssl_server_handshaker_factory *self);
+ tsi_ssl_server_handshaker_factory* self);
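/* A matching server-side sketch for the declarations above; `pairs`,
   `num_pairs` and `client_roots` are placeholders for application-provided
   PEM material, and error handling is trimmed. */
void example_server_side(const tsi_ssl_pem_key_cert_pair* pairs,
                         size_t num_pairs, const char* client_roots) {
  tsi_ssl_server_handshaker_factory* factory = NULL;
  tsi_handshaker* handshaker = NULL;
  tsi_result result = tsi_create_ssl_server_handshaker_factory(
      pairs, num_pairs, client_roots, 1 /* force_client_auth */,
      NULL /* default cipher suites */, NULL /* no ALPN */, 0, &factory);
  if (result == TSI_OK) {
    result = tsi_ssl_server_handshaker_factory_create_handshaker(factory,
                                                                 &handshaker);
  }
  /* ... drive the handshake, then clean up ... */
  if (handshaker != NULL) tsi_handshaker_destroy(handshaker);
  if (factory != NULL) tsi_ssl_server_handshaker_factory_unref(factory);
}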
/* Util that checks that an ssl peer matches a specific name.
Still TODO(jboeuf):
- handle mixed case.
- handle %encoded chars.
- handle public suffix wildcard more strictly (e.g. *.co.uk) */
-int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
+int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name);
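/* A small sketch of checking the peer once a handshake has completed;
   `result` stands in for a tsi_handshaker_result produced elsewhere. */
int example_peer_matches(const tsi_handshaker_result* result,
                         const char* expected_name) {
  tsi_peer peer;
  int ok = 0;
  if (tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK) {
    ok = tsi_ssl_peer_matches_name(&peer, expected_name);
    tsi_peer_destruct(&peer);
  }
  return ok;
}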
/* --- Testing support. ---
@@ -180,7 +180,7 @@ typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
/* Function pointer to handshaker_factory destructor. */
typedef void (*tsi_ssl_handshaker_factory_destructor)(
- tsi_ssl_handshaker_factory *factory);
+ tsi_ssl_handshaker_factory* factory);
/* Virtual table for tsi_ssl_handshaker_factory. */
typedef struct {
@@ -189,9 +189,9 @@ typedef struct {
/* Set destructor of handshaker_factory to new_destructor, returns previous
destructor. */
-const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
- tsi_ssl_handshaker_factory *factory,
- tsi_ssl_handshaker_factory_vtable *new_vtable);
+const tsi_ssl_handshaker_factory_vtable* tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory* factory,
+ tsi_ssl_handshaker_factory_vtable* new_vtable);
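/* A test-only sketch of the vtable swap above: intercept factory destruction
   to observe that it happens, then restore the original vtable. The `destroy`
   member being the vtable's first field is an assumption based on the
   destructor typedef above; adapt to the actual vtable layout. */
static int g_factory_destroyed = 0;
static void counting_destroy(tsi_ssl_handshaker_factory* factory) {
  (void)factory;
  g_factory_destroyed++;
}
void example_swap_vtable(tsi_ssl_handshaker_factory* factory) {
  static tsi_ssl_handshaker_factory_vtable test_vtable = {counting_destroy};
  const tsi_ssl_handshaker_factory_vtable* original =
      tsi_ssl_handshaker_factory_swap_vtable(factory, &test_vtable);
  /* ... exercise the factory under test ... */
  tsi_ssl_handshaker_factory_swap_vtable(
      factory, (tsi_ssl_handshaker_factory_vtable*)original);
}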
#ifdef __cplusplus
}
diff --git a/src/core/tsi/transport_security.cc b/src/core/tsi/transport_security.cc
index 21bd8eba78..78e7be249c 100644
--- a/src/core/tsi/transport_security.cc
+++ b/src/core/tsi/transport_security.cc
@@ -30,7 +30,7 @@ grpc_tracer_flag tsi_tracing_enabled = GRPC_TRACER_INITIALIZER(false, "tsi");
/* --- tsi_result common implementation. --- */
-const char *tsi_result_to_string(tsi_result result) {
+const char* tsi_result_to_string(tsi_result result) {
switch (result) {
case TSI_OK:
return "TSI_OK";
@@ -69,11 +69,11 @@ const char *tsi_result_to_string(tsi_result result) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
- const unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size) {
+tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
+ const unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size) {
if (self == NULL || self->vtable == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL || protected_output_frames == NULL ||
protected_output_frames_size == NULL) {
@@ -86,8 +86,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
}
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector *self, unsigned char *protected_output_frames,
- size_t *protected_output_frames_size, size_t *still_pending_size) {
+ tsi_frame_protector* self, unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size, size_t* still_pending_size) {
if (self == NULL || self->vtable == NULL || protected_output_frames == NULL ||
protected_output_frames_size == NULL || still_pending_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -99,9 +99,9 @@ tsi_result tsi_frame_protector_protect_flush(
}
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
- size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size) {
+ tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
+ size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size) {
if (self == NULL || self->vtable == NULL || protected_frames_bytes == NULL ||
protected_frames_bytes_size == NULL || unprotected_bytes == NULL ||
unprotected_bytes_size == NULL) {
@@ -113,7 +113,7 @@ tsi_result tsi_frame_protector_unprotect(
unprotected_bytes_size);
}
-void tsi_frame_protector_destroy(tsi_frame_protector *self) {
+void tsi_frame_protector_destroy(tsi_frame_protector* self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -122,9 +122,9 @@ void tsi_frame_protector_destroy(tsi_frame_protector *self) {
Calls specific implementation after state/input validation. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
- unsigned char *bytes,
- size_t *bytes_size) {
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
+ unsigned char* bytes,
+ size_t* bytes_size) {
if (self == NULL || self->vtable == NULL || bytes == NULL ||
bytes_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -134,9 +134,9 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
return self->vtable->get_bytes_to_send_to_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
- const unsigned char *bytes,
- size_t *bytes_size) {
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
+ const unsigned char* bytes,
+ size_t* bytes_size) {
if (self == NULL || self->vtable == NULL || bytes == NULL ||
bytes_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -146,14 +146,14 @@ tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
return self->vtable->process_bytes_from_peer(self, bytes, bytes_size);
}
-tsi_result tsi_handshaker_get_result(tsi_handshaker *self) {
+tsi_result tsi_handshaker_get_result(tsi_handshaker* self) {
if (self == NULL || self->vtable == NULL) return TSI_INVALID_ARGUMENT;
if (self->frame_protector_created) return TSI_FAILED_PRECONDITION;
if (self->vtable->get_result == NULL) return TSI_UNIMPLEMENTED;
return self->vtable->get_result(self);
}
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) {
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
if (self == NULL || self->vtable == NULL || peer == NULL) {
return TSI_INVALID_ARGUMENT;
}
@@ -167,8 +167,8 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) {
}
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker *self, size_t *max_protected_frame_size,
- tsi_frame_protector **protector) {
+ tsi_handshaker* self, size_t* max_protected_frame_size,
+ tsi_frame_protector** protector) {
tsi_result result;
if (self == NULL || self->vtable == NULL || protector == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -185,10 +185,10 @@ tsi_result tsi_handshaker_create_frame_protector(
}
tsi_result tsi_handshaker_next(
- tsi_handshaker *self, const unsigned char *received_bytes,
- size_t received_bytes_size, const unsigned char **bytes_to_send,
- size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result,
- tsi_handshaker_on_next_done_cb cb, void *user_data) {
+ tsi_handshaker* self, const unsigned char* received_bytes,
+ size_t received_bytes_size, const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data) {
if (self == NULL || self->vtable == NULL) return TSI_INVALID_ARGUMENT;
if (self->handshaker_result_created) return TSI_FAILED_PRECONDITION;
if (self->vtable->next == NULL) return TSI_UNIMPLEMENTED;
@@ -197,15 +197,15 @@ tsi_result tsi_handshaker_next(
handshaker_result, cb, user_data);
}
-void tsi_handshaker_destroy(tsi_handshaker *self) {
+void tsi_handshaker_destroy(tsi_handshaker* self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
/* --- tsi_handshaker_result implementation. --- */
-tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result *self,
- tsi_peer *peer) {
+tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result* self,
+ tsi_peer* peer) {
if (self == NULL || self->vtable == NULL || peer == NULL) {
return TSI_INVALID_ARGUMENT;
}
@@ -215,8 +215,8 @@ tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result *self,
}
tsi_result tsi_handshaker_result_create_frame_protector(
- const tsi_handshaker_result *self, size_t *max_protected_frame_size,
- tsi_frame_protector **protector) {
+ const tsi_handshaker_result* self, size_t* max_protected_frame_size,
+ tsi_frame_protector** protector) {
if (self == NULL || self->vtable == NULL || protector == NULL) {
return TSI_INVALID_ARGUMENT;
}
@@ -226,8 +226,8 @@ tsi_result tsi_handshaker_result_create_frame_protector(
}
tsi_result tsi_handshaker_result_get_unused_bytes(
- const tsi_handshaker_result *self, const unsigned char **bytes,
- size_t *bytes_size) {
+ const tsi_handshaker_result* self, const unsigned char** bytes,
+ size_t* bytes_size) {
if (self == NULL || self->vtable == NULL || bytes == NULL ||
bytes_size == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -236,7 +236,7 @@ tsi_result tsi_handshaker_result_get_unused_bytes(
return self->vtable->get_unused_bytes(self, bytes, bytes_size);
}
-void tsi_handshaker_result_destroy(tsi_handshaker_result *self) {
+void tsi_handshaker_result_destroy(tsi_handshaker_result* self) {
if (self == NULL) return;
self->vtable->destroy(self);
}
@@ -249,7 +249,7 @@ tsi_peer_property tsi_init_peer_property(void) {
return property;
}
-static void tsi_peer_destroy_list_property(tsi_peer_property *children,
+static void tsi_peer_destroy_list_property(tsi_peer_property* children,
size_t child_count) {
size_t i;
for (i = 0; i < child_count; i++) {
@@ -258,7 +258,7 @@ static void tsi_peer_destroy_list_property(tsi_peer_property *children,
gpr_free(children);
}
-void tsi_peer_property_destruct(tsi_peer_property *property) {
+void tsi_peer_property_destruct(tsi_peer_property* property) {
if (property->name != NULL) {
gpr_free(property->name);
}
@@ -268,7 +268,7 @@ void tsi_peer_property_destruct(tsi_peer_property *property) {
*property = tsi_init_peer_property(); /* Reset everything to 0. */
}
-void tsi_peer_destruct(tsi_peer *self) {
+void tsi_peer_destruct(tsi_peer* self) {
if (self == NULL) return;
if (self->properties != NULL) {
tsi_peer_destroy_list_property(self->properties, self->property_count);
@@ -278,26 +278,26 @@ void tsi_peer_destruct(tsi_peer *self) {
}
tsi_result tsi_construct_allocated_string_peer_property(
- const char *name, size_t value_length, tsi_peer_property *property) {
+ const char* name, size_t value_length, tsi_peer_property* property) {
*property = tsi_init_peer_property();
if (name != NULL) property->name = gpr_strdup(name);
if (value_length > 0) {
- property->value.data = (char *)gpr_zalloc(value_length);
+ property->value.data = (char*)gpr_zalloc(value_length);
property->value.length = value_length;
}
return TSI_OK;
}
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char *name, const char *value, tsi_peer_property *property) {
+ const char* name, const char* value, tsi_peer_property* property) {
return tsi_construct_string_peer_property(name, value, strlen(value),
property);
}
-tsi_result tsi_construct_string_peer_property(const char *name,
- const char *value,
+tsi_result tsi_construct_string_peer_property(const char* name,
+ const char* value,
size_t value_length,
- tsi_peer_property *property) {
+ tsi_peer_property* property) {
tsi_result result = tsi_construct_allocated_string_peer_property(
name, value_length, property);
if (result != TSI_OK) return result;
@@ -307,10 +307,10 @@ tsi_result tsi_construct_string_peer_property(const char *name,
return TSI_OK;
}
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) {
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer) {
memset(peer, 0, sizeof(tsi_peer));
if (property_count > 0) {
- peer->properties = (tsi_peer_property *)gpr_zalloc(
+ peer->properties = (tsi_peer_property*)gpr_zalloc(
property_count * sizeof(tsi_peer_property));
peer->property_count = property_count;
}
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index 3bba38149c..d639f857fe 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -33,52 +33,52 @@ extern grpc_tracer_flag tsi_tracing_enabled;
/* Base for tsi_frame_protector implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*protect)(tsi_frame_protector *self,
- const unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size);
- tsi_result (*protect_flush)(tsi_frame_protector *self,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size,
- size_t *still_pending_size);
- tsi_result (*unprotect)(tsi_frame_protector *self,
- const unsigned char *protected_frames_bytes,
- size_t *protected_frames_bytes_size,
- unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size);
- void (*destroy)(tsi_frame_protector *self);
+ tsi_result (*protect)(tsi_frame_protector* self,
+ const unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size);
+ tsi_result (*protect_flush)(tsi_frame_protector* self,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size,
+ size_t* still_pending_size);
+ tsi_result (*unprotect)(tsi_frame_protector* self,
+ const unsigned char* protected_frames_bytes,
+ size_t* protected_frames_bytes_size,
+ unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size);
+ void (*destroy)(tsi_frame_protector* self);
} tsi_frame_protector_vtable;
struct tsi_frame_protector {
- const tsi_frame_protector_vtable *vtable;
+ const tsi_frame_protector_vtable* vtable;
};
/* Base for tsi_handshaker implementations.
See transport_security_interface.h for documentation. */
typedef struct {
- tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker *self,
- unsigned char *bytes,
- size_t *bytes_size);
- tsi_result (*process_bytes_from_peer)(tsi_handshaker *self,
- const unsigned char *bytes,
- size_t *bytes_size);
- tsi_result (*get_result)(tsi_handshaker *self);
- tsi_result (*extract_peer)(tsi_handshaker *self, tsi_peer *peer);
- tsi_result (*create_frame_protector)(tsi_handshaker *self,
- size_t *max_protected_frame_size,
- tsi_frame_protector **protector);
- void (*destroy)(tsi_handshaker *self);
- tsi_result (*next)(tsi_handshaker *self, const unsigned char *received_bytes,
+ tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker* self,
+ unsigned char* bytes,
+ size_t* bytes_size);
+ tsi_result (*process_bytes_from_peer)(tsi_handshaker* self,
+ const unsigned char* bytes,
+ size_t* bytes_size);
+ tsi_result (*get_result)(tsi_handshaker* self);
+ tsi_result (*extract_peer)(tsi_handshaker* self, tsi_peer* peer);
+ tsi_result (*create_frame_protector)(tsi_handshaker* self,
+ size_t* max_protected_frame_size,
+ tsi_frame_protector** protector);
+ void (*destroy)(tsi_handshaker* self);
+ tsi_result (*next)(tsi_handshaker* self, const unsigned char* received_bytes,
size_t received_bytes_size,
- const unsigned char **bytes_to_send,
- size_t *bytes_to_send_size,
- tsi_handshaker_result **handshaker_result,
- tsi_handshaker_on_next_done_cb cb, void *user_data);
+ const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size,
+ tsi_handshaker_result** handshaker_result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data);
} tsi_handshaker_vtable;
struct tsi_handshaker {
- const tsi_handshaker_vtable *vtable;
+ const tsi_handshaker_vtable* vtable;
bool frame_protector_created;
bool handshaker_result_created;
};
@@ -92,39 +92,39 @@ struct tsi_handshaker {
needs to compile in other applications, where grpc_exec_ctx is not defined.
*/
typedef struct {
- tsi_result (*extract_peer)(const tsi_handshaker_result *self, tsi_peer *peer);
+ tsi_result (*extract_peer)(const tsi_handshaker_result* self, tsi_peer* peer);
tsi_result (*create_zero_copy_grpc_protector)(
- void *exec_ctx, const tsi_handshaker_result *self,
- size_t *max_output_protected_frame_size,
- tsi_zero_copy_grpc_protector **protector);
- tsi_result (*create_frame_protector)(const tsi_handshaker_result *self,
- size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector);
- tsi_result (*get_unused_bytes)(const tsi_handshaker_result *self,
- const unsigned char **bytes,
- size_t *bytes_size);
- void (*destroy)(tsi_handshaker_result *self);
+ void* exec_ctx, const tsi_handshaker_result* self,
+ size_t* max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector** protector);
+ tsi_result (*create_frame_protector)(const tsi_handshaker_result* self,
+ size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector);
+ tsi_result (*get_unused_bytes)(const tsi_handshaker_result* self,
+ const unsigned char** bytes,
+ size_t* bytes_size);
+ void (*destroy)(tsi_handshaker_result* self);
} tsi_handshaker_result_vtable;
struct tsi_handshaker_result {
- const tsi_handshaker_result_vtable *vtable;
+ const tsi_handshaker_result_vtable* vtable;
};
/* Peer and property construction/destruction functions. */
-tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer);
+tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer);
tsi_peer_property tsi_init_peer_property(void);
-void tsi_peer_property_destruct(tsi_peer_property *property);
-tsi_result tsi_construct_string_peer_property(const char *name,
- const char *value,
+void tsi_peer_property_destruct(tsi_peer_property* property);
+tsi_result tsi_construct_string_peer_property(const char* name,
+ const char* value,
size_t value_length,
- tsi_peer_property *property);
+ tsi_peer_property* property);
tsi_result tsi_construct_allocated_string_peer_property(
- const char *name, size_t value_length, tsi_peer_property *property);
+ const char* name, size_t value_length, tsi_peer_property* property);
tsi_result tsi_construct_string_peer_property_from_cstring(
- const char *name, const char *value, tsi_peer_property *property);
+ const char* name, const char* value, tsi_peer_property* property);
/* Utils. */
-char *tsi_strdup(const char *src); /* Sadly, no strdup in C89. */
+char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */
#ifdef __cplusplus
}
diff --git a/src/core/tsi/transport_security_adapter.cc b/src/core/tsi/transport_security_adapter.cc
index e399e42758..ec4e7d8cef 100644
--- a/src/core/tsi/transport_security_adapter.cc
+++ b/src/core/tsi/transport_security_adapter.cc
@@ -30,36 +30,36 @@
typedef struct {
tsi_handshaker_result base;
- tsi_handshaker *wrapped;
- unsigned char *unused_bytes;
+ tsi_handshaker* wrapped;
+ unsigned char* unused_bytes;
size_t unused_bytes_size;
} tsi_adapter_handshaker_result;
-static tsi_result adapter_result_extract_peer(const tsi_handshaker_result *self,
- tsi_peer *peer) {
- tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self;
+static tsi_result adapter_result_extract_peer(const tsi_handshaker_result* self,
+ tsi_peer* peer) {
+ tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self;
return tsi_handshaker_extract_peer(impl->wrapped, peer);
}
static tsi_result adapter_result_create_frame_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector) {
- tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self;
+ const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector) {
+ tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self;
return tsi_handshaker_create_frame_protector(
impl->wrapped, max_output_protected_frame_size, protector);
}
static tsi_result adapter_result_get_unused_bytes(
- const tsi_handshaker_result *self, const unsigned char **bytes,
- size_t *byte_size) {
- tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self;
+ const tsi_handshaker_result* self, const unsigned char** bytes,
+ size_t* byte_size) {
+ tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self;
*bytes = impl->unused_bytes;
*byte_size = impl->unused_bytes_size;
return TSI_OK;
}
-static void adapter_result_destroy(tsi_handshaker_result *self) {
- tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self;
+static void adapter_result_destroy(tsi_handshaker_result* self) {
+ tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self;
tsi_handshaker_destroy(impl->wrapped);
gpr_free(impl->unused_bytes);
gpr_free(self);
@@ -75,18 +75,18 @@ static const tsi_handshaker_result_vtable result_vtable = {
/* Ownership of wrapped tsi_handshaker is transferred to the result object. */
static tsi_result tsi_adapter_create_handshaker_result(
- tsi_handshaker *wrapped, const unsigned char *unused_bytes,
- size_t unused_bytes_size, tsi_handshaker_result **handshaker_result) {
+ tsi_handshaker* wrapped, const unsigned char* unused_bytes,
+ size_t unused_bytes_size, tsi_handshaker_result** handshaker_result) {
if (wrapped == NULL || (unused_bytes_size > 0 && unused_bytes == NULL)) {
return TSI_INVALID_ARGUMENT;
}
- tsi_adapter_handshaker_result *impl =
- (tsi_adapter_handshaker_result *)gpr_zalloc(sizeof(*impl));
+ tsi_adapter_handshaker_result* impl =
+ (tsi_adapter_handshaker_result*)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &result_vtable;
impl->wrapped = wrapped;
impl->unused_bytes_size = unused_bytes_size;
if (unused_bytes_size > 0) {
- impl->unused_bytes = (unsigned char *)gpr_malloc(unused_bytes_size);
+ impl->unused_bytes = (unsigned char*)gpr_malloc(unused_bytes_size);
memcpy(impl->unused_bytes, unused_bytes, unused_bytes_size);
} else {
impl->unused_bytes = NULL;
@@ -99,54 +99,54 @@ static tsi_result tsi_adapter_create_handshaker_result(
typedef struct {
tsi_handshaker base;
- tsi_handshaker *wrapped;
- unsigned char *adapter_buffer;
+ tsi_handshaker* wrapped;
+ unsigned char* adapter_buffer;
size_t adapter_buffer_size;
} tsi_adapter_handshaker;
-static tsi_result adapter_get_bytes_to_send_to_peer(tsi_handshaker *self,
- unsigned char *bytes,
- size_t *bytes_size) {
+static tsi_result adapter_get_bytes_to_send_to_peer(tsi_handshaker* self,
+ unsigned char* bytes,
+ size_t* bytes_size) {
return tsi_handshaker_get_bytes_to_send_to_peer(
tsi_adapter_handshaker_get_wrapped(self), bytes, bytes_size);
}
-static tsi_result adapter_process_bytes_from_peer(tsi_handshaker *self,
- const unsigned char *bytes,
- size_t *bytes_size) {
+static tsi_result adapter_process_bytes_from_peer(tsi_handshaker* self,
+ const unsigned char* bytes,
+ size_t* bytes_size) {
return tsi_handshaker_process_bytes_from_peer(
tsi_adapter_handshaker_get_wrapped(self), bytes, bytes_size);
}
-static tsi_result adapter_get_result(tsi_handshaker *self) {
+static tsi_result adapter_get_result(tsi_handshaker* self) {
return tsi_handshaker_get_result(tsi_adapter_handshaker_get_wrapped(self));
}
-static tsi_result adapter_extract_peer(tsi_handshaker *self, tsi_peer *peer) {
+static tsi_result adapter_extract_peer(tsi_handshaker* self, tsi_peer* peer) {
return tsi_handshaker_extract_peer(tsi_adapter_handshaker_get_wrapped(self),
peer);
}
static tsi_result adapter_create_frame_protector(
- tsi_handshaker *self, size_t *max_protected_frame_size,
- tsi_frame_protector **protector) {
+ tsi_handshaker* self, size_t* max_protected_frame_size,
+ tsi_frame_protector** protector) {
return tsi_handshaker_create_frame_protector(
tsi_adapter_handshaker_get_wrapped(self), max_protected_frame_size,
protector);
}
-static void adapter_destroy(tsi_handshaker *self) {
- tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)self;
+static void adapter_destroy(tsi_handshaker* self) {
+ tsi_adapter_handshaker* impl = (tsi_adapter_handshaker*)self;
tsi_handshaker_destroy(impl->wrapped);
gpr_free(impl->adapter_buffer);
gpr_free(self);
}
static tsi_result adapter_next(
- tsi_handshaker *self, const unsigned char *received_bytes,
- size_t received_bytes_size, const unsigned char **bytes_to_send,
- size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result,
- tsi_handshaker_on_next_done_cb cb, void *user_data) {
+ tsi_handshaker* self, const unsigned char* received_bytes,
+ size_t received_bytes_size, const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data) {
/* Input sanity check. */
if ((received_bytes_size > 0 && received_bytes == NULL) ||
bytes_to_send == NULL || bytes_to_send_size == NULL ||
@@ -155,7 +155,7 @@ static tsi_result adapter_next(
}
/* If there are received bytes, process them first. */
- tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)self;
+ tsi_adapter_handshaker* impl = (tsi_adapter_handshaker*)self;
tsi_result status = TSI_OK;
size_t bytes_consumed = received_bytes_size;
if (received_bytes_size > 0) {
@@ -173,7 +173,7 @@ static tsi_result adapter_next(
offset += to_send_size;
if (status == TSI_INCOMPLETE_DATA) {
impl->adapter_buffer_size *= 2;
- impl->adapter_buffer = (unsigned char *)gpr_realloc(
+ impl->adapter_buffer = (unsigned char*)gpr_realloc(
impl->adapter_buffer, impl->adapter_buffer_size);
}
} while (status == TSI_INCOMPLETE_DATA);
@@ -186,7 +186,7 @@ static tsi_result adapter_next(
*handshaker_result = NULL;
} else {
size_t unused_bytes_size = received_bytes_size - bytes_consumed;
- const unsigned char *unused_bytes =
+ const unsigned char* unused_bytes =
unused_bytes_size == 0 ? NULL : received_bytes + bytes_consumed;
status = tsi_adapter_create_handshaker_result(
impl->wrapped, unused_bytes, unused_bytes_size, handshaker_result);
@@ -208,19 +208,19 @@ static const tsi_handshaker_vtable handshaker_vtable = {
adapter_next,
};
-tsi_handshaker *tsi_create_adapter_handshaker(tsi_handshaker *wrapped) {
+tsi_handshaker* tsi_create_adapter_handshaker(tsi_handshaker* wrapped) {
GPR_ASSERT(wrapped != NULL);
- tsi_adapter_handshaker *impl =
- (tsi_adapter_handshaker *)gpr_zalloc(sizeof(*impl));
+ tsi_adapter_handshaker* impl =
+ (tsi_adapter_handshaker*)gpr_zalloc(sizeof(*impl));
impl->base.vtable = &handshaker_vtable;
impl->wrapped = wrapped;
impl->adapter_buffer_size = TSI_ADAPTER_INITIAL_BUFFER_SIZE;
- impl->adapter_buffer = (unsigned char *)gpr_malloc(impl->adapter_buffer_size);
+ impl->adapter_buffer = (unsigned char*)gpr_malloc(impl->adapter_buffer_size);
return &impl->base;
}
-tsi_handshaker *tsi_adapter_handshaker_get_wrapped(tsi_handshaker *adapter) {
+tsi_handshaker* tsi_adapter_handshaker_get_wrapped(tsi_handshaker* adapter) {
if (adapter == NULL) return NULL;
- tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)adapter;
+ tsi_adapter_handshaker* impl = (tsi_adapter_handshaker*)adapter;
return impl->wrapped;
}
diff --git a/src/core/tsi/transport_security_adapter.h b/src/core/tsi/transport_security_adapter.h
index 02f33d4c1c..232705f02c 100644
--- a/src/core/tsi/transport_security_adapter.h
+++ b/src/core/tsi/transport_security_adapter.h
@@ -33,12 +33,12 @@ extern "C" {
this tsi adapter handshaker is temporary. It will be removed once TSI has
been fully migrated to the new interface.
Ownership of input tsi_handshaker is transferred to this new adapter. */
-tsi_handshaker *tsi_create_adapter_handshaker(tsi_handshaker *wrapped);
+tsi_handshaker* tsi_create_adapter_handshaker(tsi_handshaker* wrapped);
/* Given a tsi adapter handshaker, return the original wrapped handshaker. The
adapter still owns the wrapped handshaker which should not be destroyed by
the caller. */
-tsi_handshaker *tsi_adapter_handshaker_get_wrapped(tsi_handshaker *adapter);
+tsi_handshaker* tsi_adapter_handshaker_get_wrapped(tsi_handshaker* adapter);
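/* Sketch: wrap a legacy handshaker (one implementing only the older
   get_bytes/process_bytes calls) so callers can drive it uniformly through
   tsi_handshaker_next; `legacy` is a placeholder for such a handshaker. */
tsi_handshaker* example_adapt(tsi_handshaker* legacy) {
  tsi_handshaker* adapter = tsi_create_adapter_handshaker(legacy);
  /* The adapter now owns `legacy`; destroying the adapter destroys both. */
  return adapter;
}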
#ifdef __cplusplus
}
diff --git a/src/core/tsi/transport_security_grpc.cc b/src/core/tsi/transport_security_grpc.cc
index affd995230..3c986475c4 100644
--- a/src/core/tsi/transport_security_grpc.cc
+++ b/src/core/tsi/transport_security_grpc.cc
@@ -20,9 +20,9 @@
/* This method creates a tsi_zero_copy_grpc_protector object. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
- size_t *max_output_protected_frame_size,
- tsi_zero_copy_grpc_protector **protector) {
+ grpc_exec_ctx* exec_ctx, const tsi_handshaker_result* self,
+ size_t* max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector** protector) {
if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
protector == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -39,9 +39,9 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
Calls specific implementation after state/input validation. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *unprotected_slices,
- grpc_slice_buffer *protected_slices) {
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* unprotected_slices,
+ grpc_slice_buffer* protected_slices) {
if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
unprotected_slices == NULL || protected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -52,9 +52,9 @@ tsi_result tsi_zero_copy_grpc_protector_protect(
}
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *protected_slices,
- grpc_slice_buffer *unprotected_slices) {
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* protected_slices,
+ grpc_slice_buffer* unprotected_slices) {
if (exec_ctx == NULL || self == NULL || self->vtable == NULL ||
protected_slices == NULL || unprotected_slices == NULL) {
return TSI_INVALID_ARGUMENT;
@@ -64,8 +64,8 @@ tsi_result tsi_zero_copy_grpc_protector_unprotect(
unprotected_slices);
}
-void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
- tsi_zero_copy_grpc_protector *self) {
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx* exec_ctx,
+ tsi_zero_copy_grpc_protector* self) {
if (self == NULL) return;
self->vtable->destroy(exec_ctx, self);
}
diff --git a/src/core/tsi/transport_security_grpc.h b/src/core/tsi/transport_security_grpc.h
index ca6755c12f..1c54693ec9 100644
--- a/src/core/tsi/transport_security_grpc.h
+++ b/src/core/tsi/transport_security_grpc.h
@@ -30,9 +30,9 @@ extern "C" {
assuming there is no fatal error.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self,
- size_t *max_output_protected_frame_size,
- tsi_zero_copy_grpc_protector **protector);
+ grpc_exec_ctx* exec_ctx, const tsi_handshaker_result* self,
+ size_t* max_output_protected_frame_size,
+ tsi_zero_copy_grpc_protector** protector);
/* -- tsi_zero_copy_grpc_protector object -- */
@@ -43,8 +43,8 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
- This method returns TSI_OK in case of success or a specific error code in
case of failure. */
tsi_result tsi_zero_copy_grpc_protector_protect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices);
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* unprotected_slices, grpc_slice_buffer* protected_slices);
/* Outputs unprotected bytes.
- protected_slices is the bytes of protected frames.
@@ -53,28 +53,28 @@ tsi_result tsi_zero_copy_grpc_protector_protect(
there is not enough data to output in which case unprotected_slices has 0
bytes. */
tsi_result tsi_zero_copy_grpc_protector_unprotect(
- grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices);
+ grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* protected_slices, grpc_slice_buffer* unprotected_slices);
/* Destroys the tsi_zero_copy_grpc_protector object. */
-void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx,
- tsi_zero_copy_grpc_protector *self);
+void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx* exec_ctx,
+ tsi_zero_copy_grpc_protector* self);
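/* A sketch of protecting slices with a zero-copy protector obtained from a
   completed handshake; `result` stands in for a tsi_handshaker_result
   produced elsewhere, error handling is trimmed, and NULL is passed to let
   the implementation pick its default frame size. */
void example_zero_copy_protect(grpc_exec_ctx* exec_ctx,
                               const tsi_handshaker_result* result,
                               grpc_slice_buffer* unprotected) {
  tsi_zero_copy_grpc_protector* protector = NULL;
  grpc_slice_buffer protected_slices;
  grpc_slice_buffer_init(&protected_slices);
  if (tsi_handshaker_result_create_zero_copy_grpc_protector(
          exec_ctx, result, NULL /* default max frame size */, &protector) ==
      TSI_OK) {
    tsi_zero_copy_grpc_protector_protect(exec_ctx, protector, unprotected,
                                         &protected_slices);
    /* ... hand protected_slices to the transport ... */
    tsi_zero_copy_grpc_protector_destroy(exec_ctx, protector);
  }
  grpc_slice_buffer_destroy(&protected_slices);
}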
/* Base for tsi_zero_copy_grpc_protector implementations. */
typedef struct {
- tsi_result (*protect)(grpc_exec_ctx *exec_ctx,
- tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *unprotected_slices,
- grpc_slice_buffer *protected_slices);
- tsi_result (*unprotect)(grpc_exec_ctx *exec_ctx,
- tsi_zero_copy_grpc_protector *self,
- grpc_slice_buffer *protected_slices,
- grpc_slice_buffer *unprotected_slices);
- void (*destroy)(grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self);
+ tsi_result (*protect)(grpc_exec_ctx* exec_ctx,
+ tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* unprotected_slices,
+ grpc_slice_buffer* protected_slices);
+ tsi_result (*unprotect)(grpc_exec_ctx* exec_ctx,
+ tsi_zero_copy_grpc_protector* self,
+ grpc_slice_buffer* protected_slices,
+ grpc_slice_buffer* unprotected_slices);
+ void (*destroy)(grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self);
} tsi_zero_copy_grpc_protector_vtable;
struct tsi_zero_copy_grpc_protector {
- const tsi_zero_copy_grpc_protector_vtable *vtable;
+ const tsi_zero_copy_grpc_protector_vtable* vtable;
};
#ifdef __cplusplus
diff --git a/src/core/tsi/transport_security_interface.h b/src/core/tsi/transport_security_interface.h
index 80c426bbdb..54942a6b2a 100644
--- a/src/core/tsi/transport_security_interface.h
+++ b/src/core/tsi/transport_security_interface.h
@@ -56,7 +56,7 @@ typedef enum {
TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY,
} tsi_client_certificate_request_type;
-const char *tsi_result_to_string(tsi_result result);
+const char* tsi_result_to_string(tsi_result result);
/* --- tsi tracing --- */
@@ -131,11 +131,11 @@ typedef struct tsi_frame_protector tsi_frame_protector;
if (result != TSI_OK) HandleError(result);
------------------------------------------------------------------------ */
-tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
- const unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size,
- unsigned char *protected_output_frames,
- size_t *protected_output_frames_size);
+tsi_result tsi_frame_protector_protect(tsi_frame_protector* self,
+ const unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size,
+ unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size);
/* Indicates that we need to flush the bytes buffered in the protector and get
the resulting frame.
@@ -146,8 +146,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector *self,
- still_pending_size is an output parameter indicating the number of bytes
that still need to be flushed from the protector.*/
tsi_result tsi_frame_protector_protect_flush(
- tsi_frame_protector *self, unsigned char *protected_output_frames,
- size_t *protected_output_frames_size, size_t *still_pending_size);
+ tsi_frame_protector* self, unsigned char* protected_output_frames,
+ size_t* protected_output_frames_size, size_t* still_pending_size);
/* Outputs unprotected bytes.
- protected_frames_bytes is an input only parameter and points to the
@@ -172,12 +172,12 @@ tsi_result tsi_frame_protector_protect_flush(
needs to be read before new protected data can be processed in which case
protected_frames_size will be set to 0. */
tsi_result tsi_frame_protector_unprotect(
- tsi_frame_protector *self, const unsigned char *protected_frames_bytes,
- size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes,
- size_t *unprotected_bytes_size);
+ tsi_frame_protector* self, const unsigned char* protected_frames_bytes,
+ size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes,
+ size_t* unprotected_bytes_size);
/* Destroys the tsi_frame_protector object. */
-void tsi_frame_protector_destroy(tsi_frame_protector *self);
+void tsi_frame_protector_destroy(tsi_frame_protector* self);
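/* A trimmed sketch of the protect/flush pair above: push plaintext through
   the protector, then flush whatever it buffered. The frame buffer size is
   illustrative, and a real caller must loop on the in/out size parameters as
   documented, resending partially consumed input. */
void example_protect(tsi_frame_protector* protector,
                     const unsigned char* plaintext, size_t plaintext_size) {
  unsigned char frame[1024];
  size_t in_size = plaintext_size;
  size_t out_size = sizeof(frame);
  size_t still_pending = 0;
  tsi_frame_protector_protect(protector, plaintext, &in_size, frame,
                              &out_size);
  /* ... send out_size bytes of frame; in_size now holds the bytes consumed ... */
  do {
    out_size = sizeof(frame);
    tsi_frame_protector_protect_flush(protector, frame, &out_size,
                                      &still_pending);
    /* ... send out_size bytes of frame ... */
  } while (still_pending > 0);
}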
/* --- tsi_peer objects ---
@@ -189,20 +189,20 @@ void tsi_frame_protector_destroy(tsi_frame_protector *self);
/* Property values may contain NULL characters just like C++ strings.
The length field gives the length of the string. */
typedef struct tsi_peer_property {
- char *name;
+ char* name;
struct {
- char *data;
+ char* data;
size_t length;
} value;
} tsi_peer_property;
typedef struct {
- tsi_peer_property *properties;
+ tsi_peer_property* properties;
size_t property_count;
} tsi_peer;
/* Destructs the tsi_peer object. */
-void tsi_peer_destruct(tsi_peer *self);
+void tsi_peer_destruct(tsi_peer* self);
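/* Sketch: property values carry an explicit length and need not be
   NUL-terminated, so print them with a bounded format (assumes <stdio.h>). */
void example_dump_peer(const tsi_peer* peer) {
  size_t i;
  for (i = 0; i < peer->property_count; i++) {
    const tsi_peer_property* prop = &peer->properties[i];
    printf("%s: %.*s\n", prop->name, (int)prop->value.length,
           prop->value.data);
  }
}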
/* --- tsi_handshaker_result object ---
@@ -215,27 +215,27 @@ typedef struct tsi_handshaker_result tsi_handshaker_result;
/* This method extracts tsi peer. It returns TSI_OK assuming there is no fatal
error.
The caller is responsible for destructing the peer. */
-tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result *self,
- tsi_peer *peer);
+tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result* self,
+ tsi_peer* peer);
/* This method creates a tsi_frame_protector object. It returns TSI_OK assuming
there is no fatal error.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_result_create_frame_protector(
- const tsi_handshaker_result *self, size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector);
+ const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector);
/* This method returns the unused bytes from the handshake. It returns TSI_OK
assuming there is no fatal error.
Ownership of the bytes is retained by the handshaker result. As a
consequence, the caller must not free the bytes. */
tsi_result tsi_handshaker_result_get_unused_bytes(
- const tsi_handshaker_result *self, const unsigned char **bytes,
- size_t *byte_size);
+ const tsi_handshaker_result* self, const unsigned char** bytes,
+ size_t* byte_size);
/* This method releases the tsi_handshaker_result object. After this method
is called, no other method can be called on the object. */
-void tsi_handshaker_result_destroy(tsi_handshaker_result *self);
+void tsi_handshaker_result_destroy(tsi_handshaker_result* self);
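/* A sketch of consuming a handshaker result once the handshake has finished:
   extract and check the peer, then build a frame protector. Error handling is
   trimmed, and NULL requests the implementation's default max frame size. */
void example_consume_result(tsi_handshaker_result* result) {
  tsi_peer peer;
  tsi_frame_protector* protector = NULL;
  if (tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK) {
    /* ... run security checks on `peer` ... */
    tsi_peer_destruct(&peer);
  }
  tsi_handshaker_result_create_frame_protector(
      result, NULL /* default max frame size */, &protector);
  /* ... use the protector for the connection, then release both objects ... */
  if (protector != NULL) tsi_frame_protector_destroy(protector);
  tsi_handshaker_result_destroy(result);
}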
/* --- tsi_handshaker objects ----
@@ -346,9 +346,9 @@ typedef struct tsi_handshaker tsi_handshaker;
needs to be called again to get all the bytes to send to the peer (there
was more data to write than the specified bytes_size). In case of a fatal
error in the handshake, another specific error code is returned. */
-tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
- unsigned char *bytes,
- size_t *bytes_size);
+tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
+ unsigned char* bytes,
+ size_t* bytes_size);
/* TO BE DEPRECATED SOON. Use tsi_handshaker_next instead.
Processes bytes received from the peer.
@@ -360,9 +360,9 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
needs to be called again to complete the data needed for processing. In
case of a fatal error in the handshake, another specific error code is
returned. */
-tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
- const unsigned char *bytes,
- size_t *bytes_size);
+tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self,
+ const unsigned char* bytes,
+ size_t* bytes_size);
/* TO BE DEPRECATED SOON.
Gets the result of the handshaker.
@@ -370,7 +370,7 @@ tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self,
errors. Returns TSI_HANDSHAKE_IN_PROGRESS if the handshaker is not done yet
but no error has been encountered so far. Otherwise the handshaker failed
with the returned error. */
-tsi_result tsi_handshaker_get_result(tsi_handshaker *self);
+tsi_result tsi_handshaker_get_result(tsi_handshaker* self);
/* TO BE DEPRECATED SOON.
Returns 1 if the handshake is in progress, 0 otherwise. */
@@ -382,7 +382,7 @@ tsi_result tsi_handshaker_get_result(tsi_handshaker *self);
tsi_handshaker_is_in_progress returns 1, it returns TSI_OK otherwise
assuming the handshaker is not in a fatal error state.
The caller is responsible for destructing the peer. */
-tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer);
+tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer);
/* TO BE DEPRECATED SOON. Use tsi_handshaker_result_create_frame_protector
instead.
@@ -403,8 +403,8 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer);
the handshaker is not in a fatal error state.
The caller is responsible for destroying the protector. */
tsi_result tsi_handshaker_create_frame_protector(
- tsi_handshaker *self, size_t *max_output_protected_frame_size,
- tsi_frame_protector **protector);
+ tsi_handshaker* self, size_t* max_output_protected_frame_size,
+ tsi_frame_protector** protector);
/* Callback function definition for tsi_handshaker_next.
- status indicates the status of the next operation.
@@ -414,8 +414,8 @@ tsi_result tsi_handshaker_create_frame_protector(
- handshaker_result is the result of handshake when the handshake completes,
is NULL otherwise. */
typedef void (*tsi_handshaker_on_next_done_cb)(
- tsi_result status, void *user_data, const unsigned char *bytes_to_send,
- size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result);
+ tsi_result status, void* user_data, const unsigned char* bytes_to_send,
+ size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result);
/* Conduct a next step of the handshake.
- received_bytes is the buffer containing the data received from the peer.
@@ -437,14 +437,14 @@ typedef void (*tsi_handshaker_on_next_done_cb)(
the caller should not free bytes_to_send, as the buffer is owned by the
tsi_handshaker object. */
tsi_result tsi_handshaker_next(
- tsi_handshaker *self, const unsigned char *received_bytes,
- size_t received_bytes_size, const unsigned char **bytes_to_send,
- size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result,
- tsi_handshaker_on_next_done_cb cb, void *user_data);
+ tsi_handshaker* self, const unsigned char* received_bytes,
+ size_t received_bytes_size, const unsigned char** bytes_to_send,
+ size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result,
+ tsi_handshaker_on_next_done_cb cb, void* user_data);
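/* A sketch of one step of the next-based handshake loop. Whether the call
   completes inline or invokes the callback later depends on the handshaker
   implementation, so a real caller must handle both paths; the helper names
   here are illustrative. */
static void on_next_done(tsi_result status, void* user_data,
                         const unsigned char* bytes_to_send,
                         size_t bytes_to_send_size,
                         tsi_handshaker_result* handshaker_result) {
  /* ... write bytes_to_send to the wire (do not free it); a non-NULL
     handshaker_result means the handshake is complete ... */
}
void example_next_step(tsi_handshaker* handshaker,
                       const unsigned char* received, size_t received_size) {
  const unsigned char* to_send = NULL;
  size_t to_send_size = 0;
  tsi_handshaker_result* result = NULL;
  tsi_result status =
      tsi_handshaker_next(handshaker, received, received_size, &to_send,
                          &to_send_size, &result, on_next_done, NULL);
  if (status != TSI_OK) {
    /* A non-OK status may indicate asynchronous completion (reported via the
       callback) or a handshake error. */
  }
}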
/* This method releases the tsi_handshaker object. After this method is called,
no other method can be called on the object. */
-void tsi_handshaker_destroy(tsi_handshaker *self);
+void tsi_handshaker_destroy(tsi_handshaker* self);
/* This method initializes the necessary shared objects used for tsi
implementation. */